1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/FileSystem.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include <cassert>
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "attributor"
42 
43 static cl::opt<bool> ManifestInternal(
44     "attributor-manifest-internal", cl::Hidden,
45     cl::desc("Manifest Attributor internal string attributes."),
46     cl::init(false));
47 
48 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
49                                        cl::Hidden);
50 
51 template <>
52 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
53 
54 static cl::opt<unsigned, true> MaxPotentialValues(
55     "attributor-max-potential-values", cl::Hidden,
56     cl::desc("Maximum number of potential values to be "
57              "tracked for each position."),
58     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
59     cl::init(7));
60 
61 STATISTIC(NumAAs, "Number of abstract attributes created");
62 
63 // Some helper macros to deal with statistics tracking.
64 //
65 // Usage:
66 // For simple IR attribute tracking overload trackStatistics in the abstract
67 // attribute and choose the right STATS_DECLTRACK_********* macro,
68 // e.g.,:
69 //  void trackStatistics() const override {
70 //    STATS_DECLTRACK_ARG_ATTR(returned)
71 //  }
72 // If there is a single "increment" side one can use the macro
73 // STATS_DECLTRACK with a custom message. If there are multiple increment
74 // sides, STATS_DECL and STATS_TRACK can also be used separately.
75 //
76 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
77   ("Number of " #TYPE " marked '" #NAME "'")
78 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
79 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
80 #define STATS_DECL(NAME, TYPE, MSG)                                            \
81   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
82 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
83 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
84   {                                                                            \
85     STATS_DECL(NAME, TYPE, MSG)                                                \
86     STATS_TRACK(NAME, TYPE)                                                    \
87   }
88 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
89   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
90 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
91   STATS_DECLTRACK(NAME, CSArguments,                                           \
92                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
93 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
94   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
95 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
96   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
97 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
99                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
100 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
101   STATS_DECLTRACK(NAME, CSReturn,                                              \
102                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
103 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
104   STATS_DECLTRACK(NAME, Floating,                                              \
105                   ("Number of floating values known to be '" #NAME "'"))
106 
107 // Specialization of the operator<< for abstract attributes subclasses. This
108 // disambiguates situations where multiple operators are applicable.
109 namespace llvm {
110 #define PIPE_OPERATOR(CLASS)                                                   \
111   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
112     return OS << static_cast<const AbstractAttribute &>(AA);                   \
113   }
114 
115 PIPE_OPERATOR(AAIsDead)
116 PIPE_OPERATOR(AANoUnwind)
117 PIPE_OPERATOR(AANoSync)
118 PIPE_OPERATOR(AANoRecurse)
119 PIPE_OPERATOR(AAWillReturn)
120 PIPE_OPERATOR(AANoReturn)
121 PIPE_OPERATOR(AAReturnedValues)
122 PIPE_OPERATOR(AANonNull)
123 PIPE_OPERATOR(AANoAlias)
124 PIPE_OPERATOR(AADereferenceable)
125 PIPE_OPERATOR(AAAlign)
126 PIPE_OPERATOR(AANoCapture)
127 PIPE_OPERATOR(AAValueSimplify)
128 PIPE_OPERATOR(AANoFree)
129 PIPE_OPERATOR(AAHeapToStack)
130 PIPE_OPERATOR(AAReachability)
131 PIPE_OPERATOR(AAMemoryBehavior)
132 PIPE_OPERATOR(AAMemoryLocation)
133 PIPE_OPERATOR(AAValueConstantRange)
134 PIPE_OPERATOR(AAPrivatizablePtr)
135 PIPE_OPERATOR(AAUndefinedBehavior)
136 PIPE_OPERATOR(AAPotentialValues)
137 PIPE_OPERATOR(AANoUndef)
138 PIPE_OPERATOR(AACallEdges)
139 
140 #undef PIPE_OPERATOR
141 } // namespace llvm
142 
143 namespace {
144 
145 /// Get pointer operand of memory accessing instruction. If \p I is
146 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
147 /// is set to false and the instruction is volatile, return nullptr.
148 static const Value *getPointerOperand(const Instruction *I,
149                                       bool AllowVolatile) {
150   if (!AllowVolatile && I->isVolatile())
151     return nullptr;
152 
153   if (auto *LI = dyn_cast<LoadInst>(I)) {
154     return LI->getPointerOperand();
155   }
156 
157   if (auto *SI = dyn_cast<StoreInst>(I)) {
158     return SI->getPointerOperand();
159   }
160 
161   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
162     return CXI->getPointerOperand();
163   }
164 
165   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
166     return RMWI->getPointerOperand();
167   }
168 
169   return nullptr;
170 }
171 
172 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
173 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
174 /// getelement pointer instructions that traverse the natural type of \p Ptr if
175 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
176 /// through a cast to i8*.
177 ///
178 /// TODO: This could probably live somewhere more prominantly if it doesn't
179 ///       already exist.
180 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
181                                int64_t Offset, IRBuilder<NoFolder> &IRB,
182                                const DataLayout &DL) {
183   assert(Offset >= 0 && "Negative offset not supported yet!");
184   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
185                     << "-bytes as " << *ResTy << "\n");
186 
187   if (Offset) {
188     SmallVector<Value *, 4> Indices;
189     std::string GEPName = Ptr->getName().str() + ".0";
190 
191     // Add 0 index to look through the pointer.
192     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
193            "Offset out of bounds");
194     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
195 
196     Type *Ty = PtrElemTy;
197     do {
198       auto *STy = dyn_cast<StructType>(Ty);
199       if (!STy)
200         // Non-aggregate type, we cast and make byte-wise progress now.
201         break;
202 
203       const StructLayout *SL = DL.getStructLayout(STy);
204       if (int64_t(SL->getSizeInBytes()) < Offset)
205         break;
206 
207       uint64_t Idx = SL->getElementContainingOffset(Offset);
208       assert(Idx < STy->getNumElements() && "Offset calculation error!");
209       uint64_t Rem = Offset - SL->getElementOffset(Idx);
210       Ty = STy->getElementType(Idx);
211 
212       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
213                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
214 
215       GEPName += "." + std::to_string(Idx);
216       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
217       Offset = Rem;
218     } while (Offset);
219 
220     // Create a GEP for the indices collected above.
221     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
222 
223     // If an offset is left we use byte-wise adjustment.
224     if (Offset) {
225       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
226       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
227                           GEPName + ".b" + Twine(Offset));
228     }
229   }
230 
231   // Ensure the result has the requested type.
232   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
233 
234   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
235   return Ptr;
236 }
237 
238 /// Recursively visit all values that might become \p IRP at some point. This
239 /// will be done by looking through cast instructions, selects, phis, and calls
240 /// with the "returned" attribute. Once we cannot look through the value any
241 /// further, the callback \p VisitValueCB is invoked and passed the current
242 /// value, the \p State, and a flag to indicate if we stripped anything.
243 /// Stripped means that we unpacked the value associated with \p IRP at least
244 /// once. Note that the value used for the callback may still be the value
245 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
246 /// we will never visit more values than specified by \p MaxValues.
247 template <typename AAType, typename StateTy>
248 static bool genericValueTraversal(
249     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
250     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
251         VisitValueCB,
252     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
253     function_ref<Value *(Value *)> StripCB = nullptr) {
254 
255   const AAIsDead *LivenessAA = nullptr;
256   if (IRP.getAnchorScope())
257     LivenessAA = &A.getAAFor<AAIsDead>(
258         QueryingAA,
259         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
260         DepClassTy::NONE);
261   bool AnyDead = false;
262 
263   using Item = std::pair<Value *, const Instruction *>;
264   SmallSet<Item, 16> Visited;
265   SmallVector<Item, 16> Worklist;
266   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
267 
268   int Iteration = 0;
269   do {
270     Item I = Worklist.pop_back_val();
271     Value *V = I.first;
272     CtxI = I.second;
273     if (StripCB)
274       V = StripCB(V);
275 
276     // Check if we should process the current value. To prevent endless
277     // recursion keep a record of the values we followed!
278     if (!Visited.insert(I).second)
279       continue;
280 
281     // Make sure we limit the compile time for complex expressions.
282     if (Iteration++ >= MaxValues)
283       return false;
284 
285     // Explicitly look through calls with a "returned" attribute if we do
286     // not have a pointer as stripPointerCasts only works on them.
287     Value *NewV = nullptr;
288     if (V->getType()->isPointerTy()) {
289       NewV = V->stripPointerCasts();
290     } else {
291       auto *CB = dyn_cast<CallBase>(V);
292       if (CB && CB->getCalledFunction()) {
293         for (Argument &Arg : CB->getCalledFunction()->args())
294           if (Arg.hasReturnedAttr()) {
295             NewV = CB->getArgOperand(Arg.getArgNo());
296             break;
297           }
298       }
299     }
300     if (NewV && NewV != V) {
301       Worklist.push_back({NewV, CtxI});
302       continue;
303     }
304 
305     // Look through select instructions, visit both potential values.
306     if (auto *SI = dyn_cast<SelectInst>(V)) {
307       Worklist.push_back({SI->getTrueValue(), CtxI});
308       Worklist.push_back({SI->getFalseValue(), CtxI});
309       continue;
310     }
311 
312     // Look through phi nodes, visit all live operands.
313     if (auto *PHI = dyn_cast<PHINode>(V)) {
314       assert(LivenessAA &&
315              "Expected liveness in the presence of instructions!");
316       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
317         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
318         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
319                             LivenessAA,
320                             /* CheckBBLivenessOnly */ true)) {
321           AnyDead = true;
322           continue;
323         }
324         Worklist.push_back(
325             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
326       }
327       continue;
328     }
329 
330     if (UseValueSimplify && !isa<Constant>(V)) {
331       bool UsedAssumedInformation = false;
332       Optional<Constant *> C =
333           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
334       if (!C.hasValue())
335         continue;
336       if (Value *NewV = C.getValue()) {
337         Worklist.push_back({NewV, CtxI});
338         continue;
339       }
340     }
341 
342     // Once a leaf is reached we inform the user through the callback.
343     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
344       return false;
345   } while (!Worklist.empty());
346 
347   // If we actually used liveness information so we have to record a dependence.
348   if (AnyDead)
349     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
350 
351   // All values have been visited.
352   return true;
353 }
354 
355 const Value *stripAndAccumulateMinimalOffsets(
356     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
357     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
358     bool UseAssumed = false) {
359 
360   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
361     const IRPosition &Pos = IRPosition::value(V);
362     // Only track dependence if we are going to use the assumed info.
363     const AAValueConstantRange &ValueConstantRangeAA =
364         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
365                                          UseAssumed ? DepClassTy::OPTIONAL
366                                                     : DepClassTy::NONE);
367     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
368                                      : ValueConstantRangeAA.getKnown();
369     // We can only use the lower part of the range because the upper part can
370     // be higher than what the value can really be.
371     ROffset = Range.getSignedMin();
372     return true;
373   };
374 
375   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
376                                                 AttributorAnalysis);
377 }
378 
379 static const Value *getMinimalBaseOfAccsesPointerOperand(
380     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
381     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
382   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
383   if (!Ptr)
384     return nullptr;
385   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
386   const Value *Base = stripAndAccumulateMinimalOffsets(
387       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
388 
389   BytesOffset = OffsetAPInt.getSExtValue();
390   return Base;
391 }
392 
393 static const Value *
394 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
395                                      const DataLayout &DL,
396                                      bool AllowNonInbounds = false) {
397   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
398   if (!Ptr)
399     return nullptr;
400 
401   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
402                                           AllowNonInbounds);
403 }
404 
405 /// Helper function to clamp a state \p S of type \p StateType with the
406 /// information in \p R and indicate/return if \p S did change (as-in update is
407 /// required to be run again).
408 template <typename StateType>
409 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
410   auto Assumed = S.getAssumed();
411   S ^= R;
412   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
413                                    : ChangeStatus::CHANGED;
414 }
415 
416 /// Clamp the information known for all returned values of a function
417 /// (identified by \p QueryingAA) into \p S.
418 template <typename AAType, typename StateType = typename AAType::StateType>
419 static void clampReturnedValueStates(
420     Attributor &A, const AAType &QueryingAA, StateType &S,
421     const IRPosition::CallBaseContext *CBContext = nullptr) {
422   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
423                     << QueryingAA << " into " << S << "\n");
424 
425   assert((QueryingAA.getIRPosition().getPositionKind() ==
426               IRPosition::IRP_RETURNED ||
427           QueryingAA.getIRPosition().getPositionKind() ==
428               IRPosition::IRP_CALL_SITE_RETURNED) &&
429          "Can only clamp returned value states for a function returned or call "
430          "site returned position!");
431 
432   // Use an optional state as there might not be any return values and we want
433   // to join (IntegerState::operator&) the state of all there are.
434   Optional<StateType> T;
435 
436   // Callback for each possibly returned value.
437   auto CheckReturnValue = [&](Value &RV) -> bool {
438     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
439     const AAType &AA =
440         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
441     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
442                       << " @ " << RVPos << "\n");
443     const StateType &AAS = AA.getState();
444     if (T.hasValue())
445       *T &= AAS;
446     else
447       T = AAS;
448     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
449                       << "\n");
450     return T->isValidState();
451   };
452 
453   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
454     S.indicatePessimisticFixpoint();
455   else if (T.hasValue())
456     S ^= *T;
457 }
458 
459 /// Helper class for generic deduction: return value -> returned position.
460 template <typename AAType, typename BaseType,
461           typename StateType = typename BaseType::StateType,
462           bool PropagateCallBaseContext = false>
463 struct AAReturnedFromReturnedValues : public BaseType {
464   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
465       : BaseType(IRP, A) {}
466 
467   /// See AbstractAttribute::updateImpl(...).
468   ChangeStatus updateImpl(Attributor &A) override {
469     StateType S(StateType::getBestState(this->getState()));
470     clampReturnedValueStates<AAType, StateType>(
471         A, *this, S,
472         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
473     // TODO: If we know we visited all returned values, thus no are assumed
474     // dead, we can take the known information from the state T.
475     return clampStateAndIndicateChange<StateType>(this->getState(), S);
476   }
477 };
478 
479 /// Clamp the information known at all call sites for a given argument
480 /// (identified by \p QueryingAA) into \p S.
481 template <typename AAType, typename StateType = typename AAType::StateType>
482 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
483                                         StateType &S) {
484   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
485                     << QueryingAA << " into " << S << "\n");
486 
487   assert(QueryingAA.getIRPosition().getPositionKind() ==
488              IRPosition::IRP_ARGUMENT &&
489          "Can only clamp call site argument states for an argument position!");
490 
491   // Use an optional state as there might not be any return values and we want
492   // to join (IntegerState::operator&) the state of all there are.
493   Optional<StateType> T;
494 
495   // The argument number which is also the call site argument number.
496   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
497 
498   auto CallSiteCheck = [&](AbstractCallSite ACS) {
499     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
500     // Check if a coresponding argument was found or if it is on not associated
501     // (which can happen for callback calls).
502     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
503       return false;
504 
505     const AAType &AA =
506         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
507     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
508                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
509     const StateType &AAS = AA.getState();
510     if (T.hasValue())
511       *T &= AAS;
512     else
513       T = AAS;
514     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
515                       << "\n");
516     return T->isValidState();
517   };
518 
519   bool AllCallSitesKnown;
520   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
521                               AllCallSitesKnown))
522     S.indicatePessimisticFixpoint();
523   else if (T.hasValue())
524     S ^= *T;
525 }
526 
527 /// This function is the bridge between argument position and the call base
528 /// context.
529 template <typename AAType, typename BaseType,
530           typename StateType = typename AAType::StateType>
531 bool getArgumentStateFromCallBaseContext(Attributor &A,
532                                          BaseType &QueryingAttribute,
533                                          IRPosition &Pos, StateType &State) {
534   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
535          "Expected an 'argument' position !");
536   const CallBase *CBContext = Pos.getCallBaseContext();
537   if (!CBContext)
538     return false;
539 
540   int ArgNo = Pos.getCallSiteArgNo();
541   assert(ArgNo >= 0 && "Invalid Arg No!");
542 
543   const auto &AA = A.getAAFor<AAType>(
544       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
545       DepClassTy::REQUIRED);
546   const StateType &CBArgumentState =
547       static_cast<const StateType &>(AA.getState());
548 
549   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
550                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
551                     << "\n");
552 
553   // NOTE: If we want to do call site grouping it should happen here.
554   State ^= CBArgumentState;
555   return true;
556 }
557 
558 /// Helper class for generic deduction: call site argument -> argument position.
559 template <typename AAType, typename BaseType,
560           typename StateType = typename AAType::StateType,
561           bool BridgeCallBaseContext = false>
562 struct AAArgumentFromCallSiteArguments : public BaseType {
563   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
564       : BaseType(IRP, A) {}
565 
566   /// See AbstractAttribute::updateImpl(...).
567   ChangeStatus updateImpl(Attributor &A) override {
568     StateType S = StateType::getBestState(this->getState());
569 
570     if (BridgeCallBaseContext) {
571       bool Success =
572           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
573               A, *this, this->getIRPosition(), S);
574       if (Success)
575         return clampStateAndIndicateChange<StateType>(this->getState(), S);
576     }
577     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
578 
579     // TODO: If we know we visited all incoming values, thus no are assumed
580     // dead, we can take the known information from the state T.
581     return clampStateAndIndicateChange<StateType>(this->getState(), S);
582   }
583 };
584 
585 /// Helper class for generic replication: function returned -> cs returned.
586 template <typename AAType, typename BaseType,
587           typename StateType = typename BaseType::StateType,
588           bool IntroduceCallBaseContext = false>
589 struct AACallSiteReturnedFromReturned : public BaseType {
590   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
591       : BaseType(IRP, A) {}
592 
593   /// See AbstractAttribute::updateImpl(...).
594   ChangeStatus updateImpl(Attributor &A) override {
595     assert(this->getIRPosition().getPositionKind() ==
596                IRPosition::IRP_CALL_SITE_RETURNED &&
597            "Can only wrap function returned positions for call site returned "
598            "positions!");
599     auto &S = this->getState();
600 
601     const Function *AssociatedFunction =
602         this->getIRPosition().getAssociatedFunction();
603     if (!AssociatedFunction)
604       return S.indicatePessimisticFixpoint();
605 
606     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
607     if (IntroduceCallBaseContext)
608       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
609                         << CBContext << "\n");
610 
611     IRPosition FnPos = IRPosition::returned(
612         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
613     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
614     return clampStateAndIndicateChange(S, AA.getState());
615   }
616 };
617 
618 /// Helper function to accumulate uses.
619 template <class AAType, typename StateType = typename AAType::StateType>
620 static void followUsesInContext(AAType &AA, Attributor &A,
621                                 MustBeExecutedContextExplorer &Explorer,
622                                 const Instruction *CtxI,
623                                 SetVector<const Use *> &Uses,
624                                 StateType &State) {
625   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
626   for (unsigned u = 0; u < Uses.size(); ++u) {
627     const Use *U = Uses[u];
628     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
629       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
630       if (Found && AA.followUseInMBEC(A, U, UserI, State))
631         for (const Use &Us : UserI->uses())
632           Uses.insert(&Us);
633     }
634   }
635 }
636 
637 /// Use the must-be-executed-context around \p I to add information into \p S.
638 /// The AAType class is required to have `followUseInMBEC` method with the
639 /// following signature and behaviour:
640 ///
641 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
642 /// U - Underlying use.
643 /// I - The user of the \p U.
644 /// Returns true if the value should be tracked transitively.
645 ///
646 template <class AAType, typename StateType = typename AAType::StateType>
647 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
648                              Instruction &CtxI) {
649 
650   // Container for (transitive) uses of the associated value.
651   SetVector<const Use *> Uses;
652   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
653     Uses.insert(&U);
654 
655   MustBeExecutedContextExplorer &Explorer =
656       A.getInfoCache().getMustBeExecutedContextExplorer();
657 
658   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
659 
660   if (S.isAtFixpoint())
661     return;
662 
663   SmallVector<const BranchInst *, 4> BrInsts;
664   auto Pred = [&](const Instruction *I) {
665     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
666       if (Br->isConditional())
667         BrInsts.push_back(Br);
668     return true;
669   };
670 
671   // Here, accumulate conditional branch instructions in the context. We
672   // explore the child paths and collect the known states. The disjunction of
673   // those states can be merged to its own state. Let ParentState_i be a state
674   // to indicate the known information for an i-th branch instruction in the
675   // context. ChildStates are created for its successors respectively.
676   //
677   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
678   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
679   //      ...
680   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
681   //
682   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
683   //
684   // FIXME: Currently, recursive branches are not handled. For example, we
685   // can't deduce that ptr must be dereferenced in below function.
686   //
687   // void f(int a, int c, int *ptr) {
688   //    if(a)
689   //      if (b) {
690   //        *ptr = 0;
691   //      } else {
692   //        *ptr = 1;
693   //      }
694   //    else {
695   //      if (b) {
696   //        *ptr = 0;
697   //      } else {
698   //        *ptr = 1;
699   //      }
700   //    }
701   // }
702 
703   Explorer.checkForAllContext(&CtxI, Pred);
704   for (const BranchInst *Br : BrInsts) {
705     StateType ParentState;
706 
707     // The known state of the parent state is a conjunction of children's
708     // known states so it is initialized with a best state.
709     ParentState.indicateOptimisticFixpoint();
710 
711     for (const BasicBlock *BB : Br->successors()) {
712       StateType ChildState;
713 
714       size_t BeforeSize = Uses.size();
715       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
716 
717       // Erase uses which only appear in the child.
718       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
719         It = Uses.erase(It);
720 
721       ParentState &= ChildState;
722     }
723 
724     // Use only known state.
725     S += ParentState;
726   }
727 }
728 
729 /// -----------------------NoUnwind Function Attribute--------------------------
730 
731 struct AANoUnwindImpl : AANoUnwind {
732   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
733 
734   const std::string getAsStr() const override {
735     return getAssumed() ? "nounwind" : "may-unwind";
736   }
737 
738   /// See AbstractAttribute::updateImpl(...).
739   ChangeStatus updateImpl(Attributor &A) override {
740     auto Opcodes = {
741         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
742         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
743         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
744 
745     auto CheckForNoUnwind = [&](Instruction &I) {
746       if (!I.mayThrow())
747         return true;
748 
749       if (const auto *CB = dyn_cast<CallBase>(&I)) {
750         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
751             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
752         return NoUnwindAA.isAssumedNoUnwind();
753       }
754       return false;
755     };
756 
757     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
758       return indicatePessimisticFixpoint();
759 
760     return ChangeStatus::UNCHANGED;
761   }
762 };
763 
764 struct AANoUnwindFunction final : public AANoUnwindImpl {
765   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
766       : AANoUnwindImpl(IRP, A) {}
767 
768   /// See AbstractAttribute::trackStatistics()
769   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
770 };
771 
772 /// NoUnwind attribute deduction for a call sites.
773 struct AANoUnwindCallSite final : AANoUnwindImpl {
774   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
775       : AANoUnwindImpl(IRP, A) {}
776 
777   /// See AbstractAttribute::initialize(...).
778   void initialize(Attributor &A) override {
779     AANoUnwindImpl::initialize(A);
780     Function *F = getAssociatedFunction();
781     if (!F || F->isDeclaration())
782       indicatePessimisticFixpoint();
783   }
784 
785   /// See AbstractAttribute::updateImpl(...).
786   ChangeStatus updateImpl(Attributor &A) override {
787     // TODO: Once we have call site specific value information we can provide
788     //       call site specific liveness information and then it makes
789     //       sense to specialize attributes for call sites arguments instead of
790     //       redirecting requests to the callee argument.
791     Function *F = getAssociatedFunction();
792     const IRPosition &FnPos = IRPosition::function(*F);
793     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
794     return clampStateAndIndicateChange(getState(), FnAA.getState());
795   }
796 
797   /// See AbstractAttribute::trackStatistics()
798   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
799 };
800 
801 /// --------------------- Function Return Values -------------------------------
802 
803 /// "Attribute" that collects all potential returned values and the return
804 /// instructions that they arise from.
805 ///
806 /// If there is a unique returned value R, the manifest method will:
807 ///   - mark R with the "returned" attribute, if R is an argument.
808 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
809 
810   /// Mapping of values potentially returned by the associated function to the
811   /// return instructions that might return them.
812   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
813 
814   /// Mapping to remember the number of returned values for a call site such
815   /// that we can avoid updates if nothing changed.
816   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
817 
818   /// Set of unresolved calls returned by the associated function.
819   SmallSetVector<CallBase *, 4> UnresolvedCalls;
820 
821   /// State flags
822   ///
823   ///{
824   bool IsFixed = false;
825   bool IsValidState = true;
826   ///}
827 
828 public:
829   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
830       : AAReturnedValues(IRP, A) {}
831 
832   /// See AbstractAttribute::initialize(...).
833   void initialize(Attributor &A) override {
834     // Reset the state.
835     IsFixed = false;
836     IsValidState = true;
837     ReturnedValues.clear();
838 
839     Function *F = getAssociatedFunction();
840     if (!F || F->isDeclaration()) {
841       indicatePessimisticFixpoint();
842       return;
843     }
844     assert(!F->getReturnType()->isVoidTy() &&
845            "Did not expect a void return type!");
846 
847     // The map from instruction opcodes to those instructions in the function.
848     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
849 
850     // Look through all arguments, if one is marked as returned we are done.
851     for (Argument &Arg : F->args()) {
852       if (Arg.hasReturnedAttr()) {
853         auto &ReturnInstSet = ReturnedValues[&Arg];
854         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
855           for (Instruction *RI : *Insts)
856             ReturnInstSet.insert(cast<ReturnInst>(RI));
857 
858         indicateOptimisticFixpoint();
859         return;
860       }
861     }
862 
863     if (!A.isFunctionIPOAmendable(*F))
864       indicatePessimisticFixpoint();
865   }
866 
867   /// See AbstractAttribute::manifest(...).
868   ChangeStatus manifest(Attributor &A) override;
869 
870   /// See AbstractAttribute::getState(...).
871   AbstractState &getState() override { return *this; }
872 
873   /// See AbstractAttribute::getState(...).
874   const AbstractState &getState() const override { return *this; }
875 
876   /// See AbstractAttribute::updateImpl(Attributor &A).
877   ChangeStatus updateImpl(Attributor &A) override;
878 
879   llvm::iterator_range<iterator> returned_values() override {
880     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
881   }
882 
883   llvm::iterator_range<const_iterator> returned_values() const override {
884     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
885   }
886 
887   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
888     return UnresolvedCalls;
889   }
890 
891   /// Return the number of potential return values, -1 if unknown.
892   size_t getNumReturnValues() const override {
893     return isValidState() ? ReturnedValues.size() : -1;
894   }
895 
896   /// Return an assumed unique return value if a single candidate is found. If
897   /// there cannot be one, return a nullptr. If it is not clear yet, return the
898   /// Optional::NoneType.
899   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
900 
901   /// See AbstractState::checkForAllReturnedValues(...).
902   bool checkForAllReturnedValuesAndReturnInsts(
903       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
904       const override;
905 
906   /// Pretty print the attribute similar to the IR representation.
907   const std::string getAsStr() const override;
908 
909   /// See AbstractState::isAtFixpoint().
910   bool isAtFixpoint() const override { return IsFixed; }
911 
912   /// See AbstractState::isValidState().
913   bool isValidState() const override { return IsValidState; }
914 
915   /// See AbstractState::indicateOptimisticFixpoint(...).
916   ChangeStatus indicateOptimisticFixpoint() override {
917     IsFixed = true;
918     return ChangeStatus::UNCHANGED;
919   }
920 
921   ChangeStatus indicatePessimisticFixpoint() override {
922     IsFixed = true;
923     IsValidState = false;
924     return ChangeStatus::CHANGED;
925   }
926 };
927 
928 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
929   ChangeStatus Changed = ChangeStatus::UNCHANGED;
930 
931   // Bookkeeping.
932   assert(isValidState());
933   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
934                   "Number of function with known return values");
935 
936   // Check if we have an assumed unique return value that we could manifest.
937   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
938 
939   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
940     return Changed;
941 
942   // Bookkeeping.
943   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
944                   "Number of function with unique return");
945 
946   // Callback to replace the uses of CB with the constant C.
947   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
948     if (CB.use_empty())
949       return ChangeStatus::UNCHANGED;
950     if (A.changeValueAfterManifest(CB, C))
951       return ChangeStatus::CHANGED;
952     return ChangeStatus::UNCHANGED;
953   };
954 
955   // If the assumed unique return value is an argument, annotate it.
956   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
957     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
958             getAssociatedFunction()->getReturnType())) {
959       getIRPosition() = IRPosition::argument(*UniqueRVArg);
960       Changed = IRAttribute::manifest(A);
961     }
962   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
963     // We can replace the returned value with the unique returned constant.
964     Value &AnchorValue = getAnchorValue();
965     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
966       for (const Use &U : F->uses())
967         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
968           if (CB->isCallee(&U)) {
969             Constant *RVCCast =
970                 CB->getType() == RVC->getType()
971                     ? RVC
972                     : ConstantExpr::getPointerCast(RVC, CB->getType());
973             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
974           }
975     } else {
976       assert(isa<CallBase>(AnchorValue) &&
977              "Expcected a function or call base anchor!");
978       Constant *RVCCast =
979           AnchorValue.getType() == RVC->getType()
980               ? RVC
981               : ConstantExpr::getPointerCast(RVC, AnchorValue.getType());
982       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
983     }
984     if (Changed == ChangeStatus::CHANGED)
985       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
986                       "Number of function returns replaced by constant return");
987   }
988 
989   return Changed;
990 }
991 
992 const std::string AAReturnedValuesImpl::getAsStr() const {
993   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
994          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
995          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
996 }
997 
998 Optional<Value *>
999 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1000   // If checkForAllReturnedValues provides a unique value, ignoring potential
1001   // undef values that can also be present, it is assumed to be the actual
1002   // return value and forwarded to the caller of this method. If there are
1003   // multiple, a nullptr is returned indicating there cannot be a unique
1004   // returned value.
1005   Optional<Value *> UniqueRV;
1006 
1007   auto Pred = [&](Value &RV) -> bool {
1008     // If we found a second returned value and neither the current nor the saved
1009     // one is an undef, there is no unique returned value. Undefs are special
1010     // since we can pretend they have any value.
1011     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1012         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1013       UniqueRV = nullptr;
1014       return false;
1015     }
1016 
1017     // Do not overwrite a value with an undef.
1018     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1019       UniqueRV = &RV;
1020 
1021     return true;
1022   };
1023 
1024   if (!A.checkForAllReturnedValues(Pred, *this))
1025     UniqueRV = nullptr;
1026 
1027   return UniqueRV;
1028 }
1029 
1030 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1031     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1032     const {
1033   if (!isValidState())
1034     return false;
1035 
1036   // Check all returned values but ignore call sites as long as we have not
1037   // encountered an overdefined one during an update.
1038   for (auto &It : ReturnedValues) {
1039     Value *RV = It.first;
1040 
1041     CallBase *CB = dyn_cast<CallBase>(RV);
1042     if (CB && !UnresolvedCalls.count(CB))
1043       continue;
1044 
1045     if (!Pred(*RV, It.second))
1046       return false;
1047   }
1048 
1049   return true;
1050 }
1051 
1052 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1053   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1054   bool Changed = false;
1055 
1056   // State used in the value traversals starting in returned values.
1057   struct RVState {
1058     // The map in which we collect return values -> return instrs.
1059     decltype(ReturnedValues) &RetValsMap;
1060     // The flag to indicate a change.
1061     bool &Changed;
1062     // The return instrs we come from.
1063     SmallSetVector<ReturnInst *, 4> RetInsts;
1064   };
1065 
1066   // Callback for a leaf value returned by the associated function.
1067   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1068                          bool) -> bool {
1069     auto Size = RVS.RetValsMap[&Val].size();
1070     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1071     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1072     RVS.Changed |= Inserted;
1073     LLVM_DEBUG({
1074       if (Inserted)
1075         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1076                << " => " << RVS.RetInsts.size() << "\n";
1077     });
1078     return true;
1079   };
1080 
1081   // Helper method to invoke the generic value traversal.
1082   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1083                                 const Instruction *CtxI) {
1084     IRPosition RetValPos = IRPosition::value(RV, getCallBaseContext());
1085     return genericValueTraversal<AAReturnedValues, RVState>(
1086         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1087         /* UseValueSimplify */ false);
1088   };
1089 
1090   // Callback for all "return intructions" live in the associated function.
1091   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1092     ReturnInst &Ret = cast<ReturnInst>(I);
1093     RVState RVS({ReturnedValues, Changed, {}});
1094     RVS.RetInsts.insert(&Ret);
1095     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1096   };
1097 
1098   // Start by discovering returned values from all live returned instructions in
1099   // the associated function.
1100   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1101     return indicatePessimisticFixpoint();
1102 
1103   // Once returned values "directly" present in the code are handled we try to
1104   // resolve returned calls. To avoid modifications to the ReturnedValues map
1105   // while we iterate over it we kept record of potential new entries in a copy
1106   // map, NewRVsMap.
1107   decltype(ReturnedValues) NewRVsMap;
1108 
1109   auto HandleReturnValue = [&](Value *RV,
1110                                SmallSetVector<ReturnInst *, 4> &RIs) {
1111     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1112                       << RIs.size() << " RIs\n");
1113     CallBase *CB = dyn_cast<CallBase>(RV);
1114     if (!CB || UnresolvedCalls.count(CB))
1115       return;
1116 
1117     if (!CB->getCalledFunction()) {
1118       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1119                         << "\n");
1120       UnresolvedCalls.insert(CB);
1121       return;
1122     }
1123 
1124     // TODO: use the function scope once we have call site AAReturnedValues.
1125     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1126         *this, IRPosition::function(*CB->getCalledFunction()),
1127         DepClassTy::REQUIRED);
1128     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1129                       << RetValAA << "\n");
1130 
1131     // Skip dead ends, thus if we do not know anything about the returned
1132     // call we mark it as unresolved and it will stay that way.
1133     if (!RetValAA.getState().isValidState()) {
1134       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1135                         << "\n");
1136       UnresolvedCalls.insert(CB);
1137       return;
1138     }
1139 
1140     // Do not try to learn partial information. If the callee has unresolved
1141     // return values we will treat the call as unresolved/opaque.
1142     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1143     if (!RetValAAUnresolvedCalls.empty()) {
1144       UnresolvedCalls.insert(CB);
1145       return;
1146     }
1147 
1148     // Now check if we can track transitively returned values. If possible, thus
1149     // if all return value can be represented in the current scope, do so.
1150     bool Unresolved = false;
1151     for (auto &RetValAAIt : RetValAA.returned_values()) {
1152       Value *RetVal = RetValAAIt.first;
1153       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1154           isa<Constant>(RetVal))
1155         continue;
1156       // Anything that did not fit in the above categories cannot be resolved,
1157       // mark the call as unresolved.
1158       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1159                            "cannot be translated: "
1160                         << *RetVal << "\n");
1161       UnresolvedCalls.insert(CB);
1162       Unresolved = true;
1163       break;
1164     }
1165 
1166     if (Unresolved)
1167       return;
1168 
1169     // Now track transitively returned values.
1170     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1171     if (NumRetAA == RetValAA.getNumReturnValues()) {
1172       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1173                            "changed since it was seen last\n");
1174       return;
1175     }
1176     NumRetAA = RetValAA.getNumReturnValues();
1177 
1178     for (auto &RetValAAIt : RetValAA.returned_values()) {
1179       Value *RetVal = RetValAAIt.first;
1180       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1181         // Arguments are mapped to call site operands and we begin the traversal
1182         // again.
1183         bool Unused = false;
1184         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1185         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1186         continue;
1187       }
1188       if (isa<CallBase>(RetVal)) {
1189         // Call sites are resolved by the callee attribute over time, no need to
1190         // do anything for us.
1191         continue;
1192       }
1193       if (isa<Constant>(RetVal)) {
1194         // Constants are valid everywhere, we can simply take them.
1195         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1196         continue;
1197       }
1198     }
1199   };
1200 
1201   for (auto &It : ReturnedValues)
1202     HandleReturnValue(It.first, It.second);
1203 
1204   // Because processing the new information can again lead to new return values
1205   // we have to be careful and iterate until this iteration is complete. The
1206   // idea is that we are in a stable state at the end of an update. All return
1207   // values have been handled and properly categorized. We might not update
1208   // again if we have not requested a non-fix attribute so we cannot "wait" for
1209   // the next update to analyze a new return value.
1210   while (!NewRVsMap.empty()) {
1211     auto It = std::move(NewRVsMap.back());
1212     NewRVsMap.pop_back();
1213 
1214     assert(!It.second.empty() && "Entry does not add anything.");
1215     auto &ReturnInsts = ReturnedValues[It.first];
1216     for (ReturnInst *RI : It.second)
1217       if (ReturnInsts.insert(RI)) {
1218         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1219                           << *It.first << " => " << *RI << "\n");
1220         HandleReturnValue(It.first, ReturnInsts);
1221         Changed = true;
1222       }
1223   }
1224 
1225   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1226   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1227 }
1228 
1229 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1230   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1231       : AAReturnedValuesImpl(IRP, A) {}
1232 
1233   /// See AbstractAttribute::trackStatistics()
1234   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1235 };
1236 
1237 /// Returned values information for a call sites.
1238 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1239   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1240       : AAReturnedValuesImpl(IRP, A) {}
1241 
1242   /// See AbstractAttribute::initialize(...).
1243   void initialize(Attributor &A) override {
1244     // TODO: Once we have call site specific value information we can provide
1245     //       call site specific liveness information and then it makes
1246     //       sense to specialize attributes for call sites instead of
1247     //       redirecting requests to the callee.
1248     llvm_unreachable("Abstract attributes for returned values are not "
1249                      "supported for call sites yet!");
1250   }
1251 
1252   /// See AbstractAttribute::updateImpl(...).
1253   ChangeStatus updateImpl(Attributor &A) override {
1254     return indicatePessimisticFixpoint();
1255   }
1256 
1257   /// See AbstractAttribute::trackStatistics()
1258   void trackStatistics() const override {}
1259 };
1260 
1261 /// ------------------------ NoSync Function Attribute -------------------------
1262 
1263 struct AANoSyncImpl : AANoSync {
1264   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1265 
1266   const std::string getAsStr() const override {
1267     return getAssumed() ? "nosync" : "may-sync";
1268   }
1269 
1270   /// See AbstractAttribute::updateImpl(...).
1271   ChangeStatus updateImpl(Attributor &A) override;
1272 
1273   /// Helper function used to determine whether an instruction is non-relaxed
1274   /// atomic. In other words, if an atomic instruction does not have unordered
1275   /// or monotonic ordering
1276   static bool isNonRelaxedAtomic(Instruction *I);
1277 
1278   /// Helper function specific for intrinsics which are potentially volatile
1279   static bool isNoSyncIntrinsic(Instruction *I);
1280 };
1281 
1282 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1283   if (!I->isAtomic())
1284     return false;
1285 
1286   if (auto *FI = dyn_cast<FenceInst>(I))
1287     // All legal orderings for fence are stronger than monotonic.
1288     return FI->getSyncScopeID() != SyncScope::SingleThread;
1289   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1290     // Unordered is not a legal ordering for cmpxchg.
1291     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1292             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1293   }
1294 
1295   AtomicOrdering Ordering;
1296   switch (I->getOpcode()) {
1297   case Instruction::AtomicRMW:
1298     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1299     break;
1300   case Instruction::Store:
1301     Ordering = cast<StoreInst>(I)->getOrdering();
1302     break;
1303   case Instruction::Load:
1304     Ordering = cast<LoadInst>(I)->getOrdering();
1305     break;
1306   default:
1307     llvm_unreachable(
1308         "New atomic operations need to be known in the attributor.");
1309   }
1310 
1311   return (Ordering != AtomicOrdering::Unordered &&
1312           Ordering != AtomicOrdering::Monotonic);
1313 }
1314 
1315 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1316 /// which would be nosync except that they have a volatile flag.  All other
1317 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1318 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1319   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1320     return !MI->isVolatile();
1321   return false;
1322 }
1323 
1324 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1325 
1326   auto CheckRWInstForNoSync = [&](Instruction &I) {
1327     /// We are looking for volatile instructions or Non-Relaxed atomics.
1328 
1329     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1330       if (CB->hasFnAttr(Attribute::NoSync))
1331         return true;
1332 
1333       if (isNoSyncIntrinsic(&I))
1334         return true;
1335 
1336       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1337           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1338       return NoSyncAA.isAssumedNoSync();
1339     }
1340 
1341     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1342       return true;
1343 
1344     return false;
1345   };
1346 
1347   auto CheckForNoSync = [&](Instruction &I) {
1348     // At this point we handled all read/write effects and they are all
1349     // nosync, so they can be skipped.
1350     if (I.mayReadOrWriteMemory())
1351       return true;
1352 
1353     // non-convergent and readnone imply nosync.
1354     return !cast<CallBase>(I).isConvergent();
1355   };
1356 
1357   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1358       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1359     return indicatePessimisticFixpoint();
1360 
1361   return ChangeStatus::UNCHANGED;
1362 }
1363 
1364 struct AANoSyncFunction final : public AANoSyncImpl {
1365   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1366       : AANoSyncImpl(IRP, A) {}
1367 
1368   /// See AbstractAttribute::trackStatistics()
1369   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1370 };
1371 
1372 /// NoSync attribute deduction for a call sites.
1373 struct AANoSyncCallSite final : AANoSyncImpl {
1374   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1375       : AANoSyncImpl(IRP, A) {}
1376 
1377   /// See AbstractAttribute::initialize(...).
1378   void initialize(Attributor &A) override {
1379     AANoSyncImpl::initialize(A);
1380     Function *F = getAssociatedFunction();
1381     if (!F || F->isDeclaration())
1382       indicatePessimisticFixpoint();
1383   }
1384 
1385   /// See AbstractAttribute::updateImpl(...).
1386   ChangeStatus updateImpl(Attributor &A) override {
1387     // TODO: Once we have call site specific value information we can provide
1388     //       call site specific liveness information and then it makes
1389     //       sense to specialize attributes for call sites arguments instead of
1390     //       redirecting requests to the callee argument.
1391     Function *F = getAssociatedFunction();
1392     const IRPosition &FnPos = IRPosition::function(*F);
1393     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1394     return clampStateAndIndicateChange(getState(), FnAA.getState());
1395   }
1396 
1397   /// See AbstractAttribute::trackStatistics()
1398   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1399 };
1400 
1401 /// ------------------------ No-Free Attributes ----------------------------
1402 
1403 struct AANoFreeImpl : public AANoFree {
1404   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1405 
1406   /// See AbstractAttribute::updateImpl(...).
1407   ChangeStatus updateImpl(Attributor &A) override {
1408     auto CheckForNoFree = [&](Instruction &I) {
1409       const auto &CB = cast<CallBase>(I);
1410       if (CB.hasFnAttr(Attribute::NoFree))
1411         return true;
1412 
1413       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1414           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1415       return NoFreeAA.isAssumedNoFree();
1416     };
1417 
1418     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1419       return indicatePessimisticFixpoint();
1420     return ChangeStatus::UNCHANGED;
1421   }
1422 
1423   /// See AbstractAttribute::getAsStr().
1424   const std::string getAsStr() const override {
1425     return getAssumed() ? "nofree" : "may-free";
1426   }
1427 };
1428 
1429 struct AANoFreeFunction final : public AANoFreeImpl {
1430   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1431       : AANoFreeImpl(IRP, A) {}
1432 
1433   /// See AbstractAttribute::trackStatistics()
1434   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1435 };
1436 
1437 /// NoFree attribute deduction for a call sites.
1438 struct AANoFreeCallSite final : AANoFreeImpl {
1439   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1440       : AANoFreeImpl(IRP, A) {}
1441 
1442   /// See AbstractAttribute::initialize(...).
1443   void initialize(Attributor &A) override {
1444     AANoFreeImpl::initialize(A);
1445     Function *F = getAssociatedFunction();
1446     if (!F || F->isDeclaration())
1447       indicatePessimisticFixpoint();
1448   }
1449 
1450   /// See AbstractAttribute::updateImpl(...).
1451   ChangeStatus updateImpl(Attributor &A) override {
1452     // TODO: Once we have call site specific value information we can provide
1453     //       call site specific liveness information and then it makes
1454     //       sense to specialize attributes for call sites arguments instead of
1455     //       redirecting requests to the callee argument.
1456     Function *F = getAssociatedFunction();
1457     const IRPosition &FnPos = IRPosition::function(*F);
1458     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1459     return clampStateAndIndicateChange(getState(), FnAA.getState());
1460   }
1461 
1462   /// See AbstractAttribute::trackStatistics()
1463   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1464 };
1465 
1466 /// NoFree attribute for floating values.
1467 struct AANoFreeFloating : AANoFreeImpl {
1468   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1469       : AANoFreeImpl(IRP, A) {}
1470 
1471   /// See AbstractAttribute::trackStatistics()
1472   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1473 
1474   /// See Abstract Attribute::updateImpl(...).
1475   ChangeStatus updateImpl(Attributor &A) override {
1476     const IRPosition &IRP = getIRPosition();
1477 
1478     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1479         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1480     if (NoFreeAA.isAssumedNoFree())
1481       return ChangeStatus::UNCHANGED;
1482 
1483     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1484     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1485       Instruction *UserI = cast<Instruction>(U.getUser());
1486       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1487         if (CB->isBundleOperand(&U))
1488           return false;
1489         if (!CB->isArgOperand(&U))
1490           return true;
1491         unsigned ArgNo = CB->getArgOperandNo(&U);
1492 
1493         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1494             *this, IRPosition::callsite_argument(*CB, ArgNo),
1495             DepClassTy::REQUIRED);
1496         return NoFreeArg.isAssumedNoFree();
1497       }
1498 
1499       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1500           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1501         Follow = true;
1502         return true;
1503       }
1504       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1505           isa<ReturnInst>(UserI))
1506         return true;
1507 
1508       // Unknown user.
1509       return false;
1510     };
1511     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1512       return indicatePessimisticFixpoint();
1513 
1514     return ChangeStatus::UNCHANGED;
1515   }
1516 };
1517 
1518 /// NoFree attribute for a call site argument.
1519 struct AANoFreeArgument final : AANoFreeFloating {
1520   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1521       : AANoFreeFloating(IRP, A) {}
1522 
1523   /// See AbstractAttribute::trackStatistics()
1524   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1525 };
1526 
1527 /// NoFree attribute for call site arguments.
1528 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1529   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1530       : AANoFreeFloating(IRP, A) {}
1531 
1532   /// See AbstractAttribute::updateImpl(...).
1533   ChangeStatus updateImpl(Attributor &A) override {
1534     // TODO: Once we have call site specific value information we can provide
1535     //       call site specific liveness information and then it makes
1536     //       sense to specialize attributes for call sites arguments instead of
1537     //       redirecting requests to the callee argument.
1538     Argument *Arg = getAssociatedArgument();
1539     if (!Arg)
1540       return indicatePessimisticFixpoint();
1541     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1542     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1543     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1544   }
1545 
1546   /// See AbstractAttribute::trackStatistics()
1547   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1548 };
1549 
1550 /// NoFree attribute for function return value.
1551 struct AANoFreeReturned final : AANoFreeFloating {
1552   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1553       : AANoFreeFloating(IRP, A) {
1554     llvm_unreachable("NoFree is not applicable to function returns!");
1555   }
1556 
1557   /// See AbstractAttribute::initialize(...).
1558   void initialize(Attributor &A) override {
1559     llvm_unreachable("NoFree is not applicable to function returns!");
1560   }
1561 
1562   /// See AbstractAttribute::updateImpl(...).
1563   ChangeStatus updateImpl(Attributor &A) override {
1564     llvm_unreachable("NoFree is not applicable to function returns!");
1565   }
1566 
1567   /// See AbstractAttribute::trackStatistics()
1568   void trackStatistics() const override {}
1569 };
1570 
1571 /// NoFree attribute deduction for a call site return value.
1572 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1573   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1574       : AANoFreeFloating(IRP, A) {}
1575 
1576   ChangeStatus manifest(Attributor &A) override {
1577     return ChangeStatus::UNCHANGED;
1578   }
1579   /// See AbstractAttribute::trackStatistics()
1580   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1581 };
1582 
1583 /// ------------------------ NonNull Argument Attribute ------------------------
1584 static int64_t getKnownNonNullAndDerefBytesForUse(
1585     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1586     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1587   TrackUse = false;
1588 
1589   const Value *UseV = U->get();
1590   if (!UseV->getType()->isPointerTy())
1591     return 0;
1592 
1593   // We need to follow common pointer manipulation uses to the accesses they
1594   // feed into. We can try to be smart to avoid looking through things we do not
1595   // like for now, e.g., non-inbounds GEPs.
1596   if (isa<CastInst>(I)) {
1597     TrackUse = true;
1598     return 0;
1599   }
1600 
1601   if (isa<GetElementPtrInst>(I)) {
1602     TrackUse = true;
1603     return 0;
1604   }
1605 
1606   Type *PtrTy = UseV->getType();
1607   const Function *F = I->getFunction();
1608   bool NullPointerIsDefined =
1609       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1610   const DataLayout &DL = A.getInfoCache().getDL();
1611   if (const auto *CB = dyn_cast<CallBase>(I)) {
1612     if (CB->isBundleOperand(U)) {
1613       if (RetainedKnowledge RK = getKnowledgeFromUse(
1614               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1615         IsNonNull |=
1616             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1617         return RK.ArgValue;
1618       }
1619       return 0;
1620     }
1621 
1622     if (CB->isCallee(U)) {
1623       IsNonNull |= !NullPointerIsDefined;
1624       return 0;
1625     }
1626 
1627     unsigned ArgNo = CB->getArgOperandNo(U);
1628     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1629     // As long as we only use known information there is no need to track
1630     // dependences here.
1631     auto &DerefAA =
1632         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1633     IsNonNull |= DerefAA.isKnownNonNull();
1634     return DerefAA.getKnownDereferenceableBytes();
1635   }
1636 
1637   int64_t Offset;
1638   const Value *Base =
1639       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1640   if (Base) {
1641     if (Base == &AssociatedValue &&
1642         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1643       int64_t DerefBytes =
1644           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1645 
1646       IsNonNull |= !NullPointerIsDefined;
1647       return std::max(int64_t(0), DerefBytes);
1648     }
1649   }
1650 
1651   /// Corner case when an offset is 0.
1652   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1653                                               /*AllowNonInbounds*/ true);
1654   if (Base) {
1655     if (Offset == 0 && Base == &AssociatedValue &&
1656         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1657       int64_t DerefBytes =
1658           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1659       IsNonNull |= !NullPointerIsDefined;
1660       return std::max(int64_t(0), DerefBytes);
1661     }
1662   }
1663 
1664   return 0;
1665 }
1666 
1667 struct AANonNullImpl : AANonNull {
1668   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1669       : AANonNull(IRP, A),
1670         NullIsDefined(NullPointerIsDefined(
1671             getAnchorScope(),
1672             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1673 
1674   /// See AbstractAttribute::initialize(...).
1675   void initialize(Attributor &A) override {
1676     Value &V = getAssociatedValue();
1677     if (!NullIsDefined &&
1678         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1679                 /* IgnoreSubsumingPositions */ false, &A)) {
1680       indicateOptimisticFixpoint();
1681       return;
1682     }
1683 
1684     if (isa<ConstantPointerNull>(V)) {
1685       indicatePessimisticFixpoint();
1686       return;
1687     }
1688 
1689     AANonNull::initialize(A);
1690 
1691     bool CanBeNull, CanBeFreed;
1692     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1693                                          CanBeFreed)) {
1694       if (!CanBeNull) {
1695         indicateOptimisticFixpoint();
1696         return;
1697       }
1698     }
1699 
1700     if (isa<GlobalValue>(&getAssociatedValue())) {
1701       indicatePessimisticFixpoint();
1702       return;
1703     }
1704 
1705     if (Instruction *CtxI = getCtxI())
1706       followUsesInMBEC(*this, A, getState(), *CtxI);
1707   }
1708 
1709   /// See followUsesInMBEC
1710   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1711                        AANonNull::StateType &State) {
1712     bool IsNonNull = false;
1713     bool TrackUse = false;
1714     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1715                                        IsNonNull, TrackUse);
1716     State.setKnown(IsNonNull);
1717     return TrackUse;
1718   }
1719 
1720   /// See AbstractAttribute::getAsStr().
1721   const std::string getAsStr() const override {
1722     return getAssumed() ? "nonnull" : "may-null";
1723   }
1724 
1725   /// Flag to determine if the underlying value can be null and still allow
1726   /// valid accesses.
1727   const bool NullIsDefined;
1728 };
1729 
1730 /// NonNull attribute for a floating value.
1731 struct AANonNullFloating : public AANonNullImpl {
1732   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1733       : AANonNullImpl(IRP, A) {}
1734 
1735   /// See AbstractAttribute::updateImpl(...).
1736   ChangeStatus updateImpl(Attributor &A) override {
1737     const DataLayout &DL = A.getDataLayout();
1738 
1739     DominatorTree *DT = nullptr;
1740     AssumptionCache *AC = nullptr;
1741     InformationCache &InfoCache = A.getInfoCache();
1742     if (const Function *Fn = getAnchorScope()) {
1743       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1744       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1745     }
1746 
1747     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1748                             AANonNull::StateType &T, bool Stripped) -> bool {
1749       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1750                                              DepClassTy::REQUIRED);
1751       if (!Stripped && this == &AA) {
1752         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1753           T.indicatePessimisticFixpoint();
1754       } else {
1755         // Use abstract attribute information.
1756         const AANonNull::StateType &NS = AA.getState();
1757         T ^= NS;
1758       }
1759       return T.isValidState();
1760     };
1761 
1762     StateType T;
1763     if (!genericValueTraversal<AANonNull, StateType>(
1764             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1765       return indicatePessimisticFixpoint();
1766 
1767     return clampStateAndIndicateChange(getState(), T);
1768   }
1769 
1770   /// See AbstractAttribute::trackStatistics()
1771   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1772 };
1773 
1774 /// NonNull attribute for function return value.
1775 struct AANonNullReturned final
1776     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1777   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1778       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1779 
1780   /// See AbstractAttribute::getAsStr().
1781   const std::string getAsStr() const override {
1782     return getAssumed() ? "nonnull" : "may-null";
1783   }
1784 
1785   /// See AbstractAttribute::trackStatistics()
1786   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1787 };
1788 
1789 /// NonNull attribute for function argument.
1790 struct AANonNullArgument final
1791     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1792   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1793       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1794 
1795   /// See AbstractAttribute::trackStatistics()
1796   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1797 };
1798 
1799 struct AANonNullCallSiteArgument final : AANonNullFloating {
1800   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1801       : AANonNullFloating(IRP, A) {}
1802 
1803   /// See AbstractAttribute::trackStatistics()
1804   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1805 };
1806 
1807 /// NonNull attribute for a call site return position.
1808 struct AANonNullCallSiteReturned final
1809     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1810   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1811       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1812 
1813   /// See AbstractAttribute::trackStatistics()
1814   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1815 };
1816 
1817 /// ------------------------ No-Recurse Attributes ----------------------------
1818 
1819 struct AANoRecurseImpl : public AANoRecurse {
1820   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1821 
1822   /// See AbstractAttribute::getAsStr()
1823   const std::string getAsStr() const override {
1824     return getAssumed() ? "norecurse" : "may-recurse";
1825   }
1826 };
1827 
1828 struct AANoRecurseFunction final : AANoRecurseImpl {
1829   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1830       : AANoRecurseImpl(IRP, A) {}
1831 
1832   /// See AbstractAttribute::initialize(...).
1833   void initialize(Attributor &A) override {
1834     AANoRecurseImpl::initialize(A);
1835     if (const Function *F = getAnchorScope())
1836       if (A.getInfoCache().getSccSize(*F) != 1)
1837         indicatePessimisticFixpoint();
1838   }
1839 
1840   /// See AbstractAttribute::updateImpl(...).
1841   ChangeStatus updateImpl(Attributor &A) override {
1842 
1843     // If all live call sites are known to be no-recurse, we are as well.
1844     auto CallSitePred = [&](AbstractCallSite ACS) {
1845       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1846           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1847           DepClassTy::NONE);
1848       return NoRecurseAA.isKnownNoRecurse();
1849     };
1850     bool AllCallSitesKnown;
1851     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1852       // If we know all call sites and all are known no-recurse, we are done.
1853       // If all known call sites, which might not be all that exist, are known
1854       // to be no-recurse, we are not done but we can continue to assume
1855       // no-recurse. If one of the call sites we have not visited will become
1856       // live, another update is triggered.
1857       if (AllCallSitesKnown)
1858         indicateOptimisticFixpoint();
1859       return ChangeStatus::UNCHANGED;
1860     }
1861 
1862     // If the above check does not hold anymore we look at the calls.
1863     auto CheckForNoRecurse = [&](Instruction &I) {
1864       const auto &CB = cast<CallBase>(I);
1865       if (CB.hasFnAttr(Attribute::NoRecurse))
1866         return true;
1867 
1868       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1869           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1870       if (!NoRecurseAA.isAssumedNoRecurse())
1871         return false;
1872 
1873       // Recursion to the same function
1874       if (CB.getCalledFunction() == getAnchorScope())
1875         return false;
1876 
1877       return true;
1878     };
1879 
1880     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1881       return indicatePessimisticFixpoint();
1882     return ChangeStatus::UNCHANGED;
1883   }
1884 
1885   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1886 };
1887 
1888 /// NoRecurse attribute deduction for a call sites.
1889 struct AANoRecurseCallSite final : AANoRecurseImpl {
1890   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1891       : AANoRecurseImpl(IRP, A) {}
1892 
1893   /// See AbstractAttribute::initialize(...).
1894   void initialize(Attributor &A) override {
1895     AANoRecurseImpl::initialize(A);
1896     Function *F = getAssociatedFunction();
1897     if (!F || F->isDeclaration())
1898       indicatePessimisticFixpoint();
1899   }
1900 
1901   /// See AbstractAttribute::updateImpl(...).
1902   ChangeStatus updateImpl(Attributor &A) override {
1903     // TODO: Once we have call site specific value information we can provide
1904     //       call site specific liveness information and then it makes
1905     //       sense to specialize attributes for call sites arguments instead of
1906     //       redirecting requests to the callee argument.
1907     Function *F = getAssociatedFunction();
1908     const IRPosition &FnPos = IRPosition::function(*F);
1909     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1910     return clampStateAndIndicateChange(getState(), FnAA.getState());
1911   }
1912 
1913   /// See AbstractAttribute::trackStatistics()
1914   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1915 };
1916 
1917 /// -------------------- Undefined-Behavior Attributes ------------------------
1918 
1919 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1920   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1921       : AAUndefinedBehavior(IRP, A) {}
1922 
1923   /// See AbstractAttribute::updateImpl(...).
1924   // through a pointer (i.e. also branches etc.)
1925   ChangeStatus updateImpl(Attributor &A) override {
1926     const size_t UBPrevSize = KnownUBInsts.size();
1927     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1928 
1929     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1930       // Skip instructions that are already saved.
1931       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1932         return true;
1933 
1934       // If we reach here, we know we have an instruction
1935       // that accesses memory through a pointer operand,
1936       // for which getPointerOperand() should give it to us.
1937       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1938       assert(PtrOp &&
1939              "Expected pointer operand of memory accessing instruction");
1940 
1941       // Either we stopped and the appropriate action was taken,
1942       // or we got back a simplified value to continue.
1943       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1944       if (!SimplifiedPtrOp.hasValue())
1945         return true;
1946       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1947 
1948       // A memory access through a pointer is considered UB
1949       // only if the pointer has constant null value.
1950       // TODO: Expand it to not only check constant values.
1951       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1952         AssumedNoUBInsts.insert(&I);
1953         return true;
1954       }
1955       const Type *PtrTy = PtrOpVal->getType();
1956 
1957       // Because we only consider instructions inside functions,
1958       // assume that a parent function exists.
1959       const Function *F = I.getFunction();
1960 
1961       // A memory access using constant null pointer is only considered UB
1962       // if null pointer is _not_ defined for the target platform.
1963       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1964         AssumedNoUBInsts.insert(&I);
1965       else
1966         KnownUBInsts.insert(&I);
1967       return true;
1968     };
1969 
1970     auto InspectBrInstForUB = [&](Instruction &I) {
1971       // A conditional branch instruction is considered UB if it has `undef`
1972       // condition.
1973 
1974       // Skip instructions that are already saved.
1975       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1976         return true;
1977 
1978       // We know we have a branch instruction.
1979       auto BrInst = cast<BranchInst>(&I);
1980 
1981       // Unconditional branches are never considered UB.
1982       if (BrInst->isUnconditional())
1983         return true;
1984 
1985       // Either we stopped and the appropriate action was taken,
1986       // or we got back a simplified value to continue.
1987       Optional<Value *> SimplifiedCond =
1988           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1989       if (!SimplifiedCond.hasValue())
1990         return true;
1991       AssumedNoUBInsts.insert(&I);
1992       return true;
1993     };
1994 
1995     auto InspectCallSiteForUB = [&](Instruction &I) {
1996       // Check whether a callsite always cause UB or not
1997 
1998       // Skip instructions that are already saved.
1999       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2000         return true;
2001 
2002       // Check nonnull and noundef argument attribute violation for each
2003       // callsite.
2004       CallBase &CB = cast<CallBase>(I);
2005       Function *Callee = CB.getCalledFunction();
2006       if (!Callee)
2007         return true;
2008       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2009         // If current argument is known to be simplified to null pointer and the
2010         // corresponding argument position is known to have nonnull attribute,
2011         // the argument is poison. Furthermore, if the argument is poison and
2012         // the position is known to have noundef attriubte, this callsite is
2013         // considered UB.
2014         if (idx >= Callee->arg_size())
2015           break;
2016         Value *ArgVal = CB.getArgOperand(idx);
2017         if (!ArgVal)
2018           continue;
2019         // Here, we handle three cases.
2020         //   (1) Not having a value means it is dead. (we can replace the value
2021         //       with undef)
2022         //   (2) Simplified to undef. The argument violate noundef attriubte.
2023         //   (3) Simplified to null pointer where known to be nonnull.
2024         //       The argument is a poison value and violate noundef attribute.
2025         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2026         auto &NoUndefAA =
2027             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2028         if (!NoUndefAA.isKnownNoUndef())
2029           continue;
2030         bool UsedAssumedInformation = false;
2031         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2032             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2033         if (UsedAssumedInformation)
2034           continue;
2035         if (!SimplifiedVal.hasValue() ||
2036             isa<UndefValue>(*SimplifiedVal.getValue())) {
2037           KnownUBInsts.insert(&I);
2038           continue;
2039         }
2040         if (!ArgVal->getType()->isPointerTy() ||
2041             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2042           continue;
2043         auto &NonNullAA =
2044             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2045         if (NonNullAA.isKnownNonNull())
2046           KnownUBInsts.insert(&I);
2047       }
2048       return true;
2049     };
2050 
2051     auto InspectReturnInstForUB =
2052         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2053           // Check if a return instruction always cause UB or not
2054           // Note: It is guaranteed that the returned position of the anchor
2055           //       scope has noundef attribute when this is called.
2056           //       We also ensure the return position is not "assumed dead"
2057           //       because the returned value was then potentially simplified to
2058           //       `undef` in AAReturnedValues without removing the `noundef`
2059           //       attribute yet.
2060 
2061           // When the returned position has noundef attriubte, UB occur in the
2062           // following cases.
2063           //   (1) Returned value is known to be undef.
2064           //   (2) The value is known to be a null pointer and the returned
2065           //       position has nonnull attribute (because the returned value is
2066           //       poison).
2067           bool FoundUB = false;
2068           if (isa<UndefValue>(V)) {
2069             FoundUB = true;
2070           } else {
2071             if (isa<ConstantPointerNull>(V)) {
2072               auto &NonNullAA = A.getAAFor<AANonNull>(
2073                   *this, IRPosition::returned(*getAnchorScope()),
2074                   DepClassTy::NONE);
2075               if (NonNullAA.isKnownNonNull())
2076                 FoundUB = true;
2077             }
2078           }
2079 
2080           if (FoundUB)
2081             for (ReturnInst *RI : RetInsts)
2082               KnownUBInsts.insert(RI);
2083           return true;
2084         };
2085 
2086     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2087                               {Instruction::Load, Instruction::Store,
2088                                Instruction::AtomicCmpXchg,
2089                                Instruction::AtomicRMW},
2090                               /* CheckBBLivenessOnly */ true);
2091     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2092                               /* CheckBBLivenessOnly */ true);
2093     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2094 
2095     // If the returned position of the anchor scope has noundef attriubte, check
2096     // all returned instructions.
2097     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2098       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2099       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2100         auto &RetPosNoUndefAA =
2101             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2102         if (RetPosNoUndefAA.isKnownNoUndef())
2103           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2104                                                     *this);
2105       }
2106     }
2107 
2108     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2109         UBPrevSize != KnownUBInsts.size())
2110       return ChangeStatus::CHANGED;
2111     return ChangeStatus::UNCHANGED;
2112   }
2113 
2114   bool isKnownToCauseUB(Instruction *I) const override {
2115     return KnownUBInsts.count(I);
2116   }
2117 
2118   bool isAssumedToCauseUB(Instruction *I) const override {
2119     // In simple words, if an instruction is not in the assumed to _not_
2120     // cause UB, then it is assumed UB (that includes those
2121     // in the KnownUBInsts set). The rest is boilerplate
2122     // is to ensure that it is one of the instructions we test
2123     // for UB.
2124 
2125     switch (I->getOpcode()) {
2126     case Instruction::Load:
2127     case Instruction::Store:
2128     case Instruction::AtomicCmpXchg:
2129     case Instruction::AtomicRMW:
2130       return !AssumedNoUBInsts.count(I);
2131     case Instruction::Br: {
2132       auto BrInst = cast<BranchInst>(I);
2133       if (BrInst->isUnconditional())
2134         return false;
2135       return !AssumedNoUBInsts.count(I);
2136     } break;
2137     default:
2138       return false;
2139     }
2140     return false;
2141   }
2142 
2143   ChangeStatus manifest(Attributor &A) override {
2144     if (KnownUBInsts.empty())
2145       return ChangeStatus::UNCHANGED;
2146     for (Instruction *I : KnownUBInsts)
2147       A.changeToUnreachableAfterManifest(I);
2148     return ChangeStatus::CHANGED;
2149   }
2150 
2151   /// See AbstractAttribute::getAsStr()
2152   const std::string getAsStr() const override {
2153     return getAssumed() ? "undefined-behavior" : "no-ub";
2154   }
2155 
2156   /// Note: The correctness of this analysis depends on the fact that the
2157   /// following 2 sets will stop changing after some point.
2158   /// "Change" here means that their size changes.
2159   /// The size of each set is monotonically increasing
2160   /// (we only add items to them) and it is upper bounded by the number of
2161   /// instructions in the processed function (we can never save more
2162   /// elements in either set than this number). Hence, at some point,
2163   /// they will stop increasing.
2164   /// Consequently, at some point, both sets will have stopped
2165   /// changing, effectively making the analysis reach a fixpoint.
2166 
2167   /// Note: These 2 sets are disjoint and an instruction can be considered
2168   /// one of 3 things:
2169   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2170   ///    the KnownUBInsts set.
2171   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2172   ///    has a reason to assume it).
2173   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2174   ///    could not find a reason to assume or prove that it can cause UB,
2175   ///    hence it assumes it doesn't. We have a set for these instructions
2176   ///    so that we don't reprocess them in every update.
2177   ///    Note however that instructions in this set may cause UB.
2178 
2179 protected:
2180   /// A set of all live instructions _known_ to cause UB.
2181   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2182 
2183 private:
2184   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2185   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2186 
2187   // Should be called on updates in which if we're processing an instruction
2188   // \p I that depends on a value \p V, one of the following has to happen:
2189   // - If the value is assumed, then stop.
2190   // - If the value is known but undef, then consider it UB.
2191   // - Otherwise, do specific processing with the simplified value.
2192   // We return None in the first 2 cases to signify that an appropriate
2193   // action was taken and the caller should stop.
2194   // Otherwise, we return the simplified value that the caller should
2195   // use for specific processing.
2196   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2197                                          Instruction *I) {
2198     bool UsedAssumedInformation = false;
2199     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2200         IRPosition::value(*V), *this, UsedAssumedInformation);
2201     if (UsedAssumedInformation) {
2202       // Don't depend on assumed values.
2203       return llvm::None;
2204     }
2205     if (!SimplifiedV.hasValue()) {
2206       // If it is known (which we tested above) but it doesn't have a value,
2207       // then we can assume `undef` and hence the instruction is UB.
2208       KnownUBInsts.insert(I);
2209       return llvm::None;
2210     }
2211     Value *Val = SimplifiedV.getValue();
2212     if (isa<UndefValue>(Val)) {
2213       KnownUBInsts.insert(I);
2214       return llvm::None;
2215     }
2216     return Val;
2217   }
2218 };
2219 
2220 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2221   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2222       : AAUndefinedBehaviorImpl(IRP, A) {}
2223 
2224   /// See AbstractAttribute::trackStatistics()
2225   void trackStatistics() const override {
2226     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2227                "Number of instructions known to have UB");
2228     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2229         KnownUBInsts.size();
2230   }
2231 };
2232 
2233 /// ------------------------ Will-Return Attributes ----------------------------
2234 
2235 // Helper function that checks whether a function has any cycle which we don't
2236 // know if it is bounded or not.
2237 // Loops with maximum trip count are considered bounded, any other cycle not.
2238 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2239   ScalarEvolution *SE =
2240       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2241   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2242   // If either SCEV or LoopInfo is not available for the function then we assume
2243   // any cycle to be unbounded cycle.
2244   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2245   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2246   if (!SE || !LI) {
2247     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2248       if (SCCI.hasCycle())
2249         return true;
2250     return false;
2251   }
2252 
2253   // If there's irreducible control, the function may contain non-loop cycles.
2254   if (mayContainIrreducibleControl(F, LI))
2255     return true;
2256 
2257   // Any loop that does not have a max trip count is considered unbounded cycle.
2258   for (auto *L : LI->getLoopsInPreorder()) {
2259     if (!SE->getSmallConstantMaxTripCount(L))
2260       return true;
2261   }
2262   return false;
2263 }
2264 
2265 struct AAWillReturnImpl : public AAWillReturn {
2266   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2267       : AAWillReturn(IRP, A) {}
2268 
2269   /// See AbstractAttribute::initialize(...).
2270   void initialize(Attributor &A) override {
2271     AAWillReturn::initialize(A);
2272 
2273     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2274       indicateOptimisticFixpoint();
2275       return;
2276     }
2277   }
2278 
2279   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2280   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2281     // Check for `mustprogress` in the scope and the associated function which
2282     // might be different if this is a call site.
2283     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2284         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2285       return false;
2286 
2287     const auto &MemAA =
2288         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2289     if (!MemAA.isAssumedReadOnly())
2290       return false;
2291     if (KnownOnly && !MemAA.isKnownReadOnly())
2292       return false;
2293     if (!MemAA.isKnownReadOnly())
2294       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2295 
2296     return true;
2297   }
2298 
2299   /// See AbstractAttribute::updateImpl(...).
2300   ChangeStatus updateImpl(Attributor &A) override {
2301     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2302       return ChangeStatus::UNCHANGED;
2303 
2304     auto CheckForWillReturn = [&](Instruction &I) {
2305       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2306       const auto &WillReturnAA =
2307           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2308       if (WillReturnAA.isKnownWillReturn())
2309         return true;
2310       if (!WillReturnAA.isAssumedWillReturn())
2311         return false;
2312       const auto &NoRecurseAA =
2313           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2314       return NoRecurseAA.isAssumedNoRecurse();
2315     };
2316 
2317     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2318       return indicatePessimisticFixpoint();
2319 
2320     return ChangeStatus::UNCHANGED;
2321   }
2322 
2323   /// See AbstractAttribute::getAsStr()
2324   const std::string getAsStr() const override {
2325     return getAssumed() ? "willreturn" : "may-noreturn";
2326   }
2327 };
2328 
2329 struct AAWillReturnFunction final : AAWillReturnImpl {
2330   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2331       : AAWillReturnImpl(IRP, A) {}
2332 
2333   /// See AbstractAttribute::initialize(...).
2334   void initialize(Attributor &A) override {
2335     AAWillReturnImpl::initialize(A);
2336 
2337     Function *F = getAnchorScope();
2338     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2339       indicatePessimisticFixpoint();
2340   }
2341 
2342   /// See AbstractAttribute::trackStatistics()
2343   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2344 };
2345 
2346 /// WillReturn attribute deduction for a call sites.
2347 struct AAWillReturnCallSite final : AAWillReturnImpl {
2348   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2349       : AAWillReturnImpl(IRP, A) {}
2350 
2351   /// See AbstractAttribute::initialize(...).
2352   void initialize(Attributor &A) override {
2353     AAWillReturnImpl::initialize(A);
2354     Function *F = getAssociatedFunction();
2355     if (!F || !A.isFunctionIPOAmendable(*F))
2356       indicatePessimisticFixpoint();
2357   }
2358 
2359   /// See AbstractAttribute::updateImpl(...).
2360   ChangeStatus updateImpl(Attributor &A) override {
2361     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2362       return ChangeStatus::UNCHANGED;
2363 
2364     // TODO: Once we have call site specific value information we can provide
2365     //       call site specific liveness information and then it makes
2366     //       sense to specialize attributes for call sites arguments instead of
2367     //       redirecting requests to the callee argument.
2368     Function *F = getAssociatedFunction();
2369     const IRPosition &FnPos = IRPosition::function(*F);
2370     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2371     return clampStateAndIndicateChange(getState(), FnAA.getState());
2372   }
2373 
2374   /// See AbstractAttribute::trackStatistics()
2375   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2376 };
2377 
2378 /// -------------------AAReachability Attribute--------------------------
2379 
2380 struct AAReachabilityImpl : AAReachability {
2381   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2382       : AAReachability(IRP, A) {}
2383 
2384   const std::string getAsStr() const override {
2385     // TODO: Return the number of reachable queries.
2386     return "reachable";
2387   }
2388 
2389   /// See AbstractAttribute::updateImpl(...).
2390   ChangeStatus updateImpl(Attributor &A) override {
2391     return ChangeStatus::UNCHANGED;
2392   }
2393 };
2394 
2395 struct AAReachabilityFunction final : public AAReachabilityImpl {
2396   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2397       : AAReachabilityImpl(IRP, A) {}
2398 
2399   /// See AbstractAttribute::trackStatistics()
2400   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2401 };
2402 
2403 /// ------------------------ NoAlias Argument Attribute ------------------------
2404 
2405 struct AANoAliasImpl : AANoAlias {
2406   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2407     assert(getAssociatedType()->isPointerTy() &&
2408            "Noalias is a pointer attribute");
2409   }
2410 
2411   const std::string getAsStr() const override {
2412     return getAssumed() ? "noalias" : "may-alias";
2413   }
2414 };
2415 
2416 /// NoAlias attribute for a floating value.
2417 struct AANoAliasFloating final : AANoAliasImpl {
2418   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2419       : AANoAliasImpl(IRP, A) {}
2420 
2421   /// See AbstractAttribute::initialize(...).
2422   void initialize(Attributor &A) override {
2423     AANoAliasImpl::initialize(A);
2424     Value *Val = &getAssociatedValue();
2425     do {
2426       CastInst *CI = dyn_cast<CastInst>(Val);
2427       if (!CI)
2428         break;
2429       Value *Base = CI->getOperand(0);
2430       if (!Base->hasOneUse())
2431         break;
2432       Val = Base;
2433     } while (true);
2434 
2435     if (!Val->getType()->isPointerTy()) {
2436       indicatePessimisticFixpoint();
2437       return;
2438     }
2439 
2440     if (isa<AllocaInst>(Val))
2441       indicateOptimisticFixpoint();
2442     else if (isa<ConstantPointerNull>(Val) &&
2443              !NullPointerIsDefined(getAnchorScope(),
2444                                    Val->getType()->getPointerAddressSpace()))
2445       indicateOptimisticFixpoint();
2446     else if (Val != &getAssociatedValue()) {
2447       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2448           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2449       if (ValNoAliasAA.isKnownNoAlias())
2450         indicateOptimisticFixpoint();
2451     }
2452   }
2453 
2454   /// See AbstractAttribute::updateImpl(...).
2455   ChangeStatus updateImpl(Attributor &A) override {
2456     // TODO: Implement this.
2457     return indicatePessimisticFixpoint();
2458   }
2459 
2460   /// See AbstractAttribute::trackStatistics()
2461   void trackStatistics() const override {
2462     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2463   }
2464 };
2465 
2466 /// NoAlias attribute for an argument.
2467 struct AANoAliasArgument final
2468     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2469   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2470   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2471 
2472   /// See AbstractAttribute::initialize(...).
2473   void initialize(Attributor &A) override {
2474     Base::initialize(A);
2475     // See callsite argument attribute and callee argument attribute.
2476     if (hasAttr({Attribute::ByVal}))
2477       indicateOptimisticFixpoint();
2478   }
2479 
2480   /// See AbstractAttribute::update(...).
2481   ChangeStatus updateImpl(Attributor &A) override {
2482     // We have to make sure no-alias on the argument does not break
2483     // synchronization when this is a callback argument, see also [1] below.
2484     // If synchronization cannot be affected, we delegate to the base updateImpl
2485     // function, otherwise we give up for now.
2486 
2487     // If the function is no-sync, no-alias cannot break synchronization.
2488     const auto &NoSyncAA =
2489         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2490                              DepClassTy::OPTIONAL);
2491     if (NoSyncAA.isAssumedNoSync())
2492       return Base::updateImpl(A);
2493 
2494     // If the argument is read-only, no-alias cannot break synchronization.
2495     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2496         *this, getIRPosition(), DepClassTy::OPTIONAL);
2497     if (MemBehaviorAA.isAssumedReadOnly())
2498       return Base::updateImpl(A);
2499 
2500     // If the argument is never passed through callbacks, no-alias cannot break
2501     // synchronization.
2502     bool AllCallSitesKnown;
2503     if (A.checkForAllCallSites(
2504             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2505             true, AllCallSitesKnown))
2506       return Base::updateImpl(A);
2507 
2508     // TODO: add no-alias but make sure it doesn't break synchronization by
2509     // introducing fake uses. See:
2510     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2511     //     International Workshop on OpenMP 2018,
2512     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2513 
2514     return indicatePessimisticFixpoint();
2515   }
2516 
2517   /// See AbstractAttribute::trackStatistics()
2518   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2519 };
2520 
2521 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2522   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2523       : AANoAliasImpl(IRP, A) {}
2524 
2525   /// See AbstractAttribute::initialize(...).
2526   void initialize(Attributor &A) override {
2527     // See callsite argument attribute and callee argument attribute.
2528     const auto &CB = cast<CallBase>(getAnchorValue());
2529     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2530       indicateOptimisticFixpoint();
2531     Value &Val = getAssociatedValue();
2532     if (isa<ConstantPointerNull>(Val) &&
2533         !NullPointerIsDefined(getAnchorScope(),
2534                               Val.getType()->getPointerAddressSpace()))
2535       indicateOptimisticFixpoint();
2536   }
2537 
2538   /// Determine if the underlying value may alias with the call site argument
2539   /// \p OtherArgNo of \p ICS (= the underlying call site).
2540   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2541                             const AAMemoryBehavior &MemBehaviorAA,
2542                             const CallBase &CB, unsigned OtherArgNo) {
2543     // We do not need to worry about aliasing with the underlying IRP.
2544     if (this->getCalleeArgNo() == (int)OtherArgNo)
2545       return false;
2546 
2547     // If it is not a pointer or pointer vector we do not alias.
2548     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2549     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2550       return false;
2551 
2552     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2553         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2554 
2555     // If the argument is readnone, there is no read-write aliasing.
2556     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2557       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2558       return false;
2559     }
2560 
2561     // If the argument is readonly and the underlying value is readonly, there
2562     // is no read-write aliasing.
2563     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2564     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2565       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2566       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2567       return false;
2568     }
2569 
2570     // We have to utilize actual alias analysis queries so we need the object.
2571     if (!AAR)
2572       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2573 
2574     // Try to rule it out at the call site.
2575     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2576     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2577                          "callsite arguments: "
2578                       << getAssociatedValue() << " " << *ArgOp << " => "
2579                       << (IsAliasing ? "" : "no-") << "alias \n");
2580 
2581     return IsAliasing;
2582   }
2583 
2584   bool
2585   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2586                                          const AAMemoryBehavior &MemBehaviorAA,
2587                                          const AANoAlias &NoAliasAA) {
2588     // We can deduce "noalias" if the following conditions hold.
2589     // (i)   Associated value is assumed to be noalias in the definition.
2590     // (ii)  Associated value is assumed to be no-capture in all the uses
2591     //       possibly executed before this callsite.
2592     // (iii) There is no other pointer argument which could alias with the
2593     //       value.
2594 
2595     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2596     if (!AssociatedValueIsNoAliasAtDef) {
2597       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2598                         << " is not no-alias at the definition\n");
2599       return false;
2600     }
2601 
2602     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2603 
2604     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2605     const Function *ScopeFn = VIRP.getAnchorScope();
2606     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2607     // Check whether the value is captured in the scope using AANoCapture.
2608     //      Look at CFG and check only uses possibly executed before this
2609     //      callsite.
2610     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2611       Instruction *UserI = cast<Instruction>(U.getUser());
2612 
2613       // If UserI is the curr instruction and there is a single potential use of
2614       // the value in UserI we allow the use.
2615       // TODO: We should inspect the operands and allow those that cannot alias
2616       //       with the value.
2617       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2618         return true;
2619 
2620       if (ScopeFn) {
2621         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2622             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2623 
2624         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2625           return true;
2626 
2627         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2628           if (CB->isArgOperand(&U)) {
2629 
2630             unsigned ArgNo = CB->getArgOperandNo(&U);
2631 
2632             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2633                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2634                 DepClassTy::OPTIONAL);
2635 
2636             if (NoCaptureAA.isAssumedNoCapture())
2637               return true;
2638           }
2639         }
2640       }
2641 
2642       // For cases which can potentially have more users
2643       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2644           isa<SelectInst>(U)) {
2645         Follow = true;
2646         return true;
2647       }
2648 
2649       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2650       return false;
2651     };
2652 
2653     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2654       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2655         LLVM_DEBUG(
2656             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2657                    << " cannot be noalias as it is potentially captured\n");
2658         return false;
2659       }
2660     }
2661     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2662 
2663     // Check there is no other pointer argument which could alias with the
2664     // value passed at this call site.
2665     // TODO: AbstractCallSite
2666     const auto &CB = cast<CallBase>(getAnchorValue());
2667     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2668          OtherArgNo++)
2669       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2670         return false;
2671 
2672     return true;
2673   }
2674 
2675   /// See AbstractAttribute::updateImpl(...).
2676   ChangeStatus updateImpl(Attributor &A) override {
2677     // If the argument is readnone we are done as there are no accesses via the
2678     // argument.
2679     auto &MemBehaviorAA =
2680         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2681     if (MemBehaviorAA.isAssumedReadNone()) {
2682       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2683       return ChangeStatus::UNCHANGED;
2684     }
2685 
2686     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2687     const auto &NoAliasAA =
2688         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2689 
2690     AAResults *AAR = nullptr;
2691     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2692                                                NoAliasAA)) {
2693       LLVM_DEBUG(
2694           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2695       return ChangeStatus::UNCHANGED;
2696     }
2697 
2698     return indicatePessimisticFixpoint();
2699   }
2700 
2701   /// See AbstractAttribute::trackStatistics()
2702   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2703 };
2704 
2705 /// NoAlias attribute for function return value.
2706 struct AANoAliasReturned final : AANoAliasImpl {
2707   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2708       : AANoAliasImpl(IRP, A) {}
2709 
2710   /// See AbstractAttribute::initialize(...).
2711   void initialize(Attributor &A) override {
2712     AANoAliasImpl::initialize(A);
2713     Function *F = getAssociatedFunction();
2714     if (!F || F->isDeclaration())
2715       indicatePessimisticFixpoint();
2716   }
2717 
2718   /// See AbstractAttribute::updateImpl(...).
2719   virtual ChangeStatus updateImpl(Attributor &A) override {
2720 
2721     auto CheckReturnValue = [&](Value &RV) -> bool {
2722       if (Constant *C = dyn_cast<Constant>(&RV))
2723         if (C->isNullValue() || isa<UndefValue>(C))
2724           return true;
2725 
2726       /// For now, we can only deduce noalias if we have call sites.
2727       /// FIXME: add more support.
2728       if (!isa<CallBase>(&RV))
2729         return false;
2730 
2731       const IRPosition &RVPos = IRPosition::value(RV);
2732       const auto &NoAliasAA =
2733           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2734       if (!NoAliasAA.isAssumedNoAlias())
2735         return false;
2736 
2737       const auto &NoCaptureAA =
2738           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2739       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2740     };
2741 
2742     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2743       return indicatePessimisticFixpoint();
2744 
2745     return ChangeStatus::UNCHANGED;
2746   }
2747 
2748   /// See AbstractAttribute::trackStatistics()
2749   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2750 };
2751 
2752 /// NoAlias attribute deduction for a call site return value.
2753 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2754   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2755       : AANoAliasImpl(IRP, A) {}
2756 
2757   /// See AbstractAttribute::initialize(...).
2758   void initialize(Attributor &A) override {
2759     AANoAliasImpl::initialize(A);
2760     Function *F = getAssociatedFunction();
2761     if (!F || F->isDeclaration())
2762       indicatePessimisticFixpoint();
2763   }
2764 
2765   /// See AbstractAttribute::updateImpl(...).
2766   ChangeStatus updateImpl(Attributor &A) override {
2767     // TODO: Once we have call site specific value information we can provide
2768     //       call site specific liveness information and then it makes
2769     //       sense to specialize attributes for call sites arguments instead of
2770     //       redirecting requests to the callee argument.
2771     Function *F = getAssociatedFunction();
2772     const IRPosition &FnPos = IRPosition::returned(*F);
2773     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2774     return clampStateAndIndicateChange(getState(), FnAA.getState());
2775   }
2776 
2777   /// See AbstractAttribute::trackStatistics()
2778   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2779 };
2780 
2781 /// -------------------AAIsDead Function Attribute-----------------------
2782 
2783 struct AAIsDeadValueImpl : public AAIsDead {
2784   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2785 
2786   /// See AAIsDead::isAssumedDead().
2787   bool isAssumedDead() const override { return getAssumed(); }
2788 
2789   /// See AAIsDead::isKnownDead().
2790   bool isKnownDead() const override { return getKnown(); }
2791 
2792   /// See AAIsDead::isAssumedDead(BasicBlock *).
2793   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2794 
2795   /// See AAIsDead::isKnownDead(BasicBlock *).
2796   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2797 
2798   /// See AAIsDead::isAssumedDead(Instruction *I).
2799   bool isAssumedDead(const Instruction *I) const override {
2800     return I == getCtxI() && isAssumedDead();
2801   }
2802 
2803   /// See AAIsDead::isKnownDead(Instruction *I).
2804   bool isKnownDead(const Instruction *I) const override {
2805     return isAssumedDead(I) && getKnown();
2806   }
2807 
2808   /// See AbstractAttribute::getAsStr().
2809   const std::string getAsStr() const override {
2810     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2811   }
2812 
2813   /// Check if all uses are assumed dead.
2814   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2815     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2816     // Explicitly set the dependence class to required because we want a long
2817     // chain of N dependent instructions to be considered live as soon as one is
2818     // without going through N update cycles. This is not required for
2819     // correctness.
2820     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2821   }
2822 
2823   /// Determine if \p I is assumed to be side-effect free.
2824   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2825     if (!I || wouldInstructionBeTriviallyDead(I))
2826       return true;
2827 
2828     auto *CB = dyn_cast<CallBase>(I);
2829     if (!CB || isa<IntrinsicInst>(CB))
2830       return false;
2831 
2832     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2833     const auto &NoUnwindAA =
2834         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2835     if (!NoUnwindAA.isAssumedNoUnwind())
2836       return false;
2837     if (!NoUnwindAA.isKnownNoUnwind())
2838       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2839 
2840     const auto &MemBehaviorAA =
2841         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2842     if (MemBehaviorAA.isAssumedReadOnly()) {
2843       if (!MemBehaviorAA.isKnownReadOnly())
2844         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2845       return true;
2846     }
2847     return false;
2848   }
2849 };
2850 
2851 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2852   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2853       : AAIsDeadValueImpl(IRP, A) {}
2854 
2855   /// See AbstractAttribute::initialize(...).
2856   void initialize(Attributor &A) override {
2857     if (isa<UndefValue>(getAssociatedValue())) {
2858       indicatePessimisticFixpoint();
2859       return;
2860     }
2861 
2862     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2863     if (!isAssumedSideEffectFree(A, I))
2864       indicatePessimisticFixpoint();
2865   }
2866 
2867   /// See AbstractAttribute::updateImpl(...).
2868   ChangeStatus updateImpl(Attributor &A) override {
2869     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2870     if (!isAssumedSideEffectFree(A, I))
2871       return indicatePessimisticFixpoint();
2872 
2873     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2874       return indicatePessimisticFixpoint();
2875     return ChangeStatus::UNCHANGED;
2876   }
2877 
2878   /// See AbstractAttribute::manifest(...).
2879   ChangeStatus manifest(Attributor &A) override {
2880     Value &V = getAssociatedValue();
2881     if (auto *I = dyn_cast<Instruction>(&V)) {
2882       // If we get here we basically know the users are all dead. We check if
2883       // isAssumedSideEffectFree returns true here again because it might not be
2884       // the case and only the users are dead but the instruction (=call) is
2885       // still needed.
2886       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2887         A.deleteAfterManifest(*I);
2888         return ChangeStatus::CHANGED;
2889       }
2890     }
2891     if (V.use_empty())
2892       return ChangeStatus::UNCHANGED;
2893 
2894     bool UsedAssumedInformation = false;
2895     Optional<Constant *> C =
2896         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2897     if (C.hasValue() && C.getValue())
2898       return ChangeStatus::UNCHANGED;
2899 
2900     // Replace the value with undef as it is dead but keep droppable uses around
2901     // as they provide information we don't want to give up on just yet.
2902     UndefValue &UV = *UndefValue::get(V.getType());
2903     bool AnyChange =
2904         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2905     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2906   }
2907 
2908   /// See AbstractAttribute::trackStatistics()
2909   void trackStatistics() const override {
2910     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2911   }
2912 };
2913 
2914 struct AAIsDeadArgument : public AAIsDeadFloating {
2915   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2916       : AAIsDeadFloating(IRP, A) {}
2917 
2918   /// See AbstractAttribute::initialize(...).
2919   void initialize(Attributor &A) override {
2920     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2921       indicatePessimisticFixpoint();
2922   }
2923 
2924   /// See AbstractAttribute::manifest(...).
2925   ChangeStatus manifest(Attributor &A) override {
2926     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2927     Argument &Arg = *getAssociatedArgument();
2928     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2929       if (A.registerFunctionSignatureRewrite(
2930               Arg, /* ReplacementTypes */ {},
2931               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2932               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2933         Arg.dropDroppableUses();
2934         return ChangeStatus::CHANGED;
2935       }
2936     return Changed;
2937   }
2938 
2939   /// See AbstractAttribute::trackStatistics()
2940   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2941 };
2942 
2943 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2944   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2945       : AAIsDeadValueImpl(IRP, A) {}
2946 
2947   /// See AbstractAttribute::initialize(...).
2948   void initialize(Attributor &A) override {
2949     if (isa<UndefValue>(getAssociatedValue()))
2950       indicatePessimisticFixpoint();
2951   }
2952 
2953   /// See AbstractAttribute::updateImpl(...).
2954   ChangeStatus updateImpl(Attributor &A) override {
2955     // TODO: Once we have call site specific value information we can provide
2956     //       call site specific liveness information and then it makes
2957     //       sense to specialize attributes for call sites arguments instead of
2958     //       redirecting requests to the callee argument.
2959     Argument *Arg = getAssociatedArgument();
2960     if (!Arg)
2961       return indicatePessimisticFixpoint();
2962     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2963     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2964     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2965   }
2966 
2967   /// See AbstractAttribute::manifest(...).
2968   ChangeStatus manifest(Attributor &A) override {
2969     CallBase &CB = cast<CallBase>(getAnchorValue());
2970     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2971     assert(!isa<UndefValue>(U.get()) &&
2972            "Expected undef values to be filtered out!");
2973     UndefValue &UV = *UndefValue::get(U->getType());
2974     if (A.changeUseAfterManifest(U, UV))
2975       return ChangeStatus::CHANGED;
2976     return ChangeStatus::UNCHANGED;
2977   }
2978 
2979   /// See AbstractAttribute::trackStatistics()
2980   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2981 };
2982 
2983 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2984   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2985       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2986 
2987   /// See AAIsDead::isAssumedDead().
2988   bool isAssumedDead() const override {
2989     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2990   }
2991 
2992   /// See AbstractAttribute::initialize(...).
2993   void initialize(Attributor &A) override {
2994     if (isa<UndefValue>(getAssociatedValue())) {
2995       indicatePessimisticFixpoint();
2996       return;
2997     }
2998 
2999     // We track this separately as a secondary state.
3000     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3001   }
3002 
3003   /// See AbstractAttribute::updateImpl(...).
3004   ChangeStatus updateImpl(Attributor &A) override {
3005     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3006     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3007       IsAssumedSideEffectFree = false;
3008       Changed = ChangeStatus::CHANGED;
3009     }
3010 
3011     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3012       return indicatePessimisticFixpoint();
3013     return Changed;
3014   }
3015 
3016   /// See AbstractAttribute::trackStatistics()
3017   void trackStatistics() const override {
3018     if (IsAssumedSideEffectFree)
3019       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3020     else
3021       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3022   }
3023 
3024   /// See AbstractAttribute::getAsStr().
3025   const std::string getAsStr() const override {
3026     return isAssumedDead()
3027                ? "assumed-dead"
3028                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3029   }
3030 
3031 private:
3032   bool IsAssumedSideEffectFree;
3033 };
3034 
3035 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3036   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3037       : AAIsDeadValueImpl(IRP, A) {}
3038 
3039   /// See AbstractAttribute::updateImpl(...).
3040   ChangeStatus updateImpl(Attributor &A) override {
3041 
3042     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3043                               {Instruction::Ret});
3044 
3045     auto PredForCallSite = [&](AbstractCallSite ACS) {
3046       if (ACS.isCallbackCall() || !ACS.getInstruction())
3047         return false;
3048       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3049     };
3050 
3051     bool AllCallSitesKnown;
3052     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3053                                 AllCallSitesKnown))
3054       return indicatePessimisticFixpoint();
3055 
3056     return ChangeStatus::UNCHANGED;
3057   }
3058 
3059   /// See AbstractAttribute::manifest(...).
3060   ChangeStatus manifest(Attributor &A) override {
3061     // TODO: Rewrite the signature to return void?
3062     bool AnyChange = false;
3063     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3064     auto RetInstPred = [&](Instruction &I) {
3065       ReturnInst &RI = cast<ReturnInst>(I);
3066       if (!isa<UndefValue>(RI.getReturnValue()))
3067         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3068       return true;
3069     };
3070     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3071     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3072   }
3073 
3074   /// See AbstractAttribute::trackStatistics()
3075   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3076 };
3077 
3078 struct AAIsDeadFunction : public AAIsDead {
3079   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3080 
3081   /// See AbstractAttribute::initialize(...).
3082   void initialize(Attributor &A) override {
3083     const Function *F = getAnchorScope();
3084     if (F && !F->isDeclaration()) {
3085       // We only want to compute liveness once. If the function is not part of
3086       // the SCC, skip it.
3087       if (A.isRunOn(*const_cast<Function *>(F))) {
3088         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3089         assumeLive(A, F->getEntryBlock());
3090       } else {
3091         indicatePessimisticFixpoint();
3092       }
3093     }
3094   }
3095 
3096   /// See AbstractAttribute::getAsStr().
3097   const std::string getAsStr() const override {
3098     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3099            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3100            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3101            std::to_string(KnownDeadEnds.size()) + "]";
3102   }
3103 
3104   /// See AbstractAttribute::manifest(...).
3105   ChangeStatus manifest(Attributor &A) override {
3106     assert(getState().isValidState() &&
3107            "Attempted to manifest an invalid state!");
3108 
3109     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3110     Function &F = *getAnchorScope();
3111 
3112     if (AssumedLiveBlocks.empty()) {
3113       A.deleteAfterManifest(F);
3114       return ChangeStatus::CHANGED;
3115     }
3116 
3117     // Flag to determine if we can change an invoke to a call assuming the
3118     // callee is nounwind. This is not possible if the personality of the
3119     // function allows to catch asynchronous exceptions.
3120     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3121 
3122     KnownDeadEnds.set_union(ToBeExploredFrom);
3123     for (const Instruction *DeadEndI : KnownDeadEnds) {
3124       auto *CB = dyn_cast<CallBase>(DeadEndI);
3125       if (!CB)
3126         continue;
3127       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3128           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3129       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3130       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3131         continue;
3132 
3133       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3134         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3135       else
3136         A.changeToUnreachableAfterManifest(
3137             const_cast<Instruction *>(DeadEndI->getNextNode()));
3138       HasChanged = ChangeStatus::CHANGED;
3139     }
3140 
3141     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3142     for (BasicBlock &BB : F)
3143       if (!AssumedLiveBlocks.count(&BB)) {
3144         A.deleteAfterManifest(BB);
3145         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3146       }
3147 
3148     return HasChanged;
3149   }
3150 
3151   /// See AbstractAttribute::updateImpl(...).
3152   ChangeStatus updateImpl(Attributor &A) override;
3153 
3154   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3155     return !AssumedLiveEdges.count(std::make_pair(From, To));
3156   }
3157 
3158   /// See AbstractAttribute::trackStatistics()
3159   void trackStatistics() const override {}
3160 
3161   /// Returns true if the function is assumed dead.
3162   bool isAssumedDead() const override { return false; }
3163 
3164   /// See AAIsDead::isKnownDead().
3165   bool isKnownDead() const override { return false; }
3166 
3167   /// See AAIsDead::isAssumedDead(BasicBlock *).
3168   bool isAssumedDead(const BasicBlock *BB) const override {
3169     assert(BB->getParent() == getAnchorScope() &&
3170            "BB must be in the same anchor scope function.");
3171 
3172     if (!getAssumed())
3173       return false;
3174     return !AssumedLiveBlocks.count(BB);
3175   }
3176 
3177   /// See AAIsDead::isKnownDead(BasicBlock *).
3178   bool isKnownDead(const BasicBlock *BB) const override {
3179     return getKnown() && isAssumedDead(BB);
3180   }
3181 
3182   /// See AAIsDead::isAssumed(Instruction *I).
3183   bool isAssumedDead(const Instruction *I) const override {
3184     assert(I->getParent()->getParent() == getAnchorScope() &&
3185            "Instruction must be in the same anchor scope function.");
3186 
3187     if (!getAssumed())
3188       return false;
3189 
3190     // If it is not in AssumedLiveBlocks then it for sure dead.
3191     // Otherwise, it can still be after noreturn call in a live block.
3192     if (!AssumedLiveBlocks.count(I->getParent()))
3193       return true;
3194 
3195     // If it is not after a liveness barrier it is live.
3196     const Instruction *PrevI = I->getPrevNode();
3197     while (PrevI) {
3198       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3199         return true;
3200       PrevI = PrevI->getPrevNode();
3201     }
3202     return false;
3203   }
3204 
3205   /// See AAIsDead::isKnownDead(Instruction *I).
3206   bool isKnownDead(const Instruction *I) const override {
3207     return getKnown() && isAssumedDead(I);
3208   }
3209 
3210   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3211   /// that internal function called from \p BB should now be looked at.
3212   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3213     if (!AssumedLiveBlocks.insert(&BB).second)
3214       return false;
3215 
3216     // We assume that all of BB is (probably) live now and if there are calls to
3217     // internal functions we will assume that those are now live as well. This
3218     // is a performance optimization for blocks with calls to a lot of internal
3219     // functions. It can however cause dead functions to be treated as live.
3220     for (const Instruction &I : BB)
3221       if (const auto *CB = dyn_cast<CallBase>(&I))
3222         if (const Function *F = CB->getCalledFunction())
3223           if (F->hasLocalLinkage())
3224             A.markLiveInternalFunction(*F);
3225     return true;
3226   }
3227 
3228   /// Collection of instructions that need to be explored again, e.g., we
3229   /// did assume they do not transfer control to (one of their) successors.
3230   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3231 
3232   /// Collection of instructions that are known to not transfer control.
3233   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3234 
3235   /// Collection of all assumed live edges
3236   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3237 
3238   /// Collection of all assumed live BasicBlocks.
3239   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3240 };
3241 
3242 static bool
3243 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3244                         AbstractAttribute &AA,
3245                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3246   const IRPosition &IPos = IRPosition::callsite_function(CB);
3247 
3248   const auto &NoReturnAA =
3249       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3250   if (NoReturnAA.isAssumedNoReturn())
3251     return !NoReturnAA.isKnownNoReturn();
3252   if (CB.isTerminator())
3253     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3254   else
3255     AliveSuccessors.push_back(CB.getNextNode());
3256   return false;
3257 }
3258 
3259 static bool
3260 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3261                         AbstractAttribute &AA,
3262                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3263   bool UsedAssumedInformation =
3264       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3265 
3266   // First, determine if we can change an invoke to a call assuming the
3267   // callee is nounwind. This is not possible if the personality of the
3268   // function allows to catch asynchronous exceptions.
3269   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3270     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3271   } else {
3272     const IRPosition &IPos = IRPosition::callsite_function(II);
3273     const auto &AANoUnw =
3274         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3275     if (AANoUnw.isAssumedNoUnwind()) {
3276       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3277     } else {
3278       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3279     }
3280   }
3281   return UsedAssumedInformation;
3282 }
3283 
3284 static bool
3285 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3286                         AbstractAttribute &AA,
3287                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3288   bool UsedAssumedInformation = false;
3289   if (BI.getNumSuccessors() == 1) {
3290     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3291   } else {
3292     Optional<Constant *> C =
3293         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3294     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3295       // No value yet, assume both edges are dead.
3296     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3297       const BasicBlock *SuccBB =
3298           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3299       AliveSuccessors.push_back(&SuccBB->front());
3300     } else {
3301       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3302       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3303       UsedAssumedInformation = false;
3304     }
3305   }
3306   return UsedAssumedInformation;
3307 }
3308 
3309 static bool
3310 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3311                         AbstractAttribute &AA,
3312                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3313   bool UsedAssumedInformation = false;
3314   Optional<Constant *> C =
3315       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3316   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3317     // No value yet, assume all edges are dead.
3318   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3319     for (auto &CaseIt : SI.cases()) {
3320       if (CaseIt.getCaseValue() == C.getValue()) {
3321         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3322         return UsedAssumedInformation;
3323       }
3324     }
3325     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3326     return UsedAssumedInformation;
3327   } else {
3328     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3329       AliveSuccessors.push_back(&SuccBB->front());
3330   }
3331   return UsedAssumedInformation;
3332 }
3333 
3334 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3335   ChangeStatus Change = ChangeStatus::UNCHANGED;
3336 
3337   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3338                     << getAnchorScope()->size() << "] BBs and "
3339                     << ToBeExploredFrom.size() << " exploration points and "
3340                     << KnownDeadEnds.size() << " known dead ends\n");
3341 
3342   // Copy and clear the list of instructions we need to explore from. It is
3343   // refilled with instructions the next update has to look at.
3344   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3345                                                ToBeExploredFrom.end());
3346   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3347 
3348   SmallVector<const Instruction *, 8> AliveSuccessors;
3349   while (!Worklist.empty()) {
3350     const Instruction *I = Worklist.pop_back_val();
3351     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3352 
3353     // Fast forward for uninteresting instructions. We could look for UB here
3354     // though.
3355     while (!I->isTerminator() && !isa<CallBase>(I)) {
3356       Change = ChangeStatus::CHANGED;
3357       I = I->getNextNode();
3358     }
3359 
3360     AliveSuccessors.clear();
3361 
3362     bool UsedAssumedInformation = false;
3363     switch (I->getOpcode()) {
3364     // TODO: look for (assumed) UB to backwards propagate "deadness".
3365     default:
3366       assert(I->isTerminator() &&
3367              "Expected non-terminators to be handled already!");
3368       for (const BasicBlock *SuccBB : successors(I->getParent()))
3369         AliveSuccessors.push_back(&SuccBB->front());
3370       break;
3371     case Instruction::Call:
3372       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3373                                                        *this, AliveSuccessors);
3374       break;
3375     case Instruction::Invoke:
3376       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3377                                                        *this, AliveSuccessors);
3378       break;
3379     case Instruction::Br:
3380       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3381                                                        *this, AliveSuccessors);
3382       break;
3383     case Instruction::Switch:
3384       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3385                                                        *this, AliveSuccessors);
3386       break;
3387     }
3388 
3389     if (UsedAssumedInformation) {
3390       NewToBeExploredFrom.insert(I);
3391     } else {
3392       Change = ChangeStatus::CHANGED;
3393       if (AliveSuccessors.empty() ||
3394           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3395         KnownDeadEnds.insert(I);
3396     }
3397 
3398     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3399                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3400                       << UsedAssumedInformation << "\n");
3401 
3402     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3403       if (!I->isTerminator()) {
3404         assert(AliveSuccessors.size() == 1 &&
3405                "Non-terminator expected to have a single successor!");
3406         Worklist.push_back(AliveSuccessor);
3407       } else {
3408         // record the assumed live edge
3409         AssumedLiveEdges.insert(
3410             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3411         if (assumeLive(A, *AliveSuccessor->getParent()))
3412           Worklist.push_back(AliveSuccessor);
3413       }
3414     }
3415   }
3416 
3417   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3418 
3419   // If we know everything is live there is no need to query for liveness.
3420   // Instead, indicating a pessimistic fixpoint will cause the state to be
3421   // "invalid" and all queries to be answered conservatively without lookups.
3422   // To be in this state we have to (1) finished the exploration and (3) not
3423   // discovered any non-trivial dead end and (2) not ruled unreachable code
3424   // dead.
3425   if (ToBeExploredFrom.empty() &&
3426       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3427       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3428         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3429       }))
3430     return indicatePessimisticFixpoint();
3431   return Change;
3432 }
3433 
3434 /// Liveness information for a call sites.
3435 struct AAIsDeadCallSite final : AAIsDeadFunction {
3436   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3437       : AAIsDeadFunction(IRP, A) {}
3438 
3439   /// See AbstractAttribute::initialize(...).
3440   void initialize(Attributor &A) override {
3441     // TODO: Once we have call site specific value information we can provide
3442     //       call site specific liveness information and then it makes
3443     //       sense to specialize attributes for call sites instead of
3444     //       redirecting requests to the callee.
3445     llvm_unreachable("Abstract attributes for liveness are not "
3446                      "supported for call sites yet!");
3447   }
3448 
3449   /// See AbstractAttribute::updateImpl(...).
3450   ChangeStatus updateImpl(Attributor &A) override {
3451     return indicatePessimisticFixpoint();
3452   }
3453 
3454   /// See AbstractAttribute::trackStatistics()
3455   void trackStatistics() const override {}
3456 };
3457 
3458 /// -------------------- Dereferenceable Argument Attribute --------------------
3459 
3460 template <>
3461 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3462                                                      const DerefState &R) {
3463   ChangeStatus CS0 =
3464       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3465   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3466   return CS0 | CS1;
3467 }
3468 
3469 struct AADereferenceableImpl : AADereferenceable {
3470   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3471       : AADereferenceable(IRP, A) {}
3472   using StateType = DerefState;
3473 
3474   /// See AbstractAttribute::initialize(...).
3475   void initialize(Attributor &A) override {
3476     SmallVector<Attribute, 4> Attrs;
3477     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3478              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3479     for (const Attribute &Attr : Attrs)
3480       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3481 
3482     const IRPosition &IRP = this->getIRPosition();
3483     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3484 
3485     bool CanBeNull, CanBeFreed;
3486     takeKnownDerefBytesMaximum(
3487         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3488             A.getDataLayout(), CanBeNull, CanBeFreed));
3489 
3490     bool IsFnInterface = IRP.isFnInterfaceKind();
3491     Function *FnScope = IRP.getAnchorScope();
3492     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3493       indicatePessimisticFixpoint();
3494       return;
3495     }
3496 
3497     if (Instruction *CtxI = getCtxI())
3498       followUsesInMBEC(*this, A, getState(), *CtxI);
3499   }
3500 
3501   /// See AbstractAttribute::getState()
3502   /// {
3503   StateType &getState() override { return *this; }
3504   const StateType &getState() const override { return *this; }
3505   /// }
3506 
3507   /// Helper function for collecting accessed bytes in must-be-executed-context
3508   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3509                               DerefState &State) {
3510     const Value *UseV = U->get();
3511     if (!UseV->getType()->isPointerTy())
3512       return;
3513 
3514     Type *PtrTy = UseV->getType();
3515     const DataLayout &DL = A.getDataLayout();
3516     int64_t Offset;
3517     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3518             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3519       if (Base == &getAssociatedValue() &&
3520           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3521         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3522         State.addAccessedBytes(Offset, Size);
3523       }
3524     }
3525   }
3526 
3527   /// See followUsesInMBEC
3528   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3529                        AADereferenceable::StateType &State) {
3530     bool IsNonNull = false;
3531     bool TrackUse = false;
3532     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3533         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3534     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3535                       << " for instruction " << *I << "\n");
3536 
3537     addAccessedBytesForUse(A, U, I, State);
3538     State.takeKnownDerefBytesMaximum(DerefBytes);
3539     return TrackUse;
3540   }
3541 
3542   /// See AbstractAttribute::manifest(...).
3543   ChangeStatus manifest(Attributor &A) override {
3544     ChangeStatus Change = AADereferenceable::manifest(A);
3545     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3546       removeAttrs({Attribute::DereferenceableOrNull});
3547       return ChangeStatus::CHANGED;
3548     }
3549     return Change;
3550   }
3551 
3552   void getDeducedAttributes(LLVMContext &Ctx,
3553                             SmallVectorImpl<Attribute> &Attrs) const override {
3554     // TODO: Add *_globally support
3555     if (isAssumedNonNull())
3556       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3557           Ctx, getAssumedDereferenceableBytes()));
3558     else
3559       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3560           Ctx, getAssumedDereferenceableBytes()));
3561   }
3562 
3563   /// See AbstractAttribute::getAsStr().
3564   const std::string getAsStr() const override {
3565     if (!getAssumedDereferenceableBytes())
3566       return "unknown-dereferenceable";
3567     return std::string("dereferenceable") +
3568            (isAssumedNonNull() ? "" : "_or_null") +
3569            (isAssumedGlobal() ? "_globally" : "") + "<" +
3570            std::to_string(getKnownDereferenceableBytes()) + "-" +
3571            std::to_string(getAssumedDereferenceableBytes()) + ">";
3572   }
3573 };
3574 
3575 /// Dereferenceable attribute for a floating value.
3576 struct AADereferenceableFloating : AADereferenceableImpl {
3577   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3578       : AADereferenceableImpl(IRP, A) {}
3579 
3580   /// See AbstractAttribute::updateImpl(...).
3581   ChangeStatus updateImpl(Attributor &A) override {
3582     const DataLayout &DL = A.getDataLayout();
3583 
3584     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3585                             bool Stripped) -> bool {
3586       unsigned IdxWidth =
3587           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3588       APInt Offset(IdxWidth, 0);
3589       const Value *Base =
3590           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3591 
3592       const auto &AA = A.getAAFor<AADereferenceable>(
3593           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3594       int64_t DerefBytes = 0;
3595       if (!Stripped && this == &AA) {
3596         // Use IR information if we did not strip anything.
3597         // TODO: track globally.
3598         bool CanBeNull, CanBeFreed;
3599         DerefBytes =
3600             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3601         T.GlobalState.indicatePessimisticFixpoint();
3602       } else {
3603         const DerefState &DS = AA.getState();
3604         DerefBytes = DS.DerefBytesState.getAssumed();
3605         T.GlobalState &= DS.GlobalState;
3606       }
3607 
3608       // For now we do not try to "increase" dereferenceability due to negative
3609       // indices as we first have to come up with code to deal with loops and
3610       // for overflows of the dereferenceable bytes.
3611       int64_t OffsetSExt = Offset.getSExtValue();
3612       if (OffsetSExt < 0)
3613         OffsetSExt = 0;
3614 
3615       T.takeAssumedDerefBytesMinimum(
3616           std::max(int64_t(0), DerefBytes - OffsetSExt));
3617 
3618       if (this == &AA) {
3619         if (!Stripped) {
3620           // If nothing was stripped IR information is all we got.
3621           T.takeKnownDerefBytesMaximum(
3622               std::max(int64_t(0), DerefBytes - OffsetSExt));
3623           T.indicatePessimisticFixpoint();
3624         } else if (OffsetSExt > 0) {
3625           // If something was stripped but there is circular reasoning we look
3626           // for the offset. If it is positive we basically decrease the
3627           // dereferenceable bytes in a circluar loop now, which will simply
3628           // drive them down to the known value in a very slow way which we
3629           // can accelerate.
3630           T.indicatePessimisticFixpoint();
3631         }
3632       }
3633 
3634       return T.isValidState();
3635     };
3636 
3637     DerefState T;
3638     if (!genericValueTraversal<AADereferenceable, DerefState>(
3639             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3640       return indicatePessimisticFixpoint();
3641 
3642     return clampStateAndIndicateChange(getState(), T);
3643   }
3644 
3645   /// See AbstractAttribute::trackStatistics()
3646   void trackStatistics() const override {
3647     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3648   }
3649 };
3650 
3651 /// Dereferenceable attribute for a return value.
3652 struct AADereferenceableReturned final
3653     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3654   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3655       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3656             IRP, A) {}
3657 
3658   /// See AbstractAttribute::trackStatistics()
3659   void trackStatistics() const override {
3660     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3661   }
3662 };
3663 
3664 /// Dereferenceable attribute for an argument
3665 struct AADereferenceableArgument final
3666     : AAArgumentFromCallSiteArguments<AADereferenceable,
3667                                       AADereferenceableImpl> {
3668   using Base =
3669       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3670   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3671       : Base(IRP, A) {}
3672 
3673   /// See AbstractAttribute::trackStatistics()
3674   void trackStatistics() const override {
3675     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3676   }
3677 };
3678 
3679 /// Dereferenceable attribute for a call site argument.
3680 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3681   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3682       : AADereferenceableFloating(IRP, A) {}
3683 
3684   /// See AbstractAttribute::trackStatistics()
3685   void trackStatistics() const override {
3686     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3687   }
3688 };
3689 
3690 /// Dereferenceable attribute deduction for a call site return value.
3691 struct AADereferenceableCallSiteReturned final
3692     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3693   using Base =
3694       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3695   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3696       : Base(IRP, A) {}
3697 
3698   /// See AbstractAttribute::trackStatistics()
3699   void trackStatistics() const override {
3700     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3701   }
3702 };
3703 
3704 // ------------------------ Align Argument Attribute ------------------------
3705 
3706 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3707                                     Value &AssociatedValue, const Use *U,
3708                                     const Instruction *I, bool &TrackUse) {
3709   // We need to follow common pointer manipulation uses to the accesses they
3710   // feed into.
3711   if (isa<CastInst>(I)) {
3712     // Follow all but ptr2int casts.
3713     TrackUse = !isa<PtrToIntInst>(I);
3714     return 0;
3715   }
3716   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3717     if (GEP->hasAllConstantIndices())
3718       TrackUse = true;
3719     return 0;
3720   }
3721 
3722   MaybeAlign MA;
3723   if (const auto *CB = dyn_cast<CallBase>(I)) {
3724     if (CB->isBundleOperand(U) || CB->isCallee(U))
3725       return 0;
3726 
3727     unsigned ArgNo = CB->getArgOperandNo(U);
3728     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3729     // As long as we only use known information there is no need to track
3730     // dependences here.
3731     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3732     MA = MaybeAlign(AlignAA.getKnownAlign());
3733   }
3734 
3735   const DataLayout &DL = A.getDataLayout();
3736   const Value *UseV = U->get();
3737   if (auto *SI = dyn_cast<StoreInst>(I)) {
3738     if (SI->getPointerOperand() == UseV)
3739       MA = SI->getAlign();
3740   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3741     if (LI->getPointerOperand() == UseV)
3742       MA = LI->getAlign();
3743   }
3744 
3745   if (!MA || *MA <= QueryingAA.getKnownAlign())
3746     return 0;
3747 
3748   unsigned Alignment = MA->value();
3749   int64_t Offset;
3750 
3751   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3752     if (Base == &AssociatedValue) {
3753       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3754       // So we can say that the maximum power of two which is a divisor of
3755       // gcd(Offset, Alignment) is an alignment.
3756 
3757       uint32_t gcd =
3758           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3759       Alignment = llvm::PowerOf2Floor(gcd);
3760     }
3761   }
3762 
3763   return Alignment;
3764 }
3765 
3766 struct AAAlignImpl : AAAlign {
3767   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3768 
3769   /// See AbstractAttribute::initialize(...).
3770   void initialize(Attributor &A) override {
3771     SmallVector<Attribute, 4> Attrs;
3772     getAttrs({Attribute::Alignment}, Attrs);
3773     for (const Attribute &Attr : Attrs)
3774       takeKnownMaximum(Attr.getValueAsInt());
3775 
3776     Value &V = getAssociatedValue();
3777     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3778     //       use of the function pointer. This was caused by D73131. We want to
3779     //       avoid this for function pointers especially because we iterate
3780     //       their uses and int2ptr is not handled. It is not a correctness
3781     //       problem though!
3782     if (!V.getType()->getPointerElementType()->isFunctionTy())
3783       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3784 
3785     if (getIRPosition().isFnInterfaceKind() &&
3786         (!getAnchorScope() ||
3787          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3788       indicatePessimisticFixpoint();
3789       return;
3790     }
3791 
3792     if (Instruction *CtxI = getCtxI())
3793       followUsesInMBEC(*this, A, getState(), *CtxI);
3794   }
3795 
3796   /// See AbstractAttribute::manifest(...).
3797   ChangeStatus manifest(Attributor &A) override {
3798     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3799 
3800     // Check for users that allow alignment annotations.
3801     Value &AssociatedValue = getAssociatedValue();
3802     for (const Use &U : AssociatedValue.uses()) {
3803       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3804         if (SI->getPointerOperand() == &AssociatedValue)
3805           if (SI->getAlignment() < getAssumedAlign()) {
3806             STATS_DECLTRACK(AAAlign, Store,
3807                             "Number of times alignment added to a store");
3808             SI->setAlignment(Align(getAssumedAlign()));
3809             LoadStoreChanged = ChangeStatus::CHANGED;
3810           }
3811       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3812         if (LI->getPointerOperand() == &AssociatedValue)
3813           if (LI->getAlignment() < getAssumedAlign()) {
3814             LI->setAlignment(Align(getAssumedAlign()));
3815             STATS_DECLTRACK(AAAlign, Load,
3816                             "Number of times alignment added to a load");
3817             LoadStoreChanged = ChangeStatus::CHANGED;
3818           }
3819       }
3820     }
3821 
3822     ChangeStatus Changed = AAAlign::manifest(A);
3823 
3824     Align InheritAlign =
3825         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3826     if (InheritAlign >= getAssumedAlign())
3827       return LoadStoreChanged;
3828     return Changed | LoadStoreChanged;
3829   }
3830 
3831   // TODO: Provide a helper to determine the implied ABI alignment and check in
3832   //       the existing manifest method and a new one for AAAlignImpl that value
3833   //       to avoid making the alignment explicit if it did not improve.
3834 
3835   /// See AbstractAttribute::getDeducedAttributes
3836   virtual void
3837   getDeducedAttributes(LLVMContext &Ctx,
3838                        SmallVectorImpl<Attribute> &Attrs) const override {
3839     if (getAssumedAlign() > 1)
3840       Attrs.emplace_back(
3841           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3842   }
3843 
3844   /// See followUsesInMBEC
3845   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3846                        AAAlign::StateType &State) {
3847     bool TrackUse = false;
3848 
3849     unsigned int KnownAlign =
3850         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3851     State.takeKnownMaximum(KnownAlign);
3852 
3853     return TrackUse;
3854   }
3855 
3856   /// See AbstractAttribute::getAsStr().
3857   const std::string getAsStr() const override {
3858     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3859                                 "-" + std::to_string(getAssumedAlign()) + ">")
3860                              : "unknown-align";
3861   }
3862 };
3863 
3864 /// Align attribute for a floating value.
3865 struct AAAlignFloating : AAAlignImpl {
3866   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3867 
3868   /// See AbstractAttribute::updateImpl(...).
3869   ChangeStatus updateImpl(Attributor &A) override {
3870     const DataLayout &DL = A.getDataLayout();
3871 
3872     auto VisitValueCB = [&](Value &V, const Instruction *,
3873                             AAAlign::StateType &T, bool Stripped) -> bool {
3874       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3875                                            DepClassTy::REQUIRED);
3876       if (!Stripped && this == &AA) {
3877         int64_t Offset;
3878         unsigned Alignment = 1;
3879         if (const Value *Base =
3880                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3881           Align PA = Base->getPointerAlignment(DL);
3882           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3883           // So we can say that the maximum power of two which is a divisor of
3884           // gcd(Offset, Alignment) is an alignment.
3885 
3886           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3887                                                uint32_t(PA.value()));
3888           Alignment = llvm::PowerOf2Floor(gcd);
3889         } else {
3890           Alignment = V.getPointerAlignment(DL).value();
3891         }
3892         // Use only IR information if we did not strip anything.
3893         T.takeKnownMaximum(Alignment);
3894         T.indicatePessimisticFixpoint();
3895       } else {
3896         // Use abstract attribute information.
3897         const AAAlign::StateType &DS = AA.getState();
3898         T ^= DS;
3899       }
3900       return T.isValidState();
3901     };
3902 
3903     StateType T;
3904     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3905                                                    VisitValueCB, getCtxI()))
3906       return indicatePessimisticFixpoint();
3907 
3908     // TODO: If we know we visited all incoming values, thus no are assumed
3909     // dead, we can take the known information from the state T.
3910     return clampStateAndIndicateChange(getState(), T);
3911   }
3912 
3913   /// See AbstractAttribute::trackStatistics()
3914   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3915 };
3916 
3917 /// Align attribute for function return value.
3918 struct AAAlignReturned final
3919     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3920   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3921   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3922 
3923   /// See AbstractAttribute::initialize(...).
3924   void initialize(Attributor &A) override {
3925     Base::initialize(A);
3926     Function *F = getAssociatedFunction();
3927     if (!F || F->isDeclaration())
3928       indicatePessimisticFixpoint();
3929   }
3930 
3931   /// See AbstractAttribute::trackStatistics()
3932   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3933 };
3934 
3935 /// Align attribute for function argument.
3936 struct AAAlignArgument final
3937     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3938   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3939   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3940 
3941   /// See AbstractAttribute::manifest(...).
3942   ChangeStatus manifest(Attributor &A) override {
3943     // If the associated argument is involved in a must-tail call we give up
3944     // because we would need to keep the argument alignments of caller and
3945     // callee in-sync. Just does not seem worth the trouble right now.
3946     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3947       return ChangeStatus::UNCHANGED;
3948     return Base::manifest(A);
3949   }
3950 
3951   /// See AbstractAttribute::trackStatistics()
3952   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3953 };
3954 
3955 struct AAAlignCallSiteArgument final : AAAlignFloating {
3956   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3957       : AAAlignFloating(IRP, A) {}
3958 
3959   /// See AbstractAttribute::manifest(...).
3960   ChangeStatus manifest(Attributor &A) override {
3961     // If the associated argument is involved in a must-tail call we give up
3962     // because we would need to keep the argument alignments of caller and
3963     // callee in-sync. Just does not seem worth the trouble right now.
3964     if (Argument *Arg = getAssociatedArgument())
3965       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3966         return ChangeStatus::UNCHANGED;
3967     ChangeStatus Changed = AAAlignImpl::manifest(A);
3968     Align InheritAlign =
3969         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3970     if (InheritAlign >= getAssumedAlign())
3971       Changed = ChangeStatus::UNCHANGED;
3972     return Changed;
3973   }
3974 
3975   /// See AbstractAttribute::updateImpl(Attributor &A).
3976   ChangeStatus updateImpl(Attributor &A) override {
3977     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3978     if (Argument *Arg = getAssociatedArgument()) {
3979       // We only take known information from the argument
3980       // so we do not need to track a dependence.
3981       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3982           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3983       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3984     }
3985     return Changed;
3986   }
3987 
3988   /// See AbstractAttribute::trackStatistics()
3989   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3990 };
3991 
3992 /// Align attribute deduction for a call site return value.
3993 struct AAAlignCallSiteReturned final
3994     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3995   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3996   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3997       : Base(IRP, A) {}
3998 
3999   /// See AbstractAttribute::initialize(...).
4000   void initialize(Attributor &A) override {
4001     Base::initialize(A);
4002     Function *F = getAssociatedFunction();
4003     if (!F || F->isDeclaration())
4004       indicatePessimisticFixpoint();
4005   }
4006 
4007   /// See AbstractAttribute::trackStatistics()
4008   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4009 };
4010 
4011 /// ------------------ Function No-Return Attribute ----------------------------
4012 struct AANoReturnImpl : public AANoReturn {
4013   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4014 
4015   /// See AbstractAttribute::initialize(...).
4016   void initialize(Attributor &A) override {
4017     AANoReturn::initialize(A);
4018     Function *F = getAssociatedFunction();
4019     if (!F || F->isDeclaration())
4020       indicatePessimisticFixpoint();
4021   }
4022 
4023   /// See AbstractAttribute::getAsStr().
4024   const std::string getAsStr() const override {
4025     return getAssumed() ? "noreturn" : "may-return";
4026   }
4027 
4028   /// See AbstractAttribute::updateImpl(Attributor &A).
4029   virtual ChangeStatus updateImpl(Attributor &A) override {
4030     auto CheckForNoReturn = [](Instruction &) { return false; };
4031     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4032                                    {(unsigned)Instruction::Ret}))
4033       return indicatePessimisticFixpoint();
4034     return ChangeStatus::UNCHANGED;
4035   }
4036 };
4037 
4038 struct AANoReturnFunction final : AANoReturnImpl {
4039   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4040       : AANoReturnImpl(IRP, A) {}
4041 
4042   /// See AbstractAttribute::trackStatistics()
4043   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4044 };
4045 
4046 /// NoReturn attribute deduction for a call sites.
4047 struct AANoReturnCallSite final : AANoReturnImpl {
4048   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4049       : AANoReturnImpl(IRP, A) {}
4050 
4051   /// See AbstractAttribute::initialize(...).
4052   void initialize(Attributor &A) override {
4053     AANoReturnImpl::initialize(A);
4054     if (Function *F = getAssociatedFunction()) {
4055       const IRPosition &FnPos = IRPosition::function(*F);
4056       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4057       if (!FnAA.isAssumedNoReturn())
4058         indicatePessimisticFixpoint();
4059     }
4060   }
4061 
4062   /// See AbstractAttribute::updateImpl(...).
4063   ChangeStatus updateImpl(Attributor &A) override {
4064     // TODO: Once we have call site specific value information we can provide
4065     //       call site specific liveness information and then it makes
4066     //       sense to specialize attributes for call sites arguments instead of
4067     //       redirecting requests to the callee argument.
4068     Function *F = getAssociatedFunction();
4069     const IRPosition &FnPos = IRPosition::function(*F);
4070     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4071     return clampStateAndIndicateChange(getState(), FnAA.getState());
4072   }
4073 
4074   /// See AbstractAttribute::trackStatistics()
4075   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4076 };
4077 
4078 /// ----------------------- Variable Capturing ---------------------------------
4079 
4080 /// A class to hold the state of for no-capture attributes.
4081 struct AANoCaptureImpl : public AANoCapture {
4082   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4083 
4084   /// See AbstractAttribute::initialize(...).
4085   void initialize(Attributor &A) override {
4086     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4087       indicateOptimisticFixpoint();
4088       return;
4089     }
4090     Function *AnchorScope = getAnchorScope();
4091     if (isFnInterfaceKind() &&
4092         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4093       indicatePessimisticFixpoint();
4094       return;
4095     }
4096 
4097     // You cannot "capture" null in the default address space.
4098     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4099         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4100       indicateOptimisticFixpoint();
4101       return;
4102     }
4103 
4104     const Function *F =
4105         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4106 
4107     // Check what state the associated function can actually capture.
4108     if (F)
4109       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4110     else
4111       indicatePessimisticFixpoint();
4112   }
4113 
4114   /// See AbstractAttribute::updateImpl(...).
4115   ChangeStatus updateImpl(Attributor &A) override;
4116 
4117   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4118   virtual void
4119   getDeducedAttributes(LLVMContext &Ctx,
4120                        SmallVectorImpl<Attribute> &Attrs) const override {
4121     if (!isAssumedNoCaptureMaybeReturned())
4122       return;
4123 
4124     if (isArgumentPosition()) {
4125       if (isAssumedNoCapture())
4126         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4127       else if (ManifestInternal)
4128         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4129     }
4130   }
4131 
4132   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4133   /// depending on the ability of the function associated with \p IRP to capture
4134   /// state in memory and through "returning/throwing", respectively.
4135   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4136                                                    const Function &F,
4137                                                    BitIntegerState &State) {
4138     // TODO: Once we have memory behavior attributes we should use them here.
4139 
4140     // If we know we cannot communicate or write to memory, we do not care about
4141     // ptr2int anymore.
4142     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4143         F.getReturnType()->isVoidTy()) {
4144       State.addKnownBits(NO_CAPTURE);
4145       return;
4146     }
4147 
4148     // A function cannot capture state in memory if it only reads memory, it can
4149     // however return/throw state and the state might be influenced by the
4150     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4151     if (F.onlyReadsMemory())
4152       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4153 
4154     // A function cannot communicate state back if it does not through
4155     // exceptions and doesn not return values.
4156     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4157       State.addKnownBits(NOT_CAPTURED_IN_RET);
4158 
4159     // Check existing "returned" attributes.
4160     int ArgNo = IRP.getCalleeArgNo();
4161     if (F.doesNotThrow() && ArgNo >= 0) {
4162       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4163         if (F.hasParamAttribute(u, Attribute::Returned)) {
4164           if (u == unsigned(ArgNo))
4165             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4166           else if (F.onlyReadsMemory())
4167             State.addKnownBits(NO_CAPTURE);
4168           else
4169             State.addKnownBits(NOT_CAPTURED_IN_RET);
4170           break;
4171         }
4172     }
4173   }
4174 
4175   /// See AbstractState::getAsStr().
4176   const std::string getAsStr() const override {
4177     if (isKnownNoCapture())
4178       return "known not-captured";
4179     if (isAssumedNoCapture())
4180       return "assumed not-captured";
4181     if (isKnownNoCaptureMaybeReturned())
4182       return "known not-captured-maybe-returned";
4183     if (isAssumedNoCaptureMaybeReturned())
4184       return "assumed not-captured-maybe-returned";
4185     return "assumed-captured";
4186   }
4187 };
4188 
4189 /// Attributor-aware capture tracker.
4190 struct AACaptureUseTracker final : public CaptureTracker {
4191 
4192   /// Create a capture tracker that can lookup in-flight abstract attributes
4193   /// through the Attributor \p A.
4194   ///
4195   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4196   /// search is stopped. If a use leads to a return instruction,
4197   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4198   /// If a use leads to a ptr2int which may capture the value,
4199   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4200   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4201   /// set. All values in \p PotentialCopies are later tracked as well. For every
4202   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4203   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4204   /// conservatively set to true.
4205   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4206                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4207                       SmallVectorImpl<const Value *> &PotentialCopies,
4208                       unsigned &RemainingUsesToExplore)
4209       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4210         PotentialCopies(PotentialCopies),
4211         RemainingUsesToExplore(RemainingUsesToExplore) {}
4212 
4213   /// Determine if \p V maybe captured. *Also updates the state!*
4214   bool valueMayBeCaptured(const Value *V) {
4215     if (V->getType()->isPointerTy()) {
4216       PointerMayBeCaptured(V, this);
4217     } else {
4218       State.indicatePessimisticFixpoint();
4219     }
4220     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4221   }
4222 
4223   /// See CaptureTracker::tooManyUses().
4224   void tooManyUses() override {
4225     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4226   }
4227 
4228   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4229     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4230       return true;
4231     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4232         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4233     return DerefAA.getAssumedDereferenceableBytes();
4234   }
4235 
4236   /// See CaptureTracker::captured(...).
4237   bool captured(const Use *U) override {
4238     Instruction *UInst = cast<Instruction>(U->getUser());
4239     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4240                       << "\n");
4241 
4242     // Because we may reuse the tracker multiple times we keep track of the
4243     // number of explored uses ourselves as well.
4244     if (RemainingUsesToExplore-- == 0) {
4245       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4246       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4247                           /* Return */ true);
4248     }
4249 
4250     // Deal with ptr2int by following uses.
4251     if (isa<PtrToIntInst>(UInst)) {
4252       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4253       return valueMayBeCaptured(UInst);
4254     }
4255 
4256     // Explicitly catch return instructions.
4257     if (isa<ReturnInst>(UInst))
4258       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4259                           /* Return */ true);
4260 
4261     // For now we only use special logic for call sites. However, the tracker
4262     // itself knows about a lot of other non-capturing cases already.
4263     auto *CB = dyn_cast<CallBase>(UInst);
4264     if (!CB || !CB->isArgOperand(U))
4265       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4266                           /* Return */ true);
4267 
4268     unsigned ArgNo = CB->getArgOperandNo(U);
4269     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4270     // If we have a abstract no-capture attribute for the argument we can use
4271     // it to justify a non-capture attribute here. This allows recursion!
4272     auto &ArgNoCaptureAA =
4273         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4274     if (ArgNoCaptureAA.isAssumedNoCapture())
4275       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4276                           /* Return */ false);
4277     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4278       addPotentialCopy(*CB);
4279       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4280                           /* Return */ false);
4281     }
4282 
4283     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4284     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4285                         /* Return */ true);
4286   }
4287 
4288   /// Register \p CS as potential copy of the value we are checking.
4289   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4290 
4291   /// See CaptureTracker::shouldExplore(...).
4292   bool shouldExplore(const Use *U) override {
4293     // Check liveness and ignore droppable users.
4294     return !U->getUser()->isDroppable() &&
4295            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4296   }
4297 
4298   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4299   /// \p CapturedInRet, then return the appropriate value for use in the
4300   /// CaptureTracker::captured() interface.
4301   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4302                     bool CapturedInRet) {
4303     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4304                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4305     if (CapturedInMem)
4306       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4307     if (CapturedInInt)
4308       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4309     if (CapturedInRet)
4310       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4311     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4312   }
4313 
4314 private:
4315   /// The attributor providing in-flight abstract attributes.
4316   Attributor &A;
4317 
4318   /// The abstract attribute currently updated.
4319   AANoCapture &NoCaptureAA;
4320 
4321   /// The abstract liveness state.
4322   const AAIsDead &IsDeadAA;
4323 
4324   /// The state currently updated.
4325   AANoCapture::StateType &State;
4326 
4327   /// Set of potential copies of the tracked value.
4328   SmallVectorImpl<const Value *> &PotentialCopies;
4329 
4330   /// Global counter to limit the number of explored uses.
4331   unsigned &RemainingUsesToExplore;
4332 };
4333 
4334 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4335   const IRPosition &IRP = getIRPosition();
4336   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4337                                         : &IRP.getAssociatedValue();
4338   if (!V)
4339     return indicatePessimisticFixpoint();
4340 
4341   const Function *F =
4342       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4343   assert(F && "Expected a function!");
4344   const IRPosition &FnPos = IRPosition::function(*F);
4345   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4346 
4347   AANoCapture::StateType T;
4348 
4349   // Readonly means we cannot capture through memory.
4350   const auto &FnMemAA =
4351       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4352   if (FnMemAA.isAssumedReadOnly()) {
4353     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4354     if (FnMemAA.isKnownReadOnly())
4355       addKnownBits(NOT_CAPTURED_IN_MEM);
4356     else
4357       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4358   }
4359 
4360   // Make sure all returned values are different than the underlying value.
4361   // TODO: we could do this in a more sophisticated way inside
4362   //       AAReturnedValues, e.g., track all values that escape through returns
4363   //       directly somehow.
4364   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4365     bool SeenConstant = false;
4366     for (auto &It : RVAA.returned_values()) {
4367       if (isa<Constant>(It.first)) {
4368         if (SeenConstant)
4369           return false;
4370         SeenConstant = true;
4371       } else if (!isa<Argument>(It.first) ||
4372                  It.first == getAssociatedArgument())
4373         return false;
4374     }
4375     return true;
4376   };
4377 
4378   const auto &NoUnwindAA =
4379       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4380   if (NoUnwindAA.isAssumedNoUnwind()) {
4381     bool IsVoidTy = F->getReturnType()->isVoidTy();
4382     const AAReturnedValues *RVAA =
4383         IsVoidTy ? nullptr
4384                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4385 
4386                                                  DepClassTy::OPTIONAL);
4387     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4388       T.addKnownBits(NOT_CAPTURED_IN_RET);
4389       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4390         return ChangeStatus::UNCHANGED;
4391       if (NoUnwindAA.isKnownNoUnwind() &&
4392           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4393         addKnownBits(NOT_CAPTURED_IN_RET);
4394         if (isKnown(NOT_CAPTURED_IN_MEM))
4395           return indicateOptimisticFixpoint();
4396       }
4397     }
4398   }
4399 
4400   // Use the CaptureTracker interface and logic with the specialized tracker,
4401   // defined in AACaptureUseTracker, that can look at in-flight abstract
4402   // attributes and directly updates the assumed state.
4403   SmallVector<const Value *, 4> PotentialCopies;
4404   unsigned RemainingUsesToExplore =
4405       getDefaultMaxUsesToExploreForCaptureTracking();
4406   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4407                               RemainingUsesToExplore);
4408 
4409   // Check all potential copies of the associated value until we can assume
4410   // none will be captured or we have to assume at least one might be.
4411   unsigned Idx = 0;
4412   PotentialCopies.push_back(V);
4413   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4414     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4415 
4416   AANoCapture::StateType &S = getState();
4417   auto Assumed = S.getAssumed();
4418   S.intersectAssumedBits(T.getAssumed());
4419   if (!isAssumedNoCaptureMaybeReturned())
4420     return indicatePessimisticFixpoint();
4421   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4422                                    : ChangeStatus::CHANGED;
4423 }
4424 
4425 /// NoCapture attribute for function arguments.
4426 struct AANoCaptureArgument final : AANoCaptureImpl {
4427   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4428       : AANoCaptureImpl(IRP, A) {}
4429 
4430   /// See AbstractAttribute::trackStatistics()
4431   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4432 };
4433 
4434 /// NoCapture attribute for call site arguments.
4435 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4436   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4437       : AANoCaptureImpl(IRP, A) {}
4438 
4439   /// See AbstractAttribute::initialize(...).
4440   void initialize(Attributor &A) override {
4441     if (Argument *Arg = getAssociatedArgument())
4442       if (Arg->hasByValAttr())
4443         indicateOptimisticFixpoint();
4444     AANoCaptureImpl::initialize(A);
4445   }
4446 
4447   /// See AbstractAttribute::updateImpl(...).
4448   ChangeStatus updateImpl(Attributor &A) override {
4449     // TODO: Once we have call site specific value information we can provide
4450     //       call site specific liveness information and then it makes
4451     //       sense to specialize attributes for call sites arguments instead of
4452     //       redirecting requests to the callee argument.
4453     Argument *Arg = getAssociatedArgument();
4454     if (!Arg)
4455       return indicatePessimisticFixpoint();
4456     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4457     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4458     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4459   }
4460 
4461   /// See AbstractAttribute::trackStatistics()
4462   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4463 };
4464 
4465 /// NoCapture attribute for floating values.
4466 struct AANoCaptureFloating final : AANoCaptureImpl {
4467   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4468       : AANoCaptureImpl(IRP, A) {}
4469 
4470   /// See AbstractAttribute::trackStatistics()
4471   void trackStatistics() const override {
4472     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4473   }
4474 };
4475 
4476 /// NoCapture attribute for function return value.
4477 struct AANoCaptureReturned final : AANoCaptureImpl {
4478   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4479       : AANoCaptureImpl(IRP, A) {
4480     llvm_unreachable("NoCapture is not applicable to function returns!");
4481   }
4482 
4483   /// See AbstractAttribute::initialize(...).
4484   void initialize(Attributor &A) override {
4485     llvm_unreachable("NoCapture is not applicable to function returns!");
4486   }
4487 
4488   /// See AbstractAttribute::updateImpl(...).
4489   ChangeStatus updateImpl(Attributor &A) override {
4490     llvm_unreachable("NoCapture is not applicable to function returns!");
4491   }
4492 
4493   /// See AbstractAttribute::trackStatistics()
4494   void trackStatistics() const override {}
4495 };
4496 
4497 /// NoCapture attribute deduction for a call site return value.
4498 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4499   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4500       : AANoCaptureImpl(IRP, A) {}
4501 
4502   /// See AbstractAttribute::initialize(...).
4503   void initialize(Attributor &A) override {
4504     const Function *F = getAnchorScope();
4505     // Check what state the associated function can actually capture.
4506     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4507   }
4508 
4509   /// See AbstractAttribute::trackStatistics()
4510   void trackStatistics() const override {
4511     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4512   }
4513 };
4514 
4515 /// ------------------ Value Simplify Attribute ----------------------------
4516 struct AAValueSimplifyImpl : AAValueSimplify {
4517   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4518       : AAValueSimplify(IRP, A) {}
4519 
4520   /// See AbstractAttribute::initialize(...).
4521   void initialize(Attributor &A) override {
4522     if (getAssociatedValue().getType()->isVoidTy())
4523       indicatePessimisticFixpoint();
4524   }
4525 
4526   /// See AbstractAttribute::getAsStr().
4527   const std::string getAsStr() const override {
4528     LLVM_DEBUG({
4529       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
4530       if (SimplifiedAssociatedValue)
4531         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
4532     });
4533     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4534                         : "not-simple";
4535   }
4536 
4537   /// See AbstractAttribute::trackStatistics()
4538   void trackStatistics() const override {}
4539 
4540   /// See AAValueSimplify::getAssumedSimplifiedValue()
4541   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4542     if (!getAssumed())
4543       return const_cast<Value *>(&getAssociatedValue());
4544     return SimplifiedAssociatedValue;
4545   }
4546 
4547   /// Helper function for querying AAValueSimplify and updating candicate.
4548   /// \param IRP The value position we are trying to unify with SimplifiedValue
4549   /// \param AccumulatedSimplifiedValue Current simplification result.
4550   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4551                              const IRPosition &IRP,
4552                              Optional<Value *> &AccumulatedSimplifiedValue) {
4553     // FIXME: Add a typecast support.
4554     bool UsedAssumedInformation = false;
4555     Optional<Value *> QueryingValueSimplified =
4556         A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
4557 
4558     if (!QueryingValueSimplified.hasValue())
4559       return true;
4560 
4561     if (!QueryingValueSimplified.getValue())
4562       return false;
4563 
4564     Value &QueryingValueSimplifiedUnwrapped =
4565         *QueryingValueSimplified.getValue();
4566 
4567     if (AccumulatedSimplifiedValue.hasValue() &&
4568         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4569         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4570       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4571     if (AccumulatedSimplifiedValue.hasValue() &&
4572         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4573       return true;
4574 
4575     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << IRP.getAssociatedValue()
4576                       << " is assumed to be "
4577                       << QueryingValueSimplifiedUnwrapped << "\n");
4578 
4579     AccumulatedSimplifiedValue = QueryingValueSimplified;
4580     return true;
4581   }
4582 
4583   /// Returns a candidate is found or not
4584   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4585     if (!getAssociatedValue().getType()->isIntegerTy())
4586       return false;
4587 
4588     // This will also pass the call base context.
4589     const auto &AA =
4590         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4591 
4592     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4593 
4594     if (!COpt.hasValue()) {
4595       SimplifiedAssociatedValue = llvm::None;
4596       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4597       return true;
4598     }
4599     if (auto *C = COpt.getValue()) {
4600       SimplifiedAssociatedValue = C;
4601       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4602       return true;
4603     }
4604     return false;
4605   }
4606 
4607   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4608     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4609       return true;
4610     if (askSimplifiedValueFor<AAPotentialValues>(A))
4611       return true;
4612     return false;
4613   }
4614 
4615   /// See AbstractAttribute::manifest(...).
4616   ChangeStatus manifest(Attributor &A) override {
4617     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4618 
4619     if (SimplifiedAssociatedValue.hasValue() &&
4620         !SimplifiedAssociatedValue.getValue())
4621       return Changed;
4622 
4623     Value &V = getAssociatedValue();
4624     auto *C = SimplifiedAssociatedValue.hasValue()
4625                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4626                   : UndefValue::get(V.getType());
4627     if (C && C != &V && !V.user_empty()) {
4628       Value *NewV = AA::getWithType(*C, *V.getType());
4629       // We can replace the AssociatedValue with the constant.
4630       if (NewV && NewV != &V) {
4631         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4632                           << " :: " << *this << "\n");
4633         if (A.changeValueAfterManifest(V, *NewV))
4634           Changed = ChangeStatus::CHANGED;
4635       }
4636     }
4637 
4638     return Changed | AAValueSimplify::manifest(A);
4639   }
4640 
4641   /// See AbstractState::indicatePessimisticFixpoint(...).
4642   ChangeStatus indicatePessimisticFixpoint() override {
4643     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4644     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4645     SimplifiedAssociatedValue = &getAssociatedValue();
4646     indicateOptimisticFixpoint();
4647     return ChangeStatus::CHANGED;
4648   }
4649 
4650 protected:
4651   // An assumed simplified value. Initially, it is set to Optional::None, which
4652   // means that the value is not clear under current assumption. If in the
4653   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4654   // returns orignal associated value.
4655   Optional<Value *> SimplifiedAssociatedValue;
4656 };
4657 
4658 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4659   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4660       : AAValueSimplifyImpl(IRP, A) {}
4661 
4662   void initialize(Attributor &A) override {
4663     AAValueSimplifyImpl::initialize(A);
4664     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4665       indicatePessimisticFixpoint();
4666     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4667                  Attribute::StructRet, Attribute::Nest},
4668                 /* IgnoreSubsumingPositions */ true))
4669       indicatePessimisticFixpoint();
4670 
4671     // FIXME: This is a hack to prevent us from propagating function poiner in
4672     // the new pass manager CGSCC pass as it creates call edges the
4673     // CallGraphUpdater cannot handle yet.
4674     Value &V = getAssociatedValue();
4675     if (V.getType()->isPointerTy() &&
4676         V.getType()->getPointerElementType()->isFunctionTy() &&
4677         !A.isModulePass())
4678       indicatePessimisticFixpoint();
4679   }
4680 
4681   /// See AbstractAttribute::updateImpl(...).
4682   ChangeStatus updateImpl(Attributor &A) override {
4683     // Byval is only replacable if it is readonly otherwise we would write into
4684     // the replaced value and not the copy that byval creates implicitly.
4685     Argument *Arg = getAssociatedArgument();
4686     if (Arg->hasByValAttr()) {
4687       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4688       //       there is no race by not copying a constant byval.
4689       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4690                                                        DepClassTy::REQUIRED);
4691       if (!MemAA.isAssumedReadOnly())
4692         return indicatePessimisticFixpoint();
4693     }
4694 
4695     auto Before = SimplifiedAssociatedValue;
4696 
4697     auto PredForCallSite = [&](AbstractCallSite ACS) {
4698       const IRPosition &ACSArgPos =
4699           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4700       // Check if a coresponding argument was found or if it is on not
4701       // associated (which can happen for callback calls).
4702       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4703         return false;
4704 
4705       // We can only propagate thread independent values through callbacks.
4706       // This is different to direct/indirect call sites because for them we
4707       // know the thread executing the caller and callee is the same. For
4708       // callbacks this is not guaranteed, thus a thread dependent value could
4709       // be different for the caller and callee, making it invalid to propagate.
4710       Value &ArgOp = ACSArgPos.getAssociatedValue();
4711       if (ACS.isCallbackCall())
4712         if (auto *C = dyn_cast<Constant>(&ArgOp))
4713           if (C->isThreadDependent())
4714             return false;
4715       return checkAndUpdate(A, *this, ACSArgPos, SimplifiedAssociatedValue);
4716     };
4717 
4718     // Generate a answer specific to a call site context.
4719     bool Success;
4720     bool AllCallSitesKnown;
4721     if (hasCallBaseContext())
4722       Success = PredForCallSite(
4723           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
4724     else
4725       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
4726                                        AllCallSitesKnown);
4727 
4728     if (!Success)
4729       if (!askSimplifiedValueForOtherAAs(A))
4730         return indicatePessimisticFixpoint();
4731 
4732     // If a candicate was found in this update, return CHANGED.
4733     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4734                                                : ChangeStatus ::CHANGED;
4735   }
4736 
4737   /// See AbstractAttribute::trackStatistics()
4738   void trackStatistics() const override {
4739     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4740   }
4741 };
4742 
4743 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4744   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4745       : AAValueSimplifyImpl(IRP, A) {}
4746 
4747   /// See AbstractAttribute::updateImpl(...).
4748   ChangeStatus updateImpl(Attributor &A) override {
4749     auto Before = SimplifiedAssociatedValue;
4750 
4751     auto PredForReturned = [&](Value &V) {
4752       return checkAndUpdate(A, *this,
4753                             IRPosition::value(V, getCallBaseContext()),
4754                             SimplifiedAssociatedValue);
4755     };
4756 
4757     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4758       if (!askSimplifiedValueForOtherAAs(A))
4759         return indicatePessimisticFixpoint();
4760 
4761     // If a candicate was found in this update, return CHANGED.
4762     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4763                                                : ChangeStatus ::CHANGED;
4764   }
4765 
4766   ChangeStatus manifest(Attributor &A) override {
4767     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4768 
4769     if (SimplifiedAssociatedValue.hasValue() &&
4770         !SimplifiedAssociatedValue.getValue())
4771       return Changed | AAValueSimplify::manifest(A);
4772 
4773     auto *C = SimplifiedAssociatedValue.hasValue()
4774                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4775                   : UndefValue::get(getAssociatedType());
4776     if (!C || C == &getAssociatedValue())
4777       return Changed | AAValueSimplify::manifest(A);
4778 
4779     auto PredForReturned =
4780         [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4781           // We can replace the AssociatedValue with the constant.
4782           if (&V == C || isa<UndefValue>(V))
4783             return true;
4784 
4785           for (ReturnInst *RI : RetInsts) {
4786             if (RI->getFunction() != getAnchorScope())
4787               continue;
4788             Value *NewV = AA::getWithType(*C, *RI->getReturnValue()->getType());
4789             if (!NewV)
4790               continue;
4791             LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4792                               << " in " << *RI << " :: " << *this << "\n");
4793             if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
4794               Changed = ChangeStatus::CHANGED;
4795           }
4796           return true;
4797         };
4798     A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4799 
4800     return Changed | AAValueSimplify::manifest(A);
4801   }
4802 
4803   /// See AbstractAttribute::trackStatistics()
4804   void trackStatistics() const override {
4805     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4806   }
4807 };
4808 
4809 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4810   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4811       : AAValueSimplifyImpl(IRP, A) {}
4812 
4813   /// See AbstractAttribute::initialize(...).
4814   void initialize(Attributor &A) override {
4815     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4816     //        Needs investigation.
4817     // AAValueSimplifyImpl::initialize(A);
4818     Value &V = getAnchorValue();
4819 
4820     // TODO: add other stuffs
4821     if (isa<Constant>(V))
4822       indicatePessimisticFixpoint();
4823   }
4824 
4825   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4826   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4827   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4828   /// updated and \p Changed is set appropriately.
4829   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4830                               ChangeStatus &Changed) {
4831     if (!ICmp)
4832       return false;
4833     if (!ICmp->isEquality())
4834       return false;
4835 
4836     // This is a comparison with == or !-. We check for nullptr now.
4837     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4838     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4839     if (!Op0IsNull && !Op1IsNull)
4840       return false;
4841 
4842     LLVMContext &Ctx = ICmp->getContext();
4843     // Check for `nullptr ==/!= nullptr` first:
4844     if (Op0IsNull && Op1IsNull) {
4845       Value *NewVal = ConstantInt::get(
4846           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4847       assert(!SimplifiedAssociatedValue.hasValue() &&
4848              "Did not expect non-fixed value for constant comparison");
4849       SimplifiedAssociatedValue = NewVal;
4850       indicateOptimisticFixpoint();
4851       Changed = ChangeStatus::CHANGED;
4852       return true;
4853     }
4854 
4855     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4856     // non-nullptr operand and if we assume it's non-null we can conclude the
4857     // result of the comparison.
4858     assert((Op0IsNull || Op1IsNull) &&
4859            "Expected nullptr versus non-nullptr comparison at this point");
4860 
4861     // The index is the operand that we assume is not null.
4862     unsigned PtrIdx = Op0IsNull;
4863     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4864         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4865         DepClassTy::REQUIRED);
4866     if (!PtrNonNullAA.isAssumedNonNull())
4867       return false;
4868 
4869     // The new value depends on the predicate, true for != and false for ==.
4870     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4871                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4872 
4873     assert((!SimplifiedAssociatedValue.hasValue() ||
4874             SimplifiedAssociatedValue == NewVal) &&
4875            "Did not expect to change value for zero-comparison");
4876 
4877     auto Before = SimplifiedAssociatedValue;
4878     SimplifiedAssociatedValue = NewVal;
4879 
4880     if (PtrNonNullAA.isKnownNonNull())
4881       indicateOptimisticFixpoint();
4882 
4883     Changed = Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4884                                                   : ChangeStatus ::CHANGED;
4885     return true;
4886   }
4887 
4888   /// See AbstractAttribute::updateImpl(...).
4889   ChangeStatus updateImpl(Attributor &A) override {
4890     auto Before = SimplifiedAssociatedValue;
4891 
4892     ChangeStatus Changed;
4893     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4894                                Changed))
4895       return Changed;
4896 
4897     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4898                             bool Stripped) -> bool {
4899       auto &AA = A.getAAFor<AAValueSimplify>(
4900           *this, IRPosition::value(V, getCallBaseContext()),
4901           DepClassTy::REQUIRED);
4902       if (!Stripped && this == &AA) {
4903         // TODO: Look the instruction and check recursively.
4904 
4905         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4906                           << "\n");
4907         return false;
4908       }
4909       return checkAndUpdate(A, *this,
4910                             IRPosition::value(V, getCallBaseContext()),
4911                             SimplifiedAssociatedValue);
4912     };
4913 
4914     bool Dummy = false;
4915     if (!genericValueTraversal<AAValueSimplify, bool>(
4916             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4917             /* UseValueSimplify */ false))
4918       if (!askSimplifiedValueForOtherAAs(A))
4919         return indicatePessimisticFixpoint();
4920 
4921     // If a candicate was found in this update, return CHANGED.
4922     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4923                                                : ChangeStatus ::CHANGED;
4924   }
4925 
4926   /// See AbstractAttribute::trackStatistics()
4927   void trackStatistics() const override {
4928     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4929   }
4930 };
4931 
4932 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4933   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4934       : AAValueSimplifyImpl(IRP, A) {}
4935 
4936   /// See AbstractAttribute::initialize(...).
4937   void initialize(Attributor &A) override {
4938     SimplifiedAssociatedValue = &getAnchorValue();
4939     indicateOptimisticFixpoint();
4940   }
4941   /// See AbstractAttribute::initialize(...).
4942   ChangeStatus updateImpl(Attributor &A) override {
4943     llvm_unreachable(
4944         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4945   }
4946   /// See AbstractAttribute::trackStatistics()
4947   void trackStatistics() const override {
4948     STATS_DECLTRACK_FN_ATTR(value_simplify)
4949   }
4950 };
4951 
4952 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4953   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4954       : AAValueSimplifyFunction(IRP, A) {}
4955   /// See AbstractAttribute::trackStatistics()
4956   void trackStatistics() const override {
4957     STATS_DECLTRACK_CS_ATTR(value_simplify)
4958   }
4959 };
4960 
4961 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4962   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4963       : AAValueSimplifyReturned(IRP, A) {}
4964 
4965   /// See AbstractAttribute::manifest(...).
4966   ChangeStatus manifest(Attributor &A) override {
4967     return AAValueSimplifyImpl::manifest(A);
4968   }
4969 
4970   void trackStatistics() const override {
4971     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4972   }
4973 };
4974 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4975   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4976       : AAValueSimplifyFloating(IRP, A) {}
4977 
4978   /// See AbstractAttribute::manifest(...).
4979   ChangeStatus manifest(Attributor &A) override {
4980     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4981 
4982     if (SimplifiedAssociatedValue.hasValue() &&
4983         !SimplifiedAssociatedValue.getValue())
4984       return Changed;
4985 
4986     Value &V = getAssociatedValue();
4987     auto *C = SimplifiedAssociatedValue.hasValue()
4988                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4989                   : UndefValue::get(V.getType());
4990     if (C) {
4991       Use &U = cast<CallBase>(&getAnchorValue())
4992                    ->getArgOperandUse(getCallSiteArgNo());
4993       // We can replace the AssociatedValue with the constant.
4994       if (&V != C) {
4995         if (Value *NewV = AA::getWithType(*C, *V.getType()))
4996           if (A.changeUseAfterManifest(U, *NewV))
4997             Changed = ChangeStatus::CHANGED;
4998       }
4999     }
5000 
5001     return Changed | AAValueSimplify::manifest(A);
5002   }
5003 
5004   void trackStatistics() const override {
5005     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5006   }
5007 };
5008 
5009 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5010 struct AAHeapToStackImpl : public AAHeapToStack {
5011   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5012       : AAHeapToStack(IRP, A) {}
5013 
5014   const std::string getAsStr() const override {
5015     return "[H2S] Mallocs Good/Bad: " + std::to_string(MallocCalls.size()) +
5016            "/" + std::to_string(BadMallocCalls.size());
5017   }
5018 
5019   bool isAssumedHeapToStack(CallBase &CB) const override {
5020     return isValidState() && MallocCalls.contains(&CB) &&
5021            !BadMallocCalls.count(&CB);
5022   }
5023 
5024   bool isKnownHeapToStack(CallBase &CB) const override {
5025     return isValidState() && MallocCalls.contains(&CB) &&
5026            !BadMallocCalls.count(&CB);
5027   }
5028 
5029   ChangeStatus manifest(Attributor &A) override {
5030     assert(getState().isValidState() &&
5031            "Attempted to manifest an invalid state!");
5032 
5033     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5034     Function *F = getAnchorScope();
5035     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5036 
5037     for (Instruction *MallocCall : MallocCalls) {
5038       // This malloc cannot be replaced.
5039       if (BadMallocCalls.count(MallocCall))
5040         continue;
5041 
5042       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5043         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5044         A.deleteAfterManifest(*FreeCall);
5045         HasChanged = ChangeStatus::CHANGED;
5046       }
5047 
5048       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5049                         << "\n");
5050 
5051       Align Alignment;
5052       Value *Size;
5053       if (isCallocLikeFn(MallocCall, TLI)) {
5054         auto *Num = MallocCall->getOperand(0);
5055         auto *SizeT = MallocCall->getOperand(1);
5056         IRBuilder<> B(MallocCall);
5057         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5058       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5059         Size = MallocCall->getOperand(1);
5060         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5061                                    ->getValue()
5062                                    .getZExtValue())
5063                         .valueOrOne();
5064       } else {
5065         Size = MallocCall->getOperand(0);
5066       }
5067 
5068       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5069       Instruction *AI =
5070           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5071                          "", MallocCall->getNextNode());
5072 
5073       if (AI->getType() != MallocCall->getType())
5074         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5075                              AI->getNextNode());
5076 
5077       A.changeValueAfterManifest(*MallocCall, *AI);
5078 
5079       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5080         auto *NBB = II->getNormalDest();
5081         BranchInst::Create(NBB, MallocCall->getParent());
5082         A.deleteAfterManifest(*MallocCall);
5083       } else {
5084         A.deleteAfterManifest(*MallocCall);
5085       }
5086 
5087       // Zero out the allocated memory if it was a calloc.
5088       if (isCallocLikeFn(MallocCall, TLI)) {
5089         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5090                                    AI->getNextNode());
5091         Value *Ops[] = {
5092             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5093             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5094 
5095         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5096         Module *M = F->getParent();
5097         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5098         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5099       }
5100       HasChanged = ChangeStatus::CHANGED;
5101     }
5102 
5103     return HasChanged;
5104   }
5105 
5106   /// Collection of all malloc calls in a function.
5107   SmallSetVector<Instruction *, 4> MallocCalls;
5108 
5109   /// Collection of malloc calls that cannot be converted.
5110   DenseSet<const Instruction *> BadMallocCalls;
5111 
5112   /// A map for each malloc call to the set of associated free calls.
5113   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5114 
5115   ChangeStatus updateImpl(Attributor &A) override;
5116 };
5117 
5118 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5119   const Function *F = getAnchorScope();
5120   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5121 
5122   MustBeExecutedContextExplorer &Explorer =
5123       A.getInfoCache().getMustBeExecutedContextExplorer();
5124 
5125   bool StackIsAccessibleByOtherThreads =
5126       A.getInfoCache().stackIsAccessibleByOtherThreads();
5127 
5128   auto FreeCheck = [&](Instruction &I) {
5129     // If the stack is not accessible by other threads, the "must-free" logic
5130     // doesn't apply as the pointer could be shared and needs to be places in
5131     // "shareable" memory.
5132     if (!StackIsAccessibleByOtherThreads) {
5133       auto &NoSyncAA =
5134           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
5135       if (!NoSyncAA.isAssumedNoSync()) {
5136         LLVM_DEBUG(
5137             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
5138                       "other threads and function is not nosync:\n");
5139         return false;
5140       }
5141     }
5142     const auto &Frees = FreesForMalloc.lookup(&I);
5143     if (Frees.size() != 1) {
5144       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
5145                         << Frees.size() << "\n");
5146       return false;
5147     }
5148     Instruction *UniqueFree = *Frees.begin();
5149     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5150   };
5151 
5152   auto UsesCheck = [&](Instruction &I) {
5153     bool ValidUsesOnly = true;
5154     bool MustUse = true;
5155     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5156       Instruction *UserI = cast<Instruction>(U.getUser());
5157       if (isa<LoadInst>(UserI))
5158         return true;
5159       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5160         if (SI->getValueOperand() == U.get()) {
5161           LLVM_DEBUG(dbgs()
5162                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5163           ValidUsesOnly = false;
5164         } else {
5165           // A store into the malloc'ed memory is fine.
5166         }
5167         return true;
5168       }
5169       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5170         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5171           return true;
5172         // Record malloc.
5173         if (isFreeCall(UserI, TLI)) {
5174           if (MustUse) {
5175             FreesForMalloc[&I].insert(UserI);
5176           } else {
5177             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5178                               << *UserI << "\n");
5179             ValidUsesOnly = false;
5180           }
5181           return true;
5182         }
5183 
5184         unsigned ArgNo = CB->getArgOperandNo(&U);
5185 
5186         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5187             *this, IRPosition::callsite_argument(*CB, ArgNo),
5188             DepClassTy::OPTIONAL);
5189 
5190         // If a callsite argument use is nofree, we are fine.
5191         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5192             *this, IRPosition::callsite_argument(*CB, ArgNo),
5193             DepClassTy::OPTIONAL);
5194 
5195         if (!NoCaptureAA.isAssumedNoCapture() ||
5196             !ArgNoFreeAA.isAssumedNoFree()) {
5197           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5198           ValidUsesOnly = false;
5199         }
5200         return true;
5201       }
5202 
5203       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5204           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5205         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5206         Follow = true;
5207         return true;
5208       }
5209       // Unknown user for which we can not track uses further (in a way that
5210       // makes sense).
5211       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5212       ValidUsesOnly = false;
5213       return true;
5214     };
5215     A.checkForAllUses(Pred, *this, I);
5216     return ValidUsesOnly;
5217   };
5218 
5219   auto MallocCallocCheck = [&](Instruction &I) {
5220     if (BadMallocCalls.count(&I))
5221       return true;
5222 
5223     bool IsMalloc = isMallocLikeFn(&I, TLI);
5224     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5225     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5226     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5227       BadMallocCalls.insert(&I);
5228       return true;
5229     }
5230 
5231     if (IsMalloc) {
5232       if (MaxHeapToStackSize == -1) {
5233         if (UsesCheck(I) || FreeCheck(I)) {
5234           MallocCalls.insert(&I);
5235           return true;
5236         }
5237       }
5238       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5239         if (Size->getValue().ule(MaxHeapToStackSize))
5240           if (UsesCheck(I) || FreeCheck(I)) {
5241             MallocCalls.insert(&I);
5242             return true;
5243           }
5244     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5245       if (MaxHeapToStackSize == -1) {
5246         if (UsesCheck(I) || FreeCheck(I)) {
5247           MallocCalls.insert(&I);
5248           return true;
5249         }
5250       }
5251       // Only if the alignment and sizes are constant.
5252       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5253         if (Size->getValue().ule(MaxHeapToStackSize))
5254           if (UsesCheck(I) || FreeCheck(I)) {
5255             MallocCalls.insert(&I);
5256             return true;
5257           }
5258     } else if (IsCalloc) {
5259       if (MaxHeapToStackSize == -1) {
5260         if (UsesCheck(I) || FreeCheck(I)) {
5261           MallocCalls.insert(&I);
5262           return true;
5263         }
5264       }
5265       bool Overflow = false;
5266       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5267         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5268           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5269                   .ule(MaxHeapToStackSize))
5270             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5271               MallocCalls.insert(&I);
5272               return true;
5273             }
5274     }
5275 
5276     BadMallocCalls.insert(&I);
5277     return true;
5278   };
5279 
5280   size_t NumBadMallocs = BadMallocCalls.size();
5281 
5282   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5283 
5284   if (NumBadMallocs != BadMallocCalls.size())
5285     return ChangeStatus::CHANGED;
5286 
5287   return ChangeStatus::UNCHANGED;
5288 }
5289 
5290 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5291   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5292       : AAHeapToStackImpl(IRP, A) {}
5293 
5294   /// See AbstractAttribute::trackStatistics().
5295   void trackStatistics() const override {
5296     STATS_DECL(
5297         MallocCalls, Function,
5298         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5299     for (auto *C : MallocCalls)
5300       if (!BadMallocCalls.count(C))
5301         ++BUILD_STAT_NAME(MallocCalls, Function);
5302   }
5303 };
5304 
5305 /// ----------------------- Privatizable Pointers ------------------------------
5306 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5307   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5308       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5309 
5310   ChangeStatus indicatePessimisticFixpoint() override {
5311     AAPrivatizablePtr::indicatePessimisticFixpoint();
5312     PrivatizableType = nullptr;
5313     return ChangeStatus::CHANGED;
5314   }
5315 
5316   /// Identify the type we can chose for a private copy of the underlying
5317   /// argument. None means it is not clear yet, nullptr means there is none.
5318   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5319 
5320   /// Return a privatizable type that encloses both T0 and T1.
5321   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5322   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5323     if (!T0.hasValue())
5324       return T1;
5325     if (!T1.hasValue())
5326       return T0;
5327     if (T0 == T1)
5328       return T0;
5329     return nullptr;
5330   }
5331 
5332   Optional<Type *> getPrivatizableType() const override {
5333     return PrivatizableType;
5334   }
5335 
5336   const std::string getAsStr() const override {
5337     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5338   }
5339 
5340 protected:
5341   Optional<Type *> PrivatizableType;
5342 };
5343 
5344 // TODO: Do this for call site arguments (probably also other values) as well.
5345 
5346 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5347   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5348       : AAPrivatizablePtrImpl(IRP, A) {}
5349 
5350   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5351   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5352     // If this is a byval argument and we know all the call sites (so we can
5353     // rewrite them), there is no need to check them explicitly.
5354     bool AllCallSitesKnown;
5355     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5356         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5357                                true, AllCallSitesKnown))
5358       return getAssociatedValue().getType()->getPointerElementType();
5359 
5360     Optional<Type *> Ty;
5361     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5362 
5363     // Make sure the associated call site argument has the same type at all call
5364     // sites and it is an allocation we know is safe to privatize, for now that
5365     // means we only allow alloca instructions.
5366     // TODO: We can additionally analyze the accesses in the callee to  create
5367     //       the type from that information instead. That is a little more
5368     //       involved and will be done in a follow up patch.
5369     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5370       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5371       // Check if a coresponding argument was found or if it is one not
5372       // associated (which can happen for callback calls).
5373       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5374         return false;
5375 
5376       // Check that all call sites agree on a type.
5377       auto &PrivCSArgAA =
5378           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5379       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5380 
5381       LLVM_DEBUG({
5382         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5383         if (CSTy.hasValue() && CSTy.getValue())
5384           CSTy.getValue()->print(dbgs());
5385         else if (CSTy.hasValue())
5386           dbgs() << "<nullptr>";
5387         else
5388           dbgs() << "<none>";
5389       });
5390 
5391       Ty = combineTypes(Ty, CSTy);
5392 
5393       LLVM_DEBUG({
5394         dbgs() << " : New Type: ";
5395         if (Ty.hasValue() && Ty.getValue())
5396           Ty.getValue()->print(dbgs());
5397         else if (Ty.hasValue())
5398           dbgs() << "<nullptr>";
5399         else
5400           dbgs() << "<none>";
5401         dbgs() << "\n";
5402       });
5403 
5404       return !Ty.hasValue() || Ty.getValue();
5405     };
5406 
5407     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5408       return nullptr;
5409     return Ty;
5410   }
5411 
5412   /// See AbstractAttribute::updateImpl(...).
5413   ChangeStatus updateImpl(Attributor &A) override {
5414     PrivatizableType = identifyPrivatizableType(A);
5415     if (!PrivatizableType.hasValue())
5416       return ChangeStatus::UNCHANGED;
5417     if (!PrivatizableType.getValue())
5418       return indicatePessimisticFixpoint();
5419 
5420     // The dependence is optional so we don't give up once we give up on the
5421     // alignment.
5422     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5423                         DepClassTy::OPTIONAL);
5424 
5425     // Avoid arguments with padding for now.
5426     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5427         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5428                                                 A.getInfoCache().getDL())) {
5429       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5430       return indicatePessimisticFixpoint();
5431     }
5432 
5433     // Verify callee and caller agree on how the promoted argument would be
5434     // passed.
5435     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5436     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5437     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5438     Function &Fn = *getIRPosition().getAnchorScope();
5439     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5440     ArgsToPromote.insert(getAssociatedArgument());
5441     const auto *TTI =
5442         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5443     if (!TTI ||
5444         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5445             Fn, *TTI, ArgsToPromote, Dummy) ||
5446         ArgsToPromote.empty()) {
5447       LLVM_DEBUG(
5448           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5449                  << Fn.getName() << "\n");
5450       return indicatePessimisticFixpoint();
5451     }
5452 
5453     // Collect the types that will replace the privatizable type in the function
5454     // signature.
5455     SmallVector<Type *, 16> ReplacementTypes;
5456     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5457 
5458     // Register a rewrite of the argument.
5459     Argument *Arg = getAssociatedArgument();
5460     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5461       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5462       return indicatePessimisticFixpoint();
5463     }
5464 
5465     unsigned ArgNo = Arg->getArgNo();
5466 
5467     // Helper to check if for the given call site the associated argument is
5468     // passed to a callback where the privatization would be different.
5469     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5470       SmallVector<const Use *, 4> CallbackUses;
5471       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5472       for (const Use *U : CallbackUses) {
5473         AbstractCallSite CBACS(U);
5474         assert(CBACS && CBACS.isCallbackCall());
5475         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5476           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5477 
5478           LLVM_DEBUG({
5479             dbgs()
5480                 << "[AAPrivatizablePtr] Argument " << *Arg
5481                 << "check if can be privatized in the context of its parent ("
5482                 << Arg->getParent()->getName()
5483                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5484                    "callback ("
5485                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5486                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5487                 << CBACS.getCallArgOperand(CBArg) << " vs "
5488                 << CB.getArgOperand(ArgNo) << "\n"
5489                 << "[AAPrivatizablePtr] " << CBArg << " : "
5490                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5491           });
5492 
5493           if (CBArgNo != int(ArgNo))
5494             continue;
5495           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5496               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5497           if (CBArgPrivAA.isValidState()) {
5498             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5499             if (!CBArgPrivTy.hasValue())
5500               continue;
5501             if (CBArgPrivTy.getValue() == PrivatizableType)
5502               continue;
5503           }
5504 
5505           LLVM_DEBUG({
5506             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5507                    << " cannot be privatized in the context of its parent ("
5508                    << Arg->getParent()->getName()
5509                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5510                       "callback ("
5511                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5512                    << ").\n[AAPrivatizablePtr] for which the argument "
5513                       "privatization is not compatible.\n";
5514           });
5515           return false;
5516         }
5517       }
5518       return true;
5519     };
5520 
5521     // Helper to check if for the given call site the associated argument is
5522     // passed to a direct call where the privatization would be different.
5523     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5524       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5525       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5526       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5527              "Expected a direct call operand for callback call operand");
5528 
5529       LLVM_DEBUG({
5530         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5531                << " check if be privatized in the context of its parent ("
5532                << Arg->getParent()->getName()
5533                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5534                   "direct call of ("
5535                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5536                << ").\n";
5537       });
5538 
5539       Function *DCCallee = DC->getCalledFunction();
5540       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5541         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5542             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5543             DepClassTy::REQUIRED);
5544         if (DCArgPrivAA.isValidState()) {
5545           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5546           if (!DCArgPrivTy.hasValue())
5547             return true;
5548           if (DCArgPrivTy.getValue() == PrivatizableType)
5549             return true;
5550         }
5551       }
5552 
5553       LLVM_DEBUG({
5554         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5555                << " cannot be privatized in the context of its parent ("
5556                << Arg->getParent()->getName()
5557                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5558                   "direct call of ("
5559                << ACS.getInstruction()->getCalledFunction()->getName()
5560                << ").\n[AAPrivatizablePtr] for which the argument "
5561                   "privatization is not compatible.\n";
5562       });
5563       return false;
5564     };
5565 
5566     // Helper to check if the associated argument is used at the given abstract
5567     // call site in a way that is incompatible with the privatization assumed
5568     // here.
5569     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5570       if (ACS.isDirectCall())
5571         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5572       if (ACS.isCallbackCall())
5573         return IsCompatiblePrivArgOfDirectCS(ACS);
5574       return false;
5575     };
5576 
5577     bool AllCallSitesKnown;
5578     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5579                                 AllCallSitesKnown))
5580       return indicatePessimisticFixpoint();
5581 
5582     return ChangeStatus::UNCHANGED;
5583   }
5584 
5585   /// Given a type to private \p PrivType, collect the constituates (which are
5586   /// used) in \p ReplacementTypes.
5587   static void
5588   identifyReplacementTypes(Type *PrivType,
5589                            SmallVectorImpl<Type *> &ReplacementTypes) {
5590     // TODO: For now we expand the privatization type to the fullest which can
5591     //       lead to dead arguments that need to be removed later.
5592     assert(PrivType && "Expected privatizable type!");
5593 
5594     // Traverse the type, extract constituate types on the outermost level.
5595     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5596       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5597         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5598     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5599       ReplacementTypes.append(PrivArrayType->getNumElements(),
5600                               PrivArrayType->getElementType());
5601     } else {
5602       ReplacementTypes.push_back(PrivType);
5603     }
5604   }
5605 
5606   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5607   /// The values needed are taken from the arguments of \p F starting at
5608   /// position \p ArgNo.
5609   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5610                                    unsigned ArgNo, Instruction &IP) {
5611     assert(PrivType && "Expected privatizable type!");
5612 
5613     IRBuilder<NoFolder> IRB(&IP);
5614     const DataLayout &DL = F.getParent()->getDataLayout();
5615 
5616     // Traverse the type, build GEPs and stores.
5617     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5618       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5619       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5620         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5621         Value *Ptr =
5622             constructPointer(PointeeTy, PrivType, &Base,
5623                              PrivStructLayout->getElementOffset(u), IRB, DL);
5624         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5625       }
5626     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5627       Type *PointeeTy = PrivArrayType->getElementType();
5628       Type *PointeePtrTy = PointeeTy->getPointerTo();
5629       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5630       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5631         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5632                                       u * PointeeTySize, IRB, DL);
5633         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5634       }
5635     } else {
5636       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5637     }
5638   }
5639 
5640   /// Extract values from \p Base according to the type \p PrivType at the
5641   /// call position \p ACS. The values are appended to \p ReplacementValues.
5642   void createReplacementValues(Align Alignment, Type *PrivType,
5643                                AbstractCallSite ACS, Value *Base,
5644                                SmallVectorImpl<Value *> &ReplacementValues) {
5645     assert(Base && "Expected base value!");
5646     assert(PrivType && "Expected privatizable type!");
5647     Instruction *IP = ACS.getInstruction();
5648 
5649     IRBuilder<NoFolder> IRB(IP);
5650     const DataLayout &DL = IP->getModule()->getDataLayout();
5651 
5652     if (Base->getType()->getPointerElementType() != PrivType)
5653       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5654                                                  "", ACS.getInstruction());
5655 
5656     // Traverse the type, build GEPs and loads.
5657     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5658       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5659       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5660         Type *PointeeTy = PrivStructType->getElementType(u);
5661         Value *Ptr =
5662             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5663                              PrivStructLayout->getElementOffset(u), IRB, DL);
5664         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5665         L->setAlignment(Alignment);
5666         ReplacementValues.push_back(L);
5667       }
5668     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5669       Type *PointeeTy = PrivArrayType->getElementType();
5670       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5671       Type *PointeePtrTy = PointeeTy->getPointerTo();
5672       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5673         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5674                                       u * PointeeTySize, IRB, DL);
5675         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5676         L->setAlignment(Alignment);
5677         ReplacementValues.push_back(L);
5678       }
5679     } else {
5680       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5681       L->setAlignment(Alignment);
5682       ReplacementValues.push_back(L);
5683     }
5684   }
5685 
5686   /// See AbstractAttribute::manifest(...)
5687   ChangeStatus manifest(Attributor &A) override {
5688     if (!PrivatizableType.hasValue())
5689       return ChangeStatus::UNCHANGED;
5690     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5691 
5692     // Collect all tail calls in the function as we cannot allow new allocas to
5693     // escape into tail recursion.
5694     // TODO: Be smarter about new allocas escaping into tail calls.
5695     SmallVector<CallInst *, 16> TailCalls;
5696     if (!A.checkForAllInstructions(
5697             [&](Instruction &I) {
5698               CallInst &CI = cast<CallInst>(I);
5699               if (CI.isTailCall())
5700                 TailCalls.push_back(&CI);
5701               return true;
5702             },
5703             *this, {Instruction::Call}))
5704       return ChangeStatus::UNCHANGED;
5705 
5706     Argument *Arg = getAssociatedArgument();
5707     // Query AAAlign attribute for alignment of associated argument to
5708     // determine the best alignment of loads.
5709     const auto &AlignAA =
5710         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5711 
5712     // Callback to repair the associated function. A new alloca is placed at the
5713     // beginning and initialized with the values passed through arguments. The
5714     // new alloca replaces the use of the old pointer argument.
5715     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5716         [=](const Attributor::ArgumentReplacementInfo &ARI,
5717             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5718           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5719           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5720           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5721                                            Arg->getName() + ".priv", IP);
5722           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5723                                ArgIt->getArgNo(), *IP);
5724 
5725           if (AI->getType() != Arg->getType())
5726             AI =
5727                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5728           Arg->replaceAllUsesWith(AI);
5729 
5730           for (CallInst *CI : TailCalls)
5731             CI->setTailCall(false);
5732         };
5733 
5734     // Callback to repair a call site of the associated function. The elements
5735     // of the privatizable type are loaded prior to the call and passed to the
5736     // new function version.
5737     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5738         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5739                       AbstractCallSite ACS,
5740                       SmallVectorImpl<Value *> &NewArgOperands) {
5741           // When no alignment is specified for the load instruction,
5742           // natural alignment is assumed.
5743           createReplacementValues(
5744               assumeAligned(AlignAA.getAssumedAlign()),
5745               PrivatizableType.getValue(), ACS,
5746               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5747               NewArgOperands);
5748         };
5749 
5750     // Collect the types that will replace the privatizable type in the function
5751     // signature.
5752     SmallVector<Type *, 16> ReplacementTypes;
5753     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5754 
5755     // Register a rewrite of the argument.
5756     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5757                                            std::move(FnRepairCB),
5758                                            std::move(ACSRepairCB)))
5759       return ChangeStatus::CHANGED;
5760     return ChangeStatus::UNCHANGED;
5761   }
5762 
5763   /// See AbstractAttribute::trackStatistics()
5764   void trackStatistics() const override {
5765     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5766   }
5767 };
5768 
5769 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5770   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5771       : AAPrivatizablePtrImpl(IRP, A) {}
5772 
5773   /// See AbstractAttribute::initialize(...).
5774   virtual void initialize(Attributor &A) override {
5775     // TODO: We can privatize more than arguments.
5776     indicatePessimisticFixpoint();
5777   }
5778 
5779   ChangeStatus updateImpl(Attributor &A) override {
5780     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5781                      "updateImpl will not be called");
5782   }
5783 
5784   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5785   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5786     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5787     if (!Obj) {
5788       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5789       return nullptr;
5790     }
5791 
5792     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5793       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5794         if (CI->isOne())
5795           return Obj->getType()->getPointerElementType();
5796     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5797       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5798           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5799       if (PrivArgAA.isAssumedPrivatizablePtr())
5800         return Obj->getType()->getPointerElementType();
5801     }
5802 
5803     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5804                          "alloca nor privatizable argument: "
5805                       << *Obj << "!\n");
5806     return nullptr;
5807   }
5808 
5809   /// See AbstractAttribute::trackStatistics()
5810   void trackStatistics() const override {
5811     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5812   }
5813 };
5814 
5815 struct AAPrivatizablePtrCallSiteArgument final
5816     : public AAPrivatizablePtrFloating {
5817   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5818       : AAPrivatizablePtrFloating(IRP, A) {}
5819 
5820   /// See AbstractAttribute::initialize(...).
5821   void initialize(Attributor &A) override {
5822     if (getIRPosition().hasAttr(Attribute::ByVal))
5823       indicateOptimisticFixpoint();
5824   }
5825 
5826   /// See AbstractAttribute::updateImpl(...).
5827   ChangeStatus updateImpl(Attributor &A) override {
5828     PrivatizableType = identifyPrivatizableType(A);
5829     if (!PrivatizableType.hasValue())
5830       return ChangeStatus::UNCHANGED;
5831     if (!PrivatizableType.getValue())
5832       return indicatePessimisticFixpoint();
5833 
5834     const IRPosition &IRP = getIRPosition();
5835     auto &NoCaptureAA =
5836         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5837     if (!NoCaptureAA.isAssumedNoCapture()) {
5838       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5839       return indicatePessimisticFixpoint();
5840     }
5841 
5842     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5843     if (!NoAliasAA.isAssumedNoAlias()) {
5844       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5845       return indicatePessimisticFixpoint();
5846     }
5847 
5848     const auto &MemBehaviorAA =
5849         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5850     if (!MemBehaviorAA.isAssumedReadOnly()) {
5851       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5852       return indicatePessimisticFixpoint();
5853     }
5854 
5855     return ChangeStatus::UNCHANGED;
5856   }
5857 
5858   /// See AbstractAttribute::trackStatistics()
5859   void trackStatistics() const override {
5860     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5861   }
5862 };
5863 
5864 struct AAPrivatizablePtrCallSiteReturned final
5865     : public AAPrivatizablePtrFloating {
5866   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5867       : AAPrivatizablePtrFloating(IRP, A) {}
5868 
5869   /// See AbstractAttribute::initialize(...).
5870   void initialize(Attributor &A) override {
5871     // TODO: We can privatize more than arguments.
5872     indicatePessimisticFixpoint();
5873   }
5874 
5875   /// See AbstractAttribute::trackStatistics()
5876   void trackStatistics() const override {
5877     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5878   }
5879 };
5880 
5881 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5882   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5883       : AAPrivatizablePtrFloating(IRP, A) {}
5884 
5885   /// See AbstractAttribute::initialize(...).
5886   void initialize(Attributor &A) override {
5887     // TODO: We can privatize more than arguments.
5888     indicatePessimisticFixpoint();
5889   }
5890 
5891   /// See AbstractAttribute::trackStatistics()
5892   void trackStatistics() const override {
5893     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5894   }
5895 };
5896 
5897 /// -------------------- Memory Behavior Attributes ----------------------------
5898 /// Includes read-none, read-only, and write-only.
5899 /// ----------------------------------------------------------------------------
5900 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5901   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5902       : AAMemoryBehavior(IRP, A) {}
5903 
5904   /// See AbstractAttribute::initialize(...).
5905   void initialize(Attributor &A) override {
5906     intersectAssumedBits(BEST_STATE);
5907     getKnownStateFromValue(getIRPosition(), getState());
5908     AAMemoryBehavior::initialize(A);
5909   }
5910 
5911   /// Return the memory behavior information encoded in the IR for \p IRP.
5912   static void getKnownStateFromValue(const IRPosition &IRP,
5913                                      BitIntegerState &State,
5914                                      bool IgnoreSubsumingPositions = false) {
5915     SmallVector<Attribute, 2> Attrs;
5916     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5917     for (const Attribute &Attr : Attrs) {
5918       switch (Attr.getKindAsEnum()) {
5919       case Attribute::ReadNone:
5920         State.addKnownBits(NO_ACCESSES);
5921         break;
5922       case Attribute::ReadOnly:
5923         State.addKnownBits(NO_WRITES);
5924         break;
5925       case Attribute::WriteOnly:
5926         State.addKnownBits(NO_READS);
5927         break;
5928       default:
5929         llvm_unreachable("Unexpected attribute!");
5930       }
5931     }
5932 
5933     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5934       if (!I->mayReadFromMemory())
5935         State.addKnownBits(NO_READS);
5936       if (!I->mayWriteToMemory())
5937         State.addKnownBits(NO_WRITES);
5938     }
5939   }
5940 
5941   /// See AbstractAttribute::getDeducedAttributes(...).
5942   void getDeducedAttributes(LLVMContext &Ctx,
5943                             SmallVectorImpl<Attribute> &Attrs) const override {
5944     assert(Attrs.size() == 0);
5945     if (isAssumedReadNone())
5946       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5947     else if (isAssumedReadOnly())
5948       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5949     else if (isAssumedWriteOnly())
5950       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5951     assert(Attrs.size() <= 1);
5952   }
5953 
5954   /// See AbstractAttribute::manifest(...).
5955   ChangeStatus manifest(Attributor &A) override {
5956     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5957       return ChangeStatus::UNCHANGED;
5958 
5959     const IRPosition &IRP = getIRPosition();
5960 
5961     // Check if we would improve the existing attributes first.
5962     SmallVector<Attribute, 4> DeducedAttrs;
5963     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5964     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5965           return IRP.hasAttr(Attr.getKindAsEnum(),
5966                              /* IgnoreSubsumingPositions */ true);
5967         }))
5968       return ChangeStatus::UNCHANGED;
5969 
5970     // Clear existing attributes.
5971     IRP.removeAttrs(AttrKinds);
5972 
5973     // Use the generic manifest method.
5974     return IRAttribute::manifest(A);
5975   }
5976 
5977   /// See AbstractState::getAsStr().
5978   const std::string getAsStr() const override {
5979     if (isAssumedReadNone())
5980       return "readnone";
5981     if (isAssumedReadOnly())
5982       return "readonly";
5983     if (isAssumedWriteOnly())
5984       return "writeonly";
5985     return "may-read/write";
5986   }
5987 
5988   /// The set of IR attributes AAMemoryBehavior deals with.
5989   static const Attribute::AttrKind AttrKinds[3];
5990 };
5991 
5992 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5993     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5994 
5995 /// Memory behavior attribute for a floating value.
5996 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5997   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5998       : AAMemoryBehaviorImpl(IRP, A) {}
5999 
6000   /// See AbstractAttribute::initialize(...).
6001   void initialize(Attributor &A) override {
6002     AAMemoryBehaviorImpl::initialize(A);
6003     addUsesOf(A, getAssociatedValue());
6004   }
6005 
6006   /// See AbstractAttribute::updateImpl(...).
6007   ChangeStatus updateImpl(Attributor &A) override;
6008 
6009   /// See AbstractAttribute::trackStatistics()
6010   void trackStatistics() const override {
6011     if (isAssumedReadNone())
6012       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6013     else if (isAssumedReadOnly())
6014       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6015     else if (isAssumedWriteOnly())
6016       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6017   }
6018 
6019 private:
6020   /// Return true if users of \p UserI might access the underlying
6021   /// variable/location described by \p U and should therefore be analyzed.
6022   bool followUsersOfUseIn(Attributor &A, const Use *U,
6023                           const Instruction *UserI);
6024 
6025   /// Update the state according to the effect of use \p U in \p UserI.
6026   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6027 
6028 protected:
6029   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6030   void addUsesOf(Attributor &A, const Value &V);
6031 
6032   /// Container for (transitive) uses of the associated argument.
6033   SmallVector<const Use *, 8> Uses;
6034 
6035   /// Set to remember the uses we already traversed.
6036   SmallPtrSet<const Use *, 8> Visited;
6037 };
6038 
6039 /// Memory behavior attribute for function argument.
6040 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6041   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6042       : AAMemoryBehaviorFloating(IRP, A) {}
6043 
6044   /// See AbstractAttribute::initialize(...).
6045   void initialize(Attributor &A) override {
6046     intersectAssumedBits(BEST_STATE);
6047     const IRPosition &IRP = getIRPosition();
6048     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6049     // can query it when we use has/getAttr. That would allow us to reuse the
6050     // initialize of the base class here.
6051     bool HasByVal =
6052         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6053     getKnownStateFromValue(IRP, getState(),
6054                            /* IgnoreSubsumingPositions */ HasByVal);
6055 
6056     // Initialize the use vector with all direct uses of the associated value.
6057     Argument *Arg = getAssociatedArgument();
6058     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6059       indicatePessimisticFixpoint();
6060     } else {
6061       addUsesOf(A, *Arg);
6062     }
6063   }
6064 
6065   ChangeStatus manifest(Attributor &A) override {
6066     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6067     if (!getAssociatedValue().getType()->isPointerTy())
6068       return ChangeStatus::UNCHANGED;
6069 
6070     // TODO: From readattrs.ll: "inalloca parameters are always
6071     //                           considered written"
6072     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6073       removeKnownBits(NO_WRITES);
6074       removeAssumedBits(NO_WRITES);
6075     }
6076     return AAMemoryBehaviorFloating::manifest(A);
6077   }
6078 
6079   /// See AbstractAttribute::trackStatistics()
6080   void trackStatistics() const override {
6081     if (isAssumedReadNone())
6082       STATS_DECLTRACK_ARG_ATTR(readnone)
6083     else if (isAssumedReadOnly())
6084       STATS_DECLTRACK_ARG_ATTR(readonly)
6085     else if (isAssumedWriteOnly())
6086       STATS_DECLTRACK_ARG_ATTR(writeonly)
6087   }
6088 };
6089 
6090 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6091   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6092       : AAMemoryBehaviorArgument(IRP, A) {}
6093 
6094   /// See AbstractAttribute::initialize(...).
6095   void initialize(Attributor &A) override {
6096     // If we don't have an associated attribute this is either a variadic call
6097     // or an indirect call, either way, nothing to do here.
6098     Argument *Arg = getAssociatedArgument();
6099     if (!Arg) {
6100       indicatePessimisticFixpoint();
6101       return;
6102     }
6103     if (Arg->hasByValAttr()) {
6104       addKnownBits(NO_WRITES);
6105       removeKnownBits(NO_READS);
6106       removeAssumedBits(NO_READS);
6107     }
6108     AAMemoryBehaviorArgument::initialize(A);
6109     if (getAssociatedFunction()->isDeclaration())
6110       indicatePessimisticFixpoint();
6111   }
6112 
6113   /// See AbstractAttribute::updateImpl(...).
6114   ChangeStatus updateImpl(Attributor &A) override {
6115     // TODO: Once we have call site specific value information we can provide
6116     //       call site specific liveness liveness information and then it makes
6117     //       sense to specialize attributes for call sites arguments instead of
6118     //       redirecting requests to the callee argument.
6119     Argument *Arg = getAssociatedArgument();
6120     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6121     auto &ArgAA =
6122         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6123     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6124   }
6125 
6126   /// See AbstractAttribute::trackStatistics()
6127   void trackStatistics() const override {
6128     if (isAssumedReadNone())
6129       STATS_DECLTRACK_CSARG_ATTR(readnone)
6130     else if (isAssumedReadOnly())
6131       STATS_DECLTRACK_CSARG_ATTR(readonly)
6132     else if (isAssumedWriteOnly())
6133       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6134   }
6135 };
6136 
6137 /// Memory behavior attribute for a call site return position.
6138 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6139   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6140       : AAMemoryBehaviorFloating(IRP, A) {}
6141 
6142   /// See AbstractAttribute::initialize(...).
6143   void initialize(Attributor &A) override {
6144     AAMemoryBehaviorImpl::initialize(A);
6145     Function *F = getAssociatedFunction();
6146     if (!F || F->isDeclaration())
6147       indicatePessimisticFixpoint();
6148   }
6149 
6150   /// See AbstractAttribute::manifest(...).
6151   ChangeStatus manifest(Attributor &A) override {
6152     // We do not annotate returned values.
6153     return ChangeStatus::UNCHANGED;
6154   }
6155 
6156   /// See AbstractAttribute::trackStatistics()
6157   void trackStatistics() const override {}
6158 };
6159 
6160 /// An AA to represent the memory behavior function attributes.
6161 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6162   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6163       : AAMemoryBehaviorImpl(IRP, A) {}
6164 
6165   /// See AbstractAttribute::updateImpl(Attributor &A).
6166   virtual ChangeStatus updateImpl(Attributor &A) override;
6167 
6168   /// See AbstractAttribute::manifest(...).
6169   ChangeStatus manifest(Attributor &A) override {
6170     Function &F = cast<Function>(getAnchorValue());
6171     if (isAssumedReadNone()) {
6172       F.removeFnAttr(Attribute::ArgMemOnly);
6173       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6174       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6175     }
6176     return AAMemoryBehaviorImpl::manifest(A);
6177   }
6178 
6179   /// See AbstractAttribute::trackStatistics()
6180   void trackStatistics() const override {
6181     if (isAssumedReadNone())
6182       STATS_DECLTRACK_FN_ATTR(readnone)
6183     else if (isAssumedReadOnly())
6184       STATS_DECLTRACK_FN_ATTR(readonly)
6185     else if (isAssumedWriteOnly())
6186       STATS_DECLTRACK_FN_ATTR(writeonly)
6187   }
6188 };
6189 
6190 /// AAMemoryBehavior attribute for call sites.
6191 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6192   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6193       : AAMemoryBehaviorImpl(IRP, A) {}
6194 
6195   /// See AbstractAttribute::initialize(...).
6196   void initialize(Attributor &A) override {
6197     AAMemoryBehaviorImpl::initialize(A);
6198     Function *F = getAssociatedFunction();
6199     if (!F || F->isDeclaration())
6200       indicatePessimisticFixpoint();
6201   }
6202 
6203   /// See AbstractAttribute::updateImpl(...).
6204   ChangeStatus updateImpl(Attributor &A) override {
6205     // TODO: Once we have call site specific value information we can provide
6206     //       call site specific liveness liveness information and then it makes
6207     //       sense to specialize attributes for call sites arguments instead of
6208     //       redirecting requests to the callee argument.
6209     Function *F = getAssociatedFunction();
6210     const IRPosition &FnPos = IRPosition::function(*F);
6211     auto &FnAA =
6212         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6213     return clampStateAndIndicateChange(getState(), FnAA.getState());
6214   }
6215 
6216   /// See AbstractAttribute::trackStatistics()
6217   void trackStatistics() const override {
6218     if (isAssumedReadNone())
6219       STATS_DECLTRACK_CS_ATTR(readnone)
6220     else if (isAssumedReadOnly())
6221       STATS_DECLTRACK_CS_ATTR(readonly)
6222     else if (isAssumedWriteOnly())
6223       STATS_DECLTRACK_CS_ATTR(writeonly)
6224   }
6225 };
6226 
6227 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6228 
6229   // The current assumed state used to determine a change.
6230   auto AssumedState = getAssumed();
6231 
6232   auto CheckRWInst = [&](Instruction &I) {
6233     // If the instruction has an own memory behavior state, use it to restrict
6234     // the local state. No further analysis is required as the other memory
6235     // state is as optimistic as it gets.
6236     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6237       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6238           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6239       intersectAssumedBits(MemBehaviorAA.getAssumed());
6240       return !isAtFixpoint();
6241     }
6242 
6243     // Remove access kind modifiers if necessary.
6244     if (I.mayReadFromMemory())
6245       removeAssumedBits(NO_READS);
6246     if (I.mayWriteToMemory())
6247       removeAssumedBits(NO_WRITES);
6248     return !isAtFixpoint();
6249   };
6250 
6251   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6252     return indicatePessimisticFixpoint();
6253 
6254   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6255                                         : ChangeStatus::UNCHANGED;
6256 }
6257 
6258 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6259 
6260   const IRPosition &IRP = getIRPosition();
6261   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6262   AAMemoryBehavior::StateType &S = getState();
6263 
6264   // First, check the function scope. We take the known information and we avoid
6265   // work if the assumed information implies the current assumed information for
6266   // this attribute. This is a valid for all but byval arguments.
6267   Argument *Arg = IRP.getAssociatedArgument();
6268   AAMemoryBehavior::base_t FnMemAssumedState =
6269       AAMemoryBehavior::StateType::getWorstState();
6270   if (!Arg || !Arg->hasByValAttr()) {
6271     const auto &FnMemAA =
6272         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6273     FnMemAssumedState = FnMemAA.getAssumed();
6274     S.addKnownBits(FnMemAA.getKnown());
6275     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6276       return ChangeStatus::UNCHANGED;
6277   }
6278 
6279   // Make sure the value is not captured (except through "return"), if
6280   // it is, any information derived would be irrelevant anyway as we cannot
6281   // check the potential aliases introduced by the capture. However, no need
6282   // to fall back to anythign less optimistic than the function state.
6283   const auto &ArgNoCaptureAA =
6284       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6285   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6286     S.intersectAssumedBits(FnMemAssumedState);
6287     return ChangeStatus::CHANGED;
6288   }
6289 
6290   // The current assumed state used to determine a change.
6291   auto AssumedState = S.getAssumed();
6292 
6293   // Liveness information to exclude dead users.
6294   // TODO: Take the FnPos once we have call site specific liveness information.
6295   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6296       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6297       DepClassTy::NONE);
6298 
6299   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6300   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6301     const Use *U = Uses[i];
6302     Instruction *UserI = cast<Instruction>(U->getUser());
6303     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6304                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6305                       << "]\n");
6306     if (A.isAssumedDead(*U, this, &LivenessAA))
6307       continue;
6308 
6309     // Droppable users, e.g., llvm::assume does not actually perform any action.
6310     if (UserI->isDroppable())
6311       continue;
6312 
6313     // Check if the users of UserI should also be visited.
6314     if (followUsersOfUseIn(A, U, UserI))
6315       addUsesOf(A, *UserI);
6316 
6317     // If UserI might touch memory we analyze the use in detail.
6318     if (UserI->mayReadOrWriteMemory())
6319       analyzeUseIn(A, U, UserI);
6320   }
6321 
6322   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6323                                         : ChangeStatus::UNCHANGED;
6324 }
6325 
6326 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6327   SmallVector<const Use *, 8> WL;
6328   for (const Use &U : V.uses())
6329     WL.push_back(&U);
6330 
6331   while (!WL.empty()) {
6332     const Use *U = WL.pop_back_val();
6333     if (!Visited.insert(U).second)
6334       continue;
6335 
6336     const Instruction *UserI = cast<Instruction>(U->getUser());
6337     if (UserI->mayReadOrWriteMemory()) {
6338       Uses.push_back(U);
6339       continue;
6340     }
6341     if (!followUsersOfUseIn(A, U, UserI))
6342       continue;
6343     for (const Use &UU : UserI->uses())
6344       WL.push_back(&UU);
6345   }
6346 }
6347 
6348 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6349                                                   const Instruction *UserI) {
6350   // The loaded value is unrelated to the pointer argument, no need to
6351   // follow the users of the load.
6352   if (isa<LoadInst>(UserI))
6353     return false;
6354 
6355   // By default we follow all uses assuming UserI might leak information on U,
6356   // we have special handling for call sites operands though.
6357   const auto *CB = dyn_cast<CallBase>(UserI);
6358   if (!CB || !CB->isArgOperand(U))
6359     return true;
6360 
6361   // If the use is a call argument known not to be captured, the users of
6362   // the call do not need to be visited because they have to be unrelated to
6363   // the input. Note that this check is not trivial even though we disallow
6364   // general capturing of the underlying argument. The reason is that the
6365   // call might the argument "through return", which we allow and for which we
6366   // need to check call users.
6367   if (U->get()->getType()->isPointerTy()) {
6368     unsigned ArgNo = CB->getArgOperandNo(U);
6369     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6370         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6371     return !ArgNoCaptureAA.isAssumedNoCapture();
6372   }
6373 
6374   return true;
6375 }
6376 
6377 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6378                                             const Instruction *UserI) {
6379   assert(UserI->mayReadOrWriteMemory());
6380 
6381   switch (UserI->getOpcode()) {
6382   default:
6383     // TODO: Handle all atomics and other side-effect operations we know of.
6384     break;
6385   case Instruction::Load:
6386     // Loads cause the NO_READS property to disappear.
6387     removeAssumedBits(NO_READS);
6388     return;
6389 
6390   case Instruction::Store:
6391     // Stores cause the NO_WRITES property to disappear if the use is the
6392     // pointer operand. Note that we do assume that capturing was taken care of
6393     // somewhere else.
6394     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6395       removeAssumedBits(NO_WRITES);
6396     return;
6397 
6398   case Instruction::Call:
6399   case Instruction::CallBr:
6400   case Instruction::Invoke: {
6401     // For call sites we look at the argument memory behavior attribute (this
6402     // could be recursive!) in order to restrict our own state.
6403     const auto *CB = cast<CallBase>(UserI);
6404 
6405     // Give up on operand bundles.
6406     if (CB->isBundleOperand(U)) {
6407       indicatePessimisticFixpoint();
6408       return;
6409     }
6410 
6411     // Calling a function does read the function pointer, maybe write it if the
6412     // function is self-modifying.
6413     if (CB->isCallee(U)) {
6414       removeAssumedBits(NO_READS);
6415       break;
6416     }
6417 
6418     // Adjust the possible access behavior based on the information on the
6419     // argument.
6420     IRPosition Pos;
6421     if (U->get()->getType()->isPointerTy())
6422       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6423     else
6424       Pos = IRPosition::callsite_function(*CB);
6425     const auto &MemBehaviorAA =
6426         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6427     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6428     // and at least "known".
6429     intersectAssumedBits(MemBehaviorAA.getAssumed());
6430     return;
6431   }
6432   };
6433 
6434   // Generally, look at the "may-properties" and adjust the assumed state if we
6435   // did not trigger special handling before.
6436   if (UserI->mayReadFromMemory())
6437     removeAssumedBits(NO_READS);
6438   if (UserI->mayWriteToMemory())
6439     removeAssumedBits(NO_WRITES);
6440 }
6441 
6442 } // namespace
6443 
6444 /// -------------------- Memory Locations Attributes ---------------------------
6445 /// Includes read-none, argmemonly, inaccessiblememonly,
6446 /// inaccessiblememorargmemonly
6447 /// ----------------------------------------------------------------------------
6448 
6449 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6450     AAMemoryLocation::MemoryLocationsKind MLK) {
6451   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6452     return "all memory";
6453   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6454     return "no memory";
6455   std::string S = "memory:";
6456   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6457     S += "stack,";
6458   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6459     S += "constant,";
6460   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6461     S += "internal global,";
6462   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6463     S += "external global,";
6464   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6465     S += "argument,";
6466   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6467     S += "inaccessible,";
6468   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6469     S += "malloced,";
6470   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6471     S += "unknown,";
6472   S.pop_back();
6473   return S;
6474 }
6475 
6476 namespace {
6477 struct AAMemoryLocationImpl : public AAMemoryLocation {
6478 
6479   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6480       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6481     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6482       AccessKind2Accesses[u] = nullptr;
6483   }
6484 
6485   ~AAMemoryLocationImpl() {
6486     // The AccessSets are allocated via a BumpPtrAllocator, we call
6487     // the destructor manually.
6488     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6489       if (AccessKind2Accesses[u])
6490         AccessKind2Accesses[u]->~AccessSet();
6491   }
6492 
6493   /// See AbstractAttribute::initialize(...).
6494   void initialize(Attributor &A) override {
6495     intersectAssumedBits(BEST_STATE);
6496     getKnownStateFromValue(A, getIRPosition(), getState());
6497     AAMemoryLocation::initialize(A);
6498   }
6499 
6500   /// Return the memory behavior information encoded in the IR for \p IRP.
6501   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6502                                      BitIntegerState &State,
6503                                      bool IgnoreSubsumingPositions = false) {
6504     // For internal functions we ignore `argmemonly` and
6505     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6506     // constant propagation. It is unclear if this is the best way but it is
6507     // unlikely this will cause real performance problems. If we are deriving
6508     // attributes for the anchor function we even remove the attribute in
6509     // addition to ignoring it.
6510     bool UseArgMemOnly = true;
6511     Function *AnchorFn = IRP.getAnchorScope();
6512     if (AnchorFn && A.isRunOn(*AnchorFn))
6513       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6514 
6515     SmallVector<Attribute, 2> Attrs;
6516     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6517     for (const Attribute &Attr : Attrs) {
6518       switch (Attr.getKindAsEnum()) {
6519       case Attribute::ReadNone:
6520         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6521         break;
6522       case Attribute::InaccessibleMemOnly:
6523         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6524         break;
6525       case Attribute::ArgMemOnly:
6526         if (UseArgMemOnly)
6527           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6528         else
6529           IRP.removeAttrs({Attribute::ArgMemOnly});
6530         break;
6531       case Attribute::InaccessibleMemOrArgMemOnly:
6532         if (UseArgMemOnly)
6533           State.addKnownBits(inverseLocation(
6534               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6535         else
6536           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6537         break;
6538       default:
6539         llvm_unreachable("Unexpected attribute!");
6540       }
6541     }
6542   }
6543 
6544   /// See AbstractAttribute::getDeducedAttributes(...).
6545   void getDeducedAttributes(LLVMContext &Ctx,
6546                             SmallVectorImpl<Attribute> &Attrs) const override {
6547     assert(Attrs.size() == 0);
6548     if (isAssumedReadNone()) {
6549       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6550     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6551       if (isAssumedInaccessibleMemOnly())
6552         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6553       else if (isAssumedArgMemOnly())
6554         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6555       else if (isAssumedInaccessibleOrArgMemOnly())
6556         Attrs.push_back(
6557             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6558     }
6559     assert(Attrs.size() <= 1);
6560   }
6561 
6562   /// See AbstractAttribute::manifest(...).
6563   ChangeStatus manifest(Attributor &A) override {
6564     const IRPosition &IRP = getIRPosition();
6565 
6566     // Check if we would improve the existing attributes first.
6567     SmallVector<Attribute, 4> DeducedAttrs;
6568     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6569     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6570           return IRP.hasAttr(Attr.getKindAsEnum(),
6571                              /* IgnoreSubsumingPositions */ true);
6572         }))
6573       return ChangeStatus::UNCHANGED;
6574 
6575     // Clear existing attributes.
6576     IRP.removeAttrs(AttrKinds);
6577     if (isAssumedReadNone())
6578       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6579 
6580     // Use the generic manifest method.
6581     return IRAttribute::manifest(A);
6582   }
6583 
6584   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6585   bool checkForAllAccessesToMemoryKind(
6586       function_ref<bool(const Instruction *, const Value *, AccessKind,
6587                         MemoryLocationsKind)>
6588           Pred,
6589       MemoryLocationsKind RequestedMLK) const override {
6590     if (!isValidState())
6591       return false;
6592 
6593     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6594     if (AssumedMLK == NO_LOCATIONS)
6595       return true;
6596 
6597     unsigned Idx = 0;
6598     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6599          CurMLK *= 2, ++Idx) {
6600       if (CurMLK & RequestedMLK)
6601         continue;
6602 
6603       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6604         for (const AccessInfo &AI : *Accesses)
6605           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6606             return false;
6607     }
6608 
6609     return true;
6610   }
6611 
6612   ChangeStatus indicatePessimisticFixpoint() override {
6613     // If we give up and indicate a pessimistic fixpoint this instruction will
6614     // become an access for all potential access kinds:
6615     // TODO: Add pointers for argmemonly and globals to improve the results of
6616     //       checkForAllAccessesToMemoryKind.
6617     bool Changed = false;
6618     MemoryLocationsKind KnownMLK = getKnown();
6619     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6620     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6621       if (!(CurMLK & KnownMLK))
6622         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6623                                   getAccessKindFromInst(I));
6624     return AAMemoryLocation::indicatePessimisticFixpoint();
6625   }
6626 
6627 protected:
6628   /// Helper struct to tie together an instruction that has a read or write
6629   /// effect with the pointer it accesses (if any).
6630   struct AccessInfo {
6631 
6632     /// The instruction that caused the access.
6633     const Instruction *I;
6634 
6635     /// The base pointer that is accessed, or null if unknown.
6636     const Value *Ptr;
6637 
6638     /// The kind of access (read/write/read+write).
6639     AccessKind Kind;
6640 
6641     bool operator==(const AccessInfo &RHS) const {
6642       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6643     }
6644     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6645       if (LHS.I != RHS.I)
6646         return LHS.I < RHS.I;
6647       if (LHS.Ptr != RHS.Ptr)
6648         return LHS.Ptr < RHS.Ptr;
6649       if (LHS.Kind != RHS.Kind)
6650         return LHS.Kind < RHS.Kind;
6651       return false;
6652     }
6653   };
6654 
6655   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6656   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6657   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6658   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6659 
6660   /// Categorize the pointer arguments of CB that might access memory in
6661   /// AccessedLoc and update the state and access map accordingly.
6662   void
6663   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6664                                      AAMemoryLocation::StateType &AccessedLocs,
6665                                      bool &Changed);
6666 
6667   /// Return the kind(s) of location that may be accessed by \p V.
6668   AAMemoryLocation::MemoryLocationsKind
6669   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6670 
6671   /// Return the access kind as determined by \p I.
6672   AccessKind getAccessKindFromInst(const Instruction *I) {
6673     AccessKind AK = READ_WRITE;
6674     if (I) {
6675       AK = I->mayReadFromMemory() ? READ : NONE;
6676       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6677     }
6678     return AK;
6679   }
6680 
6681   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6682   /// an access of kind \p AK to a \p MLK memory location with the access
6683   /// pointer \p Ptr.
6684   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6685                                  MemoryLocationsKind MLK, const Instruction *I,
6686                                  const Value *Ptr, bool &Changed,
6687                                  AccessKind AK = READ_WRITE) {
6688 
6689     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6690     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6691     if (!Accesses)
6692       Accesses = new (Allocator) AccessSet();
6693     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6694     State.removeAssumedBits(MLK);
6695   }
6696 
6697   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6698   /// arguments, and update the state and access map accordingly.
6699   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6700                           AAMemoryLocation::StateType &State, bool &Changed);
6701 
6702   /// Used to allocate access sets.
6703   BumpPtrAllocator &Allocator;
6704 
6705   /// The set of IR attributes AAMemoryLocation deals with.
6706   static const Attribute::AttrKind AttrKinds[4];
6707 };
6708 
6709 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6710     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6711     Attribute::InaccessibleMemOrArgMemOnly};
6712 
6713 void AAMemoryLocationImpl::categorizePtrValue(
6714     Attributor &A, const Instruction &I, const Value &Ptr,
6715     AAMemoryLocation::StateType &State, bool &Changed) {
6716   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6717                     << Ptr << " ["
6718                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6719 
6720   auto StripGEPCB = [](Value *V) -> Value * {
6721     auto *GEP = dyn_cast<GEPOperator>(V);
6722     while (GEP) {
6723       V = GEP->getPointerOperand();
6724       GEP = dyn_cast<GEPOperator>(V);
6725     }
6726     return V;
6727   };
6728 
6729   auto VisitValueCB = [&](Value &V, const Instruction *,
6730                           AAMemoryLocation::StateType &T,
6731                           bool Stripped) -> bool {
6732     // TODO: recognize the TBAA used for constant accesses.
6733     MemoryLocationsKind MLK = NO_LOCATIONS;
6734     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6735     if (isa<UndefValue>(V))
6736       return true;
6737     if (auto *Arg = dyn_cast<Argument>(&V)) {
6738       if (Arg->hasByValAttr())
6739         MLK = NO_LOCAL_MEM;
6740       else
6741         MLK = NO_ARGUMENT_MEM;
6742     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6743       // Reading constant memory is not treated as a read "effect" by the
6744       // function attr pass so we won't neither. Constants defined by TBAA are
6745       // similar. (We know we do not write it because it is constant.)
6746       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6747         if (GVar->isConstant())
6748           return true;
6749 
6750       if (GV->hasLocalLinkage())
6751         MLK = NO_GLOBAL_INTERNAL_MEM;
6752       else
6753         MLK = NO_GLOBAL_EXTERNAL_MEM;
6754     } else if (isa<ConstantPointerNull>(V) &&
6755                !NullPointerIsDefined(getAssociatedFunction(),
6756                                      V.getType()->getPointerAddressSpace())) {
6757       return true;
6758     } else if (isa<AllocaInst>(V)) {
6759       MLK = NO_LOCAL_MEM;
6760     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6761       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6762           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6763       if (NoAliasAA.isAssumedNoAlias())
6764         MLK = NO_MALLOCED_MEM;
6765       else
6766         MLK = NO_UNKOWN_MEM;
6767     } else {
6768       MLK = NO_UNKOWN_MEM;
6769     }
6770 
6771     assert(MLK != NO_LOCATIONS && "No location specified!");
6772     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6773                               getAccessKindFromInst(&I));
6774     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6775                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6776                       << "\n");
6777     return true;
6778   };
6779 
6780   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6781           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6782           /* UseValueSimplify */ true,
6783           /* MaxValues */ 32, StripGEPCB)) {
6784     LLVM_DEBUG(
6785         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6786     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6787                               getAccessKindFromInst(&I));
6788   } else {
6789     LLVM_DEBUG(
6790         dbgs()
6791         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6792         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6793   }
6794 }
6795 
6796 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6797     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6798     bool &Changed) {
6799   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6800 
6801     // Skip non-pointer arguments.
6802     const Value *ArgOp = CB.getArgOperand(ArgNo);
6803     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6804       continue;
6805 
6806     // Skip readnone arguments.
6807     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6808     const auto &ArgOpMemLocationAA =
6809         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6810 
6811     if (ArgOpMemLocationAA.isAssumedReadNone())
6812       continue;
6813 
6814     // Categorize potentially accessed pointer arguments as if there was an
6815     // access instruction with them as pointer.
6816     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6817   }
6818 }
6819 
6820 AAMemoryLocation::MemoryLocationsKind
6821 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6822                                                   bool &Changed) {
6823   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6824                     << I << "\n");
6825 
6826   AAMemoryLocation::StateType AccessedLocs;
6827   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6828 
6829   if (auto *CB = dyn_cast<CallBase>(&I)) {
6830 
6831     // First check if we assume any memory is access is visible.
6832     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6833         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6834     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6835                       << " [" << CBMemLocationAA << "]\n");
6836 
6837     if (CBMemLocationAA.isAssumedReadNone())
6838       return NO_LOCATIONS;
6839 
6840     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6841       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6842                                 Changed, getAccessKindFromInst(&I));
6843       return AccessedLocs.getAssumed();
6844     }
6845 
6846     uint32_t CBAssumedNotAccessedLocs =
6847         CBMemLocationAA.getAssumedNotAccessedLocation();
6848 
6849     // Set the argmemonly and global bit as we handle them separately below.
6850     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6851         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6852 
6853     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6854       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6855         continue;
6856       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6857                                 getAccessKindFromInst(&I));
6858     }
6859 
6860     // Now handle global memory if it might be accessed. This is slightly tricky
6861     // as NO_GLOBAL_MEM has multiple bits set.
6862     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6863     if (HasGlobalAccesses) {
6864       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6865                             AccessKind Kind, MemoryLocationsKind MLK) {
6866         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6867                                   getAccessKindFromInst(&I));
6868         return true;
6869       };
6870       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6871               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6872         return AccessedLocs.getWorstState();
6873     }
6874 
6875     LLVM_DEBUG(
6876         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6877                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6878 
6879     // Now handle argument memory if it might be accessed.
6880     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6881     if (HasArgAccesses)
6882       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6883 
6884     LLVM_DEBUG(
6885         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6886                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6887 
6888     return AccessedLocs.getAssumed();
6889   }
6890 
6891   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6892     LLVM_DEBUG(
6893         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6894                << I << " [" << *Ptr << "]\n");
6895     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6896     return AccessedLocs.getAssumed();
6897   }
6898 
6899   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6900                     << I << "\n");
6901   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6902                             getAccessKindFromInst(&I));
6903   return AccessedLocs.getAssumed();
6904 }
6905 
6906 /// An AA to represent the memory behavior function attributes.
6907 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6908   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6909       : AAMemoryLocationImpl(IRP, A) {}
6910 
6911   /// See AbstractAttribute::updateImpl(Attributor &A).
6912   virtual ChangeStatus updateImpl(Attributor &A) override {
6913 
6914     const auto &MemBehaviorAA =
6915         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6916     if (MemBehaviorAA.isAssumedReadNone()) {
6917       if (MemBehaviorAA.isKnownReadNone())
6918         return indicateOptimisticFixpoint();
6919       assert(isAssumedReadNone() &&
6920              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6921       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6922       return ChangeStatus::UNCHANGED;
6923     }
6924 
6925     // The current assumed state used to determine a change.
6926     auto AssumedState = getAssumed();
6927     bool Changed = false;
6928 
6929     auto CheckRWInst = [&](Instruction &I) {
6930       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6931       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6932                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6933       removeAssumedBits(inverseLocation(MLK, false, false));
6934       // Stop once only the valid bit set in the *not assumed location*, thus
6935       // once we don't actually exclude any memory locations in the state.
6936       return getAssumedNotAccessedLocation() != VALID_STATE;
6937     };
6938 
6939     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6940       return indicatePessimisticFixpoint();
6941 
6942     Changed |= AssumedState != getAssumed();
6943     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6944   }
6945 
6946   /// See AbstractAttribute::trackStatistics()
6947   void trackStatistics() const override {
6948     if (isAssumedReadNone())
6949       STATS_DECLTRACK_FN_ATTR(readnone)
6950     else if (isAssumedArgMemOnly())
6951       STATS_DECLTRACK_FN_ATTR(argmemonly)
6952     else if (isAssumedInaccessibleMemOnly())
6953       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6954     else if (isAssumedInaccessibleOrArgMemOnly())
6955       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6956   }
6957 };
6958 
6959 /// AAMemoryLocation attribute for call sites.
6960 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6961   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6962       : AAMemoryLocationImpl(IRP, A) {}
6963 
6964   /// See AbstractAttribute::initialize(...).
6965   void initialize(Attributor &A) override {
6966     AAMemoryLocationImpl::initialize(A);
6967     Function *F = getAssociatedFunction();
6968     if (!F || F->isDeclaration())
6969       indicatePessimisticFixpoint();
6970   }
6971 
6972   /// See AbstractAttribute::updateImpl(...).
6973   ChangeStatus updateImpl(Attributor &A) override {
6974     // TODO: Once we have call site specific value information we can provide
6975     //       call site specific liveness liveness information and then it makes
6976     //       sense to specialize attributes for call sites arguments instead of
6977     //       redirecting requests to the callee argument.
6978     Function *F = getAssociatedFunction();
6979     const IRPosition &FnPos = IRPosition::function(*F);
6980     auto &FnAA =
6981         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6982     bool Changed = false;
6983     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6984                           AccessKind Kind, MemoryLocationsKind MLK) {
6985       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6986                                 getAccessKindFromInst(I));
6987       return true;
6988     };
6989     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6990       return indicatePessimisticFixpoint();
6991     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6992   }
6993 
6994   /// See AbstractAttribute::trackStatistics()
6995   void trackStatistics() const override {
6996     if (isAssumedReadNone())
6997       STATS_DECLTRACK_CS_ATTR(readnone)
6998   }
6999 };
7000 
7001 /// ------------------ Value Constant Range Attribute -------------------------
7002 
7003 struct AAValueConstantRangeImpl : AAValueConstantRange {
7004   using StateType = IntegerRangeState;
7005   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7006       : AAValueConstantRange(IRP, A) {}
7007 
7008   /// See AbstractAttribute::getAsStr().
7009   const std::string getAsStr() const override {
7010     std::string Str;
7011     llvm::raw_string_ostream OS(Str);
7012     OS << "range(" << getBitWidth() << ")<";
7013     getKnown().print(OS);
7014     OS << " / ";
7015     getAssumed().print(OS);
7016     OS << ">";
7017     return OS.str();
7018   }
7019 
7020   /// Helper function to get a SCEV expr for the associated value at program
7021   /// point \p I.
7022   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7023     if (!getAnchorScope())
7024       return nullptr;
7025 
7026     ScalarEvolution *SE =
7027         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7028             *getAnchorScope());
7029 
7030     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7031         *getAnchorScope());
7032 
7033     if (!SE || !LI)
7034       return nullptr;
7035 
7036     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7037     if (!I)
7038       return S;
7039 
7040     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7041   }
7042 
7043   /// Helper function to get a range from SCEV for the associated value at
7044   /// program point \p I.
7045   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7046                                          const Instruction *I = nullptr) const {
7047     if (!getAnchorScope())
7048       return getWorstState(getBitWidth());
7049 
7050     ScalarEvolution *SE =
7051         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7052             *getAnchorScope());
7053 
7054     const SCEV *S = getSCEV(A, I);
7055     if (!SE || !S)
7056       return getWorstState(getBitWidth());
7057 
7058     return SE->getUnsignedRange(S);
7059   }
7060 
7061   /// Helper function to get a range from LVI for the associated value at
7062   /// program point \p I.
7063   ConstantRange
7064   getConstantRangeFromLVI(Attributor &A,
7065                           const Instruction *CtxI = nullptr) const {
7066     if (!getAnchorScope())
7067       return getWorstState(getBitWidth());
7068 
7069     LazyValueInfo *LVI =
7070         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7071             *getAnchorScope());
7072 
7073     if (!LVI || !CtxI)
7074       return getWorstState(getBitWidth());
7075     return LVI->getConstantRange(&getAssociatedValue(),
7076                                  const_cast<Instruction *>(CtxI));
7077   }
7078 
7079   /// See AAValueConstantRange::getKnownConstantRange(..).
7080   ConstantRange
7081   getKnownConstantRange(Attributor &A,
7082                         const Instruction *CtxI = nullptr) const override {
7083     if (!CtxI || CtxI == getCtxI())
7084       return getKnown();
7085 
7086     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7087     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7088     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7089   }
7090 
7091   /// See AAValueConstantRange::getAssumedConstantRange(..).
7092   ConstantRange
7093   getAssumedConstantRange(Attributor &A,
7094                           const Instruction *CtxI = nullptr) const override {
7095     // TODO: Make SCEV use Attributor assumption.
7096     //       We may be able to bound a variable range via assumptions in
7097     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7098     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7099 
7100     if (!CtxI || CtxI == getCtxI())
7101       return getAssumed();
7102 
7103     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7104     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7105     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7106   }
7107 
7108   /// See AbstractAttribute::initialize(..).
7109   void initialize(Attributor &A) override {
7110     // Intersect a range given by SCEV.
7111     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7112 
7113     // Intersect a range given by LVI.
7114     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7115   }
7116 
7117   /// Helper function to create MDNode for range metadata.
7118   static MDNode *
7119   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7120                             const ConstantRange &AssumedConstantRange) {
7121     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7122                                   Ty, AssumedConstantRange.getLower())),
7123                               ConstantAsMetadata::get(ConstantInt::get(
7124                                   Ty, AssumedConstantRange.getUpper()))};
7125     return MDNode::get(Ctx, LowAndHigh);
7126   }
7127 
7128   /// Return true if \p Assumed is included in \p KnownRanges.
7129   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7130 
7131     if (Assumed.isFullSet())
7132       return false;
7133 
7134     if (!KnownRanges)
7135       return true;
7136 
7137     // If multiple ranges are annotated in IR, we give up to annotate assumed
7138     // range for now.
7139 
7140     // TODO:  If there exists a known range which containts assumed range, we
7141     // can say assumed range is better.
7142     if (KnownRanges->getNumOperands() > 2)
7143       return false;
7144 
7145     ConstantInt *Lower =
7146         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7147     ConstantInt *Upper =
7148         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7149 
7150     ConstantRange Known(Lower->getValue(), Upper->getValue());
7151     return Known.contains(Assumed) && Known != Assumed;
7152   }
7153 
7154   /// Helper function to set range metadata.
7155   static bool
7156   setRangeMetadataIfisBetterRange(Instruction *I,
7157                                   const ConstantRange &AssumedConstantRange) {
7158     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7159     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7160       if (!AssumedConstantRange.isEmptySet()) {
7161         I->setMetadata(LLVMContext::MD_range,
7162                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7163                                                  AssumedConstantRange));
7164         return true;
7165       }
7166     }
7167     return false;
7168   }
7169 
7170   /// See AbstractAttribute::manifest()
7171   ChangeStatus manifest(Attributor &A) override {
7172     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7173     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7174     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7175 
7176     auto &V = getAssociatedValue();
7177     if (!AssumedConstantRange.isEmptySet() &&
7178         !AssumedConstantRange.isSingleElement()) {
7179       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7180         assert(I == getCtxI() && "Should not annotate an instruction which is "
7181                                  "not the context instruction");
7182         if (isa<CallInst>(I) || isa<LoadInst>(I))
7183           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7184             Changed = ChangeStatus::CHANGED;
7185       }
7186     }
7187 
7188     return Changed;
7189   }
7190 };
7191 
7192 struct AAValueConstantRangeArgument final
7193     : AAArgumentFromCallSiteArguments<
7194           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7195           true /* BridgeCallBaseContext */> {
7196   using Base = AAArgumentFromCallSiteArguments<
7197       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7198       true /* BridgeCallBaseContext */>;
7199   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7200       : Base(IRP, A) {}
7201 
7202   /// See AbstractAttribute::initialize(..).
7203   void initialize(Attributor &A) override {
7204     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7205       indicatePessimisticFixpoint();
7206     } else {
7207       Base::initialize(A);
7208     }
7209   }
7210 
7211   /// See AbstractAttribute::trackStatistics()
7212   void trackStatistics() const override {
7213     STATS_DECLTRACK_ARG_ATTR(value_range)
7214   }
7215 };
7216 
7217 struct AAValueConstantRangeReturned
7218     : AAReturnedFromReturnedValues<AAValueConstantRange,
7219                                    AAValueConstantRangeImpl,
7220                                    AAValueConstantRangeImpl::StateType,
7221                                    /* PropogateCallBaseContext */ true> {
7222   using Base =
7223       AAReturnedFromReturnedValues<AAValueConstantRange,
7224                                    AAValueConstantRangeImpl,
7225                                    AAValueConstantRangeImpl::StateType,
7226                                    /* PropogateCallBaseContext */ true>;
7227   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7228       : Base(IRP, A) {}
7229 
7230   /// See AbstractAttribute::initialize(...).
7231   void initialize(Attributor &A) override {}
7232 
7233   /// See AbstractAttribute::trackStatistics()
7234   void trackStatistics() const override {
7235     STATS_DECLTRACK_FNRET_ATTR(value_range)
7236   }
7237 };
7238 
7239 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7240   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7241       : AAValueConstantRangeImpl(IRP, A) {}
7242 
7243   /// See AbstractAttribute::initialize(...).
7244   void initialize(Attributor &A) override {
7245     AAValueConstantRangeImpl::initialize(A);
7246     Value &V = getAssociatedValue();
7247 
7248     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7249       unionAssumed(ConstantRange(C->getValue()));
7250       indicateOptimisticFixpoint();
7251       return;
7252     }
7253 
7254     if (isa<UndefValue>(&V)) {
7255       // Collapse the undef state to 0.
7256       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7257       indicateOptimisticFixpoint();
7258       return;
7259     }
7260 
7261     if (isa<CallBase>(&V))
7262       return;
7263 
7264     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7265       return;
7266     // If it is a load instruction with range metadata, use it.
7267     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7268       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7269         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7270         return;
7271       }
7272 
7273     // We can work with PHI and select instruction as we traverse their operands
7274     // during update.
7275     if (isa<SelectInst>(V) || isa<PHINode>(V))
7276       return;
7277 
7278     // Otherwise we give up.
7279     indicatePessimisticFixpoint();
7280 
7281     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7282                       << getAssociatedValue() << "\n");
7283   }
7284 
7285   bool calculateBinaryOperator(
7286       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7287       const Instruction *CtxI,
7288       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7289     Value *LHS = BinOp->getOperand(0);
7290     Value *RHS = BinOp->getOperand(1);
7291     // TODO: Allow non integers as well.
7292     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7293       return false;
7294 
7295     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7296         *this, IRPosition::value(*LHS, getCallBaseContext()),
7297         DepClassTy::REQUIRED);
7298     QuerriedAAs.push_back(&LHSAA);
7299     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7300 
7301     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7302         *this, IRPosition::value(*RHS, getCallBaseContext()),
7303         DepClassTy::REQUIRED);
7304     QuerriedAAs.push_back(&RHSAA);
7305     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7306 
7307     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7308 
7309     T.unionAssumed(AssumedRange);
7310 
7311     // TODO: Track a known state too.
7312 
7313     return T.isValidState();
7314   }
7315 
7316   bool calculateCastInst(
7317       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7318       const Instruction *CtxI,
7319       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7320     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7321     // TODO: Allow non integers as well.
7322     Value &OpV = *CastI->getOperand(0);
7323     if (!OpV.getType()->isIntegerTy())
7324       return false;
7325 
7326     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7327         *this, IRPosition::value(OpV, getCallBaseContext()),
7328         DepClassTy::REQUIRED);
7329     QuerriedAAs.push_back(&OpAA);
7330     T.unionAssumed(
7331         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7332     return T.isValidState();
7333   }
7334 
7335   bool
7336   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7337                    const Instruction *CtxI,
7338                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7339     Value *LHS = CmpI->getOperand(0);
7340     Value *RHS = CmpI->getOperand(1);
7341     // TODO: Allow non integers as well.
7342     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7343       return false;
7344 
7345     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7346         *this, IRPosition::value(*LHS, getCallBaseContext()),
7347         DepClassTy::REQUIRED);
7348     QuerriedAAs.push_back(&LHSAA);
7349     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7350         *this, IRPosition::value(*RHS, getCallBaseContext()),
7351         DepClassTy::REQUIRED);
7352     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7353     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7354 
7355     // If one of them is empty set, we can't decide.
7356     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7357       return true;
7358 
7359     bool MustTrue = false, MustFalse = false;
7360 
7361     auto AllowedRegion =
7362         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7363 
7364     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7365       MustFalse = true;
7366 
7367     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7368       MustTrue = true;
7369 
7370     assert((!MustTrue || !MustFalse) &&
7371            "Either MustTrue or MustFalse should be false!");
7372 
7373     if (MustTrue)
7374       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7375     else if (MustFalse)
7376       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7377     else
7378       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7379 
7380     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7381                       << " " << RHSAA << "\n");
7382 
7383     // TODO: Track a known state too.
7384     return T.isValidState();
7385   }
7386 
7387   /// See AbstractAttribute::updateImpl(...).
7388   ChangeStatus updateImpl(Attributor &A) override {
7389     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7390                             IntegerRangeState &T, bool Stripped) -> bool {
7391       Instruction *I = dyn_cast<Instruction>(&V);
7392       if (!I || isa<CallBase>(I)) {
7393 
7394         // If the value is not instruction, we query AA to Attributor.
7395         const auto &AA = A.getAAFor<AAValueConstantRange>(
7396             *this, IRPosition::value(V, getCallBaseContext()),
7397             DepClassTy::REQUIRED);
7398 
7399         // Clamp operator is not used to utilize a program point CtxI.
7400         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7401 
7402         return T.isValidState();
7403       }
7404 
7405       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7406       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7407         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7408           return false;
7409       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7410         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7411           return false;
7412       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7413         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7414           return false;
7415       } else {
7416         // Give up with other instructions.
7417         // TODO: Add other instructions
7418 
7419         T.indicatePessimisticFixpoint();
7420         return false;
7421       }
7422 
7423       // Catch circular reasoning in a pessimistic way for now.
7424       // TODO: Check how the range evolves and if we stripped anything, see also
7425       //       AADereferenceable or AAAlign for similar situations.
7426       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7427         if (QueriedAA != this)
7428           continue;
7429         // If we are in a stady state we do not need to worry.
7430         if (T.getAssumed() == getState().getAssumed())
7431           continue;
7432         T.indicatePessimisticFixpoint();
7433       }
7434 
7435       return T.isValidState();
7436     };
7437 
7438     IntegerRangeState T(getBitWidth());
7439 
7440     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7441             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7442             /* UseValueSimplify */ false))
7443       return indicatePessimisticFixpoint();
7444 
7445     return clampStateAndIndicateChange(getState(), T);
7446   }
7447 
7448   /// See AbstractAttribute::trackStatistics()
7449   void trackStatistics() const override {
7450     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7451   }
7452 };
7453 
7454 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7455   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7456       : AAValueConstantRangeImpl(IRP, A) {}
7457 
7458   /// See AbstractAttribute::initialize(...).
7459   ChangeStatus updateImpl(Attributor &A) override {
7460     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7461                      "not be called");
7462   }
7463 
7464   /// See AbstractAttribute::trackStatistics()
7465   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7466 };
7467 
7468 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7469   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7470       : AAValueConstantRangeFunction(IRP, A) {}
7471 
7472   /// See AbstractAttribute::trackStatistics()
7473   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7474 };
7475 
7476 struct AAValueConstantRangeCallSiteReturned
7477     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7478                                      AAValueConstantRangeImpl,
7479                                      AAValueConstantRangeImpl::StateType,
7480                                      /* IntroduceCallBaseContext */ true> {
7481   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7482       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7483                                        AAValueConstantRangeImpl,
7484                                        AAValueConstantRangeImpl::StateType,
7485                                        /* IntroduceCallBaseContext */ true>(IRP,
7486                                                                             A) {
7487   }
7488 
7489   /// See AbstractAttribute::initialize(...).
7490   void initialize(Attributor &A) override {
7491     // If it is a load instruction with range metadata, use the metadata.
7492     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7493       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7494         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7495 
7496     AAValueConstantRangeImpl::initialize(A);
7497   }
7498 
7499   /// See AbstractAttribute::trackStatistics()
7500   void trackStatistics() const override {
7501     STATS_DECLTRACK_CSRET_ATTR(value_range)
7502   }
7503 };
7504 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7505   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7506       : AAValueConstantRangeFloating(IRP, A) {}
7507 
7508   /// See AbstractAttribute::manifest()
7509   ChangeStatus manifest(Attributor &A) override {
7510     return ChangeStatus::UNCHANGED;
7511   }
7512 
7513   /// See AbstractAttribute::trackStatistics()
7514   void trackStatistics() const override {
7515     STATS_DECLTRACK_CSARG_ATTR(value_range)
7516   }
7517 };
7518 
7519 /// ------------------ Potential Values Attribute -------------------------
7520 
7521 struct AAPotentialValuesImpl : AAPotentialValues {
7522   using StateType = PotentialConstantIntValuesState;
7523 
7524   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7525       : AAPotentialValues(IRP, A) {}
7526 
7527   /// See AbstractAttribute::getAsStr().
7528   const std::string getAsStr() const override {
7529     std::string Str;
7530     llvm::raw_string_ostream OS(Str);
7531     OS << getState();
7532     return OS.str();
7533   }
7534 
7535   /// See AbstractAttribute::updateImpl(...).
7536   ChangeStatus updateImpl(Attributor &A) override {
7537     return indicatePessimisticFixpoint();
7538   }
7539 };
7540 
7541 struct AAPotentialValuesArgument final
7542     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7543                                       PotentialConstantIntValuesState> {
7544   using Base =
7545       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7546                                       PotentialConstantIntValuesState>;
7547   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7548       : Base(IRP, A) {}
7549 
7550   /// See AbstractAttribute::initialize(..).
7551   void initialize(Attributor &A) override {
7552     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7553       indicatePessimisticFixpoint();
7554     } else {
7555       Base::initialize(A);
7556     }
7557   }
7558 
7559   /// See AbstractAttribute::trackStatistics()
7560   void trackStatistics() const override {
7561     STATS_DECLTRACK_ARG_ATTR(potential_values)
7562   }
7563 };
7564 
7565 struct AAPotentialValuesReturned
7566     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7567   using Base =
7568       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7569   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7570       : Base(IRP, A) {}
7571 
7572   /// See AbstractAttribute::trackStatistics()
7573   void trackStatistics() const override {
7574     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7575   }
7576 };
7577 
7578 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7579   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7580       : AAPotentialValuesImpl(IRP, A) {}
7581 
7582   /// See AbstractAttribute::initialize(..).
7583   void initialize(Attributor &A) override {
7584     Value &V = getAssociatedValue();
7585 
7586     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7587       unionAssumed(C->getValue());
7588       indicateOptimisticFixpoint();
7589       return;
7590     }
7591 
7592     if (isa<UndefValue>(&V)) {
7593       unionAssumedWithUndef();
7594       indicateOptimisticFixpoint();
7595       return;
7596     }
7597 
7598     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7599       return;
7600 
7601     if (isa<SelectInst>(V) || isa<PHINode>(V))
7602       return;
7603 
7604     indicatePessimisticFixpoint();
7605 
7606     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7607                       << getAssociatedValue() << "\n");
7608   }
7609 
7610   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7611                                 const APInt &RHS) {
7612     ICmpInst::Predicate Pred = ICI->getPredicate();
7613     switch (Pred) {
7614     case ICmpInst::ICMP_UGT:
7615       return LHS.ugt(RHS);
7616     case ICmpInst::ICMP_SGT:
7617       return LHS.sgt(RHS);
7618     case ICmpInst::ICMP_EQ:
7619       return LHS.eq(RHS);
7620     case ICmpInst::ICMP_UGE:
7621       return LHS.uge(RHS);
7622     case ICmpInst::ICMP_SGE:
7623       return LHS.sge(RHS);
7624     case ICmpInst::ICMP_ULT:
7625       return LHS.ult(RHS);
7626     case ICmpInst::ICMP_SLT:
7627       return LHS.slt(RHS);
7628     case ICmpInst::ICMP_NE:
7629       return LHS.ne(RHS);
7630     case ICmpInst::ICMP_ULE:
7631       return LHS.ule(RHS);
7632     case ICmpInst::ICMP_SLE:
7633       return LHS.sle(RHS);
7634     default:
7635       llvm_unreachable("Invalid ICmp predicate!");
7636     }
7637   }
7638 
7639   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7640                                  uint32_t ResultBitWidth) {
7641     Instruction::CastOps CastOp = CI->getOpcode();
7642     switch (CastOp) {
7643     default:
7644       llvm_unreachable("unsupported or not integer cast");
7645     case Instruction::Trunc:
7646       return Src.trunc(ResultBitWidth);
7647     case Instruction::SExt:
7648       return Src.sext(ResultBitWidth);
7649     case Instruction::ZExt:
7650       return Src.zext(ResultBitWidth);
7651     case Instruction::BitCast:
7652       return Src;
7653     }
7654   }
7655 
7656   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7657                                        const APInt &LHS, const APInt &RHS,
7658                                        bool &SkipOperation, bool &Unsupported) {
7659     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7660     // Unsupported is set to true when the binary operator is not supported.
7661     // SkipOperation is set to true when UB occur with the given operand pair
7662     // (LHS, RHS).
7663     // TODO: we should look at nsw and nuw keywords to handle operations
7664     //       that create poison or undef value.
7665     switch (BinOpcode) {
7666     default:
7667       Unsupported = true;
7668       return LHS;
7669     case Instruction::Add:
7670       return LHS + RHS;
7671     case Instruction::Sub:
7672       return LHS - RHS;
7673     case Instruction::Mul:
7674       return LHS * RHS;
7675     case Instruction::UDiv:
7676       if (RHS.isNullValue()) {
7677         SkipOperation = true;
7678         return LHS;
7679       }
7680       return LHS.udiv(RHS);
7681     case Instruction::SDiv:
7682       if (RHS.isNullValue()) {
7683         SkipOperation = true;
7684         return LHS;
7685       }
7686       return LHS.sdiv(RHS);
7687     case Instruction::URem:
7688       if (RHS.isNullValue()) {
7689         SkipOperation = true;
7690         return LHS;
7691       }
7692       return LHS.urem(RHS);
7693     case Instruction::SRem:
7694       if (RHS.isNullValue()) {
7695         SkipOperation = true;
7696         return LHS;
7697       }
7698       return LHS.srem(RHS);
7699     case Instruction::Shl:
7700       return LHS.shl(RHS);
7701     case Instruction::LShr:
7702       return LHS.lshr(RHS);
7703     case Instruction::AShr:
7704       return LHS.ashr(RHS);
7705     case Instruction::And:
7706       return LHS & RHS;
7707     case Instruction::Or:
7708       return LHS | RHS;
7709     case Instruction::Xor:
7710       return LHS ^ RHS;
7711     }
7712   }
7713 
7714   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7715                                            const APInt &LHS, const APInt &RHS) {
7716     bool SkipOperation = false;
7717     bool Unsupported = false;
7718     APInt Result =
7719         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7720     if (Unsupported)
7721       return false;
7722     // If SkipOperation is true, we can ignore this operand pair (L, R).
7723     if (!SkipOperation)
7724       unionAssumed(Result);
7725     return isValidState();
7726   }
7727 
7728   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7729     auto AssumedBefore = getAssumed();
7730     Value *LHS = ICI->getOperand(0);
7731     Value *RHS = ICI->getOperand(1);
7732     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7733       return indicatePessimisticFixpoint();
7734 
7735     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7736                                                 DepClassTy::REQUIRED);
7737     if (!LHSAA.isValidState())
7738       return indicatePessimisticFixpoint();
7739 
7740     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7741                                                 DepClassTy::REQUIRED);
7742     if (!RHSAA.isValidState())
7743       return indicatePessimisticFixpoint();
7744 
7745     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7746     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7747 
7748     // TODO: make use of undef flag to limit potential values aggressively.
7749     bool MaybeTrue = false, MaybeFalse = false;
7750     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7751     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7752       // The result of any comparison between undefs can be soundly replaced
7753       // with undef.
7754       unionAssumedWithUndef();
7755     } else if (LHSAA.undefIsContained()) {
7756       for (const APInt &R : RHSAAPVS) {
7757         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7758         MaybeTrue |= CmpResult;
7759         MaybeFalse |= !CmpResult;
7760         if (MaybeTrue & MaybeFalse)
7761           return indicatePessimisticFixpoint();
7762       }
7763     } else if (RHSAA.undefIsContained()) {
7764       for (const APInt &L : LHSAAPVS) {
7765         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7766         MaybeTrue |= CmpResult;
7767         MaybeFalse |= !CmpResult;
7768         if (MaybeTrue & MaybeFalse)
7769           return indicatePessimisticFixpoint();
7770       }
7771     } else {
7772       for (const APInt &L : LHSAAPVS) {
7773         for (const APInt &R : RHSAAPVS) {
7774           bool CmpResult = calculateICmpInst(ICI, L, R);
7775           MaybeTrue |= CmpResult;
7776           MaybeFalse |= !CmpResult;
7777           if (MaybeTrue & MaybeFalse)
7778             return indicatePessimisticFixpoint();
7779         }
7780       }
7781     }
7782     if (MaybeTrue)
7783       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7784     if (MaybeFalse)
7785       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7786     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7787                                          : ChangeStatus::CHANGED;
7788   }
7789 
7790   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7791     auto AssumedBefore = getAssumed();
7792     Value *LHS = SI->getTrueValue();
7793     Value *RHS = SI->getFalseValue();
7794     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7795       return indicatePessimisticFixpoint();
7796 
7797     // TODO: Use assumed simplified condition value
7798     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7799                                                 DepClassTy::REQUIRED);
7800     if (!LHSAA.isValidState())
7801       return indicatePessimisticFixpoint();
7802 
7803     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7804                                                 DepClassTy::REQUIRED);
7805     if (!RHSAA.isValidState())
7806       return indicatePessimisticFixpoint();
7807 
7808     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7809       // select i1 *, undef , undef => undef
7810       unionAssumedWithUndef();
7811     else {
7812       unionAssumed(LHSAA);
7813       unionAssumed(RHSAA);
7814     }
7815     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7816                                          : ChangeStatus::CHANGED;
7817   }
7818 
7819   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7820     auto AssumedBefore = getAssumed();
7821     if (!CI->isIntegerCast())
7822       return indicatePessimisticFixpoint();
7823     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7824     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7825     Value *Src = CI->getOperand(0);
7826     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7827                                                 DepClassTy::REQUIRED);
7828     if (!SrcAA.isValidState())
7829       return indicatePessimisticFixpoint();
7830     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7831     if (SrcAA.undefIsContained())
7832       unionAssumedWithUndef();
7833     else {
7834       for (const APInt &S : SrcAAPVS) {
7835         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7836         unionAssumed(T);
7837       }
7838     }
7839     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7840                                          : ChangeStatus::CHANGED;
7841   }
7842 
7843   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7844     auto AssumedBefore = getAssumed();
7845     Value *LHS = BinOp->getOperand(0);
7846     Value *RHS = BinOp->getOperand(1);
7847     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7848       return indicatePessimisticFixpoint();
7849 
7850     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7851                                                 DepClassTy::REQUIRED);
7852     if (!LHSAA.isValidState())
7853       return indicatePessimisticFixpoint();
7854 
7855     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7856                                                 DepClassTy::REQUIRED);
7857     if (!RHSAA.isValidState())
7858       return indicatePessimisticFixpoint();
7859 
7860     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7861     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7862     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7863 
7864     // TODO: make use of undef flag to limit potential values aggressively.
7865     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7866       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7867         return indicatePessimisticFixpoint();
7868     } else if (LHSAA.undefIsContained()) {
7869       for (const APInt &R : RHSAAPVS) {
7870         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7871           return indicatePessimisticFixpoint();
7872       }
7873     } else if (RHSAA.undefIsContained()) {
7874       for (const APInt &L : LHSAAPVS) {
7875         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7876           return indicatePessimisticFixpoint();
7877       }
7878     } else {
7879       for (const APInt &L : LHSAAPVS) {
7880         for (const APInt &R : RHSAAPVS) {
7881           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7882             return indicatePessimisticFixpoint();
7883         }
7884       }
7885     }
7886     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7887                                          : ChangeStatus::CHANGED;
7888   }
7889 
7890   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7891     auto AssumedBefore = getAssumed();
7892     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7893       Value *IncomingValue = PHI->getIncomingValue(u);
7894       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7895           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7896       if (!PotentialValuesAA.isValidState())
7897         return indicatePessimisticFixpoint();
7898       if (PotentialValuesAA.undefIsContained())
7899         unionAssumedWithUndef();
7900       else
7901         unionAssumed(PotentialValuesAA.getAssumed());
7902     }
7903     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7904                                          : ChangeStatus::CHANGED;
7905   }
7906 
7907   /// See AbstractAttribute::updateImpl(...).
7908   ChangeStatus updateImpl(Attributor &A) override {
7909     Value &V = getAssociatedValue();
7910     Instruction *I = dyn_cast<Instruction>(&V);
7911 
7912     if (auto *ICI = dyn_cast<ICmpInst>(I))
7913       return updateWithICmpInst(A, ICI);
7914 
7915     if (auto *SI = dyn_cast<SelectInst>(I))
7916       return updateWithSelectInst(A, SI);
7917 
7918     if (auto *CI = dyn_cast<CastInst>(I))
7919       return updateWithCastInst(A, CI);
7920 
7921     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7922       return updateWithBinaryOperator(A, BinOp);
7923 
7924     if (auto *PHI = dyn_cast<PHINode>(I))
7925       return updateWithPHINode(A, PHI);
7926 
7927     return indicatePessimisticFixpoint();
7928   }
7929 
7930   /// See AbstractAttribute::trackStatistics()
7931   void trackStatistics() const override {
7932     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7933   }
7934 };
7935 
7936 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7937   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7938       : AAPotentialValuesImpl(IRP, A) {}
7939 
7940   /// See AbstractAttribute::initialize(...).
7941   ChangeStatus updateImpl(Attributor &A) override {
7942     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7943                      "not be called");
7944   }
7945 
7946   /// See AbstractAttribute::trackStatistics()
7947   void trackStatistics() const override {
7948     STATS_DECLTRACK_FN_ATTR(potential_values)
7949   }
7950 };
7951 
7952 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7953   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7954       : AAPotentialValuesFunction(IRP, A) {}
7955 
7956   /// See AbstractAttribute::trackStatistics()
7957   void trackStatistics() const override {
7958     STATS_DECLTRACK_CS_ATTR(potential_values)
7959   }
7960 };
7961 
7962 struct AAPotentialValuesCallSiteReturned
7963     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7964   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7965       : AACallSiteReturnedFromReturned<AAPotentialValues,
7966                                        AAPotentialValuesImpl>(IRP, A) {}
7967 
7968   /// See AbstractAttribute::trackStatistics()
7969   void trackStatistics() const override {
7970     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7971   }
7972 };
7973 
7974 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7975   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7976       : AAPotentialValuesFloating(IRP, A) {}
7977 
7978   /// See AbstractAttribute::initialize(..).
7979   void initialize(Attributor &A) override {
7980     Value &V = getAssociatedValue();
7981 
7982     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7983       unionAssumed(C->getValue());
7984       indicateOptimisticFixpoint();
7985       return;
7986     }
7987 
7988     if (isa<UndefValue>(&V)) {
7989       unionAssumedWithUndef();
7990       indicateOptimisticFixpoint();
7991       return;
7992     }
7993   }
7994 
7995   /// See AbstractAttribute::updateImpl(...).
7996   ChangeStatus updateImpl(Attributor &A) override {
7997     Value &V = getAssociatedValue();
7998     auto AssumedBefore = getAssumed();
7999     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
8000                                              DepClassTy::REQUIRED);
8001     const auto &S = AA.getAssumed();
8002     unionAssumed(S);
8003     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8004                                          : ChangeStatus::CHANGED;
8005   }
8006 
8007   /// See AbstractAttribute::trackStatistics()
8008   void trackStatistics() const override {
8009     STATS_DECLTRACK_CSARG_ATTR(potential_values)
8010   }
8011 };
8012 
8013 /// ------------------------ NoUndef Attribute ---------------------------------
8014 struct AANoUndefImpl : AANoUndef {
8015   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
8016 
8017   /// See AbstractAttribute::initialize(...).
8018   void initialize(Attributor &A) override {
8019     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
8020       indicateOptimisticFixpoint();
8021       return;
8022     }
8023     Value &V = getAssociatedValue();
8024     if (isa<UndefValue>(V))
8025       indicatePessimisticFixpoint();
8026     else if (isa<FreezeInst>(V))
8027       indicateOptimisticFixpoint();
8028     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8029              isGuaranteedNotToBeUndefOrPoison(&V))
8030       indicateOptimisticFixpoint();
8031     else
8032       AANoUndef::initialize(A);
8033   }
8034 
8035   /// See followUsesInMBEC
8036   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
8037                        AANoUndef::StateType &State) {
8038     const Value *UseV = U->get();
8039     const DominatorTree *DT = nullptr;
8040     AssumptionCache *AC = nullptr;
8041     InformationCache &InfoCache = A.getInfoCache();
8042     if (Function *F = getAnchorScope()) {
8043       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
8044       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
8045     }
8046     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
8047     bool TrackUse = false;
8048     // Track use for instructions which must produce undef or poison bits when
8049     // at least one operand contains such bits.
8050     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
8051       TrackUse = true;
8052     return TrackUse;
8053   }
8054 
8055   /// See AbstractAttribute::getAsStr().
8056   const std::string getAsStr() const override {
8057     return getAssumed() ? "noundef" : "may-undef-or-poison";
8058   }
8059 
8060   ChangeStatus manifest(Attributor &A) override {
8061     // We don't manifest noundef attribute for dead positions because the
8062     // associated values with dead positions would be replaced with undef
8063     // values.
8064     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
8065       return ChangeStatus::UNCHANGED;
8066     // A position whose simplified value does not have any value is
8067     // considered to be dead. We don't manifest noundef in such positions for
8068     // the same reason above.
8069     bool UsedAssumedInformation = false;
8070     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
8071              .hasValue())
8072       return ChangeStatus::UNCHANGED;
8073     return AANoUndef::manifest(A);
8074   }
8075 };
8076 
8077 struct AANoUndefFloating : public AANoUndefImpl {
8078   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
8079       : AANoUndefImpl(IRP, A) {}
8080 
8081   /// See AbstractAttribute::initialize(...).
8082   void initialize(Attributor &A) override {
8083     AANoUndefImpl::initialize(A);
8084     if (!getState().isAtFixpoint())
8085       if (Instruction *CtxI = getCtxI())
8086         followUsesInMBEC(*this, A, getState(), *CtxI);
8087   }
8088 
8089   /// See AbstractAttribute::updateImpl(...).
8090   ChangeStatus updateImpl(Attributor &A) override {
8091     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8092                             AANoUndef::StateType &T, bool Stripped) -> bool {
8093       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
8094                                              DepClassTy::REQUIRED);
8095       if (!Stripped && this == &AA) {
8096         T.indicatePessimisticFixpoint();
8097       } else {
8098         const AANoUndef::StateType &S =
8099             static_cast<const AANoUndef::StateType &>(AA.getState());
8100         T ^= S;
8101       }
8102       return T.isValidState();
8103     };
8104 
8105     StateType T;
8106     if (!genericValueTraversal<AANoUndef, StateType>(
8107             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
8108       return indicatePessimisticFixpoint();
8109 
8110     return clampStateAndIndicateChange(getState(), T);
8111   }
8112 
8113   /// See AbstractAttribute::trackStatistics()
8114   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8115 };
8116 
8117 struct AANoUndefReturned final
8118     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
8119   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
8120       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
8121 
8122   /// See AbstractAttribute::trackStatistics()
8123   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8124 };
8125 
8126 struct AANoUndefArgument final
8127     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8128   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8129       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8130 
8131   /// See AbstractAttribute::trackStatistics()
8132   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8133 };
8134 
8135 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8136   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8137       : AANoUndefFloating(IRP, A) {}
8138 
8139   /// See AbstractAttribute::trackStatistics()
8140   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8141 };
8142 
8143 struct AANoUndefCallSiteReturned final
8144     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8145   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8146       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8147 
8148   /// See AbstractAttribute::trackStatistics()
8149   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8150 };
8151 
8152 struct AACallEdgesFunction : public AACallEdges {
8153   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
8154       : AACallEdges(IRP, A) {}
8155 
8156   /// See AbstractAttribute::updateImpl(...).
8157   ChangeStatus updateImpl(Attributor &A) override {
8158     ChangeStatus Change = ChangeStatus::UNCHANGED;
8159     bool OldHasUnknownCallee = HasUnknownCallee;
8160 
8161     auto AddCalledFunction = [&](Function *Fn) {
8162       if (CalledFunctions.insert(Fn)) {
8163         Change = ChangeStatus::CHANGED;
8164         LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
8165                           << "\n");
8166       }
8167     };
8168 
8169     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
8170                           bool Stripped) -> bool {
8171       if (Function *Fn = dyn_cast<Function>(&V)) {
8172         AddCalledFunction(Fn);
8173       } else {
8174         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
8175         HasUnknown = true;
8176       }
8177 
8178       // Explore all values.
8179       return true;
8180     };
8181 
8182     // Process any value that we might call.
8183     auto ProcessCalledOperand = [&](Value *V, Instruction *Ctx) {
8184       if (!genericValueTraversal<AACallEdges, bool>(A, IRPosition::value(*V),
8185                                                     *this, HasUnknownCallee,
8186                                                     VisitValue, nullptr, false))
8187         // If we haven't gone through all values, assume that there are unknown
8188         // callees.
8189         HasUnknownCallee = true;
8190     };
8191 
8192     auto ProcessCallInst = [&](Instruction &Inst) {
8193       CallBase &CB = static_cast<CallBase &>(Inst);
8194 
8195       // Process callee metadata if available.
8196       if (auto *MD = Inst.getMetadata(LLVMContext::MD_callees)) {
8197         for (auto &Op : MD->operands()) {
8198           Function *Callee = mdconst::extract_or_null<Function>(Op);
8199           if (Callee)
8200             AddCalledFunction(Callee);
8201         }
8202         // Callees metadata grantees that the called function is one of its
8203         // operands, So we are done.
8204         return true;
8205       }
8206 
8207       // The most simple case.
8208       ProcessCalledOperand(CB.getCalledOperand(), &Inst);
8209 
8210       // Process callback functions.
8211       SmallVector<const Use *, 4u> CallbackUses;
8212       AbstractCallSite::getCallbackUses(CB, CallbackUses);
8213       for (const Use *U : CallbackUses)
8214         ProcessCalledOperand(U->get(), &Inst);
8215 
8216       return true;
8217     };
8218 
8219     // Visit all callable instructions.
8220     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this))
8221       // If we haven't looked at all call like instructions, assume that there
8222       // are unknown callees.
8223       HasUnknownCallee = true;
8224     // Track changes.
8225     if (OldHasUnknownCallee != HasUnknownCallee)
8226       Change = ChangeStatus::CHANGED;
8227 
8228     return Change;
8229   }
8230 
8231   virtual const SetVector<Function *> &getOptimisticEdges() const override {
8232     return CalledFunctions;
8233   };
8234 
8235   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
8236 
8237   const std::string getAsStr() const override {
8238     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
8239            std::to_string(CalledFunctions.size()) + "]";
8240   }
8241 
8242   void trackStatistics() const override {}
8243 
8244   /// Optimistic set of functions that might be called by this function.
8245   SetVector<Function *> CalledFunctions;
8246 
8247   /// Does this function have a call to a function that we don't know about.
8248   bool HasUnknownCallee = false;
8249 };
8250 
8251 } // namespace
8252 
8253 AACallGraphNode *AACallEdgeIterator::operator*() const {
8254   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
8255       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
8256 }
8257 
8258 void AttributorCallGraph::print() {
8259   llvm::WriteGraph(outs(), this);
8260 }
8261 
8262 const char AAReturnedValues::ID = 0;
8263 const char AANoUnwind::ID = 0;
8264 const char AANoSync::ID = 0;
8265 const char AANoFree::ID = 0;
8266 const char AANonNull::ID = 0;
8267 const char AANoRecurse::ID = 0;
8268 const char AAWillReturn::ID = 0;
8269 const char AAUndefinedBehavior::ID = 0;
8270 const char AANoAlias::ID = 0;
8271 const char AAReachability::ID = 0;
8272 const char AANoReturn::ID = 0;
8273 const char AAIsDead::ID = 0;
8274 const char AADereferenceable::ID = 0;
8275 const char AAAlign::ID = 0;
8276 const char AANoCapture::ID = 0;
8277 const char AAValueSimplify::ID = 0;
8278 const char AAHeapToStack::ID = 0;
8279 const char AAPrivatizablePtr::ID = 0;
8280 const char AAMemoryBehavior::ID = 0;
8281 const char AAMemoryLocation::ID = 0;
8282 const char AAValueConstantRange::ID = 0;
8283 const char AAPotentialValues::ID = 0;
8284 const char AANoUndef::ID = 0;
8285 const char AACallEdges::ID = 0;
8286 
8287 // Macro magic to create the static generator function for attributes that
8288 // follow the naming scheme.
8289 
8290 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8291   case IRPosition::PK:                                                         \
8292     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8293 
8294 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8295   case IRPosition::PK:                                                         \
8296     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8297     ++NumAAs;                                                                  \
8298     break;
8299 
8300 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8301   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8302     CLASS *AA = nullptr;                                                       \
8303     switch (IRP.getPositionKind()) {                                           \
8304       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8305       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8306       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8307       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8308       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8309       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8310       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8311       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8312     }                                                                          \
8313     return *AA;                                                                \
8314   }
8315 
8316 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8317   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8318     CLASS *AA = nullptr;                                                       \
8319     switch (IRP.getPositionKind()) {                                           \
8320       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8321       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8322       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8323       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8324       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8325       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8326       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8327       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8328     }                                                                          \
8329     return *AA;                                                                \
8330   }
8331 
8332 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8333   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8334     CLASS *AA = nullptr;                                                       \
8335     switch (IRP.getPositionKind()) {                                           \
8336       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8337       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8338       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8339       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8340       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8341       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8342       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8343       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8344     }                                                                          \
8345     return *AA;                                                                \
8346   }
8347 
8348 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8349   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8350     CLASS *AA = nullptr;                                                       \
8351     switch (IRP.getPositionKind()) {                                           \
8352       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8353       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8354       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8355       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8356       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8357       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8358       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8359       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8360     }                                                                          \
8361     return *AA;                                                                \
8362   }
8363 
8364 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8365   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8366     CLASS *AA = nullptr;                                                       \
8367     switch (IRP.getPositionKind()) {                                           \
8368       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8369       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8370       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8371       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8372       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8373       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8374       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8375       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8376     }                                                                          \
8377     return *AA;                                                                \
8378   }
8379 
8380 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8381 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8382 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8383 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8384 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8385 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8386 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8387 
8388 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8389 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8390 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8391 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8392 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8393 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8394 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8395 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8396 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8397 
8398 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8399 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8400 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8401 
8402 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8403 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8404 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8405 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
8406 
8407 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8408 
8409 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8410 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8411 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8412 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8413 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8414 #undef SWITCH_PK_CREATE
8415 #undef SWITCH_PK_INV
8416