1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/FileSystem.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include <cassert>
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "attributor"
42 
43 static cl::opt<bool> ManifestInternal(
44     "attributor-manifest-internal", cl::Hidden,
45     cl::desc("Manifest Attributor internal string attributes."),
46     cl::init(false));
47 
48 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
49                                        cl::Hidden);
50 
51 template <>
52 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
53 
54 static cl::opt<unsigned, true> MaxPotentialValues(
55     "attributor-max-potential-values", cl::Hidden,
56     cl::desc("Maximum number of potential values to be "
57              "tracked for each position."),
58     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
59     cl::init(7));
60 
61 STATISTIC(NumAAs, "Number of abstract attributes created");
62 
63 // Some helper macros to deal with statistics tracking.
64 //
65 // Usage:
66 // For simple IR attribute tracking overload trackStatistics in the abstract
67 // attribute and choose the right STATS_DECLTRACK_********* macro,
68 // e.g.,:
69 //  void trackStatistics() const override {
70 //    STATS_DECLTRACK_ARG_ATTR(returned)
71 //  }
72 // If there is a single "increment" side one can use the macro
73 // STATS_DECLTRACK with a custom message. If there are multiple increment
74 // sides, STATS_DECL and STATS_TRACK can also be used separately.
75 //
76 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
77   ("Number of " #TYPE " marked '" #NAME "'")
78 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
79 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
80 #define STATS_DECL(NAME, TYPE, MSG)                                            \
81   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
82 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
83 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
84   {                                                                            \
85     STATS_DECL(NAME, TYPE, MSG)                                                \
86     STATS_TRACK(NAME, TYPE)                                                    \
87   }
88 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
89   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
90 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
91   STATS_DECLTRACK(NAME, CSArguments,                                           \
92                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
93 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
94   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
95 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
96   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
97 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
99                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
100 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
101   STATS_DECLTRACK(NAME, CSReturn,                                              \
102                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
103 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
104   STATS_DECLTRACK(NAME, Floating,                                              \
105                   ("Number of floating values known to be '" #NAME "'"))
106 
107 // Specialization of the operator<< for abstract attributes subclasses. This
108 // disambiguates situations where multiple operators are applicable.
109 namespace llvm {
110 #define PIPE_OPERATOR(CLASS)                                                   \
111   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
112     return OS << static_cast<const AbstractAttribute &>(AA);                   \
113   }
114 
115 PIPE_OPERATOR(AAIsDead)
116 PIPE_OPERATOR(AANoUnwind)
117 PIPE_OPERATOR(AANoSync)
118 PIPE_OPERATOR(AANoRecurse)
119 PIPE_OPERATOR(AAWillReturn)
120 PIPE_OPERATOR(AANoReturn)
121 PIPE_OPERATOR(AAReturnedValues)
122 PIPE_OPERATOR(AANonNull)
123 PIPE_OPERATOR(AANoAlias)
124 PIPE_OPERATOR(AADereferenceable)
125 PIPE_OPERATOR(AAAlign)
126 PIPE_OPERATOR(AANoCapture)
127 PIPE_OPERATOR(AAValueSimplify)
128 PIPE_OPERATOR(AANoFree)
129 PIPE_OPERATOR(AAHeapToStack)
130 PIPE_OPERATOR(AAReachability)
131 PIPE_OPERATOR(AAMemoryBehavior)
132 PIPE_OPERATOR(AAMemoryLocation)
133 PIPE_OPERATOR(AAValueConstantRange)
134 PIPE_OPERATOR(AAPrivatizablePtr)
135 PIPE_OPERATOR(AAUndefinedBehavior)
136 PIPE_OPERATOR(AAPotentialValues)
137 PIPE_OPERATOR(AANoUndef)
138 PIPE_OPERATOR(AACallEdges)
139 PIPE_OPERATOR(AAFunctionReachability)
140 
141 #undef PIPE_OPERATOR
142 } // namespace llvm
143 
144 /// Get pointer operand of memory accessing instruction. If \p I is
145 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
146 /// is set to false and the instruction is volatile, return nullptr.
147 static const Value *getPointerOperand(const Instruction *I,
148                                       bool AllowVolatile) {
149   if (!AllowVolatile && I->isVolatile())
150     return nullptr;
151 
152   if (auto *LI = dyn_cast<LoadInst>(I)) {
153     return LI->getPointerOperand();
154   }
155 
156   if (auto *SI = dyn_cast<StoreInst>(I)) {
157     return SI->getPointerOperand();
158   }
159 
160   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
161     return CXI->getPointerOperand();
162   }
163 
164   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
165     return RMWI->getPointerOperand();
166   }
167 
168   return nullptr;
169 }
170 
171 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
172 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
173 /// getelement pointer instructions that traverse the natural type of \p Ptr if
174 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
175 /// through a cast to i8*.
176 ///
177 /// TODO: This could probably live somewhere more prominantly if it doesn't
178 ///       already exist.
179 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
180                                int64_t Offset, IRBuilder<NoFolder> &IRB,
181                                const DataLayout &DL) {
182   assert(Offset >= 0 && "Negative offset not supported yet!");
183   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
184                     << "-bytes as " << *ResTy << "\n");
185 
186   if (Offset) {
187     SmallVector<Value *, 4> Indices;
188     std::string GEPName = Ptr->getName().str() + ".0";
189 
190     // Add 0 index to look through the pointer.
191     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
192            "Offset out of bounds");
193     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
194 
195     Type *Ty = PtrElemTy;
196     do {
197       auto *STy = dyn_cast<StructType>(Ty);
198       if (!STy)
199         // Non-aggregate type, we cast and make byte-wise progress now.
200         break;
201 
202       const StructLayout *SL = DL.getStructLayout(STy);
203       if (int64_t(SL->getSizeInBytes()) < Offset)
204         break;
205 
206       uint64_t Idx = SL->getElementContainingOffset(Offset);
207       assert(Idx < STy->getNumElements() && "Offset calculation error!");
208       uint64_t Rem = Offset - SL->getElementOffset(Idx);
209       Ty = STy->getElementType(Idx);
210 
211       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
212                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
213 
214       GEPName += "." + std::to_string(Idx);
215       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
216       Offset = Rem;
217     } while (Offset);
218 
219     // Create a GEP for the indices collected above.
220     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
221 
222     // If an offset is left we use byte-wise adjustment.
223     if (Offset) {
224       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
225       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
226                           GEPName + ".b" + Twine(Offset));
227     }
228   }
229 
230   // Ensure the result has the requested type.
231   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
232 
233   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
234   return Ptr;
235 }
236 
237 /// Recursively visit all values that might become \p IRP at some point. This
238 /// will be done by looking through cast instructions, selects, phis, and calls
239 /// with the "returned" attribute. Once we cannot look through the value any
240 /// further, the callback \p VisitValueCB is invoked and passed the current
241 /// value, the \p State, and a flag to indicate if we stripped anything.
242 /// Stripped means that we unpacked the value associated with \p IRP at least
243 /// once. Note that the value used for the callback may still be the value
244 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
245 /// we will never visit more values than specified by \p MaxValues.
246 template <typename AAType, typename StateTy>
247 static bool genericValueTraversal(
248     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
249     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
250         VisitValueCB,
251     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
252     function_ref<Value *(Value *)> StripCB = nullptr) {
253 
254   const AAIsDead *LivenessAA = nullptr;
255   if (IRP.getAnchorScope())
256     LivenessAA = &A.getAAFor<AAIsDead>(
257         QueryingAA,
258         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
259         DepClassTy::NONE);
260   bool AnyDead = false;
261 
262   using Item = std::pair<Value *, const Instruction *>;
263   SmallSet<Item, 16> Visited;
264   SmallVector<Item, 16> Worklist;
265   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
266 
267   int Iteration = 0;
268   do {
269     Item I = Worklist.pop_back_val();
270     Value *V = I.first;
271     CtxI = I.second;
272     if (StripCB)
273       V = StripCB(V);
274 
275     // Check if we should process the current value. To prevent endless
276     // recursion keep a record of the values we followed!
277     if (!Visited.insert(I).second)
278       continue;
279 
280     // Make sure we limit the compile time for complex expressions.
281     if (Iteration++ >= MaxValues)
282       return false;
283 
284     // Explicitly look through calls with a "returned" attribute if we do
285     // not have a pointer as stripPointerCasts only works on them.
286     Value *NewV = nullptr;
287     if (V->getType()->isPointerTy()) {
288       NewV = V->stripPointerCasts();
289     } else {
290       auto *CB = dyn_cast<CallBase>(V);
291       if (CB && CB->getCalledFunction()) {
292         for (Argument &Arg : CB->getCalledFunction()->args())
293           if (Arg.hasReturnedAttr()) {
294             NewV = CB->getArgOperand(Arg.getArgNo());
295             break;
296           }
297       }
298     }
299     if (NewV && NewV != V) {
300       Worklist.push_back({NewV, CtxI});
301       continue;
302     }
303 
304     // Look through select instructions, visit both potential values.
305     if (auto *SI = dyn_cast<SelectInst>(V)) {
306       Worklist.push_back({SI->getTrueValue(), CtxI});
307       Worklist.push_back({SI->getFalseValue(), CtxI});
308       continue;
309     }
310 
311     // Look through phi nodes, visit all live operands.
312     if (auto *PHI = dyn_cast<PHINode>(V)) {
313       assert(LivenessAA &&
314              "Expected liveness in the presence of instructions!");
315       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
316         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
317         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
318                             LivenessAA,
319                             /* CheckBBLivenessOnly */ true)) {
320           AnyDead = true;
321           continue;
322         }
323         Worklist.push_back(
324             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
325       }
326       continue;
327     }
328 
329     if (UseValueSimplify && !isa<Constant>(V)) {
330       bool UsedAssumedInformation = false;
331       Optional<Constant *> C =
332           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
333       if (!C.hasValue())
334         continue;
335       if (Value *NewV = C.getValue()) {
336         Worklist.push_back({NewV, CtxI});
337         continue;
338       }
339     }
340 
341     // Once a leaf is reached we inform the user through the callback.
342     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
343       return false;
344   } while (!Worklist.empty());
345 
346   // If we actually used liveness information so we have to record a dependence.
347   if (AnyDead)
348     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
349 
350   // All values have been visited.
351   return true;
352 }
353 
354 const Value *stripAndAccumulateMinimalOffsets(
355     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
356     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
357     bool UseAssumed = false) {
358 
359   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
360     const IRPosition &Pos = IRPosition::value(V);
361     // Only track dependence if we are going to use the assumed info.
362     const AAValueConstantRange &ValueConstantRangeAA =
363         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
364                                          UseAssumed ? DepClassTy::OPTIONAL
365                                                     : DepClassTy::NONE);
366     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
367                                      : ValueConstantRangeAA.getKnown();
368     // We can only use the lower part of the range because the upper part can
369     // be higher than what the value can really be.
370     ROffset = Range.getSignedMin();
371     return true;
372   };
373 
374   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
375                                                 AttributorAnalysis);
376 }
377 
378 static const Value *getMinimalBaseOfAccsesPointerOperand(
379     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
380     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
381   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
382   if (!Ptr)
383     return nullptr;
384   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
385   const Value *Base = stripAndAccumulateMinimalOffsets(
386       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
387 
388   BytesOffset = OffsetAPInt.getSExtValue();
389   return Base;
390 }
391 
392 static const Value *
393 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
394                                      const DataLayout &DL,
395                                      bool AllowNonInbounds = false) {
396   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
397   if (!Ptr)
398     return nullptr;
399 
400   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
401                                           AllowNonInbounds);
402 }
403 
404 /// Helper function to clamp a state \p S of type \p StateType with the
405 /// information in \p R and indicate/return if \p S did change (as-in update is
406 /// required to be run again).
407 template <typename StateType>
408 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
409   auto Assumed = S.getAssumed();
410   S ^= R;
411   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
412                                    : ChangeStatus::CHANGED;
413 }
414 
415 /// Clamp the information known for all returned values of a function
416 /// (identified by \p QueryingAA) into \p S.
417 template <typename AAType, typename StateType = typename AAType::StateType>
418 static void clampReturnedValueStates(
419     Attributor &A, const AAType &QueryingAA, StateType &S,
420     const IRPosition::CallBaseContext *CBContext = nullptr) {
421   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
422                     << QueryingAA << " into " << S << "\n");
423 
424   assert((QueryingAA.getIRPosition().getPositionKind() ==
425               IRPosition::IRP_RETURNED ||
426           QueryingAA.getIRPosition().getPositionKind() ==
427               IRPosition::IRP_CALL_SITE_RETURNED) &&
428          "Can only clamp returned value states for a function returned or call "
429          "site returned position!");
430 
431   // Use an optional state as there might not be any return values and we want
432   // to join (IntegerState::operator&) the state of all there are.
433   Optional<StateType> T;
434 
435   // Callback for each possibly returned value.
436   auto CheckReturnValue = [&](Value &RV) -> bool {
437     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
438     const AAType &AA =
439         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
440     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
441                       << " @ " << RVPos << "\n");
442     const StateType &AAS = AA.getState();
443     if (T.hasValue())
444       *T &= AAS;
445     else
446       T = AAS;
447     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
448                       << "\n");
449     return T->isValidState();
450   };
451 
452   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
453     S.indicatePessimisticFixpoint();
454   else if (T.hasValue())
455     S ^= *T;
456 }
457 
458 /// Helper class for generic deduction: return value -> returned position.
459 template <typename AAType, typename BaseType,
460           typename StateType = typename BaseType::StateType,
461           bool PropagateCallBaseContext = false>
462 struct AAReturnedFromReturnedValues : public BaseType {
463   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
464       : BaseType(IRP, A) {}
465 
466   /// See AbstractAttribute::updateImpl(...).
467   ChangeStatus updateImpl(Attributor &A) override {
468     StateType S(StateType::getBestState(this->getState()));
469     clampReturnedValueStates<AAType, StateType>(
470         A, *this, S,
471         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
472     // TODO: If we know we visited all returned values, thus no are assumed
473     // dead, we can take the known information from the state T.
474     return clampStateAndIndicateChange<StateType>(this->getState(), S);
475   }
476 };
477 
478 /// Clamp the information known at all call sites for a given argument
479 /// (identified by \p QueryingAA) into \p S.
480 template <typename AAType, typename StateType = typename AAType::StateType>
481 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
482                                         StateType &S) {
483   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
484                     << QueryingAA << " into " << S << "\n");
485 
486   assert(QueryingAA.getIRPosition().getPositionKind() ==
487              IRPosition::IRP_ARGUMENT &&
488          "Can only clamp call site argument states for an argument position!");
489 
490   // Use an optional state as there might not be any return values and we want
491   // to join (IntegerState::operator&) the state of all there are.
492   Optional<StateType> T;
493 
494   // The argument number which is also the call site argument number.
495   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
496 
497   auto CallSiteCheck = [&](AbstractCallSite ACS) {
498     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
499     // Check if a coresponding argument was found or if it is on not associated
500     // (which can happen for callback calls).
501     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
502       return false;
503 
504     const AAType &AA =
505         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
506     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
507                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
508     const StateType &AAS = AA.getState();
509     if (T.hasValue())
510       *T &= AAS;
511     else
512       T = AAS;
513     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
514                       << "\n");
515     return T->isValidState();
516   };
517 
518   bool AllCallSitesKnown;
519   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
520                               AllCallSitesKnown))
521     S.indicatePessimisticFixpoint();
522   else if (T.hasValue())
523     S ^= *T;
524 }
525 
526 /// This function is the bridge between argument position and the call base
527 /// context.
528 template <typename AAType, typename BaseType,
529           typename StateType = typename AAType::StateType>
530 bool getArgumentStateFromCallBaseContext(Attributor &A,
531                                          BaseType &QueryingAttribute,
532                                          IRPosition &Pos, StateType &State) {
533   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
534          "Expected an 'argument' position !");
535   const CallBase *CBContext = Pos.getCallBaseContext();
536   if (!CBContext)
537     return false;
538 
539   int ArgNo = Pos.getCallSiteArgNo();
540   assert(ArgNo >= 0 && "Invalid Arg No!");
541 
542   const auto &AA = A.getAAFor<AAType>(
543       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
544       DepClassTy::REQUIRED);
545   const StateType &CBArgumentState =
546       static_cast<const StateType &>(AA.getState());
547 
548   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
549                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
550                     << "\n");
551 
552   // NOTE: If we want to do call site grouping it should happen here.
553   State ^= CBArgumentState;
554   return true;
555 }
556 
557 /// Helper class for generic deduction: call site argument -> argument position.
558 template <typename AAType, typename BaseType,
559           typename StateType = typename AAType::StateType,
560           bool BridgeCallBaseContext = false>
561 struct AAArgumentFromCallSiteArguments : public BaseType {
562   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
563       : BaseType(IRP, A) {}
564 
565   /// See AbstractAttribute::updateImpl(...).
566   ChangeStatus updateImpl(Attributor &A) override {
567     StateType S = StateType::getBestState(this->getState());
568 
569     if (BridgeCallBaseContext) {
570       bool Success =
571           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
572               A, *this, this->getIRPosition(), S);
573       if (Success)
574         return clampStateAndIndicateChange<StateType>(this->getState(), S);
575     }
576     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
577 
578     // TODO: If we know we visited all incoming values, thus no are assumed
579     // dead, we can take the known information from the state T.
580     return clampStateAndIndicateChange<StateType>(this->getState(), S);
581   }
582 };
583 
584 /// Helper class for generic replication: function returned -> cs returned.
585 template <typename AAType, typename BaseType,
586           typename StateType = typename BaseType::StateType,
587           bool IntroduceCallBaseContext = false>
588 struct AACallSiteReturnedFromReturned : public BaseType {
589   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
590       : BaseType(IRP, A) {}
591 
592   /// See AbstractAttribute::updateImpl(...).
593   ChangeStatus updateImpl(Attributor &A) override {
594     assert(this->getIRPosition().getPositionKind() ==
595                IRPosition::IRP_CALL_SITE_RETURNED &&
596            "Can only wrap function returned positions for call site returned "
597            "positions!");
598     auto &S = this->getState();
599 
600     const Function *AssociatedFunction =
601         this->getIRPosition().getAssociatedFunction();
602     if (!AssociatedFunction)
603       return S.indicatePessimisticFixpoint();
604 
605     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
606     if (IntroduceCallBaseContext)
607       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
608                         << CBContext << "\n");
609 
610     IRPosition FnPos = IRPosition::returned(
611         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
612     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
613     return clampStateAndIndicateChange(S, AA.getState());
614   }
615 };
616 
617 /// Helper function to accumulate uses.
618 template <class AAType, typename StateType = typename AAType::StateType>
619 static void followUsesInContext(AAType &AA, Attributor &A,
620                                 MustBeExecutedContextExplorer &Explorer,
621                                 const Instruction *CtxI,
622                                 SetVector<const Use *> &Uses,
623                                 StateType &State) {
624   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
625   for (unsigned u = 0; u < Uses.size(); ++u) {
626     const Use *U = Uses[u];
627     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
628       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
629       if (Found && AA.followUseInMBEC(A, U, UserI, State))
630         for (const Use &Us : UserI->uses())
631           Uses.insert(&Us);
632     }
633   }
634 }
635 
636 /// Use the must-be-executed-context around \p I to add information into \p S.
637 /// The AAType class is required to have `followUseInMBEC` method with the
638 /// following signature and behaviour:
639 ///
640 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
641 /// U - Underlying use.
642 /// I - The user of the \p U.
643 /// Returns true if the value should be tracked transitively.
644 ///
645 template <class AAType, typename StateType = typename AAType::StateType>
646 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
647                              Instruction &CtxI) {
648 
649   // Container for (transitive) uses of the associated value.
650   SetVector<const Use *> Uses;
651   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
652     Uses.insert(&U);
653 
654   MustBeExecutedContextExplorer &Explorer =
655       A.getInfoCache().getMustBeExecutedContextExplorer();
656 
657   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
658 
659   if (S.isAtFixpoint())
660     return;
661 
662   SmallVector<const BranchInst *, 4> BrInsts;
663   auto Pred = [&](const Instruction *I) {
664     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
665       if (Br->isConditional())
666         BrInsts.push_back(Br);
667     return true;
668   };
669 
670   // Here, accumulate conditional branch instructions in the context. We
671   // explore the child paths and collect the known states. The disjunction of
672   // those states can be merged to its own state. Let ParentState_i be a state
673   // to indicate the known information for an i-th branch instruction in the
674   // context. ChildStates are created for its successors respectively.
675   //
676   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
677   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
678   //      ...
679   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
680   //
681   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
682   //
683   // FIXME: Currently, recursive branches are not handled. For example, we
684   // can't deduce that ptr must be dereferenced in below function.
685   //
686   // void f(int a, int c, int *ptr) {
687   //    if(a)
688   //      if (b) {
689   //        *ptr = 0;
690   //      } else {
691   //        *ptr = 1;
692   //      }
693   //    else {
694   //      if (b) {
695   //        *ptr = 0;
696   //      } else {
697   //        *ptr = 1;
698   //      }
699   //    }
700   // }
701 
702   Explorer.checkForAllContext(&CtxI, Pred);
703   for (const BranchInst *Br : BrInsts) {
704     StateType ParentState;
705 
706     // The known state of the parent state is a conjunction of children's
707     // known states so it is initialized with a best state.
708     ParentState.indicateOptimisticFixpoint();
709 
710     for (const BasicBlock *BB : Br->successors()) {
711       StateType ChildState;
712 
713       size_t BeforeSize = Uses.size();
714       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
715 
716       // Erase uses which only appear in the child.
717       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
718         It = Uses.erase(It);
719 
720       ParentState &= ChildState;
721     }
722 
723     // Use only known state.
724     S += ParentState;
725   }
726 }
727 
728 /// -----------------------NoUnwind Function Attribute--------------------------
729 
730 struct AANoUnwindImpl : AANoUnwind {
731   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
732 
733   const std::string getAsStr() const override {
734     return getAssumed() ? "nounwind" : "may-unwind";
735   }
736 
737   /// See AbstractAttribute::updateImpl(...).
738   ChangeStatus updateImpl(Attributor &A) override {
739     auto Opcodes = {
740         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
741         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
742         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
743 
744     auto CheckForNoUnwind = [&](Instruction &I) {
745       if (!I.mayThrow())
746         return true;
747 
748       if (const auto *CB = dyn_cast<CallBase>(&I)) {
749         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
750             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
751         return NoUnwindAA.isAssumedNoUnwind();
752       }
753       return false;
754     };
755 
756     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
757       return indicatePessimisticFixpoint();
758 
759     return ChangeStatus::UNCHANGED;
760   }
761 };
762 
763 struct AANoUnwindFunction final : public AANoUnwindImpl {
764   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
765       : AANoUnwindImpl(IRP, A) {}
766 
767   /// See AbstractAttribute::trackStatistics()
768   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
769 };
770 
771 /// NoUnwind attribute deduction for a call sites.
772 struct AANoUnwindCallSite final : AANoUnwindImpl {
773   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
774       : AANoUnwindImpl(IRP, A) {}
775 
776   /// See AbstractAttribute::initialize(...).
777   void initialize(Attributor &A) override {
778     AANoUnwindImpl::initialize(A);
779     Function *F = getAssociatedFunction();
780     if (!F || F->isDeclaration())
781       indicatePessimisticFixpoint();
782   }
783 
784   /// See AbstractAttribute::updateImpl(...).
785   ChangeStatus updateImpl(Attributor &A) override {
786     // TODO: Once we have call site specific value information we can provide
787     //       call site specific liveness information and then it makes
788     //       sense to specialize attributes for call sites arguments instead of
789     //       redirecting requests to the callee argument.
790     Function *F = getAssociatedFunction();
791     const IRPosition &FnPos = IRPosition::function(*F);
792     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
793     return clampStateAndIndicateChange(getState(), FnAA.getState());
794   }
795 
796   /// See AbstractAttribute::trackStatistics()
797   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
798 };
799 
800 /// --------------------- Function Return Values -------------------------------
801 
802 /// "Attribute" that collects all potential returned values and the return
803 /// instructions that they arise from.
804 ///
805 /// If there is a unique returned value R, the manifest method will:
806 ///   - mark R with the "returned" attribute, if R is an argument.
807 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
808 
809   /// Mapping of values potentially returned by the associated function to the
810   /// return instructions that might return them.
811   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
812 
813   /// Mapping to remember the number of returned values for a call site such
814   /// that we can avoid updates if nothing changed.
815   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
816 
817   /// Set of unresolved calls returned by the associated function.
818   SmallSetVector<CallBase *, 4> UnresolvedCalls;
819 
820   /// State flags
821   ///
822   ///{
823   bool IsFixed = false;
824   bool IsValidState = true;
825   ///}
826 
827 public:
828   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
829       : AAReturnedValues(IRP, A) {}
830 
831   /// See AbstractAttribute::initialize(...).
832   void initialize(Attributor &A) override {
833     // Reset the state.
834     IsFixed = false;
835     IsValidState = true;
836     ReturnedValues.clear();
837 
838     Function *F = getAssociatedFunction();
839     if (!F || F->isDeclaration()) {
840       indicatePessimisticFixpoint();
841       return;
842     }
843     assert(!F->getReturnType()->isVoidTy() &&
844            "Did not expect a void return type!");
845 
846     // The map from instruction opcodes to those instructions in the function.
847     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
848 
849     // Look through all arguments, if one is marked as returned we are done.
850     for (Argument &Arg : F->args()) {
851       if (Arg.hasReturnedAttr()) {
852         auto &ReturnInstSet = ReturnedValues[&Arg];
853         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
854           for (Instruction *RI : *Insts)
855             ReturnInstSet.insert(cast<ReturnInst>(RI));
856 
857         indicateOptimisticFixpoint();
858         return;
859       }
860     }
861 
862     if (!A.isFunctionIPOAmendable(*F))
863       indicatePessimisticFixpoint();
864   }
865 
866   /// See AbstractAttribute::manifest(...).
867   ChangeStatus manifest(Attributor &A) override;
868 
869   /// See AbstractAttribute::getState(...).
870   AbstractState &getState() override { return *this; }
871 
872   /// See AbstractAttribute::getState(...).
873   const AbstractState &getState() const override { return *this; }
874 
875   /// See AbstractAttribute::updateImpl(Attributor &A).
876   ChangeStatus updateImpl(Attributor &A) override;
877 
878   llvm::iterator_range<iterator> returned_values() override {
879     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
880   }
881 
882   llvm::iterator_range<const_iterator> returned_values() const override {
883     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
884   }
885 
886   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
887     return UnresolvedCalls;
888   }
889 
890   /// Return the number of potential return values, -1 if unknown.
891   size_t getNumReturnValues() const override {
892     return isValidState() ? ReturnedValues.size() : -1;
893   }
894 
895   /// Return an assumed unique return value if a single candidate is found. If
896   /// there cannot be one, return a nullptr. If it is not clear yet, return the
897   /// Optional::NoneType.
898   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
899 
900   /// See AbstractState::checkForAllReturnedValues(...).
901   bool checkForAllReturnedValuesAndReturnInsts(
902       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
903       const override;
904 
905   /// Pretty print the attribute similar to the IR representation.
906   const std::string getAsStr() const override;
907 
908   /// See AbstractState::isAtFixpoint().
909   bool isAtFixpoint() const override { return IsFixed; }
910 
911   /// See AbstractState::isValidState().
912   bool isValidState() const override { return IsValidState; }
913 
914   /// See AbstractState::indicateOptimisticFixpoint(...).
915   ChangeStatus indicateOptimisticFixpoint() override {
916     IsFixed = true;
917     return ChangeStatus::UNCHANGED;
918   }
919 
920   ChangeStatus indicatePessimisticFixpoint() override {
921     IsFixed = true;
922     IsValidState = false;
923     return ChangeStatus::CHANGED;
924   }
925 };
926 
927 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
928   ChangeStatus Changed = ChangeStatus::UNCHANGED;
929 
930   // Bookkeeping.
931   assert(isValidState());
932   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
933                   "Number of function with known return values");
934 
935   // Check if we have an assumed unique return value that we could manifest.
936   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
937 
938   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
939     return Changed;
940 
941   // Bookkeeping.
942   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
943                   "Number of function with unique return");
944 
945   // Callback to replace the uses of CB with the constant C.
946   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
947     if (CB.use_empty())
948       return ChangeStatus::UNCHANGED;
949     if (A.changeValueAfterManifest(CB, C))
950       return ChangeStatus::CHANGED;
951     return ChangeStatus::UNCHANGED;
952   };
953 
954   // If the assumed unique return value is an argument, annotate it.
955   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
956     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
957             getAssociatedFunction()->getReturnType())) {
958       getIRPosition() = IRPosition::argument(*UniqueRVArg);
959       Changed = IRAttribute::manifest(A);
960     }
961   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
962     // We can replace the returned value with the unique returned constant.
963     Value &AnchorValue = getAnchorValue();
964     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
965       for (const Use &U : F->uses())
966         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
967           if (CB->isCallee(&U)) {
968             Constant *RVCCast =
969                 CB->getType() == RVC->getType()
970                     ? RVC
971                     : ConstantExpr::getPointerCast(RVC, CB->getType());
972             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
973           }
974     } else {
975       assert(isa<CallBase>(AnchorValue) &&
976              "Expcected a function or call base anchor!");
977       Constant *RVCCast =
978           AnchorValue.getType() == RVC->getType()
979               ? RVC
980               : ConstantExpr::getPointerCast(RVC, AnchorValue.getType());
981       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
982     }
983     if (Changed == ChangeStatus::CHANGED)
984       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
985                       "Number of function returns replaced by constant return");
986   }
987 
988   return Changed;
989 }
990 
991 const std::string AAReturnedValuesImpl::getAsStr() const {
992   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
993          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
994          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
995 }
996 
997 Optional<Value *>
998 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
999   // If checkForAllReturnedValues provides a unique value, ignoring potential
1000   // undef values that can also be present, it is assumed to be the actual
1001   // return value and forwarded to the caller of this method. If there are
1002   // multiple, a nullptr is returned indicating there cannot be a unique
1003   // returned value.
1004   Optional<Value *> UniqueRV;
1005   Type *Ty = getAssociatedFunction()->getReturnType();
1006 
1007   auto Pred = [&](Value &RV) -> bool {
1008     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1009     return UniqueRV != Optional<Value *>(nullptr);
1010   };
1011 
1012   if (!A.checkForAllReturnedValues(Pred, *this))
1013     UniqueRV = nullptr;
1014 
1015   return UniqueRV;
1016 }
1017 
1018 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1019     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1020     const {
1021   if (!isValidState())
1022     return false;
1023 
1024   // Check all returned values but ignore call sites as long as we have not
1025   // encountered an overdefined one during an update.
1026   for (auto &It : ReturnedValues) {
1027     Value *RV = It.first;
1028 
1029     CallBase *CB = dyn_cast<CallBase>(RV);
1030     if (CB && !UnresolvedCalls.count(CB))
1031       continue;
1032 
1033     if (!Pred(*RV, It.second))
1034       return false;
1035   }
1036 
1037   return true;
1038 }
1039 
1040 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1041   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1042   bool Changed = false;
1043 
1044   // State used in the value traversals starting in returned values.
1045   struct RVState {
1046     // The map in which we collect return values -> return instrs.
1047     decltype(ReturnedValues) &RetValsMap;
1048     // The flag to indicate a change.
1049     bool &Changed;
1050     // The return instrs we come from.
1051     SmallSetVector<ReturnInst *, 4> RetInsts;
1052   };
1053 
1054   // Callback for a leaf value returned by the associated function.
1055   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1056                          bool) -> bool {
1057     auto Size = RVS.RetValsMap[&Val].size();
1058     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1059     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1060     RVS.Changed |= Inserted;
1061     LLVM_DEBUG({
1062       if (Inserted)
1063         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1064                << " => " << RVS.RetInsts.size() << "\n";
1065     });
1066     return true;
1067   };
1068 
1069   // Helper method to invoke the generic value traversal.
1070   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1071                                 const Instruction *CtxI) {
1072     IRPosition RetValPos = IRPosition::value(RV, getCallBaseContext());
1073     return genericValueTraversal<AAReturnedValues, RVState>(
1074         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1075         /* UseValueSimplify */ false);
1076   };
1077 
1078   // Callback for all "return intructions" live in the associated function.
1079   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1080     ReturnInst &Ret = cast<ReturnInst>(I);
1081     RVState RVS({ReturnedValues, Changed, {}});
1082     RVS.RetInsts.insert(&Ret);
1083     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1084   };
1085 
1086   // Start by discovering returned values from all live returned instructions in
1087   // the associated function.
1088   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1089     return indicatePessimisticFixpoint();
1090 
1091   // Once returned values "directly" present in the code are handled we try to
1092   // resolve returned calls. To avoid modifications to the ReturnedValues map
1093   // while we iterate over it we kept record of potential new entries in a copy
1094   // map, NewRVsMap.
1095   decltype(ReturnedValues) NewRVsMap;
1096 
1097   auto HandleReturnValue = [&](Value *RV,
1098                                SmallSetVector<ReturnInst *, 4> &RIs) {
1099     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1100                       << RIs.size() << " RIs\n");
1101     CallBase *CB = dyn_cast<CallBase>(RV);
1102     if (!CB || UnresolvedCalls.count(CB))
1103       return;
1104 
1105     if (!CB->getCalledFunction()) {
1106       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1107                         << "\n");
1108       UnresolvedCalls.insert(CB);
1109       return;
1110     }
1111 
1112     // TODO: use the function scope once we have call site AAReturnedValues.
1113     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1114         *this, IRPosition::function(*CB->getCalledFunction()),
1115         DepClassTy::REQUIRED);
1116     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1117                       << RetValAA << "\n");
1118 
1119     // Skip dead ends, thus if we do not know anything about the returned
1120     // call we mark it as unresolved and it will stay that way.
1121     if (!RetValAA.getState().isValidState()) {
1122       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1123                         << "\n");
1124       UnresolvedCalls.insert(CB);
1125       return;
1126     }
1127 
1128     // Do not try to learn partial information. If the callee has unresolved
1129     // return values we will treat the call as unresolved/opaque.
1130     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1131     if (!RetValAAUnresolvedCalls.empty()) {
1132       UnresolvedCalls.insert(CB);
1133       return;
1134     }
1135 
1136     // Now check if we can track transitively returned values. If possible, thus
1137     // if all return value can be represented in the current scope, do so.
1138     bool Unresolved = false;
1139     for (auto &RetValAAIt : RetValAA.returned_values()) {
1140       Value *RetVal = RetValAAIt.first;
1141       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1142           isa<Constant>(RetVal))
1143         continue;
1144       // Anything that did not fit in the above categories cannot be resolved,
1145       // mark the call as unresolved.
1146       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1147                            "cannot be translated: "
1148                         << *RetVal << "\n");
1149       UnresolvedCalls.insert(CB);
1150       Unresolved = true;
1151       break;
1152     }
1153 
1154     if (Unresolved)
1155       return;
1156 
1157     // Now track transitively returned values.
1158     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1159     if (NumRetAA == RetValAA.getNumReturnValues()) {
1160       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1161                            "changed since it was seen last\n");
1162       return;
1163     }
1164     NumRetAA = RetValAA.getNumReturnValues();
1165 
1166     for (auto &RetValAAIt : RetValAA.returned_values()) {
1167       Value *RetVal = RetValAAIt.first;
1168       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1169         // Arguments are mapped to call site operands and we begin the traversal
1170         // again.
1171         bool Unused = false;
1172         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1173         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1174         continue;
1175       }
1176       if (isa<CallBase>(RetVal)) {
1177         // Call sites are resolved by the callee attribute over time, no need to
1178         // do anything for us.
1179         continue;
1180       }
1181       if (isa<Constant>(RetVal)) {
1182         // Constants are valid everywhere, we can simply take them.
1183         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1184         continue;
1185       }
1186     }
1187   };
1188 
1189   for (auto &It : ReturnedValues)
1190     HandleReturnValue(It.first, It.second);
1191 
1192   // Because processing the new information can again lead to new return values
1193   // we have to be careful and iterate until this iteration is complete. The
1194   // idea is that we are in a stable state at the end of an update. All return
1195   // values have been handled and properly categorized. We might not update
1196   // again if we have not requested a non-fix attribute so we cannot "wait" for
1197   // the next update to analyze a new return value.
1198   while (!NewRVsMap.empty()) {
1199     auto It = std::move(NewRVsMap.back());
1200     NewRVsMap.pop_back();
1201 
1202     assert(!It.second.empty() && "Entry does not add anything.");
1203     auto &ReturnInsts = ReturnedValues[It.first];
1204     for (ReturnInst *RI : It.second)
1205       if (ReturnInsts.insert(RI)) {
1206         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1207                           << *It.first << " => " << *RI << "\n");
1208         HandleReturnValue(It.first, ReturnInsts);
1209         Changed = true;
1210       }
1211   }
1212 
1213   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1214   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1215 }
1216 
1217 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1218   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1219       : AAReturnedValuesImpl(IRP, A) {}
1220 
1221   /// See AbstractAttribute::trackStatistics()
1222   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1223 };
1224 
1225 /// Returned values information for a call sites.
1226 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1227   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1228       : AAReturnedValuesImpl(IRP, A) {}
1229 
1230   /// See AbstractAttribute::initialize(...).
1231   void initialize(Attributor &A) override {
1232     // TODO: Once we have call site specific value information we can provide
1233     //       call site specific liveness information and then it makes
1234     //       sense to specialize attributes for call sites instead of
1235     //       redirecting requests to the callee.
1236     llvm_unreachable("Abstract attributes for returned values are not "
1237                      "supported for call sites yet!");
1238   }
1239 
1240   /// See AbstractAttribute::updateImpl(...).
1241   ChangeStatus updateImpl(Attributor &A) override {
1242     return indicatePessimisticFixpoint();
1243   }
1244 
1245   /// See AbstractAttribute::trackStatistics()
1246   void trackStatistics() const override {}
1247 };
1248 
1249 /// ------------------------ NoSync Function Attribute -------------------------
1250 
1251 struct AANoSyncImpl : AANoSync {
1252   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1253 
1254   const std::string getAsStr() const override {
1255     return getAssumed() ? "nosync" : "may-sync";
1256   }
1257 
1258   /// See AbstractAttribute::updateImpl(...).
1259   ChangeStatus updateImpl(Attributor &A) override;
1260 
1261   /// Helper function used to determine whether an instruction is non-relaxed
1262   /// atomic. In other words, if an atomic instruction does not have unordered
1263   /// or monotonic ordering
1264   static bool isNonRelaxedAtomic(Instruction *I);
1265 
1266   /// Helper function specific for intrinsics which are potentially volatile
1267   static bool isNoSyncIntrinsic(Instruction *I);
1268 };
1269 
1270 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1271   if (!I->isAtomic())
1272     return false;
1273 
1274   if (auto *FI = dyn_cast<FenceInst>(I))
1275     // All legal orderings for fence are stronger than monotonic.
1276     return FI->getSyncScopeID() != SyncScope::SingleThread;
1277   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1278     // Unordered is not a legal ordering for cmpxchg.
1279     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1280             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1281   }
1282 
1283   AtomicOrdering Ordering;
1284   switch (I->getOpcode()) {
1285   case Instruction::AtomicRMW:
1286     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1287     break;
1288   case Instruction::Store:
1289     Ordering = cast<StoreInst>(I)->getOrdering();
1290     break;
1291   case Instruction::Load:
1292     Ordering = cast<LoadInst>(I)->getOrdering();
1293     break;
1294   default:
1295     llvm_unreachable(
1296         "New atomic operations need to be known in the attributor.");
1297   }
1298 
1299   return (Ordering != AtomicOrdering::Unordered &&
1300           Ordering != AtomicOrdering::Monotonic);
1301 }
1302 
1303 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1304 /// which would be nosync except that they have a volatile flag.  All other
1305 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1306 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1307   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1308     return !MI->isVolatile();
1309   return false;
1310 }
1311 
1312 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1313 
1314   auto CheckRWInstForNoSync = [&](Instruction &I) {
1315     /// We are looking for volatile instructions or Non-Relaxed atomics.
1316 
1317     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1318       if (CB->hasFnAttr(Attribute::NoSync))
1319         return true;
1320 
1321       if (isNoSyncIntrinsic(&I))
1322         return true;
1323 
1324       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1325           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1326       return NoSyncAA.isAssumedNoSync();
1327     }
1328 
1329     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1330       return true;
1331 
1332     return false;
1333   };
1334 
1335   auto CheckForNoSync = [&](Instruction &I) {
1336     // At this point we handled all read/write effects and they are all
1337     // nosync, so they can be skipped.
1338     if (I.mayReadOrWriteMemory())
1339       return true;
1340 
1341     // non-convergent and readnone imply nosync.
1342     return !cast<CallBase>(I).isConvergent();
1343   };
1344 
1345   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1346       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1347     return indicatePessimisticFixpoint();
1348 
1349   return ChangeStatus::UNCHANGED;
1350 }
1351 
1352 struct AANoSyncFunction final : public AANoSyncImpl {
1353   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1354       : AANoSyncImpl(IRP, A) {}
1355 
1356   /// See AbstractAttribute::trackStatistics()
1357   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1358 };
1359 
1360 /// NoSync attribute deduction for a call sites.
1361 struct AANoSyncCallSite final : AANoSyncImpl {
1362   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1363       : AANoSyncImpl(IRP, A) {}
1364 
1365   /// See AbstractAttribute::initialize(...).
1366   void initialize(Attributor &A) override {
1367     AANoSyncImpl::initialize(A);
1368     Function *F = getAssociatedFunction();
1369     if (!F || F->isDeclaration())
1370       indicatePessimisticFixpoint();
1371   }
1372 
1373   /// See AbstractAttribute::updateImpl(...).
1374   ChangeStatus updateImpl(Attributor &A) override {
1375     // TODO: Once we have call site specific value information we can provide
1376     //       call site specific liveness information and then it makes
1377     //       sense to specialize attributes for call sites arguments instead of
1378     //       redirecting requests to the callee argument.
1379     Function *F = getAssociatedFunction();
1380     const IRPosition &FnPos = IRPosition::function(*F);
1381     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1382     return clampStateAndIndicateChange(getState(), FnAA.getState());
1383   }
1384 
1385   /// See AbstractAttribute::trackStatistics()
1386   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1387 };
1388 
1389 /// ------------------------ No-Free Attributes ----------------------------
1390 
1391 struct AANoFreeImpl : public AANoFree {
1392   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1393 
1394   /// See AbstractAttribute::updateImpl(...).
1395   ChangeStatus updateImpl(Attributor &A) override {
1396     auto CheckForNoFree = [&](Instruction &I) {
1397       const auto &CB = cast<CallBase>(I);
1398       if (CB.hasFnAttr(Attribute::NoFree))
1399         return true;
1400 
1401       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1402           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1403       return NoFreeAA.isAssumedNoFree();
1404     };
1405 
1406     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1407       return indicatePessimisticFixpoint();
1408     return ChangeStatus::UNCHANGED;
1409   }
1410 
1411   /// See AbstractAttribute::getAsStr().
1412   const std::string getAsStr() const override {
1413     return getAssumed() ? "nofree" : "may-free";
1414   }
1415 };
1416 
1417 struct AANoFreeFunction final : public AANoFreeImpl {
1418   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1419       : AANoFreeImpl(IRP, A) {}
1420 
1421   /// See AbstractAttribute::trackStatistics()
1422   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1423 };
1424 
1425 /// NoFree attribute deduction for a call sites.
1426 struct AANoFreeCallSite final : AANoFreeImpl {
1427   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1428       : AANoFreeImpl(IRP, A) {}
1429 
1430   /// See AbstractAttribute::initialize(...).
1431   void initialize(Attributor &A) override {
1432     AANoFreeImpl::initialize(A);
1433     Function *F = getAssociatedFunction();
1434     if (!F || F->isDeclaration())
1435       indicatePessimisticFixpoint();
1436   }
1437 
1438   /// See AbstractAttribute::updateImpl(...).
1439   ChangeStatus updateImpl(Attributor &A) override {
1440     // TODO: Once we have call site specific value information we can provide
1441     //       call site specific liveness information and then it makes
1442     //       sense to specialize attributes for call sites arguments instead of
1443     //       redirecting requests to the callee argument.
1444     Function *F = getAssociatedFunction();
1445     const IRPosition &FnPos = IRPosition::function(*F);
1446     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1447     return clampStateAndIndicateChange(getState(), FnAA.getState());
1448   }
1449 
1450   /// See AbstractAttribute::trackStatistics()
1451   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1452 };
1453 
1454 /// NoFree attribute for floating values.
1455 struct AANoFreeFloating : AANoFreeImpl {
1456   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1457       : AANoFreeImpl(IRP, A) {}
1458 
1459   /// See AbstractAttribute::trackStatistics()
1460   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1461 
1462   /// See Abstract Attribute::updateImpl(...).
1463   ChangeStatus updateImpl(Attributor &A) override {
1464     const IRPosition &IRP = getIRPosition();
1465 
1466     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1467         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1468     if (NoFreeAA.isAssumedNoFree())
1469       return ChangeStatus::UNCHANGED;
1470 
1471     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1472     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1473       Instruction *UserI = cast<Instruction>(U.getUser());
1474       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1475         if (CB->isBundleOperand(&U))
1476           return false;
1477         if (!CB->isArgOperand(&U))
1478           return true;
1479         unsigned ArgNo = CB->getArgOperandNo(&U);
1480 
1481         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1482             *this, IRPosition::callsite_argument(*CB, ArgNo),
1483             DepClassTy::REQUIRED);
1484         return NoFreeArg.isAssumedNoFree();
1485       }
1486 
1487       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1488           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1489         Follow = true;
1490         return true;
1491       }
1492       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1493           isa<ReturnInst>(UserI))
1494         return true;
1495 
1496       // Unknown user.
1497       return false;
1498     };
1499     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1500       return indicatePessimisticFixpoint();
1501 
1502     return ChangeStatus::UNCHANGED;
1503   }
1504 };
1505 
1506 /// NoFree attribute for a call site argument.
1507 struct AANoFreeArgument final : AANoFreeFloating {
1508   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1509       : AANoFreeFloating(IRP, A) {}
1510 
1511   /// See AbstractAttribute::trackStatistics()
1512   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1513 };
1514 
1515 /// NoFree attribute for call site arguments.
1516 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1517   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1518       : AANoFreeFloating(IRP, A) {}
1519 
1520   /// See AbstractAttribute::updateImpl(...).
1521   ChangeStatus updateImpl(Attributor &A) override {
1522     // TODO: Once we have call site specific value information we can provide
1523     //       call site specific liveness information and then it makes
1524     //       sense to specialize attributes for call sites arguments instead of
1525     //       redirecting requests to the callee argument.
1526     Argument *Arg = getAssociatedArgument();
1527     if (!Arg)
1528       return indicatePessimisticFixpoint();
1529     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1530     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1531     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1532   }
1533 
1534   /// See AbstractAttribute::trackStatistics()
1535   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1536 };
1537 
1538 /// NoFree attribute for function return value.
1539 struct AANoFreeReturned final : AANoFreeFloating {
1540   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1541       : AANoFreeFloating(IRP, A) {
1542     llvm_unreachable("NoFree is not applicable to function returns!");
1543   }
1544 
1545   /// See AbstractAttribute::initialize(...).
1546   void initialize(Attributor &A) override {
1547     llvm_unreachable("NoFree is not applicable to function returns!");
1548   }
1549 
1550   /// See AbstractAttribute::updateImpl(...).
1551   ChangeStatus updateImpl(Attributor &A) override {
1552     llvm_unreachable("NoFree is not applicable to function returns!");
1553   }
1554 
1555   /// See AbstractAttribute::trackStatistics()
1556   void trackStatistics() const override {}
1557 };
1558 
1559 /// NoFree attribute deduction for a call site return value.
1560 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1561   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1562       : AANoFreeFloating(IRP, A) {}
1563 
1564   ChangeStatus manifest(Attributor &A) override {
1565     return ChangeStatus::UNCHANGED;
1566   }
1567   /// See AbstractAttribute::trackStatistics()
1568   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1569 };
1570 
1571 /// ------------------------ NonNull Argument Attribute ------------------------
1572 static int64_t getKnownNonNullAndDerefBytesForUse(
1573     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1574     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1575   TrackUse = false;
1576 
1577   const Value *UseV = U->get();
1578   if (!UseV->getType()->isPointerTy())
1579     return 0;
1580 
1581   // We need to follow common pointer manipulation uses to the accesses they
1582   // feed into. We can try to be smart to avoid looking through things we do not
1583   // like for now, e.g., non-inbounds GEPs.
1584   if (isa<CastInst>(I)) {
1585     TrackUse = true;
1586     return 0;
1587   }
1588 
1589   if (isa<GetElementPtrInst>(I)) {
1590     TrackUse = true;
1591     return 0;
1592   }
1593 
1594   Type *PtrTy = UseV->getType();
1595   const Function *F = I->getFunction();
1596   bool NullPointerIsDefined =
1597       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1598   const DataLayout &DL = A.getInfoCache().getDL();
1599   if (const auto *CB = dyn_cast<CallBase>(I)) {
1600     if (CB->isBundleOperand(U)) {
1601       if (RetainedKnowledge RK = getKnowledgeFromUse(
1602               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1603         IsNonNull |=
1604             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1605         return RK.ArgValue;
1606       }
1607       return 0;
1608     }
1609 
1610     if (CB->isCallee(U)) {
1611       IsNonNull |= !NullPointerIsDefined;
1612       return 0;
1613     }
1614 
1615     unsigned ArgNo = CB->getArgOperandNo(U);
1616     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1617     // As long as we only use known information there is no need to track
1618     // dependences here.
1619     auto &DerefAA =
1620         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1621     IsNonNull |= DerefAA.isKnownNonNull();
1622     return DerefAA.getKnownDereferenceableBytes();
1623   }
1624 
1625   int64_t Offset;
1626   const Value *Base =
1627       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1628   if (Base) {
1629     if (Base == &AssociatedValue &&
1630         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1631       int64_t DerefBytes =
1632           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1633 
1634       IsNonNull |= !NullPointerIsDefined;
1635       return std::max(int64_t(0), DerefBytes);
1636     }
1637   }
1638 
1639   /// Corner case when an offset is 0.
1640   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1641                                               /*AllowNonInbounds*/ true);
1642   if (Base) {
1643     if (Offset == 0 && Base == &AssociatedValue &&
1644         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1645       int64_t DerefBytes =
1646           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1647       IsNonNull |= !NullPointerIsDefined;
1648       return std::max(int64_t(0), DerefBytes);
1649     }
1650   }
1651 
1652   return 0;
1653 }
1654 
1655 struct AANonNullImpl : AANonNull {
1656   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1657       : AANonNull(IRP, A),
1658         NullIsDefined(NullPointerIsDefined(
1659             getAnchorScope(),
1660             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1661 
1662   /// See AbstractAttribute::initialize(...).
1663   void initialize(Attributor &A) override {
1664     Value &V = getAssociatedValue();
1665     if (!NullIsDefined &&
1666         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1667                 /* IgnoreSubsumingPositions */ false, &A)) {
1668       indicateOptimisticFixpoint();
1669       return;
1670     }
1671 
1672     if (isa<ConstantPointerNull>(V)) {
1673       indicatePessimisticFixpoint();
1674       return;
1675     }
1676 
1677     AANonNull::initialize(A);
1678 
1679     bool CanBeNull, CanBeFreed;
1680     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1681                                          CanBeFreed)) {
1682       if (!CanBeNull) {
1683         indicateOptimisticFixpoint();
1684         return;
1685       }
1686     }
1687 
1688     if (isa<GlobalValue>(&getAssociatedValue())) {
1689       indicatePessimisticFixpoint();
1690       return;
1691     }
1692 
1693     if (Instruction *CtxI = getCtxI())
1694       followUsesInMBEC(*this, A, getState(), *CtxI);
1695   }
1696 
1697   /// See followUsesInMBEC
1698   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1699                        AANonNull::StateType &State) {
1700     bool IsNonNull = false;
1701     bool TrackUse = false;
1702     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1703                                        IsNonNull, TrackUse);
1704     State.setKnown(IsNonNull);
1705     return TrackUse;
1706   }
1707 
1708   /// See AbstractAttribute::getAsStr().
1709   const std::string getAsStr() const override {
1710     return getAssumed() ? "nonnull" : "may-null";
1711   }
1712 
1713   /// Flag to determine if the underlying value can be null and still allow
1714   /// valid accesses.
1715   const bool NullIsDefined;
1716 };
1717 
1718 /// NonNull attribute for a floating value.
1719 struct AANonNullFloating : public AANonNullImpl {
1720   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1721       : AANonNullImpl(IRP, A) {}
1722 
1723   /// See AbstractAttribute::updateImpl(...).
1724   ChangeStatus updateImpl(Attributor &A) override {
1725     const DataLayout &DL = A.getDataLayout();
1726 
1727     DominatorTree *DT = nullptr;
1728     AssumptionCache *AC = nullptr;
1729     InformationCache &InfoCache = A.getInfoCache();
1730     if (const Function *Fn = getAnchorScope()) {
1731       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1732       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1733     }
1734 
1735     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1736                             AANonNull::StateType &T, bool Stripped) -> bool {
1737       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1738                                              DepClassTy::REQUIRED);
1739       if (!Stripped && this == &AA) {
1740         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1741           T.indicatePessimisticFixpoint();
1742       } else {
1743         // Use abstract attribute information.
1744         const AANonNull::StateType &NS = AA.getState();
1745         T ^= NS;
1746       }
1747       return T.isValidState();
1748     };
1749 
1750     StateType T;
1751     if (!genericValueTraversal<AANonNull, StateType>(
1752             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1753       return indicatePessimisticFixpoint();
1754 
1755     return clampStateAndIndicateChange(getState(), T);
1756   }
1757 
1758   /// See AbstractAttribute::trackStatistics()
1759   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1760 };
1761 
1762 /// NonNull attribute for function return value.
1763 struct AANonNullReturned final
1764     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1765   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1766       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1767 
1768   /// See AbstractAttribute::getAsStr().
1769   const std::string getAsStr() const override {
1770     return getAssumed() ? "nonnull" : "may-null";
1771   }
1772 
1773   /// See AbstractAttribute::trackStatistics()
1774   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1775 };
1776 
1777 /// NonNull attribute for function argument.
1778 struct AANonNullArgument final
1779     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1780   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1781       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1782 
1783   /// See AbstractAttribute::trackStatistics()
1784   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1785 };
1786 
1787 struct AANonNullCallSiteArgument final : AANonNullFloating {
1788   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1789       : AANonNullFloating(IRP, A) {}
1790 
1791   /// See AbstractAttribute::trackStatistics()
1792   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1793 };
1794 
1795 /// NonNull attribute for a call site return position.
1796 struct AANonNullCallSiteReturned final
1797     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1798   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1799       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1800 
1801   /// See AbstractAttribute::trackStatistics()
1802   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1803 };
1804 
1805 /// ------------------------ No-Recurse Attributes ----------------------------
1806 
1807 struct AANoRecurseImpl : public AANoRecurse {
1808   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1809 
1810   /// See AbstractAttribute::getAsStr()
1811   const std::string getAsStr() const override {
1812     return getAssumed() ? "norecurse" : "may-recurse";
1813   }
1814 };
1815 
1816 struct AANoRecurseFunction final : AANoRecurseImpl {
1817   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1818       : AANoRecurseImpl(IRP, A) {}
1819 
1820   /// See AbstractAttribute::initialize(...).
1821   void initialize(Attributor &A) override {
1822     AANoRecurseImpl::initialize(A);
1823     if (const Function *F = getAnchorScope())
1824       if (A.getInfoCache().getSccSize(*F) != 1)
1825         indicatePessimisticFixpoint();
1826   }
1827 
1828   /// See AbstractAttribute::updateImpl(...).
1829   ChangeStatus updateImpl(Attributor &A) override {
1830 
1831     // If all live call sites are known to be no-recurse, we are as well.
1832     auto CallSitePred = [&](AbstractCallSite ACS) {
1833       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1834           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1835           DepClassTy::NONE);
1836       return NoRecurseAA.isKnownNoRecurse();
1837     };
1838     bool AllCallSitesKnown;
1839     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1840       // If we know all call sites and all are known no-recurse, we are done.
1841       // If all known call sites, which might not be all that exist, are known
1842       // to be no-recurse, we are not done but we can continue to assume
1843       // no-recurse. If one of the call sites we have not visited will become
1844       // live, another update is triggered.
1845       if (AllCallSitesKnown)
1846         indicateOptimisticFixpoint();
1847       return ChangeStatus::UNCHANGED;
1848     }
1849 
1850     // If the above check does not hold anymore we look at the calls.
1851     auto CheckForNoRecurse = [&](Instruction &I) {
1852       const auto &CB = cast<CallBase>(I);
1853       if (CB.hasFnAttr(Attribute::NoRecurse))
1854         return true;
1855 
1856       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1857           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1858       if (!NoRecurseAA.isAssumedNoRecurse())
1859         return false;
1860 
1861       // Recursion to the same function
1862       if (CB.getCalledFunction() == getAnchorScope())
1863         return false;
1864 
1865       return true;
1866     };
1867 
1868     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1869       return indicatePessimisticFixpoint();
1870     return ChangeStatus::UNCHANGED;
1871   }
1872 
1873   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1874 };
1875 
1876 /// NoRecurse attribute deduction for a call sites.
1877 struct AANoRecurseCallSite final : AANoRecurseImpl {
1878   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1879       : AANoRecurseImpl(IRP, A) {}
1880 
1881   /// See AbstractAttribute::initialize(...).
1882   void initialize(Attributor &A) override {
1883     AANoRecurseImpl::initialize(A);
1884     Function *F = getAssociatedFunction();
1885     if (!F || F->isDeclaration())
1886       indicatePessimisticFixpoint();
1887   }
1888 
1889   /// See AbstractAttribute::updateImpl(...).
1890   ChangeStatus updateImpl(Attributor &A) override {
1891     // TODO: Once we have call site specific value information we can provide
1892     //       call site specific liveness information and then it makes
1893     //       sense to specialize attributes for call sites arguments instead of
1894     //       redirecting requests to the callee argument.
1895     Function *F = getAssociatedFunction();
1896     const IRPosition &FnPos = IRPosition::function(*F);
1897     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1898     return clampStateAndIndicateChange(getState(), FnAA.getState());
1899   }
1900 
1901   /// See AbstractAttribute::trackStatistics()
1902   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1903 };
1904 
1905 /// -------------------- Undefined-Behavior Attributes ------------------------
1906 
1907 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1908   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1909       : AAUndefinedBehavior(IRP, A) {}
1910 
1911   /// See AbstractAttribute::updateImpl(...).
1912   // through a pointer (i.e. also branches etc.)
1913   ChangeStatus updateImpl(Attributor &A) override {
1914     const size_t UBPrevSize = KnownUBInsts.size();
1915     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1916 
1917     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1918       // Skip instructions that are already saved.
1919       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1920         return true;
1921 
1922       // If we reach here, we know we have an instruction
1923       // that accesses memory through a pointer operand,
1924       // for which getPointerOperand() should give it to us.
1925       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1926       assert(PtrOp &&
1927              "Expected pointer operand of memory accessing instruction");
1928 
1929       // Either we stopped and the appropriate action was taken,
1930       // or we got back a simplified value to continue.
1931       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1932       if (!SimplifiedPtrOp.hasValue())
1933         return true;
1934       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1935 
1936       // A memory access through a pointer is considered UB
1937       // only if the pointer has constant null value.
1938       // TODO: Expand it to not only check constant values.
1939       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1940         AssumedNoUBInsts.insert(&I);
1941         return true;
1942       }
1943       const Type *PtrTy = PtrOpVal->getType();
1944 
1945       // Because we only consider instructions inside functions,
1946       // assume that a parent function exists.
1947       const Function *F = I.getFunction();
1948 
1949       // A memory access using constant null pointer is only considered UB
1950       // if null pointer is _not_ defined for the target platform.
1951       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1952         AssumedNoUBInsts.insert(&I);
1953       else
1954         KnownUBInsts.insert(&I);
1955       return true;
1956     };
1957 
1958     auto InspectBrInstForUB = [&](Instruction &I) {
1959       // A conditional branch instruction is considered UB if it has `undef`
1960       // condition.
1961 
1962       // Skip instructions that are already saved.
1963       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1964         return true;
1965 
1966       // We know we have a branch instruction.
1967       auto BrInst = cast<BranchInst>(&I);
1968 
1969       // Unconditional branches are never considered UB.
1970       if (BrInst->isUnconditional())
1971         return true;
1972 
1973       // Either we stopped and the appropriate action was taken,
1974       // or we got back a simplified value to continue.
1975       Optional<Value *> SimplifiedCond =
1976           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1977       if (!SimplifiedCond.hasValue())
1978         return true;
1979       AssumedNoUBInsts.insert(&I);
1980       return true;
1981     };
1982 
1983     auto InspectCallSiteForUB = [&](Instruction &I) {
1984       // Check whether a callsite always cause UB or not
1985 
1986       // Skip instructions that are already saved.
1987       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1988         return true;
1989 
1990       // Check nonnull and noundef argument attribute violation for each
1991       // callsite.
1992       CallBase &CB = cast<CallBase>(I);
1993       Function *Callee = CB.getCalledFunction();
1994       if (!Callee)
1995         return true;
1996       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
1997         // If current argument is known to be simplified to null pointer and the
1998         // corresponding argument position is known to have nonnull attribute,
1999         // the argument is poison. Furthermore, if the argument is poison and
2000         // the position is known to have noundef attriubte, this callsite is
2001         // considered UB.
2002         if (idx >= Callee->arg_size())
2003           break;
2004         Value *ArgVal = CB.getArgOperand(idx);
2005         if (!ArgVal)
2006           continue;
2007         // Here, we handle three cases.
2008         //   (1) Not having a value means it is dead. (we can replace the value
2009         //       with undef)
2010         //   (2) Simplified to undef. The argument violate noundef attriubte.
2011         //   (3) Simplified to null pointer where known to be nonnull.
2012         //       The argument is a poison value and violate noundef attribute.
2013         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2014         auto &NoUndefAA =
2015             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2016         if (!NoUndefAA.isKnownNoUndef())
2017           continue;
2018         bool UsedAssumedInformation = false;
2019         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2020             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2021         if (UsedAssumedInformation)
2022           continue;
2023         if (!SimplifiedVal.hasValue() ||
2024             isa<UndefValue>(*SimplifiedVal.getValue())) {
2025           KnownUBInsts.insert(&I);
2026           continue;
2027         }
2028         if (!ArgVal->getType()->isPointerTy() ||
2029             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2030           continue;
2031         auto &NonNullAA =
2032             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2033         if (NonNullAA.isKnownNonNull())
2034           KnownUBInsts.insert(&I);
2035       }
2036       return true;
2037     };
2038 
2039     auto InspectReturnInstForUB =
2040         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2041           // Check if a return instruction always cause UB or not
2042           // Note: It is guaranteed that the returned position of the anchor
2043           //       scope has noundef attribute when this is called.
2044           //       We also ensure the return position is not "assumed dead"
2045           //       because the returned value was then potentially simplified to
2046           //       `undef` in AAReturnedValues without removing the `noundef`
2047           //       attribute yet.
2048 
2049           // When the returned position has noundef attriubte, UB occur in the
2050           // following cases.
2051           //   (1) Returned value is known to be undef.
2052           //   (2) The value is known to be a null pointer and the returned
2053           //       position has nonnull attribute (because the returned value is
2054           //       poison).
2055           bool FoundUB = false;
2056           if (isa<UndefValue>(V)) {
2057             FoundUB = true;
2058           } else {
2059             if (isa<ConstantPointerNull>(V)) {
2060               auto &NonNullAA = A.getAAFor<AANonNull>(
2061                   *this, IRPosition::returned(*getAnchorScope()),
2062                   DepClassTy::NONE);
2063               if (NonNullAA.isKnownNonNull())
2064                 FoundUB = true;
2065             }
2066           }
2067 
2068           if (FoundUB)
2069             for (ReturnInst *RI : RetInsts)
2070               KnownUBInsts.insert(RI);
2071           return true;
2072         };
2073 
2074     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2075                               {Instruction::Load, Instruction::Store,
2076                                Instruction::AtomicCmpXchg,
2077                                Instruction::AtomicRMW},
2078                               /* CheckBBLivenessOnly */ true);
2079     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2080                               /* CheckBBLivenessOnly */ true);
2081     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2082 
2083     // If the returned position of the anchor scope has noundef attriubte, check
2084     // all returned instructions.
2085     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2086       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2087       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2088         auto &RetPosNoUndefAA =
2089             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2090         if (RetPosNoUndefAA.isKnownNoUndef())
2091           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2092                                                     *this);
2093       }
2094     }
2095 
2096     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2097         UBPrevSize != KnownUBInsts.size())
2098       return ChangeStatus::CHANGED;
2099     return ChangeStatus::UNCHANGED;
2100   }
2101 
2102   bool isKnownToCauseUB(Instruction *I) const override {
2103     return KnownUBInsts.count(I);
2104   }
2105 
2106   bool isAssumedToCauseUB(Instruction *I) const override {
2107     // In simple words, if an instruction is not in the assumed to _not_
2108     // cause UB, then it is assumed UB (that includes those
2109     // in the KnownUBInsts set). The rest is boilerplate
2110     // is to ensure that it is one of the instructions we test
2111     // for UB.
2112 
2113     switch (I->getOpcode()) {
2114     case Instruction::Load:
2115     case Instruction::Store:
2116     case Instruction::AtomicCmpXchg:
2117     case Instruction::AtomicRMW:
2118       return !AssumedNoUBInsts.count(I);
2119     case Instruction::Br: {
2120       auto BrInst = cast<BranchInst>(I);
2121       if (BrInst->isUnconditional())
2122         return false;
2123       return !AssumedNoUBInsts.count(I);
2124     } break;
2125     default:
2126       return false;
2127     }
2128     return false;
2129   }
2130 
2131   ChangeStatus manifest(Attributor &A) override {
2132     if (KnownUBInsts.empty())
2133       return ChangeStatus::UNCHANGED;
2134     for (Instruction *I : KnownUBInsts)
2135       A.changeToUnreachableAfterManifest(I);
2136     return ChangeStatus::CHANGED;
2137   }
2138 
2139   /// See AbstractAttribute::getAsStr()
2140   const std::string getAsStr() const override {
2141     return getAssumed() ? "undefined-behavior" : "no-ub";
2142   }
2143 
2144   /// Note: The correctness of this analysis depends on the fact that the
2145   /// following 2 sets will stop changing after some point.
2146   /// "Change" here means that their size changes.
2147   /// The size of each set is monotonically increasing
2148   /// (we only add items to them) and it is upper bounded by the number of
2149   /// instructions in the processed function (we can never save more
2150   /// elements in either set than this number). Hence, at some point,
2151   /// they will stop increasing.
2152   /// Consequently, at some point, both sets will have stopped
2153   /// changing, effectively making the analysis reach a fixpoint.
2154 
2155   /// Note: These 2 sets are disjoint and an instruction can be considered
2156   /// one of 3 things:
2157   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2158   ///    the KnownUBInsts set.
2159   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2160   ///    has a reason to assume it).
2161   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2162   ///    could not find a reason to assume or prove that it can cause UB,
2163   ///    hence it assumes it doesn't. We have a set for these instructions
2164   ///    so that we don't reprocess them in every update.
2165   ///    Note however that instructions in this set may cause UB.
2166 
2167 protected:
2168   /// A set of all live instructions _known_ to cause UB.
2169   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2170 
2171 private:
2172   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2173   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2174 
2175   // Should be called on updates in which if we're processing an instruction
2176   // \p I that depends on a value \p V, one of the following has to happen:
2177   // - If the value is assumed, then stop.
2178   // - If the value is known but undef, then consider it UB.
2179   // - Otherwise, do specific processing with the simplified value.
2180   // We return None in the first 2 cases to signify that an appropriate
2181   // action was taken and the caller should stop.
2182   // Otherwise, we return the simplified value that the caller should
2183   // use for specific processing.
2184   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2185                                          Instruction *I) {
2186     bool UsedAssumedInformation = false;
2187     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2188         IRPosition::value(*V), *this, UsedAssumedInformation);
2189     if (UsedAssumedInformation) {
2190       // Don't depend on assumed values.
2191       return llvm::None;
2192     }
2193     if (!SimplifiedV.hasValue()) {
2194       // If it is known (which we tested above) but it doesn't have a value,
2195       // then we can assume `undef` and hence the instruction is UB.
2196       KnownUBInsts.insert(I);
2197       return llvm::None;
2198     }
2199     Value *Val = SimplifiedV.getValue();
2200     if (isa<UndefValue>(Val)) {
2201       KnownUBInsts.insert(I);
2202       return llvm::None;
2203     }
2204     return Val;
2205   }
2206 };
2207 
2208 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2209   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2210       : AAUndefinedBehaviorImpl(IRP, A) {}
2211 
2212   /// See AbstractAttribute::trackStatistics()
2213   void trackStatistics() const override {
2214     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2215                "Number of instructions known to have UB");
2216     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2217         KnownUBInsts.size();
2218   }
2219 };
2220 
2221 /// ------------------------ Will-Return Attributes ----------------------------
2222 
2223 // Helper function that checks whether a function has any cycle which we don't
2224 // know if it is bounded or not.
2225 // Loops with maximum trip count are considered bounded, any other cycle not.
2226 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2227   ScalarEvolution *SE =
2228       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2229   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2230   // If either SCEV or LoopInfo is not available for the function then we assume
2231   // any cycle to be unbounded cycle.
2232   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2233   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2234   if (!SE || !LI) {
2235     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2236       if (SCCI.hasCycle())
2237         return true;
2238     return false;
2239   }
2240 
2241   // If there's irreducible control, the function may contain non-loop cycles.
2242   if (mayContainIrreducibleControl(F, LI))
2243     return true;
2244 
2245   // Any loop that does not have a max trip count is considered unbounded cycle.
2246   for (auto *L : LI->getLoopsInPreorder()) {
2247     if (!SE->getSmallConstantMaxTripCount(L))
2248       return true;
2249   }
2250   return false;
2251 }
2252 
2253 struct AAWillReturnImpl : public AAWillReturn {
2254   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2255       : AAWillReturn(IRP, A) {}
2256 
2257   /// See AbstractAttribute::initialize(...).
2258   void initialize(Attributor &A) override {
2259     AAWillReturn::initialize(A);
2260 
2261     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2262       indicateOptimisticFixpoint();
2263       return;
2264     }
2265   }
2266 
2267   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2268   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2269     // Check for `mustprogress` in the scope and the associated function which
2270     // might be different if this is a call site.
2271     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2272         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2273       return false;
2274 
2275     const auto &MemAA =
2276         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2277     if (!MemAA.isAssumedReadOnly())
2278       return false;
2279     if (KnownOnly && !MemAA.isKnownReadOnly())
2280       return false;
2281     if (!MemAA.isKnownReadOnly())
2282       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2283 
2284     return true;
2285   }
2286 
2287   /// See AbstractAttribute::updateImpl(...).
2288   ChangeStatus updateImpl(Attributor &A) override {
2289     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2290       return ChangeStatus::UNCHANGED;
2291 
2292     auto CheckForWillReturn = [&](Instruction &I) {
2293       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2294       const auto &WillReturnAA =
2295           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2296       if (WillReturnAA.isKnownWillReturn())
2297         return true;
2298       if (!WillReturnAA.isAssumedWillReturn())
2299         return false;
2300       const auto &NoRecurseAA =
2301           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2302       return NoRecurseAA.isAssumedNoRecurse();
2303     };
2304 
2305     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2306       return indicatePessimisticFixpoint();
2307 
2308     return ChangeStatus::UNCHANGED;
2309   }
2310 
2311   /// See AbstractAttribute::getAsStr()
2312   const std::string getAsStr() const override {
2313     return getAssumed() ? "willreturn" : "may-noreturn";
2314   }
2315 };
2316 
2317 struct AAWillReturnFunction final : AAWillReturnImpl {
2318   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2319       : AAWillReturnImpl(IRP, A) {}
2320 
2321   /// See AbstractAttribute::initialize(...).
2322   void initialize(Attributor &A) override {
2323     AAWillReturnImpl::initialize(A);
2324 
2325     Function *F = getAnchorScope();
2326     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2327       indicatePessimisticFixpoint();
2328   }
2329 
2330   /// See AbstractAttribute::trackStatistics()
2331   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2332 };
2333 
2334 /// WillReturn attribute deduction for a call sites.
2335 struct AAWillReturnCallSite final : AAWillReturnImpl {
2336   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2337       : AAWillReturnImpl(IRP, A) {}
2338 
2339   /// See AbstractAttribute::initialize(...).
2340   void initialize(Attributor &A) override {
2341     AAWillReturnImpl::initialize(A);
2342     Function *F = getAssociatedFunction();
2343     if (!F || !A.isFunctionIPOAmendable(*F))
2344       indicatePessimisticFixpoint();
2345   }
2346 
2347   /// See AbstractAttribute::updateImpl(...).
2348   ChangeStatus updateImpl(Attributor &A) override {
2349     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2350       return ChangeStatus::UNCHANGED;
2351 
2352     // TODO: Once we have call site specific value information we can provide
2353     //       call site specific liveness information and then it makes
2354     //       sense to specialize attributes for call sites arguments instead of
2355     //       redirecting requests to the callee argument.
2356     Function *F = getAssociatedFunction();
2357     const IRPosition &FnPos = IRPosition::function(*F);
2358     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2359     return clampStateAndIndicateChange(getState(), FnAA.getState());
2360   }
2361 
2362   /// See AbstractAttribute::trackStatistics()
2363   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2364 };
2365 
2366 /// -------------------AAReachability Attribute--------------------------
2367 
2368 struct AAReachabilityImpl : AAReachability {
2369   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2370       : AAReachability(IRP, A) {}
2371 
2372   const std::string getAsStr() const override {
2373     // TODO: Return the number of reachable queries.
2374     return "reachable";
2375   }
2376 
2377   /// See AbstractAttribute::updateImpl(...).
2378   ChangeStatus updateImpl(Attributor &A) override {
2379     return ChangeStatus::UNCHANGED;
2380   }
2381 };
2382 
2383 struct AAReachabilityFunction final : public AAReachabilityImpl {
2384   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2385       : AAReachabilityImpl(IRP, A) {}
2386 
2387   /// See AbstractAttribute::trackStatistics()
2388   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2389 };
2390 
2391 /// ------------------------ NoAlias Argument Attribute ------------------------
2392 
2393 struct AANoAliasImpl : AANoAlias {
2394   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2395     assert(getAssociatedType()->isPointerTy() &&
2396            "Noalias is a pointer attribute");
2397   }
2398 
2399   const std::string getAsStr() const override {
2400     return getAssumed() ? "noalias" : "may-alias";
2401   }
2402 };
2403 
2404 /// NoAlias attribute for a floating value.
2405 struct AANoAliasFloating final : AANoAliasImpl {
2406   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2407       : AANoAliasImpl(IRP, A) {}
2408 
2409   /// See AbstractAttribute::initialize(...).
2410   void initialize(Attributor &A) override {
2411     AANoAliasImpl::initialize(A);
2412     Value *Val = &getAssociatedValue();
2413     do {
2414       CastInst *CI = dyn_cast<CastInst>(Val);
2415       if (!CI)
2416         break;
2417       Value *Base = CI->getOperand(0);
2418       if (!Base->hasOneUse())
2419         break;
2420       Val = Base;
2421     } while (true);
2422 
2423     if (!Val->getType()->isPointerTy()) {
2424       indicatePessimisticFixpoint();
2425       return;
2426     }
2427 
2428     if (isa<AllocaInst>(Val))
2429       indicateOptimisticFixpoint();
2430     else if (isa<ConstantPointerNull>(Val) &&
2431              !NullPointerIsDefined(getAnchorScope(),
2432                                    Val->getType()->getPointerAddressSpace()))
2433       indicateOptimisticFixpoint();
2434     else if (Val != &getAssociatedValue()) {
2435       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2436           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2437       if (ValNoAliasAA.isKnownNoAlias())
2438         indicateOptimisticFixpoint();
2439     }
2440   }
2441 
2442   /// See AbstractAttribute::updateImpl(...).
2443   ChangeStatus updateImpl(Attributor &A) override {
2444     // TODO: Implement this.
2445     return indicatePessimisticFixpoint();
2446   }
2447 
2448   /// See AbstractAttribute::trackStatistics()
2449   void trackStatistics() const override {
2450     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2451   }
2452 };
2453 
2454 /// NoAlias attribute for an argument.
2455 struct AANoAliasArgument final
2456     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2457   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2458   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2459 
2460   /// See AbstractAttribute::initialize(...).
2461   void initialize(Attributor &A) override {
2462     Base::initialize(A);
2463     // See callsite argument attribute and callee argument attribute.
2464     if (hasAttr({Attribute::ByVal}))
2465       indicateOptimisticFixpoint();
2466   }
2467 
2468   /// See AbstractAttribute::update(...).
2469   ChangeStatus updateImpl(Attributor &A) override {
2470     // We have to make sure no-alias on the argument does not break
2471     // synchronization when this is a callback argument, see also [1] below.
2472     // If synchronization cannot be affected, we delegate to the base updateImpl
2473     // function, otherwise we give up for now.
2474 
2475     // If the function is no-sync, no-alias cannot break synchronization.
2476     const auto &NoSyncAA =
2477         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2478                              DepClassTy::OPTIONAL);
2479     if (NoSyncAA.isAssumedNoSync())
2480       return Base::updateImpl(A);
2481 
2482     // If the argument is read-only, no-alias cannot break synchronization.
2483     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2484         *this, getIRPosition(), DepClassTy::OPTIONAL);
2485     if (MemBehaviorAA.isAssumedReadOnly())
2486       return Base::updateImpl(A);
2487 
2488     // If the argument is never passed through callbacks, no-alias cannot break
2489     // synchronization.
2490     bool AllCallSitesKnown;
2491     if (A.checkForAllCallSites(
2492             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2493             true, AllCallSitesKnown))
2494       return Base::updateImpl(A);
2495 
2496     // TODO: add no-alias but make sure it doesn't break synchronization by
2497     // introducing fake uses. See:
2498     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2499     //     International Workshop on OpenMP 2018,
2500     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2501 
2502     return indicatePessimisticFixpoint();
2503   }
2504 
2505   /// See AbstractAttribute::trackStatistics()
2506   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2507 };
2508 
2509 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2510   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2511       : AANoAliasImpl(IRP, A) {}
2512 
2513   /// See AbstractAttribute::initialize(...).
2514   void initialize(Attributor &A) override {
2515     // See callsite argument attribute and callee argument attribute.
2516     const auto &CB = cast<CallBase>(getAnchorValue());
2517     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2518       indicateOptimisticFixpoint();
2519     Value &Val = getAssociatedValue();
2520     if (isa<ConstantPointerNull>(Val) &&
2521         !NullPointerIsDefined(getAnchorScope(),
2522                               Val.getType()->getPointerAddressSpace()))
2523       indicateOptimisticFixpoint();
2524   }
2525 
2526   /// Determine if the underlying value may alias with the call site argument
2527   /// \p OtherArgNo of \p ICS (= the underlying call site).
2528   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2529                             const AAMemoryBehavior &MemBehaviorAA,
2530                             const CallBase &CB, unsigned OtherArgNo) {
2531     // We do not need to worry about aliasing with the underlying IRP.
2532     if (this->getCalleeArgNo() == (int)OtherArgNo)
2533       return false;
2534 
2535     // If it is not a pointer or pointer vector we do not alias.
2536     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2537     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2538       return false;
2539 
2540     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2541         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2542 
2543     // If the argument is readnone, there is no read-write aliasing.
2544     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2545       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2546       return false;
2547     }
2548 
2549     // If the argument is readonly and the underlying value is readonly, there
2550     // is no read-write aliasing.
2551     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2552     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2553       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2554       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2555       return false;
2556     }
2557 
2558     // We have to utilize actual alias analysis queries so we need the object.
2559     if (!AAR)
2560       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2561 
2562     // Try to rule it out at the call site.
2563     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2564     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2565                          "callsite arguments: "
2566                       << getAssociatedValue() << " " << *ArgOp << " => "
2567                       << (IsAliasing ? "" : "no-") << "alias \n");
2568 
2569     return IsAliasing;
2570   }
2571 
2572   bool
2573   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2574                                          const AAMemoryBehavior &MemBehaviorAA,
2575                                          const AANoAlias &NoAliasAA) {
2576     // We can deduce "noalias" if the following conditions hold.
2577     // (i)   Associated value is assumed to be noalias in the definition.
2578     // (ii)  Associated value is assumed to be no-capture in all the uses
2579     //       possibly executed before this callsite.
2580     // (iii) There is no other pointer argument which could alias with the
2581     //       value.
2582 
2583     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2584     if (!AssociatedValueIsNoAliasAtDef) {
2585       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2586                         << " is not no-alias at the definition\n");
2587       return false;
2588     }
2589 
2590     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2591 
2592     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2593     const Function *ScopeFn = VIRP.getAnchorScope();
2594     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2595     // Check whether the value is captured in the scope using AANoCapture.
2596     //      Look at CFG and check only uses possibly executed before this
2597     //      callsite.
2598     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2599       Instruction *UserI = cast<Instruction>(U.getUser());
2600 
2601       // If UserI is the curr instruction and there is a single potential use of
2602       // the value in UserI we allow the use.
2603       // TODO: We should inspect the operands and allow those that cannot alias
2604       //       with the value.
2605       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2606         return true;
2607 
2608       if (ScopeFn) {
2609         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2610             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2611 
2612         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2613           return true;
2614 
2615         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2616           if (CB->isArgOperand(&U)) {
2617 
2618             unsigned ArgNo = CB->getArgOperandNo(&U);
2619 
2620             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2621                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2622                 DepClassTy::OPTIONAL);
2623 
2624             if (NoCaptureAA.isAssumedNoCapture())
2625               return true;
2626           }
2627         }
2628       }
2629 
2630       // For cases which can potentially have more users
2631       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2632           isa<SelectInst>(U)) {
2633         Follow = true;
2634         return true;
2635       }
2636 
2637       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2638       return false;
2639     };
2640 
2641     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2642       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2643         LLVM_DEBUG(
2644             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2645                    << " cannot be noalias as it is potentially captured\n");
2646         return false;
2647       }
2648     }
2649     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2650 
2651     // Check there is no other pointer argument which could alias with the
2652     // value passed at this call site.
2653     // TODO: AbstractCallSite
2654     const auto &CB = cast<CallBase>(getAnchorValue());
2655     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2656          OtherArgNo++)
2657       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2658         return false;
2659 
2660     return true;
2661   }
2662 
2663   /// See AbstractAttribute::updateImpl(...).
2664   ChangeStatus updateImpl(Attributor &A) override {
2665     // If the argument is readnone we are done as there are no accesses via the
2666     // argument.
2667     auto &MemBehaviorAA =
2668         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2669     if (MemBehaviorAA.isAssumedReadNone()) {
2670       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2671       return ChangeStatus::UNCHANGED;
2672     }
2673 
2674     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2675     const auto &NoAliasAA =
2676         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2677 
2678     AAResults *AAR = nullptr;
2679     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2680                                                NoAliasAA)) {
2681       LLVM_DEBUG(
2682           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2683       return ChangeStatus::UNCHANGED;
2684     }
2685 
2686     return indicatePessimisticFixpoint();
2687   }
2688 
2689   /// See AbstractAttribute::trackStatistics()
2690   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2691 };
2692 
2693 /// NoAlias attribute for function return value.
2694 struct AANoAliasReturned final : AANoAliasImpl {
2695   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2696       : AANoAliasImpl(IRP, A) {}
2697 
2698   /// See AbstractAttribute::initialize(...).
2699   void initialize(Attributor &A) override {
2700     AANoAliasImpl::initialize(A);
2701     Function *F = getAssociatedFunction();
2702     if (!F || F->isDeclaration())
2703       indicatePessimisticFixpoint();
2704   }
2705 
2706   /// See AbstractAttribute::updateImpl(...).
2707   virtual ChangeStatus updateImpl(Attributor &A) override {
2708 
2709     auto CheckReturnValue = [&](Value &RV) -> bool {
2710       if (Constant *C = dyn_cast<Constant>(&RV))
2711         if (C->isNullValue() || isa<UndefValue>(C))
2712           return true;
2713 
2714       /// For now, we can only deduce noalias if we have call sites.
2715       /// FIXME: add more support.
2716       if (!isa<CallBase>(&RV))
2717         return false;
2718 
2719       const IRPosition &RVPos = IRPosition::value(RV);
2720       const auto &NoAliasAA =
2721           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2722       if (!NoAliasAA.isAssumedNoAlias())
2723         return false;
2724 
2725       const auto &NoCaptureAA =
2726           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2727       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2728     };
2729 
2730     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2731       return indicatePessimisticFixpoint();
2732 
2733     return ChangeStatus::UNCHANGED;
2734   }
2735 
2736   /// See AbstractAttribute::trackStatistics()
2737   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2738 };
2739 
2740 /// NoAlias attribute deduction for a call site return value.
2741 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2742   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2743       : AANoAliasImpl(IRP, A) {}
2744 
2745   /// See AbstractAttribute::initialize(...).
2746   void initialize(Attributor &A) override {
2747     AANoAliasImpl::initialize(A);
2748     Function *F = getAssociatedFunction();
2749     if (!F || F->isDeclaration())
2750       indicatePessimisticFixpoint();
2751   }
2752 
2753   /// See AbstractAttribute::updateImpl(...).
2754   ChangeStatus updateImpl(Attributor &A) override {
2755     // TODO: Once we have call site specific value information we can provide
2756     //       call site specific liveness information and then it makes
2757     //       sense to specialize attributes for call sites arguments instead of
2758     //       redirecting requests to the callee argument.
2759     Function *F = getAssociatedFunction();
2760     const IRPosition &FnPos = IRPosition::returned(*F);
2761     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2762     return clampStateAndIndicateChange(getState(), FnAA.getState());
2763   }
2764 
2765   /// See AbstractAttribute::trackStatistics()
2766   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2767 };
2768 
2769 /// -------------------AAIsDead Function Attribute-----------------------
2770 
2771 struct AAIsDeadValueImpl : public AAIsDead {
2772   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2773 
2774   /// See AAIsDead::isAssumedDead().
2775   bool isAssumedDead() const override { return getAssumed(); }
2776 
2777   /// See AAIsDead::isKnownDead().
2778   bool isKnownDead() const override { return getKnown(); }
2779 
2780   /// See AAIsDead::isAssumedDead(BasicBlock *).
2781   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2782 
2783   /// See AAIsDead::isKnownDead(BasicBlock *).
2784   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2785 
2786   /// See AAIsDead::isAssumedDead(Instruction *I).
2787   bool isAssumedDead(const Instruction *I) const override {
2788     return I == getCtxI() && isAssumedDead();
2789   }
2790 
2791   /// See AAIsDead::isKnownDead(Instruction *I).
2792   bool isKnownDead(const Instruction *I) const override {
2793     return isAssumedDead(I) && getKnown();
2794   }
2795 
2796   /// See AbstractAttribute::getAsStr().
2797   const std::string getAsStr() const override {
2798     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2799   }
2800 
2801   /// Check if all uses are assumed dead.
2802   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2803     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2804     // Explicitly set the dependence class to required because we want a long
2805     // chain of N dependent instructions to be considered live as soon as one is
2806     // without going through N update cycles. This is not required for
2807     // correctness.
2808     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2809   }
2810 
2811   /// Determine if \p I is assumed to be side-effect free.
2812   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2813     if (!I || wouldInstructionBeTriviallyDead(I))
2814       return true;
2815 
2816     auto *CB = dyn_cast<CallBase>(I);
2817     if (!CB || isa<IntrinsicInst>(CB))
2818       return false;
2819 
2820     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2821     const auto &NoUnwindAA =
2822         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2823     if (!NoUnwindAA.isAssumedNoUnwind())
2824       return false;
2825     if (!NoUnwindAA.isKnownNoUnwind())
2826       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2827 
2828     const auto &MemBehaviorAA =
2829         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2830     if (MemBehaviorAA.isAssumedReadOnly()) {
2831       if (!MemBehaviorAA.isKnownReadOnly())
2832         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2833       return true;
2834     }
2835     return false;
2836   }
2837 };
2838 
2839 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2840   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2841       : AAIsDeadValueImpl(IRP, A) {}
2842 
2843   /// See AbstractAttribute::initialize(...).
2844   void initialize(Attributor &A) override {
2845     if (isa<UndefValue>(getAssociatedValue())) {
2846       indicatePessimisticFixpoint();
2847       return;
2848     }
2849 
2850     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2851     if (!isAssumedSideEffectFree(A, I))
2852       indicatePessimisticFixpoint();
2853   }
2854 
2855   /// See AbstractAttribute::updateImpl(...).
2856   ChangeStatus updateImpl(Attributor &A) override {
2857     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2858     if (!isAssumedSideEffectFree(A, I))
2859       return indicatePessimisticFixpoint();
2860 
2861     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2862       return indicatePessimisticFixpoint();
2863     return ChangeStatus::UNCHANGED;
2864   }
2865 
2866   /// See AbstractAttribute::manifest(...).
2867   ChangeStatus manifest(Attributor &A) override {
2868     Value &V = getAssociatedValue();
2869     if (auto *I = dyn_cast<Instruction>(&V)) {
2870       // If we get here we basically know the users are all dead. We check if
2871       // isAssumedSideEffectFree returns true here again because it might not be
2872       // the case and only the users are dead but the instruction (=call) is
2873       // still needed.
2874       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2875         A.deleteAfterManifest(*I);
2876         return ChangeStatus::CHANGED;
2877       }
2878     }
2879     if (V.use_empty())
2880       return ChangeStatus::UNCHANGED;
2881 
2882     bool UsedAssumedInformation = false;
2883     Optional<Constant *> C =
2884         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2885     if (C.hasValue() && C.getValue())
2886       return ChangeStatus::UNCHANGED;
2887 
2888     // Replace the value with undef as it is dead but keep droppable uses around
2889     // as they provide information we don't want to give up on just yet.
2890     UndefValue &UV = *UndefValue::get(V.getType());
2891     bool AnyChange =
2892         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2893     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2894   }
2895 
2896   /// See AbstractAttribute::trackStatistics()
2897   void trackStatistics() const override {
2898     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2899   }
2900 };
2901 
2902 struct AAIsDeadArgument : public AAIsDeadFloating {
2903   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2904       : AAIsDeadFloating(IRP, A) {}
2905 
2906   /// See AbstractAttribute::initialize(...).
2907   void initialize(Attributor &A) override {
2908     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2909       indicatePessimisticFixpoint();
2910   }
2911 
2912   /// See AbstractAttribute::manifest(...).
2913   ChangeStatus manifest(Attributor &A) override {
2914     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2915     Argument &Arg = *getAssociatedArgument();
2916     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2917       if (A.registerFunctionSignatureRewrite(
2918               Arg, /* ReplacementTypes */ {},
2919               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2920               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2921         Arg.dropDroppableUses();
2922         return ChangeStatus::CHANGED;
2923       }
2924     return Changed;
2925   }
2926 
2927   /// See AbstractAttribute::trackStatistics()
2928   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2929 };
2930 
2931 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2932   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2933       : AAIsDeadValueImpl(IRP, A) {}
2934 
2935   /// See AbstractAttribute::initialize(...).
2936   void initialize(Attributor &A) override {
2937     if (isa<UndefValue>(getAssociatedValue()))
2938       indicatePessimisticFixpoint();
2939   }
2940 
2941   /// See AbstractAttribute::updateImpl(...).
2942   ChangeStatus updateImpl(Attributor &A) override {
2943     // TODO: Once we have call site specific value information we can provide
2944     //       call site specific liveness information and then it makes
2945     //       sense to specialize attributes for call sites arguments instead of
2946     //       redirecting requests to the callee argument.
2947     Argument *Arg = getAssociatedArgument();
2948     if (!Arg)
2949       return indicatePessimisticFixpoint();
2950     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2951     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2952     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2953   }
2954 
2955   /// See AbstractAttribute::manifest(...).
2956   ChangeStatus manifest(Attributor &A) override {
2957     CallBase &CB = cast<CallBase>(getAnchorValue());
2958     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2959     assert(!isa<UndefValue>(U.get()) &&
2960            "Expected undef values to be filtered out!");
2961     UndefValue &UV = *UndefValue::get(U->getType());
2962     if (A.changeUseAfterManifest(U, UV))
2963       return ChangeStatus::CHANGED;
2964     return ChangeStatus::UNCHANGED;
2965   }
2966 
2967   /// See AbstractAttribute::trackStatistics()
2968   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2969 };
2970 
2971 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2972   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2973       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2974 
2975   /// See AAIsDead::isAssumedDead().
2976   bool isAssumedDead() const override {
2977     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2978   }
2979 
2980   /// See AbstractAttribute::initialize(...).
2981   void initialize(Attributor &A) override {
2982     if (isa<UndefValue>(getAssociatedValue())) {
2983       indicatePessimisticFixpoint();
2984       return;
2985     }
2986 
2987     // We track this separately as a secondary state.
2988     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2989   }
2990 
2991   /// See AbstractAttribute::updateImpl(...).
2992   ChangeStatus updateImpl(Attributor &A) override {
2993     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2994     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2995       IsAssumedSideEffectFree = false;
2996       Changed = ChangeStatus::CHANGED;
2997     }
2998 
2999     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3000       return indicatePessimisticFixpoint();
3001     return Changed;
3002   }
3003 
3004   /// See AbstractAttribute::trackStatistics()
3005   void trackStatistics() const override {
3006     if (IsAssumedSideEffectFree)
3007       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3008     else
3009       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3010   }
3011 
3012   /// See AbstractAttribute::getAsStr().
3013   const std::string getAsStr() const override {
3014     return isAssumedDead()
3015                ? "assumed-dead"
3016                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3017   }
3018 
3019 private:
3020   bool IsAssumedSideEffectFree;
3021 };
3022 
3023 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3024   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3025       : AAIsDeadValueImpl(IRP, A) {}
3026 
3027   /// See AbstractAttribute::updateImpl(...).
3028   ChangeStatus updateImpl(Attributor &A) override {
3029 
3030     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3031                               {Instruction::Ret});
3032 
3033     auto PredForCallSite = [&](AbstractCallSite ACS) {
3034       if (ACS.isCallbackCall() || !ACS.getInstruction())
3035         return false;
3036       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3037     };
3038 
3039     bool AllCallSitesKnown;
3040     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3041                                 AllCallSitesKnown))
3042       return indicatePessimisticFixpoint();
3043 
3044     return ChangeStatus::UNCHANGED;
3045   }
3046 
3047   /// See AbstractAttribute::manifest(...).
3048   ChangeStatus manifest(Attributor &A) override {
3049     // TODO: Rewrite the signature to return void?
3050     bool AnyChange = false;
3051     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3052     auto RetInstPred = [&](Instruction &I) {
3053       ReturnInst &RI = cast<ReturnInst>(I);
3054       if (!isa<UndefValue>(RI.getReturnValue()))
3055         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3056       return true;
3057     };
3058     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3059     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3060   }
3061 
3062   /// See AbstractAttribute::trackStatistics()
3063   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3064 };
3065 
3066 struct AAIsDeadFunction : public AAIsDead {
3067   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3068 
3069   /// See AbstractAttribute::initialize(...).
3070   void initialize(Attributor &A) override {
3071     const Function *F = getAnchorScope();
3072     if (F && !F->isDeclaration()) {
3073       // We only want to compute liveness once. If the function is not part of
3074       // the SCC, skip it.
3075       if (A.isRunOn(*const_cast<Function *>(F))) {
3076         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3077         assumeLive(A, F->getEntryBlock());
3078       } else {
3079         indicatePessimisticFixpoint();
3080       }
3081     }
3082   }
3083 
3084   /// See AbstractAttribute::getAsStr().
3085   const std::string getAsStr() const override {
3086     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3087            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3088            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3089            std::to_string(KnownDeadEnds.size()) + "]";
3090   }
3091 
3092   /// See AbstractAttribute::manifest(...).
3093   ChangeStatus manifest(Attributor &A) override {
3094     assert(getState().isValidState() &&
3095            "Attempted to manifest an invalid state!");
3096 
3097     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3098     Function &F = *getAnchorScope();
3099 
3100     if (AssumedLiveBlocks.empty()) {
3101       A.deleteAfterManifest(F);
3102       return ChangeStatus::CHANGED;
3103     }
3104 
3105     // Flag to determine if we can change an invoke to a call assuming the
3106     // callee is nounwind. This is not possible if the personality of the
3107     // function allows to catch asynchronous exceptions.
3108     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3109 
3110     KnownDeadEnds.set_union(ToBeExploredFrom);
3111     for (const Instruction *DeadEndI : KnownDeadEnds) {
3112       auto *CB = dyn_cast<CallBase>(DeadEndI);
3113       if (!CB)
3114         continue;
3115       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3116           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3117       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3118       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3119         continue;
3120 
3121       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3122         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3123       else
3124         A.changeToUnreachableAfterManifest(
3125             const_cast<Instruction *>(DeadEndI->getNextNode()));
3126       HasChanged = ChangeStatus::CHANGED;
3127     }
3128 
3129     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3130     for (BasicBlock &BB : F)
3131       if (!AssumedLiveBlocks.count(&BB)) {
3132         A.deleteAfterManifest(BB);
3133         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3134       }
3135 
3136     return HasChanged;
3137   }
3138 
3139   /// See AbstractAttribute::updateImpl(...).
3140   ChangeStatus updateImpl(Attributor &A) override;
3141 
3142   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3143     return !AssumedLiveEdges.count(std::make_pair(From, To));
3144   }
3145 
3146   /// See AbstractAttribute::trackStatistics()
3147   void trackStatistics() const override {}
3148 
3149   /// Returns true if the function is assumed dead.
3150   bool isAssumedDead() const override { return false; }
3151 
3152   /// See AAIsDead::isKnownDead().
3153   bool isKnownDead() const override { return false; }
3154 
3155   /// See AAIsDead::isAssumedDead(BasicBlock *).
3156   bool isAssumedDead(const BasicBlock *BB) const override {
3157     assert(BB->getParent() == getAnchorScope() &&
3158            "BB must be in the same anchor scope function.");
3159 
3160     if (!getAssumed())
3161       return false;
3162     return !AssumedLiveBlocks.count(BB);
3163   }
3164 
3165   /// See AAIsDead::isKnownDead(BasicBlock *).
3166   bool isKnownDead(const BasicBlock *BB) const override {
3167     return getKnown() && isAssumedDead(BB);
3168   }
3169 
3170   /// See AAIsDead::isAssumed(Instruction *I).
3171   bool isAssumedDead(const Instruction *I) const override {
3172     assert(I->getParent()->getParent() == getAnchorScope() &&
3173            "Instruction must be in the same anchor scope function.");
3174 
3175     if (!getAssumed())
3176       return false;
3177 
3178     // If it is not in AssumedLiveBlocks then it for sure dead.
3179     // Otherwise, it can still be after noreturn call in a live block.
3180     if (!AssumedLiveBlocks.count(I->getParent()))
3181       return true;
3182 
3183     // If it is not after a liveness barrier it is live.
3184     const Instruction *PrevI = I->getPrevNode();
3185     while (PrevI) {
3186       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3187         return true;
3188       PrevI = PrevI->getPrevNode();
3189     }
3190     return false;
3191   }
3192 
3193   /// See AAIsDead::isKnownDead(Instruction *I).
3194   bool isKnownDead(const Instruction *I) const override {
3195     return getKnown() && isAssumedDead(I);
3196   }
3197 
3198   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3199   /// that internal function called from \p BB should now be looked at.
3200   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3201     if (!AssumedLiveBlocks.insert(&BB).second)
3202       return false;
3203 
3204     // We assume that all of BB is (probably) live now and if there are calls to
3205     // internal functions we will assume that those are now live as well. This
3206     // is a performance optimization for blocks with calls to a lot of internal
3207     // functions. It can however cause dead functions to be treated as live.
3208     for (const Instruction &I : BB)
3209       if (const auto *CB = dyn_cast<CallBase>(&I))
3210         if (const Function *F = CB->getCalledFunction())
3211           if (F->hasLocalLinkage())
3212             A.markLiveInternalFunction(*F);
3213     return true;
3214   }
3215 
3216   /// Collection of instructions that need to be explored again, e.g., we
3217   /// did assume they do not transfer control to (one of their) successors.
3218   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3219 
3220   /// Collection of instructions that are known to not transfer control.
3221   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3222 
3223   /// Collection of all assumed live edges
3224   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3225 
3226   /// Collection of all assumed live BasicBlocks.
3227   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3228 };
3229 
3230 static bool
3231 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3232                         AbstractAttribute &AA,
3233                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3234   const IRPosition &IPos = IRPosition::callsite_function(CB);
3235 
3236   const auto &NoReturnAA =
3237       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3238   if (NoReturnAA.isAssumedNoReturn())
3239     return !NoReturnAA.isKnownNoReturn();
3240   if (CB.isTerminator())
3241     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3242   else
3243     AliveSuccessors.push_back(CB.getNextNode());
3244   return false;
3245 }
3246 
3247 static bool
3248 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3249                         AbstractAttribute &AA,
3250                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3251   bool UsedAssumedInformation =
3252       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3253 
3254   // First, determine if we can change an invoke to a call assuming the
3255   // callee is nounwind. This is not possible if the personality of the
3256   // function allows to catch asynchronous exceptions.
3257   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3258     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3259   } else {
3260     const IRPosition &IPos = IRPosition::callsite_function(II);
3261     const auto &AANoUnw =
3262         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3263     if (AANoUnw.isAssumedNoUnwind()) {
3264       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3265     } else {
3266       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3267     }
3268   }
3269   return UsedAssumedInformation;
3270 }
3271 
3272 static bool
3273 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3274                         AbstractAttribute &AA,
3275                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3276   bool UsedAssumedInformation = false;
3277   if (BI.getNumSuccessors() == 1) {
3278     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3279   } else {
3280     Optional<Constant *> C =
3281         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3282     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3283       // No value yet, assume both edges are dead.
3284     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3285       const BasicBlock *SuccBB =
3286           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3287       AliveSuccessors.push_back(&SuccBB->front());
3288     } else {
3289       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3290       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3291       UsedAssumedInformation = false;
3292     }
3293   }
3294   return UsedAssumedInformation;
3295 }
3296 
3297 static bool
3298 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3299                         AbstractAttribute &AA,
3300                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3301   bool UsedAssumedInformation = false;
3302   Optional<Constant *> C =
3303       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3304   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3305     // No value yet, assume all edges are dead.
3306   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3307     for (auto &CaseIt : SI.cases()) {
3308       if (CaseIt.getCaseValue() == C.getValue()) {
3309         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3310         return UsedAssumedInformation;
3311       }
3312     }
3313     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3314     return UsedAssumedInformation;
3315   } else {
3316     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3317       AliveSuccessors.push_back(&SuccBB->front());
3318   }
3319   return UsedAssumedInformation;
3320 }
3321 
3322 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3323   ChangeStatus Change = ChangeStatus::UNCHANGED;
3324 
3325   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3326                     << getAnchorScope()->size() << "] BBs and "
3327                     << ToBeExploredFrom.size() << " exploration points and "
3328                     << KnownDeadEnds.size() << " known dead ends\n");
3329 
3330   // Copy and clear the list of instructions we need to explore from. It is
3331   // refilled with instructions the next update has to look at.
3332   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3333                                                ToBeExploredFrom.end());
3334   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3335 
3336   SmallVector<const Instruction *, 8> AliveSuccessors;
3337   while (!Worklist.empty()) {
3338     const Instruction *I = Worklist.pop_back_val();
3339     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3340 
3341     // Fast forward for uninteresting instructions. We could look for UB here
3342     // though.
3343     while (!I->isTerminator() && !isa<CallBase>(I)) {
3344       Change = ChangeStatus::CHANGED;
3345       I = I->getNextNode();
3346     }
3347 
3348     AliveSuccessors.clear();
3349 
3350     bool UsedAssumedInformation = false;
3351     switch (I->getOpcode()) {
3352     // TODO: look for (assumed) UB to backwards propagate "deadness".
3353     default:
3354       assert(I->isTerminator() &&
3355              "Expected non-terminators to be handled already!");
3356       for (const BasicBlock *SuccBB : successors(I->getParent()))
3357         AliveSuccessors.push_back(&SuccBB->front());
3358       break;
3359     case Instruction::Call:
3360       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3361                                                        *this, AliveSuccessors);
3362       break;
3363     case Instruction::Invoke:
3364       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3365                                                        *this, AliveSuccessors);
3366       break;
3367     case Instruction::Br:
3368       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3369                                                        *this, AliveSuccessors);
3370       break;
3371     case Instruction::Switch:
3372       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3373                                                        *this, AliveSuccessors);
3374       break;
3375     }
3376 
3377     if (UsedAssumedInformation) {
3378       NewToBeExploredFrom.insert(I);
3379     } else {
3380       Change = ChangeStatus::CHANGED;
3381       if (AliveSuccessors.empty() ||
3382           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3383         KnownDeadEnds.insert(I);
3384     }
3385 
3386     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3387                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3388                       << UsedAssumedInformation << "\n");
3389 
3390     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3391       if (!I->isTerminator()) {
3392         assert(AliveSuccessors.size() == 1 &&
3393                "Non-terminator expected to have a single successor!");
3394         Worklist.push_back(AliveSuccessor);
3395       } else {
3396         // record the assumed live edge
3397         AssumedLiveEdges.insert(
3398             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3399         if (assumeLive(A, *AliveSuccessor->getParent()))
3400           Worklist.push_back(AliveSuccessor);
3401       }
3402     }
3403   }
3404 
3405   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3406 
3407   // If we know everything is live there is no need to query for liveness.
3408   // Instead, indicating a pessimistic fixpoint will cause the state to be
3409   // "invalid" and all queries to be answered conservatively without lookups.
3410   // To be in this state we have to (1) finished the exploration and (3) not
3411   // discovered any non-trivial dead end and (2) not ruled unreachable code
3412   // dead.
3413   if (ToBeExploredFrom.empty() &&
3414       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3415       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3416         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3417       }))
3418     return indicatePessimisticFixpoint();
3419   return Change;
3420 }
3421 
3422 /// Liveness information for a call sites.
3423 struct AAIsDeadCallSite final : AAIsDeadFunction {
3424   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3425       : AAIsDeadFunction(IRP, A) {}
3426 
3427   /// See AbstractAttribute::initialize(...).
3428   void initialize(Attributor &A) override {
3429     // TODO: Once we have call site specific value information we can provide
3430     //       call site specific liveness information and then it makes
3431     //       sense to specialize attributes for call sites instead of
3432     //       redirecting requests to the callee.
3433     llvm_unreachable("Abstract attributes for liveness are not "
3434                      "supported for call sites yet!");
3435   }
3436 
3437   /// See AbstractAttribute::updateImpl(...).
3438   ChangeStatus updateImpl(Attributor &A) override {
3439     return indicatePessimisticFixpoint();
3440   }
3441 
3442   /// See AbstractAttribute::trackStatistics()
3443   void trackStatistics() const override {}
3444 };
3445 
3446 /// -------------------- Dereferenceable Argument Attribute --------------------
3447 
3448 template <>
3449 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3450                                                      const DerefState &R) {
3451   ChangeStatus CS0 =
3452       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3453   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3454   return CS0 | CS1;
3455 }
3456 
3457 struct AADereferenceableImpl : AADereferenceable {
3458   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3459       : AADereferenceable(IRP, A) {}
3460   using StateType = DerefState;
3461 
3462   /// See AbstractAttribute::initialize(...).
3463   void initialize(Attributor &A) override {
3464     SmallVector<Attribute, 4> Attrs;
3465     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3466              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3467     for (const Attribute &Attr : Attrs)
3468       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3469 
3470     const IRPosition &IRP = this->getIRPosition();
3471     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3472 
3473     bool CanBeNull, CanBeFreed;
3474     takeKnownDerefBytesMaximum(
3475         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3476             A.getDataLayout(), CanBeNull, CanBeFreed));
3477 
3478     bool IsFnInterface = IRP.isFnInterfaceKind();
3479     Function *FnScope = IRP.getAnchorScope();
3480     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3481       indicatePessimisticFixpoint();
3482       return;
3483     }
3484 
3485     if (Instruction *CtxI = getCtxI())
3486       followUsesInMBEC(*this, A, getState(), *CtxI);
3487   }
3488 
3489   /// See AbstractAttribute::getState()
3490   /// {
3491   StateType &getState() override { return *this; }
3492   const StateType &getState() const override { return *this; }
3493   /// }
3494 
3495   /// Helper function for collecting accessed bytes in must-be-executed-context
3496   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3497                               DerefState &State) {
3498     const Value *UseV = U->get();
3499     if (!UseV->getType()->isPointerTy())
3500       return;
3501 
3502     Type *PtrTy = UseV->getType();
3503     const DataLayout &DL = A.getDataLayout();
3504     int64_t Offset;
3505     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3506             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3507       if (Base == &getAssociatedValue() &&
3508           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3509         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3510         State.addAccessedBytes(Offset, Size);
3511       }
3512     }
3513   }
3514 
3515   /// See followUsesInMBEC
3516   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3517                        AADereferenceable::StateType &State) {
3518     bool IsNonNull = false;
3519     bool TrackUse = false;
3520     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3521         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3522     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3523                       << " for instruction " << *I << "\n");
3524 
3525     addAccessedBytesForUse(A, U, I, State);
3526     State.takeKnownDerefBytesMaximum(DerefBytes);
3527     return TrackUse;
3528   }
3529 
3530   /// See AbstractAttribute::manifest(...).
3531   ChangeStatus manifest(Attributor &A) override {
3532     ChangeStatus Change = AADereferenceable::manifest(A);
3533     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3534       removeAttrs({Attribute::DereferenceableOrNull});
3535       return ChangeStatus::CHANGED;
3536     }
3537     return Change;
3538   }
3539 
3540   void getDeducedAttributes(LLVMContext &Ctx,
3541                             SmallVectorImpl<Attribute> &Attrs) const override {
3542     // TODO: Add *_globally support
3543     if (isAssumedNonNull())
3544       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3545           Ctx, getAssumedDereferenceableBytes()));
3546     else
3547       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3548           Ctx, getAssumedDereferenceableBytes()));
3549   }
3550 
3551   /// See AbstractAttribute::getAsStr().
3552   const std::string getAsStr() const override {
3553     if (!getAssumedDereferenceableBytes())
3554       return "unknown-dereferenceable";
3555     return std::string("dereferenceable") +
3556            (isAssumedNonNull() ? "" : "_or_null") +
3557            (isAssumedGlobal() ? "_globally" : "") + "<" +
3558            std::to_string(getKnownDereferenceableBytes()) + "-" +
3559            std::to_string(getAssumedDereferenceableBytes()) + ">";
3560   }
3561 };
3562 
3563 /// Dereferenceable attribute for a floating value.
3564 struct AADereferenceableFloating : AADereferenceableImpl {
3565   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3566       : AADereferenceableImpl(IRP, A) {}
3567 
3568   /// See AbstractAttribute::updateImpl(...).
3569   ChangeStatus updateImpl(Attributor &A) override {
3570     const DataLayout &DL = A.getDataLayout();
3571 
3572     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3573                             bool Stripped) -> bool {
3574       unsigned IdxWidth =
3575           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3576       APInt Offset(IdxWidth, 0);
3577       const Value *Base =
3578           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3579 
3580       const auto &AA = A.getAAFor<AADereferenceable>(
3581           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3582       int64_t DerefBytes = 0;
3583       if (!Stripped && this == &AA) {
3584         // Use IR information if we did not strip anything.
3585         // TODO: track globally.
3586         bool CanBeNull, CanBeFreed;
3587         DerefBytes =
3588             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3589         T.GlobalState.indicatePessimisticFixpoint();
3590       } else {
3591         const DerefState &DS = AA.getState();
3592         DerefBytes = DS.DerefBytesState.getAssumed();
3593         T.GlobalState &= DS.GlobalState;
3594       }
3595 
3596       // For now we do not try to "increase" dereferenceability due to negative
3597       // indices as we first have to come up with code to deal with loops and
3598       // for overflows of the dereferenceable bytes.
3599       int64_t OffsetSExt = Offset.getSExtValue();
3600       if (OffsetSExt < 0)
3601         OffsetSExt = 0;
3602 
3603       T.takeAssumedDerefBytesMinimum(
3604           std::max(int64_t(0), DerefBytes - OffsetSExt));
3605 
3606       if (this == &AA) {
3607         if (!Stripped) {
3608           // If nothing was stripped IR information is all we got.
3609           T.takeKnownDerefBytesMaximum(
3610               std::max(int64_t(0), DerefBytes - OffsetSExt));
3611           T.indicatePessimisticFixpoint();
3612         } else if (OffsetSExt > 0) {
3613           // If something was stripped but there is circular reasoning we look
3614           // for the offset. If it is positive we basically decrease the
3615           // dereferenceable bytes in a circluar loop now, which will simply
3616           // drive them down to the known value in a very slow way which we
3617           // can accelerate.
3618           T.indicatePessimisticFixpoint();
3619         }
3620       }
3621 
3622       return T.isValidState();
3623     };
3624 
3625     DerefState T;
3626     if (!genericValueTraversal<AADereferenceable, DerefState>(
3627             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3628       return indicatePessimisticFixpoint();
3629 
3630     return clampStateAndIndicateChange(getState(), T);
3631   }
3632 
3633   /// See AbstractAttribute::trackStatistics()
3634   void trackStatistics() const override {
3635     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3636   }
3637 };
3638 
3639 /// Dereferenceable attribute for a return value.
3640 struct AADereferenceableReturned final
3641     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3642   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3643       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3644             IRP, A) {}
3645 
3646   /// See AbstractAttribute::trackStatistics()
3647   void trackStatistics() const override {
3648     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3649   }
3650 };
3651 
3652 /// Dereferenceable attribute for an argument
3653 struct AADereferenceableArgument final
3654     : AAArgumentFromCallSiteArguments<AADereferenceable,
3655                                       AADereferenceableImpl> {
3656   using Base =
3657       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3658   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3659       : Base(IRP, A) {}
3660 
3661   /// See AbstractAttribute::trackStatistics()
3662   void trackStatistics() const override {
3663     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3664   }
3665 };
3666 
3667 /// Dereferenceable attribute for a call site argument.
3668 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3669   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3670       : AADereferenceableFloating(IRP, A) {}
3671 
3672   /// See AbstractAttribute::trackStatistics()
3673   void trackStatistics() const override {
3674     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3675   }
3676 };
3677 
3678 /// Dereferenceable attribute deduction for a call site return value.
3679 struct AADereferenceableCallSiteReturned final
3680     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3681   using Base =
3682       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3683   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3684       : Base(IRP, A) {}
3685 
3686   /// See AbstractAttribute::trackStatistics()
3687   void trackStatistics() const override {
3688     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3689   }
3690 };
3691 
3692 // ------------------------ Align Argument Attribute ------------------------
3693 
3694 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3695                                     Value &AssociatedValue, const Use *U,
3696                                     const Instruction *I, bool &TrackUse) {
3697   // We need to follow common pointer manipulation uses to the accesses they
3698   // feed into.
3699   if (isa<CastInst>(I)) {
3700     // Follow all but ptr2int casts.
3701     TrackUse = !isa<PtrToIntInst>(I);
3702     return 0;
3703   }
3704   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3705     if (GEP->hasAllConstantIndices())
3706       TrackUse = true;
3707     return 0;
3708   }
3709 
3710   MaybeAlign MA;
3711   if (const auto *CB = dyn_cast<CallBase>(I)) {
3712     if (CB->isBundleOperand(U) || CB->isCallee(U))
3713       return 0;
3714 
3715     unsigned ArgNo = CB->getArgOperandNo(U);
3716     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3717     // As long as we only use known information there is no need to track
3718     // dependences here.
3719     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3720     MA = MaybeAlign(AlignAA.getKnownAlign());
3721   }
3722 
3723   const DataLayout &DL = A.getDataLayout();
3724   const Value *UseV = U->get();
3725   if (auto *SI = dyn_cast<StoreInst>(I)) {
3726     if (SI->getPointerOperand() == UseV)
3727       MA = SI->getAlign();
3728   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3729     if (LI->getPointerOperand() == UseV)
3730       MA = LI->getAlign();
3731   }
3732 
3733   if (!MA || *MA <= QueryingAA.getKnownAlign())
3734     return 0;
3735 
3736   unsigned Alignment = MA->value();
3737   int64_t Offset;
3738 
3739   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3740     if (Base == &AssociatedValue) {
3741       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3742       // So we can say that the maximum power of two which is a divisor of
3743       // gcd(Offset, Alignment) is an alignment.
3744 
3745       uint32_t gcd =
3746           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3747       Alignment = llvm::PowerOf2Floor(gcd);
3748     }
3749   }
3750 
3751   return Alignment;
3752 }
3753 
3754 struct AAAlignImpl : AAAlign {
3755   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3756 
3757   /// See AbstractAttribute::initialize(...).
3758   void initialize(Attributor &A) override {
3759     SmallVector<Attribute, 4> Attrs;
3760     getAttrs({Attribute::Alignment}, Attrs);
3761     for (const Attribute &Attr : Attrs)
3762       takeKnownMaximum(Attr.getValueAsInt());
3763 
3764     Value &V = getAssociatedValue();
3765     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3766     //       use of the function pointer. This was caused by D73131. We want to
3767     //       avoid this for function pointers especially because we iterate
3768     //       their uses and int2ptr is not handled. It is not a correctness
3769     //       problem though!
3770     if (!V.getType()->getPointerElementType()->isFunctionTy())
3771       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3772 
3773     if (getIRPosition().isFnInterfaceKind() &&
3774         (!getAnchorScope() ||
3775          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3776       indicatePessimisticFixpoint();
3777       return;
3778     }
3779 
3780     if (Instruction *CtxI = getCtxI())
3781       followUsesInMBEC(*this, A, getState(), *CtxI);
3782   }
3783 
3784   /// See AbstractAttribute::manifest(...).
3785   ChangeStatus manifest(Attributor &A) override {
3786     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3787 
3788     // Check for users that allow alignment annotations.
3789     Value &AssociatedValue = getAssociatedValue();
3790     for (const Use &U : AssociatedValue.uses()) {
3791       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3792         if (SI->getPointerOperand() == &AssociatedValue)
3793           if (SI->getAlignment() < getAssumedAlign()) {
3794             STATS_DECLTRACK(AAAlign, Store,
3795                             "Number of times alignment added to a store");
3796             SI->setAlignment(Align(getAssumedAlign()));
3797             LoadStoreChanged = ChangeStatus::CHANGED;
3798           }
3799       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3800         if (LI->getPointerOperand() == &AssociatedValue)
3801           if (LI->getAlignment() < getAssumedAlign()) {
3802             LI->setAlignment(Align(getAssumedAlign()));
3803             STATS_DECLTRACK(AAAlign, Load,
3804                             "Number of times alignment added to a load");
3805             LoadStoreChanged = ChangeStatus::CHANGED;
3806           }
3807       }
3808     }
3809 
3810     ChangeStatus Changed = AAAlign::manifest(A);
3811 
3812     Align InheritAlign =
3813         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3814     if (InheritAlign >= getAssumedAlign())
3815       return LoadStoreChanged;
3816     return Changed | LoadStoreChanged;
3817   }
3818 
3819   // TODO: Provide a helper to determine the implied ABI alignment and check in
3820   //       the existing manifest method and a new one for AAAlignImpl that value
3821   //       to avoid making the alignment explicit if it did not improve.
3822 
3823   /// See AbstractAttribute::getDeducedAttributes
3824   virtual void
3825   getDeducedAttributes(LLVMContext &Ctx,
3826                        SmallVectorImpl<Attribute> &Attrs) const override {
3827     if (getAssumedAlign() > 1)
3828       Attrs.emplace_back(
3829           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3830   }
3831 
3832   /// See followUsesInMBEC
3833   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3834                        AAAlign::StateType &State) {
3835     bool TrackUse = false;
3836 
3837     unsigned int KnownAlign =
3838         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3839     State.takeKnownMaximum(KnownAlign);
3840 
3841     return TrackUse;
3842   }
3843 
3844   /// See AbstractAttribute::getAsStr().
3845   const std::string getAsStr() const override {
3846     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3847                                 "-" + std::to_string(getAssumedAlign()) + ">")
3848                              : "unknown-align";
3849   }
3850 };
3851 
3852 /// Align attribute for a floating value.
3853 struct AAAlignFloating : AAAlignImpl {
3854   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3855 
3856   /// See AbstractAttribute::updateImpl(...).
3857   ChangeStatus updateImpl(Attributor &A) override {
3858     const DataLayout &DL = A.getDataLayout();
3859 
3860     auto VisitValueCB = [&](Value &V, const Instruction *,
3861                             AAAlign::StateType &T, bool Stripped) -> bool {
3862       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3863                                            DepClassTy::REQUIRED);
3864       if (!Stripped && this == &AA) {
3865         int64_t Offset;
3866         unsigned Alignment = 1;
3867         if (const Value *Base =
3868                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3869           Align PA = Base->getPointerAlignment(DL);
3870           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3871           // So we can say that the maximum power of two which is a divisor of
3872           // gcd(Offset, Alignment) is an alignment.
3873 
3874           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3875                                                uint32_t(PA.value()));
3876           Alignment = llvm::PowerOf2Floor(gcd);
3877         } else {
3878           Alignment = V.getPointerAlignment(DL).value();
3879         }
3880         // Use only IR information if we did not strip anything.
3881         T.takeKnownMaximum(Alignment);
3882         T.indicatePessimisticFixpoint();
3883       } else {
3884         // Use abstract attribute information.
3885         const AAAlign::StateType &DS = AA.getState();
3886         T ^= DS;
3887       }
3888       return T.isValidState();
3889     };
3890 
3891     StateType T;
3892     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3893                                                    VisitValueCB, getCtxI()))
3894       return indicatePessimisticFixpoint();
3895 
3896     // TODO: If we know we visited all incoming values, thus no are assumed
3897     // dead, we can take the known information from the state T.
3898     return clampStateAndIndicateChange(getState(), T);
3899   }
3900 
3901   /// See AbstractAttribute::trackStatistics()
3902   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3903 };
3904 
3905 /// Align attribute for function return value.
3906 struct AAAlignReturned final
3907     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3908   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3909   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3910 
3911   /// See AbstractAttribute::initialize(...).
3912   void initialize(Attributor &A) override {
3913     Base::initialize(A);
3914     Function *F = getAssociatedFunction();
3915     if (!F || F->isDeclaration())
3916       indicatePessimisticFixpoint();
3917   }
3918 
3919   /// See AbstractAttribute::trackStatistics()
3920   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3921 };
3922 
3923 /// Align attribute for function argument.
3924 struct AAAlignArgument final
3925     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3926   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3927   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3928 
3929   /// See AbstractAttribute::manifest(...).
3930   ChangeStatus manifest(Attributor &A) override {
3931     // If the associated argument is involved in a must-tail call we give up
3932     // because we would need to keep the argument alignments of caller and
3933     // callee in-sync. Just does not seem worth the trouble right now.
3934     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3935       return ChangeStatus::UNCHANGED;
3936     return Base::manifest(A);
3937   }
3938 
3939   /// See AbstractAttribute::trackStatistics()
3940   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3941 };
3942 
3943 struct AAAlignCallSiteArgument final : AAAlignFloating {
3944   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3945       : AAAlignFloating(IRP, A) {}
3946 
3947   /// See AbstractAttribute::manifest(...).
3948   ChangeStatus manifest(Attributor &A) override {
3949     // If the associated argument is involved in a must-tail call we give up
3950     // because we would need to keep the argument alignments of caller and
3951     // callee in-sync. Just does not seem worth the trouble right now.
3952     if (Argument *Arg = getAssociatedArgument())
3953       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3954         return ChangeStatus::UNCHANGED;
3955     ChangeStatus Changed = AAAlignImpl::manifest(A);
3956     Align InheritAlign =
3957         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3958     if (InheritAlign >= getAssumedAlign())
3959       Changed = ChangeStatus::UNCHANGED;
3960     return Changed;
3961   }
3962 
3963   /// See AbstractAttribute::updateImpl(Attributor &A).
3964   ChangeStatus updateImpl(Attributor &A) override {
3965     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3966     if (Argument *Arg = getAssociatedArgument()) {
3967       // We only take known information from the argument
3968       // so we do not need to track a dependence.
3969       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3970           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3971       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3972     }
3973     return Changed;
3974   }
3975 
3976   /// See AbstractAttribute::trackStatistics()
3977   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3978 };
3979 
3980 /// Align attribute deduction for a call site return value.
3981 struct AAAlignCallSiteReturned final
3982     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3983   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3984   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3985       : Base(IRP, A) {}
3986 
3987   /// See AbstractAttribute::initialize(...).
3988   void initialize(Attributor &A) override {
3989     Base::initialize(A);
3990     Function *F = getAssociatedFunction();
3991     if (!F || F->isDeclaration())
3992       indicatePessimisticFixpoint();
3993   }
3994 
3995   /// See AbstractAttribute::trackStatistics()
3996   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3997 };
3998 
3999 /// ------------------ Function No-Return Attribute ----------------------------
4000 struct AANoReturnImpl : public AANoReturn {
4001   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4002 
4003   /// See AbstractAttribute::initialize(...).
4004   void initialize(Attributor &A) override {
4005     AANoReturn::initialize(A);
4006     Function *F = getAssociatedFunction();
4007     if (!F || F->isDeclaration())
4008       indicatePessimisticFixpoint();
4009   }
4010 
4011   /// See AbstractAttribute::getAsStr().
4012   const std::string getAsStr() const override {
4013     return getAssumed() ? "noreturn" : "may-return";
4014   }
4015 
4016   /// See AbstractAttribute::updateImpl(Attributor &A).
4017   virtual ChangeStatus updateImpl(Attributor &A) override {
4018     auto CheckForNoReturn = [](Instruction &) { return false; };
4019     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4020                                    {(unsigned)Instruction::Ret}))
4021       return indicatePessimisticFixpoint();
4022     return ChangeStatus::UNCHANGED;
4023   }
4024 };
4025 
4026 struct AANoReturnFunction final : AANoReturnImpl {
4027   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4028       : AANoReturnImpl(IRP, A) {}
4029 
4030   /// See AbstractAttribute::trackStatistics()
4031   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4032 };
4033 
4034 /// NoReturn attribute deduction for a call sites.
4035 struct AANoReturnCallSite final : AANoReturnImpl {
4036   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4037       : AANoReturnImpl(IRP, A) {}
4038 
4039   /// See AbstractAttribute::initialize(...).
4040   void initialize(Attributor &A) override {
4041     AANoReturnImpl::initialize(A);
4042     if (Function *F = getAssociatedFunction()) {
4043       const IRPosition &FnPos = IRPosition::function(*F);
4044       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4045       if (!FnAA.isAssumedNoReturn())
4046         indicatePessimisticFixpoint();
4047     }
4048   }
4049 
4050   /// See AbstractAttribute::updateImpl(...).
4051   ChangeStatus updateImpl(Attributor &A) override {
4052     // TODO: Once we have call site specific value information we can provide
4053     //       call site specific liveness information and then it makes
4054     //       sense to specialize attributes for call sites arguments instead of
4055     //       redirecting requests to the callee argument.
4056     Function *F = getAssociatedFunction();
4057     const IRPosition &FnPos = IRPosition::function(*F);
4058     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4059     return clampStateAndIndicateChange(getState(), FnAA.getState());
4060   }
4061 
4062   /// See AbstractAttribute::trackStatistics()
4063   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4064 };
4065 
4066 /// ----------------------- Variable Capturing ---------------------------------
4067 
4068 /// A class to hold the state of for no-capture attributes.
4069 struct AANoCaptureImpl : public AANoCapture {
4070   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4071 
4072   /// See AbstractAttribute::initialize(...).
4073   void initialize(Attributor &A) override {
4074     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4075       indicateOptimisticFixpoint();
4076       return;
4077     }
4078     Function *AnchorScope = getAnchorScope();
4079     if (isFnInterfaceKind() &&
4080         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4081       indicatePessimisticFixpoint();
4082       return;
4083     }
4084 
4085     // You cannot "capture" null in the default address space.
4086     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4087         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4088       indicateOptimisticFixpoint();
4089       return;
4090     }
4091 
4092     const Function *F =
4093         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4094 
4095     // Check what state the associated function can actually capture.
4096     if (F)
4097       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4098     else
4099       indicatePessimisticFixpoint();
4100   }
4101 
4102   /// See AbstractAttribute::updateImpl(...).
4103   ChangeStatus updateImpl(Attributor &A) override;
4104 
4105   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4106   virtual void
4107   getDeducedAttributes(LLVMContext &Ctx,
4108                        SmallVectorImpl<Attribute> &Attrs) const override {
4109     if (!isAssumedNoCaptureMaybeReturned())
4110       return;
4111 
4112     if (isArgumentPosition()) {
4113       if (isAssumedNoCapture())
4114         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4115       else if (ManifestInternal)
4116         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4117     }
4118   }
4119 
4120   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4121   /// depending on the ability of the function associated with \p IRP to capture
4122   /// state in memory and through "returning/throwing", respectively.
4123   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4124                                                    const Function &F,
4125                                                    BitIntegerState &State) {
4126     // TODO: Once we have memory behavior attributes we should use them here.
4127 
4128     // If we know we cannot communicate or write to memory, we do not care about
4129     // ptr2int anymore.
4130     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4131         F.getReturnType()->isVoidTy()) {
4132       State.addKnownBits(NO_CAPTURE);
4133       return;
4134     }
4135 
4136     // A function cannot capture state in memory if it only reads memory, it can
4137     // however return/throw state and the state might be influenced by the
4138     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4139     if (F.onlyReadsMemory())
4140       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4141 
4142     // A function cannot communicate state back if it does not through
4143     // exceptions and doesn not return values.
4144     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4145       State.addKnownBits(NOT_CAPTURED_IN_RET);
4146 
4147     // Check existing "returned" attributes.
4148     int ArgNo = IRP.getCalleeArgNo();
4149     if (F.doesNotThrow() && ArgNo >= 0) {
4150       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4151         if (F.hasParamAttribute(u, Attribute::Returned)) {
4152           if (u == unsigned(ArgNo))
4153             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4154           else if (F.onlyReadsMemory())
4155             State.addKnownBits(NO_CAPTURE);
4156           else
4157             State.addKnownBits(NOT_CAPTURED_IN_RET);
4158           break;
4159         }
4160     }
4161   }
4162 
4163   /// See AbstractState::getAsStr().
4164   const std::string getAsStr() const override {
4165     if (isKnownNoCapture())
4166       return "known not-captured";
4167     if (isAssumedNoCapture())
4168       return "assumed not-captured";
4169     if (isKnownNoCaptureMaybeReturned())
4170       return "known not-captured-maybe-returned";
4171     if (isAssumedNoCaptureMaybeReturned())
4172       return "assumed not-captured-maybe-returned";
4173     return "assumed-captured";
4174   }
4175 };
4176 
4177 /// Attributor-aware capture tracker.
4178 struct AACaptureUseTracker final : public CaptureTracker {
4179 
4180   /// Create a capture tracker that can lookup in-flight abstract attributes
4181   /// through the Attributor \p A.
4182   ///
4183   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4184   /// search is stopped. If a use leads to a return instruction,
4185   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4186   /// If a use leads to a ptr2int which may capture the value,
4187   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4188   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4189   /// set. All values in \p PotentialCopies are later tracked as well. For every
4190   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4191   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4192   /// conservatively set to true.
4193   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4194                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4195                       SmallVectorImpl<const Value *> &PotentialCopies,
4196                       unsigned &RemainingUsesToExplore)
4197       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4198         PotentialCopies(PotentialCopies),
4199         RemainingUsesToExplore(RemainingUsesToExplore) {}
4200 
4201   /// Determine if \p V maybe captured. *Also updates the state!*
4202   bool valueMayBeCaptured(const Value *V) {
4203     if (V->getType()->isPointerTy()) {
4204       PointerMayBeCaptured(V, this);
4205     } else {
4206       State.indicatePessimisticFixpoint();
4207     }
4208     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4209   }
4210 
4211   /// See CaptureTracker::tooManyUses().
4212   void tooManyUses() override {
4213     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4214   }
4215 
4216   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4217     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4218       return true;
4219     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4220         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4221     return DerefAA.getAssumedDereferenceableBytes();
4222   }
4223 
4224   /// See CaptureTracker::captured(...).
4225   bool captured(const Use *U) override {
4226     Instruction *UInst = cast<Instruction>(U->getUser());
4227     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4228                       << "\n");
4229 
4230     // Because we may reuse the tracker multiple times we keep track of the
4231     // number of explored uses ourselves as well.
4232     if (RemainingUsesToExplore-- == 0) {
4233       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4234       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4235                           /* Return */ true);
4236     }
4237 
4238     // Deal with ptr2int by following uses.
4239     if (isa<PtrToIntInst>(UInst)) {
4240       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4241       return valueMayBeCaptured(UInst);
4242     }
4243 
4244     // Explicitly catch return instructions.
4245     if (isa<ReturnInst>(UInst))
4246       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4247                           /* Return */ true);
4248 
4249     // For now we only use special logic for call sites. However, the tracker
4250     // itself knows about a lot of other non-capturing cases already.
4251     auto *CB = dyn_cast<CallBase>(UInst);
4252     if (!CB || !CB->isArgOperand(U))
4253       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4254                           /* Return */ true);
4255 
4256     unsigned ArgNo = CB->getArgOperandNo(U);
4257     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4258     // If we have a abstract no-capture attribute for the argument we can use
4259     // it to justify a non-capture attribute here. This allows recursion!
4260     auto &ArgNoCaptureAA =
4261         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4262     if (ArgNoCaptureAA.isAssumedNoCapture())
4263       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4264                           /* Return */ false);
4265     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4266       addPotentialCopy(*CB);
4267       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4268                           /* Return */ false);
4269     }
4270 
4271     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4272     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4273                         /* Return */ true);
4274   }
4275 
4276   /// Register \p CS as potential copy of the value we are checking.
4277   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4278 
4279   /// See CaptureTracker::shouldExplore(...).
4280   bool shouldExplore(const Use *U) override {
4281     // Check liveness and ignore droppable users.
4282     return !U->getUser()->isDroppable() &&
4283            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4284   }
4285 
4286   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4287   /// \p CapturedInRet, then return the appropriate value for use in the
4288   /// CaptureTracker::captured() interface.
4289   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4290                     bool CapturedInRet) {
4291     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4292                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4293     if (CapturedInMem)
4294       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4295     if (CapturedInInt)
4296       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4297     if (CapturedInRet)
4298       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4299     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4300   }
4301 
4302 private:
4303   /// The attributor providing in-flight abstract attributes.
4304   Attributor &A;
4305 
4306   /// The abstract attribute currently updated.
4307   AANoCapture &NoCaptureAA;
4308 
4309   /// The abstract liveness state.
4310   const AAIsDead &IsDeadAA;
4311 
4312   /// The state currently updated.
4313   AANoCapture::StateType &State;
4314 
4315   /// Set of potential copies of the tracked value.
4316   SmallVectorImpl<const Value *> &PotentialCopies;
4317 
4318   /// Global counter to limit the number of explored uses.
4319   unsigned &RemainingUsesToExplore;
4320 };
4321 
4322 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4323   const IRPosition &IRP = getIRPosition();
4324   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4325                                         : &IRP.getAssociatedValue();
4326   if (!V)
4327     return indicatePessimisticFixpoint();
4328 
4329   const Function *F =
4330       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4331   assert(F && "Expected a function!");
4332   const IRPosition &FnPos = IRPosition::function(*F);
4333   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4334 
4335   AANoCapture::StateType T;
4336 
4337   // Readonly means we cannot capture through memory.
4338   const auto &FnMemAA =
4339       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4340   if (FnMemAA.isAssumedReadOnly()) {
4341     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4342     if (FnMemAA.isKnownReadOnly())
4343       addKnownBits(NOT_CAPTURED_IN_MEM);
4344     else
4345       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4346   }
4347 
4348   // Make sure all returned values are different than the underlying value.
4349   // TODO: we could do this in a more sophisticated way inside
4350   //       AAReturnedValues, e.g., track all values that escape through returns
4351   //       directly somehow.
4352   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4353     bool SeenConstant = false;
4354     for (auto &It : RVAA.returned_values()) {
4355       if (isa<Constant>(It.first)) {
4356         if (SeenConstant)
4357           return false;
4358         SeenConstant = true;
4359       } else if (!isa<Argument>(It.first) ||
4360                  It.first == getAssociatedArgument())
4361         return false;
4362     }
4363     return true;
4364   };
4365 
4366   const auto &NoUnwindAA =
4367       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4368   if (NoUnwindAA.isAssumedNoUnwind()) {
4369     bool IsVoidTy = F->getReturnType()->isVoidTy();
4370     const AAReturnedValues *RVAA =
4371         IsVoidTy ? nullptr
4372                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4373 
4374                                                  DepClassTy::OPTIONAL);
4375     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4376       T.addKnownBits(NOT_CAPTURED_IN_RET);
4377       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4378         return ChangeStatus::UNCHANGED;
4379       if (NoUnwindAA.isKnownNoUnwind() &&
4380           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4381         addKnownBits(NOT_CAPTURED_IN_RET);
4382         if (isKnown(NOT_CAPTURED_IN_MEM))
4383           return indicateOptimisticFixpoint();
4384       }
4385     }
4386   }
4387 
4388   // Use the CaptureTracker interface and logic with the specialized tracker,
4389   // defined in AACaptureUseTracker, that can look at in-flight abstract
4390   // attributes and directly updates the assumed state.
4391   SmallVector<const Value *, 4> PotentialCopies;
4392   unsigned RemainingUsesToExplore =
4393       getDefaultMaxUsesToExploreForCaptureTracking();
4394   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4395                               RemainingUsesToExplore);
4396 
4397   // Check all potential copies of the associated value until we can assume
4398   // none will be captured or we have to assume at least one might be.
4399   unsigned Idx = 0;
4400   PotentialCopies.push_back(V);
4401   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4402     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4403 
4404   AANoCapture::StateType &S = getState();
4405   auto Assumed = S.getAssumed();
4406   S.intersectAssumedBits(T.getAssumed());
4407   if (!isAssumedNoCaptureMaybeReturned())
4408     return indicatePessimisticFixpoint();
4409   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4410                                    : ChangeStatus::CHANGED;
4411 }
4412 
4413 /// NoCapture attribute for function arguments.
4414 struct AANoCaptureArgument final : AANoCaptureImpl {
4415   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4416       : AANoCaptureImpl(IRP, A) {}
4417 
4418   /// See AbstractAttribute::trackStatistics()
4419   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4420 };
4421 
4422 /// NoCapture attribute for call site arguments.
4423 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4424   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4425       : AANoCaptureImpl(IRP, A) {}
4426 
4427   /// See AbstractAttribute::initialize(...).
4428   void initialize(Attributor &A) override {
4429     if (Argument *Arg = getAssociatedArgument())
4430       if (Arg->hasByValAttr())
4431         indicateOptimisticFixpoint();
4432     AANoCaptureImpl::initialize(A);
4433   }
4434 
4435   /// See AbstractAttribute::updateImpl(...).
4436   ChangeStatus updateImpl(Attributor &A) override {
4437     // TODO: Once we have call site specific value information we can provide
4438     //       call site specific liveness information and then it makes
4439     //       sense to specialize attributes for call sites arguments instead of
4440     //       redirecting requests to the callee argument.
4441     Argument *Arg = getAssociatedArgument();
4442     if (!Arg)
4443       return indicatePessimisticFixpoint();
4444     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4445     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4446     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4447   }
4448 
4449   /// See AbstractAttribute::trackStatistics()
4450   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4451 };
4452 
4453 /// NoCapture attribute for floating values.
4454 struct AANoCaptureFloating final : AANoCaptureImpl {
4455   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4456       : AANoCaptureImpl(IRP, A) {}
4457 
4458   /// See AbstractAttribute::trackStatistics()
4459   void trackStatistics() const override {
4460     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4461   }
4462 };
4463 
4464 /// NoCapture attribute for function return value.
4465 struct AANoCaptureReturned final : AANoCaptureImpl {
4466   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4467       : AANoCaptureImpl(IRP, A) {
4468     llvm_unreachable("NoCapture is not applicable to function returns!");
4469   }
4470 
4471   /// See AbstractAttribute::initialize(...).
4472   void initialize(Attributor &A) override {
4473     llvm_unreachable("NoCapture is not applicable to function returns!");
4474   }
4475 
4476   /// See AbstractAttribute::updateImpl(...).
4477   ChangeStatus updateImpl(Attributor &A) override {
4478     llvm_unreachable("NoCapture is not applicable to function returns!");
4479   }
4480 
4481   /// See AbstractAttribute::trackStatistics()
4482   void trackStatistics() const override {}
4483 };
4484 
4485 /// NoCapture attribute deduction for a call site return value.
4486 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4487   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4488       : AANoCaptureImpl(IRP, A) {}
4489 
4490   /// See AbstractAttribute::initialize(...).
4491   void initialize(Attributor &A) override {
4492     const Function *F = getAnchorScope();
4493     // Check what state the associated function can actually capture.
4494     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4495   }
4496 
4497   /// See AbstractAttribute::trackStatistics()
4498   void trackStatistics() const override {
4499     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4500   }
4501 };
4502 
4503 /// ------------------ Value Simplify Attribute ----------------------------
4504 
4505 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
4506   // FIXME: Add a typecast support.
4507   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
4508       SimplifiedAssociatedValue, Other, Ty);
4509   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
4510     return false;
4511 
4512   LLVM_DEBUG({
4513     if (SimplifiedAssociatedValue.hasValue())
4514       dbgs() << "[ValueSimplify] is assumed to be "
4515              << **SimplifiedAssociatedValue << "\n";
4516     else
4517       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
4518   });
4519   return true;
4520 }
4521 
4522 struct AAValueSimplifyImpl : AAValueSimplify {
4523   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4524       : AAValueSimplify(IRP, A) {}
4525 
4526   /// See AbstractAttribute::initialize(...).
4527   void initialize(Attributor &A) override {
4528     if (getAssociatedValue().getType()->isVoidTy())
4529       indicatePessimisticFixpoint();
4530   }
4531 
4532   /// See AbstractAttribute::getAsStr().
4533   const std::string getAsStr() const override {
4534     LLVM_DEBUG({
4535       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
4536       if (SimplifiedAssociatedValue)
4537         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
4538     });
4539     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
4540                           : "not-simple";
4541   }
4542 
4543   /// See AbstractAttribute::trackStatistics()
4544   void trackStatistics() const override {}
4545 
4546   /// See AAValueSimplify::getAssumedSimplifiedValue()
4547   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4548     if (!isValidState())
4549       return const_cast<Value *>(&getAssociatedValue());
4550     return SimplifiedAssociatedValue;
4551   }
4552 
4553   /// Helper function for querying AAValueSimplify and updating candicate.
4554   /// \param IRP The value position we are trying to unify with SimplifiedValue
4555   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4556                       const IRPosition &IRP) {
4557     bool UsedAssumedInformation = false;
4558     Optional<Value *> QueryingValueSimplified =
4559         A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
4560     return unionAssumed(QueryingValueSimplified);
4561   }
4562 
4563   /// Returns a candidate is found or not
4564   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4565     if (!getAssociatedValue().getType()->isIntegerTy())
4566       return false;
4567 
4568     // This will also pass the call base context.
4569     const auto &AA =
4570         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4571 
4572     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4573 
4574     if (!COpt.hasValue()) {
4575       SimplifiedAssociatedValue = llvm::None;
4576       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4577       return true;
4578     }
4579     if (auto *C = COpt.getValue()) {
4580       SimplifiedAssociatedValue = C;
4581       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4582       return true;
4583     }
4584     return false;
4585   }
4586 
4587   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4588     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4589       return true;
4590     if (askSimplifiedValueFor<AAPotentialValues>(A))
4591       return true;
4592     return false;
4593   }
4594 
4595   /// See AbstractAttribute::manifest(...).
4596   ChangeStatus manifest(Attributor &A) override {
4597     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4598 
4599     if (SimplifiedAssociatedValue.hasValue() &&
4600         !SimplifiedAssociatedValue.getValue())
4601       return Changed;
4602 
4603     Value &V = getAssociatedValue();
4604     auto *C = SimplifiedAssociatedValue.hasValue()
4605                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4606                   : UndefValue::get(V.getType());
4607     if (C && C != &V && !V.user_empty()) {
4608       Value *NewV = AA::getWithType(*C, *V.getType());
4609       // We can replace the AssociatedValue with the constant.
4610       if (NewV && NewV != &V) {
4611         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4612                           << " :: " << *this << "\n");
4613         if (A.changeValueAfterManifest(V, *NewV))
4614           Changed = ChangeStatus::CHANGED;
4615       }
4616     }
4617 
4618     return Changed | AAValueSimplify::manifest(A);
4619   }
4620 
4621   /// See AbstractState::indicatePessimisticFixpoint(...).
4622   ChangeStatus indicatePessimisticFixpoint() override {
4623     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4624     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4625     SimplifiedAssociatedValue = &getAssociatedValue();
4626     indicateOptimisticFixpoint();
4627     return ChangeStatus::CHANGED;
4628   }
4629 };
4630 
4631 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4632   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4633       : AAValueSimplifyImpl(IRP, A) {}
4634 
4635   void initialize(Attributor &A) override {
4636     AAValueSimplifyImpl::initialize(A);
4637     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4638       indicatePessimisticFixpoint();
4639     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4640                  Attribute::StructRet, Attribute::Nest},
4641                 /* IgnoreSubsumingPositions */ true))
4642       indicatePessimisticFixpoint();
4643 
4644     // FIXME: This is a hack to prevent us from propagating function poiner in
4645     // the new pass manager CGSCC pass as it creates call edges the
4646     // CallGraphUpdater cannot handle yet.
4647     Value &V = getAssociatedValue();
4648     if (V.getType()->isPointerTy() &&
4649         V.getType()->getPointerElementType()->isFunctionTy() &&
4650         !A.isModulePass())
4651       indicatePessimisticFixpoint();
4652   }
4653 
4654   /// See AbstractAttribute::updateImpl(...).
4655   ChangeStatus updateImpl(Attributor &A) override {
4656     // Byval is only replacable if it is readonly otherwise we would write into
4657     // the replaced value and not the copy that byval creates implicitly.
4658     Argument *Arg = getAssociatedArgument();
4659     if (Arg->hasByValAttr()) {
4660       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4661       //       there is no race by not copying a constant byval.
4662       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4663                                                        DepClassTy::REQUIRED);
4664       if (!MemAA.isAssumedReadOnly())
4665         return indicatePessimisticFixpoint();
4666     }
4667 
4668     auto Before = SimplifiedAssociatedValue;
4669 
4670     auto PredForCallSite = [&](AbstractCallSite ACS) {
4671       const IRPosition &ACSArgPos =
4672           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4673       // Check if a coresponding argument was found or if it is on not
4674       // associated (which can happen for callback calls).
4675       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4676         return false;
4677 
4678       // We can only propagate thread independent values through callbacks.
4679       // This is different to direct/indirect call sites because for them we
4680       // know the thread executing the caller and callee is the same. For
4681       // callbacks this is not guaranteed, thus a thread dependent value could
4682       // be different for the caller and callee, making it invalid to propagate.
4683       Value &ArgOp = ACSArgPos.getAssociatedValue();
4684       if (ACS.isCallbackCall())
4685         if (auto *C = dyn_cast<Constant>(&ArgOp))
4686           if (C->isThreadDependent())
4687             return false;
4688       return checkAndUpdate(A, *this, ACSArgPos);
4689     };
4690 
4691     // Generate a answer specific to a call site context.
4692     bool Success;
4693     bool AllCallSitesKnown;
4694     if (hasCallBaseContext())
4695       Success = PredForCallSite(
4696           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
4697     else
4698       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
4699                                        AllCallSitesKnown);
4700 
4701     if (!Success)
4702       if (!askSimplifiedValueForOtherAAs(A))
4703         return indicatePessimisticFixpoint();
4704 
4705     // If a candicate was found in this update, return CHANGED.
4706     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4707                                                : ChangeStatus ::CHANGED;
4708   }
4709 
4710   /// See AbstractAttribute::trackStatistics()
4711   void trackStatistics() const override {
4712     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4713   }
4714 };
4715 
4716 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4717   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4718       : AAValueSimplifyImpl(IRP, A) {}
4719 
4720   /// See AbstractAttribute::updateImpl(...).
4721   ChangeStatus updateImpl(Attributor &A) override {
4722     auto Before = SimplifiedAssociatedValue;
4723 
4724     auto PredForReturned = [&](Value &V) {
4725       return checkAndUpdate(A, *this,
4726                             IRPosition::value(V, getCallBaseContext()));
4727     };
4728 
4729     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4730       if (!askSimplifiedValueForOtherAAs(A))
4731         return indicatePessimisticFixpoint();
4732 
4733     // If a candicate was found in this update, return CHANGED.
4734     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4735                                                : ChangeStatus ::CHANGED;
4736   }
4737 
4738   ChangeStatus manifest(Attributor &A) override {
4739     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4740 
4741     if (SimplifiedAssociatedValue.hasValue() &&
4742         !SimplifiedAssociatedValue.getValue())
4743       return Changed | AAValueSimplify::manifest(A);
4744 
4745     auto *C = SimplifiedAssociatedValue.hasValue()
4746                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4747                   : UndefValue::get(getAssociatedType());
4748     if (!C || C == &getAssociatedValue())
4749       return Changed | AAValueSimplify::manifest(A);
4750 
4751     auto PredForReturned =
4752         [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4753           // We can replace the AssociatedValue with the constant.
4754           if (&V == C || isa<UndefValue>(V))
4755             return true;
4756 
4757           for (ReturnInst *RI : RetInsts) {
4758             if (RI->getFunction() != getAnchorScope())
4759               continue;
4760             Value *NewV = AA::getWithType(*C, *RI->getReturnValue()->getType());
4761             if (!NewV)
4762               continue;
4763             LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4764                               << " in " << *RI << " :: " << *this << "\n");
4765             if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
4766               Changed = ChangeStatus::CHANGED;
4767           }
4768           return true;
4769         };
4770     A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4771 
4772     return Changed | AAValueSimplify::manifest(A);
4773   }
4774 
4775   /// See AbstractAttribute::trackStatistics()
4776   void trackStatistics() const override {
4777     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4778   }
4779 };
4780 
4781 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4782   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4783       : AAValueSimplifyImpl(IRP, A) {}
4784 
4785   /// See AbstractAttribute::initialize(...).
4786   void initialize(Attributor &A) override {
4787     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4788     //        Needs investigation.
4789     // AAValueSimplifyImpl::initialize(A);
4790     Value &V = getAnchorValue();
4791 
4792     // TODO: add other stuffs
4793     if (isa<Constant>(V))
4794       indicatePessimisticFixpoint();
4795   }
4796 
4797   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4798   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4799   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4800   /// updated and \p Changed is set appropriately.
4801   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4802                               ChangeStatus &Changed) {
4803     if (!ICmp)
4804       return false;
4805     if (!ICmp->isEquality())
4806       return false;
4807 
4808     // This is a comparison with == or !-. We check for nullptr now.
4809     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4810     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4811     if (!Op0IsNull && !Op1IsNull)
4812       return false;
4813 
4814     LLVMContext &Ctx = ICmp->getContext();
4815     // Check for `nullptr ==/!= nullptr` first:
4816     if (Op0IsNull && Op1IsNull) {
4817       Value *NewVal = ConstantInt::get(
4818           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4819       assert(!SimplifiedAssociatedValue.hasValue() &&
4820              "Did not expect non-fixed value for constant comparison");
4821       SimplifiedAssociatedValue = NewVal;
4822       indicateOptimisticFixpoint();
4823       Changed = ChangeStatus::CHANGED;
4824       return true;
4825     }
4826 
4827     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4828     // non-nullptr operand and if we assume it's non-null we can conclude the
4829     // result of the comparison.
4830     assert((Op0IsNull || Op1IsNull) &&
4831            "Expected nullptr versus non-nullptr comparison at this point");
4832 
4833     // The index is the operand that we assume is not null.
4834     unsigned PtrIdx = Op0IsNull;
4835     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4836         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4837         DepClassTy::REQUIRED);
4838     if (!PtrNonNullAA.isAssumedNonNull())
4839       return false;
4840 
4841     // The new value depends on the predicate, true for != and false for ==.
4842     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4843                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4844 
4845     assert((!SimplifiedAssociatedValue.hasValue() ||
4846             SimplifiedAssociatedValue == NewVal) &&
4847            "Did not expect to change value for zero-comparison");
4848 
4849     auto Before = SimplifiedAssociatedValue;
4850     SimplifiedAssociatedValue = NewVal;
4851 
4852     if (PtrNonNullAA.isKnownNonNull())
4853       indicateOptimisticFixpoint();
4854 
4855     Changed = Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4856                                                   : ChangeStatus ::CHANGED;
4857     return true;
4858   }
4859 
4860   /// See AbstractAttribute::updateImpl(...).
4861   ChangeStatus updateImpl(Attributor &A) override {
4862     auto Before = SimplifiedAssociatedValue;
4863 
4864     ChangeStatus Changed;
4865     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4866                                Changed))
4867       return Changed;
4868 
4869     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4870                             bool Stripped) -> bool {
4871       auto &AA = A.getAAFor<AAValueSimplify>(
4872           *this, IRPosition::value(V, getCallBaseContext()),
4873           DepClassTy::REQUIRED);
4874       if (!Stripped && this == &AA) {
4875         // TODO: Look the instruction and check recursively.
4876 
4877         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4878                           << "\n");
4879         return false;
4880       }
4881       return checkAndUpdate(A, *this,
4882                             IRPosition::value(V, getCallBaseContext()));
4883     };
4884 
4885     bool Dummy = false;
4886     if (!genericValueTraversal<AAValueSimplify, bool>(
4887             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4888             /* UseValueSimplify */ false))
4889       if (!askSimplifiedValueForOtherAAs(A))
4890         return indicatePessimisticFixpoint();
4891 
4892     // If a candicate was found in this update, return CHANGED.
4893     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4894                                                : ChangeStatus ::CHANGED;
4895   }
4896 
4897   /// See AbstractAttribute::trackStatistics()
4898   void trackStatistics() const override {
4899     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4900   }
4901 };
4902 
4903 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4904   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4905       : AAValueSimplifyImpl(IRP, A) {}
4906 
4907   /// See AbstractAttribute::initialize(...).
4908   void initialize(Attributor &A) override {
4909     SimplifiedAssociatedValue = &getAnchorValue();
4910     indicateOptimisticFixpoint();
4911   }
4912   /// See AbstractAttribute::initialize(...).
4913   ChangeStatus updateImpl(Attributor &A) override {
4914     llvm_unreachable(
4915         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4916   }
4917   /// See AbstractAttribute::trackStatistics()
4918   void trackStatistics() const override {
4919     STATS_DECLTRACK_FN_ATTR(value_simplify)
4920   }
4921 };
4922 
4923 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4924   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4925       : AAValueSimplifyFunction(IRP, A) {}
4926   /// See AbstractAttribute::trackStatistics()
4927   void trackStatistics() const override {
4928     STATS_DECLTRACK_CS_ATTR(value_simplify)
4929   }
4930 };
4931 
4932 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4933   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4934       : AAValueSimplifyReturned(IRP, A) {}
4935 
4936   /// See AbstractAttribute::manifest(...).
4937   ChangeStatus manifest(Attributor &A) override {
4938     return AAValueSimplifyImpl::manifest(A);
4939   }
4940 
4941   void trackStatistics() const override {
4942     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4943   }
4944 };
4945 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4946   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4947       : AAValueSimplifyFloating(IRP, A) {}
4948 
4949   /// See AbstractAttribute::manifest(...).
4950   ChangeStatus manifest(Attributor &A) override {
4951     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4952 
4953     if (SimplifiedAssociatedValue.hasValue() &&
4954         !SimplifiedAssociatedValue.getValue())
4955       return Changed;
4956 
4957     Value &V = getAssociatedValue();
4958     auto *C = SimplifiedAssociatedValue.hasValue()
4959                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4960                   : UndefValue::get(V.getType());
4961     if (C) {
4962       Use &U = cast<CallBase>(&getAnchorValue())
4963                    ->getArgOperandUse(getCallSiteArgNo());
4964       // We can replace the AssociatedValue with the constant.
4965       if (&V != C) {
4966         if (Value *NewV = AA::getWithType(*C, *V.getType()))
4967           if (A.changeUseAfterManifest(U, *NewV))
4968             Changed = ChangeStatus::CHANGED;
4969       }
4970     }
4971 
4972     return Changed | AAValueSimplify::manifest(A);
4973   }
4974 
4975   void trackStatistics() const override {
4976     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4977   }
4978 };
4979 
4980 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4981 struct AAHeapToStackImpl : public AAHeapToStack {
4982   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4983       : AAHeapToStack(IRP, A) {}
4984 
4985   const std::string getAsStr() const override {
4986     return "[H2S] Mallocs Good/Bad: " + std::to_string(MallocCalls.size()) +
4987            "/" + std::to_string(BadMallocCalls.size());
4988   }
4989 
4990   bool isAssumedHeapToStack(CallBase &CB) const override {
4991     return isValidState() && MallocCalls.contains(&CB) &&
4992            !BadMallocCalls.count(&CB);
4993   }
4994 
4995   bool isKnownHeapToStack(CallBase &CB) const override {
4996     return isValidState() && MallocCalls.contains(&CB) &&
4997            !BadMallocCalls.count(&CB);
4998   }
4999 
5000   ChangeStatus manifest(Attributor &A) override {
5001     assert(getState().isValidState() &&
5002            "Attempted to manifest an invalid state!");
5003 
5004     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5005     Function *F = getAnchorScope();
5006     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5007 
5008     for (Instruction *MallocCall : MallocCalls) {
5009       // This malloc cannot be replaced.
5010       if (BadMallocCalls.count(MallocCall))
5011         continue;
5012 
5013       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5014         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5015         A.deleteAfterManifest(*FreeCall);
5016         HasChanged = ChangeStatus::CHANGED;
5017       }
5018 
5019       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5020                         << "\n");
5021 
5022       auto Remark = [&](OptimizationRemark OR) {
5023         LibFunc IsAllocShared;
5024         if (auto *CB = dyn_cast<CallBase>(MallocCall)) {
5025           TLI->getLibFunc(*CB, IsAllocShared);
5026           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5027             return OR << "Moving globalized variable to the stack.";
5028         }
5029         return OR << "Moving memory allocation from the heap to the stack.";
5030       };
5031       A.emitRemark<OptimizationRemark>(MallocCall, "HeapToStack", Remark);
5032 
5033       Align Alignment;
5034       Value *Size;
5035       if (isCallocLikeFn(MallocCall, TLI)) {
5036         auto *Num = MallocCall->getOperand(0);
5037         auto *SizeT = MallocCall->getOperand(1);
5038         IRBuilder<> B(MallocCall);
5039         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5040       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5041         Size = MallocCall->getOperand(1);
5042         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5043                                    ->getValue()
5044                                    .getZExtValue())
5045                         .valueOrOne();
5046       } else {
5047         Size = MallocCall->getOperand(0);
5048       }
5049 
5050       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5051       Instruction *AI =
5052           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5053                          "", MallocCall->getNextNode());
5054 
5055       if (AI->getType() != MallocCall->getType())
5056         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5057                              AI->getNextNode());
5058 
5059       A.changeValueAfterManifest(*MallocCall, *AI);
5060 
5061       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5062         auto *NBB = II->getNormalDest();
5063         BranchInst::Create(NBB, MallocCall->getParent());
5064         A.deleteAfterManifest(*MallocCall);
5065       } else {
5066         A.deleteAfterManifest(*MallocCall);
5067       }
5068 
5069       // Zero out the allocated memory if it was a calloc.
5070       if (isCallocLikeFn(MallocCall, TLI)) {
5071         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5072                                    AI->getNextNode());
5073         Value *Ops[] = {
5074             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5075             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5076 
5077         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5078         Module *M = F->getParent();
5079         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5080         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5081       }
5082       HasChanged = ChangeStatus::CHANGED;
5083     }
5084 
5085     return HasChanged;
5086   }
5087 
5088   /// Collection of all malloc calls in a function.
5089   SmallSetVector<Instruction *, 4> MallocCalls;
5090 
5091   /// Collection of malloc calls that cannot be converted.
5092   DenseSet<const Instruction *> BadMallocCalls;
5093 
5094   /// A map for each malloc call to the set of associated free calls.
5095   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5096 
5097   ChangeStatus updateImpl(Attributor &A) override;
5098 };
5099 
5100 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5101   const Function *F = getAnchorScope();
5102   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5103 
5104   MustBeExecutedContextExplorer &Explorer =
5105       A.getInfoCache().getMustBeExecutedContextExplorer();
5106 
5107   bool StackIsAccessibleByOtherThreads =
5108       A.getInfoCache().stackIsAccessibleByOtherThreads();
5109 
5110   auto FreeCheck = [&](Instruction &I) {
5111     // If the stack is not accessible by other threads, the "must-free" logic
5112     // doesn't apply as the pointer could be shared and needs to be places in
5113     // "shareable" memory.
5114     if (!StackIsAccessibleByOtherThreads) {
5115       auto &NoSyncAA =
5116           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
5117       if (!NoSyncAA.isAssumedNoSync()) {
5118         LLVM_DEBUG(
5119             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
5120                       "other threads and function is not nosync:\n");
5121         return false;
5122       }
5123     }
5124     const auto &Frees = FreesForMalloc.lookup(&I);
5125     if (Frees.size() != 1) {
5126       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
5127                         << Frees.size() << "\n");
5128       return false;
5129     }
5130     Instruction *UniqueFree = *Frees.begin();
5131     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5132   };
5133 
5134   auto UsesCheck = [&](Instruction &I) {
5135     bool ValidUsesOnly = true;
5136     bool MustUse = true;
5137     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5138       Instruction *UserI = cast<Instruction>(U.getUser());
5139       if (isa<LoadInst>(UserI))
5140         return true;
5141       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5142         if (SI->getValueOperand() == U.get()) {
5143           LLVM_DEBUG(dbgs()
5144                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5145           ValidUsesOnly = false;
5146         } else {
5147           // A store into the malloc'ed memory is fine.
5148         }
5149         return true;
5150       }
5151       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5152         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5153           return true;
5154         // Record malloc.
5155         if (isFreeCall(UserI, TLI)) {
5156           if (MustUse) {
5157             FreesForMalloc[&I].insert(UserI);
5158           } else {
5159             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5160                               << *UserI << "\n");
5161             ValidUsesOnly = false;
5162           }
5163           return true;
5164         }
5165 
5166         unsigned ArgNo = CB->getArgOperandNo(&U);
5167 
5168         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5169             *this, IRPosition::callsite_argument(*CB, ArgNo),
5170             DepClassTy::OPTIONAL);
5171 
5172         // If a callsite argument use is nofree, we are fine.
5173         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5174             *this, IRPosition::callsite_argument(*CB, ArgNo),
5175             DepClassTy::OPTIONAL);
5176 
5177         if (!NoCaptureAA.isAssumedNoCapture() ||
5178             !ArgNoFreeAA.isAssumedNoFree()) {
5179 
5180           // Emit a missed remark if this is missed OpenMP globalization.
5181           auto Remark = [&](OptimizationRemarkMissed ORM) {
5182             return ORM << "Could not move globalized variable to the stack. "
5183                        << "Variable is potentially "
5184                        << (!NoCaptureAA.isAssumedNoCapture() ? "captured. "
5185                                                              : "freed. ")
5186                        << "Mark as noescape to override.";
5187           };
5188 
5189           LibFunc IsAllocShared;
5190           if (auto *AllocShared = dyn_cast<CallBase>(&I)) {
5191             TLI->getLibFunc(*AllocShared, IsAllocShared);
5192             if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5193               A.emitRemark<OptimizationRemarkMissed>(
5194                   AllocShared, "HeapToStackFailed", Remark);
5195           }
5196 
5197           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5198           ValidUsesOnly = false;
5199         }
5200         return true;
5201       }
5202 
5203       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5204           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5205         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5206         Follow = true;
5207         return true;
5208       }
5209       // Unknown user for which we can not track uses further (in a way that
5210       // makes sense).
5211       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5212       ValidUsesOnly = false;
5213       return true;
5214     };
5215     A.checkForAllUses(Pred, *this, I);
5216     return ValidUsesOnly;
5217   };
5218 
5219   auto MallocCallocCheck = [&](Instruction &I) {
5220     if (BadMallocCalls.count(&I))
5221       return true;
5222 
5223     bool IsMalloc = isMallocLikeFn(&I, TLI);
5224     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5225     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5226     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5227       BadMallocCalls.insert(&I);
5228       return true;
5229     }
5230 
5231     if (IsMalloc) {
5232       if (MaxHeapToStackSize == -1) {
5233         if (UsesCheck(I) || FreeCheck(I)) {
5234           MallocCalls.insert(&I);
5235           return true;
5236         }
5237       }
5238       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5239         if (Size->getValue().ule(MaxHeapToStackSize))
5240           if (UsesCheck(I) || FreeCheck(I)) {
5241             MallocCalls.insert(&I);
5242             return true;
5243           }
5244     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5245       if (MaxHeapToStackSize == -1) {
5246         if (UsesCheck(I) || FreeCheck(I)) {
5247           MallocCalls.insert(&I);
5248           return true;
5249         }
5250       }
5251       // Only if the alignment and sizes are constant.
5252       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5253         if (Size->getValue().ule(MaxHeapToStackSize))
5254           if (UsesCheck(I) || FreeCheck(I)) {
5255             MallocCalls.insert(&I);
5256             return true;
5257           }
5258     } else if (IsCalloc) {
5259       if (MaxHeapToStackSize == -1) {
5260         if (UsesCheck(I) || FreeCheck(I)) {
5261           MallocCalls.insert(&I);
5262           return true;
5263         }
5264       }
5265       bool Overflow = false;
5266       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5267         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5268           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5269                   .ule(MaxHeapToStackSize))
5270             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5271               MallocCalls.insert(&I);
5272               return true;
5273             }
5274     }
5275 
5276     BadMallocCalls.insert(&I);
5277     return true;
5278   };
5279 
5280   size_t NumBadMallocs = BadMallocCalls.size();
5281 
5282   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5283 
5284   if (NumBadMallocs != BadMallocCalls.size())
5285     return ChangeStatus::CHANGED;
5286 
5287   return ChangeStatus::UNCHANGED;
5288 }
5289 
5290 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5291   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5292       : AAHeapToStackImpl(IRP, A) {}
5293 
5294   /// See AbstractAttribute::trackStatistics().
5295   void trackStatistics() const override {
5296     STATS_DECL(
5297         MallocCalls, Function,
5298         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5299     for (auto *C : MallocCalls)
5300       if (!BadMallocCalls.count(C))
5301         ++BUILD_STAT_NAME(MallocCalls, Function);
5302   }
5303 };
5304 
5305 /// ----------------------- Privatizable Pointers ------------------------------
5306 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5307   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5308       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5309 
5310   ChangeStatus indicatePessimisticFixpoint() override {
5311     AAPrivatizablePtr::indicatePessimisticFixpoint();
5312     PrivatizableType = nullptr;
5313     return ChangeStatus::CHANGED;
5314   }
5315 
5316   /// Identify the type we can chose for a private copy of the underlying
5317   /// argument. None means it is not clear yet, nullptr means there is none.
5318   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5319 
5320   /// Return a privatizable type that encloses both T0 and T1.
5321   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5322   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5323     if (!T0.hasValue())
5324       return T1;
5325     if (!T1.hasValue())
5326       return T0;
5327     if (T0 == T1)
5328       return T0;
5329     return nullptr;
5330   }
5331 
5332   Optional<Type *> getPrivatizableType() const override {
5333     return PrivatizableType;
5334   }
5335 
5336   const std::string getAsStr() const override {
5337     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5338   }
5339 
5340 protected:
5341   Optional<Type *> PrivatizableType;
5342 };
5343 
5344 // TODO: Do this for call site arguments (probably also other values) as well.
5345 
5346 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5347   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5348       : AAPrivatizablePtrImpl(IRP, A) {}
5349 
5350   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5351   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5352     // If this is a byval argument and we know all the call sites (so we can
5353     // rewrite them), there is no need to check them explicitly.
5354     bool AllCallSitesKnown;
5355     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5356         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5357                                true, AllCallSitesKnown))
5358       return getAssociatedValue().getType()->getPointerElementType();
5359 
5360     Optional<Type *> Ty;
5361     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5362 
5363     // Make sure the associated call site argument has the same type at all call
5364     // sites and it is an allocation we know is safe to privatize, for now that
5365     // means we only allow alloca instructions.
5366     // TODO: We can additionally analyze the accesses in the callee to  create
5367     //       the type from that information instead. That is a little more
5368     //       involved and will be done in a follow up patch.
5369     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5370       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5371       // Check if a coresponding argument was found or if it is one not
5372       // associated (which can happen for callback calls).
5373       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5374         return false;
5375 
5376       // Check that all call sites agree on a type.
5377       auto &PrivCSArgAA =
5378           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5379       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5380 
5381       LLVM_DEBUG({
5382         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5383         if (CSTy.hasValue() && CSTy.getValue())
5384           CSTy.getValue()->print(dbgs());
5385         else if (CSTy.hasValue())
5386           dbgs() << "<nullptr>";
5387         else
5388           dbgs() << "<none>";
5389       });
5390 
5391       Ty = combineTypes(Ty, CSTy);
5392 
5393       LLVM_DEBUG({
5394         dbgs() << " : New Type: ";
5395         if (Ty.hasValue() && Ty.getValue())
5396           Ty.getValue()->print(dbgs());
5397         else if (Ty.hasValue())
5398           dbgs() << "<nullptr>";
5399         else
5400           dbgs() << "<none>";
5401         dbgs() << "\n";
5402       });
5403 
5404       return !Ty.hasValue() || Ty.getValue();
5405     };
5406 
5407     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5408       return nullptr;
5409     return Ty;
5410   }
5411 
5412   /// See AbstractAttribute::updateImpl(...).
5413   ChangeStatus updateImpl(Attributor &A) override {
5414     PrivatizableType = identifyPrivatizableType(A);
5415     if (!PrivatizableType.hasValue())
5416       return ChangeStatus::UNCHANGED;
5417     if (!PrivatizableType.getValue())
5418       return indicatePessimisticFixpoint();
5419 
5420     // The dependence is optional so we don't give up once we give up on the
5421     // alignment.
5422     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5423                         DepClassTy::OPTIONAL);
5424 
5425     // Avoid arguments with padding for now.
5426     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5427         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5428                                                 A.getInfoCache().getDL())) {
5429       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5430       return indicatePessimisticFixpoint();
5431     }
5432 
5433     // Verify callee and caller agree on how the promoted argument would be
5434     // passed.
5435     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5436     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5437     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5438     Function &Fn = *getIRPosition().getAnchorScope();
5439     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5440     ArgsToPromote.insert(getAssociatedArgument());
5441     const auto *TTI =
5442         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5443     if (!TTI ||
5444         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5445             Fn, *TTI, ArgsToPromote, Dummy) ||
5446         ArgsToPromote.empty()) {
5447       LLVM_DEBUG(
5448           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5449                  << Fn.getName() << "\n");
5450       return indicatePessimisticFixpoint();
5451     }
5452 
5453     // Collect the types that will replace the privatizable type in the function
5454     // signature.
5455     SmallVector<Type *, 16> ReplacementTypes;
5456     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5457 
5458     // Register a rewrite of the argument.
5459     Argument *Arg = getAssociatedArgument();
5460     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5461       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5462       return indicatePessimisticFixpoint();
5463     }
5464 
5465     unsigned ArgNo = Arg->getArgNo();
5466 
5467     // Helper to check if for the given call site the associated argument is
5468     // passed to a callback where the privatization would be different.
5469     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5470       SmallVector<const Use *, 4> CallbackUses;
5471       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5472       for (const Use *U : CallbackUses) {
5473         AbstractCallSite CBACS(U);
5474         assert(CBACS && CBACS.isCallbackCall());
5475         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5476           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5477 
5478           LLVM_DEBUG({
5479             dbgs()
5480                 << "[AAPrivatizablePtr] Argument " << *Arg
5481                 << "check if can be privatized in the context of its parent ("
5482                 << Arg->getParent()->getName()
5483                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5484                    "callback ("
5485                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5486                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5487                 << CBACS.getCallArgOperand(CBArg) << " vs "
5488                 << CB.getArgOperand(ArgNo) << "\n"
5489                 << "[AAPrivatizablePtr] " << CBArg << " : "
5490                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5491           });
5492 
5493           if (CBArgNo != int(ArgNo))
5494             continue;
5495           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5496               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5497           if (CBArgPrivAA.isValidState()) {
5498             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5499             if (!CBArgPrivTy.hasValue())
5500               continue;
5501             if (CBArgPrivTy.getValue() == PrivatizableType)
5502               continue;
5503           }
5504 
5505           LLVM_DEBUG({
5506             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5507                    << " cannot be privatized in the context of its parent ("
5508                    << Arg->getParent()->getName()
5509                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5510                       "callback ("
5511                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5512                    << ").\n[AAPrivatizablePtr] for which the argument "
5513                       "privatization is not compatible.\n";
5514           });
5515           return false;
5516         }
5517       }
5518       return true;
5519     };
5520 
5521     // Helper to check if for the given call site the associated argument is
5522     // passed to a direct call where the privatization would be different.
5523     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5524       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5525       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5526       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5527              "Expected a direct call operand for callback call operand");
5528 
5529       LLVM_DEBUG({
5530         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5531                << " check if be privatized in the context of its parent ("
5532                << Arg->getParent()->getName()
5533                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5534                   "direct call of ("
5535                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5536                << ").\n";
5537       });
5538 
5539       Function *DCCallee = DC->getCalledFunction();
5540       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5541         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5542             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5543             DepClassTy::REQUIRED);
5544         if (DCArgPrivAA.isValidState()) {
5545           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5546           if (!DCArgPrivTy.hasValue())
5547             return true;
5548           if (DCArgPrivTy.getValue() == PrivatizableType)
5549             return true;
5550         }
5551       }
5552 
5553       LLVM_DEBUG({
5554         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5555                << " cannot be privatized in the context of its parent ("
5556                << Arg->getParent()->getName()
5557                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5558                   "direct call of ("
5559                << ACS.getInstruction()->getCalledFunction()->getName()
5560                << ").\n[AAPrivatizablePtr] for which the argument "
5561                   "privatization is not compatible.\n";
5562       });
5563       return false;
5564     };
5565 
5566     // Helper to check if the associated argument is used at the given abstract
5567     // call site in a way that is incompatible with the privatization assumed
5568     // here.
5569     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5570       if (ACS.isDirectCall())
5571         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5572       if (ACS.isCallbackCall())
5573         return IsCompatiblePrivArgOfDirectCS(ACS);
5574       return false;
5575     };
5576 
5577     bool AllCallSitesKnown;
5578     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5579                                 AllCallSitesKnown))
5580       return indicatePessimisticFixpoint();
5581 
5582     return ChangeStatus::UNCHANGED;
5583   }
5584 
5585   /// Given a type to private \p PrivType, collect the constituates (which are
5586   /// used) in \p ReplacementTypes.
5587   static void
5588   identifyReplacementTypes(Type *PrivType,
5589                            SmallVectorImpl<Type *> &ReplacementTypes) {
5590     // TODO: For now we expand the privatization type to the fullest which can
5591     //       lead to dead arguments that need to be removed later.
5592     assert(PrivType && "Expected privatizable type!");
5593 
5594     // Traverse the type, extract constituate types on the outermost level.
5595     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5596       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5597         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5598     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5599       ReplacementTypes.append(PrivArrayType->getNumElements(),
5600                               PrivArrayType->getElementType());
5601     } else {
5602       ReplacementTypes.push_back(PrivType);
5603     }
5604   }
5605 
5606   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5607   /// The values needed are taken from the arguments of \p F starting at
5608   /// position \p ArgNo.
5609   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5610                                    unsigned ArgNo, Instruction &IP) {
5611     assert(PrivType && "Expected privatizable type!");
5612 
5613     IRBuilder<NoFolder> IRB(&IP);
5614     const DataLayout &DL = F.getParent()->getDataLayout();
5615 
5616     // Traverse the type, build GEPs and stores.
5617     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5618       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5619       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5620         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5621         Value *Ptr =
5622             constructPointer(PointeeTy, PrivType, &Base,
5623                              PrivStructLayout->getElementOffset(u), IRB, DL);
5624         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5625       }
5626     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5627       Type *PointeeTy = PrivArrayType->getElementType();
5628       Type *PointeePtrTy = PointeeTy->getPointerTo();
5629       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5630       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5631         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5632                                       u * PointeeTySize, IRB, DL);
5633         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5634       }
5635     } else {
5636       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5637     }
5638   }
5639 
5640   /// Extract values from \p Base according to the type \p PrivType at the
5641   /// call position \p ACS. The values are appended to \p ReplacementValues.
5642   void createReplacementValues(Align Alignment, Type *PrivType,
5643                                AbstractCallSite ACS, Value *Base,
5644                                SmallVectorImpl<Value *> &ReplacementValues) {
5645     assert(Base && "Expected base value!");
5646     assert(PrivType && "Expected privatizable type!");
5647     Instruction *IP = ACS.getInstruction();
5648 
5649     IRBuilder<NoFolder> IRB(IP);
5650     const DataLayout &DL = IP->getModule()->getDataLayout();
5651 
5652     if (Base->getType()->getPointerElementType() != PrivType)
5653       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5654                                                  "", ACS.getInstruction());
5655 
5656     // Traverse the type, build GEPs and loads.
5657     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5658       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5659       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5660         Type *PointeeTy = PrivStructType->getElementType(u);
5661         Value *Ptr =
5662             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5663                              PrivStructLayout->getElementOffset(u), IRB, DL);
5664         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5665         L->setAlignment(Alignment);
5666         ReplacementValues.push_back(L);
5667       }
5668     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5669       Type *PointeeTy = PrivArrayType->getElementType();
5670       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5671       Type *PointeePtrTy = PointeeTy->getPointerTo();
5672       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5673         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5674                                       u * PointeeTySize, IRB, DL);
5675         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5676         L->setAlignment(Alignment);
5677         ReplacementValues.push_back(L);
5678       }
5679     } else {
5680       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5681       L->setAlignment(Alignment);
5682       ReplacementValues.push_back(L);
5683     }
5684   }
5685 
5686   /// See AbstractAttribute::manifest(...)
5687   ChangeStatus manifest(Attributor &A) override {
5688     if (!PrivatizableType.hasValue())
5689       return ChangeStatus::UNCHANGED;
5690     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5691 
5692     // Collect all tail calls in the function as we cannot allow new allocas to
5693     // escape into tail recursion.
5694     // TODO: Be smarter about new allocas escaping into tail calls.
5695     SmallVector<CallInst *, 16> TailCalls;
5696     if (!A.checkForAllInstructions(
5697             [&](Instruction &I) {
5698               CallInst &CI = cast<CallInst>(I);
5699               if (CI.isTailCall())
5700                 TailCalls.push_back(&CI);
5701               return true;
5702             },
5703             *this, {Instruction::Call}))
5704       return ChangeStatus::UNCHANGED;
5705 
5706     Argument *Arg = getAssociatedArgument();
5707     // Query AAAlign attribute for alignment of associated argument to
5708     // determine the best alignment of loads.
5709     const auto &AlignAA =
5710         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5711 
5712     // Callback to repair the associated function. A new alloca is placed at the
5713     // beginning and initialized with the values passed through arguments. The
5714     // new alloca replaces the use of the old pointer argument.
5715     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5716         [=](const Attributor::ArgumentReplacementInfo &ARI,
5717             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5718           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5719           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5720           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5721                                            Arg->getName() + ".priv", IP);
5722           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5723                                ArgIt->getArgNo(), *IP);
5724 
5725           if (AI->getType() != Arg->getType())
5726             AI =
5727                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5728           Arg->replaceAllUsesWith(AI);
5729 
5730           for (CallInst *CI : TailCalls)
5731             CI->setTailCall(false);
5732         };
5733 
5734     // Callback to repair a call site of the associated function. The elements
5735     // of the privatizable type are loaded prior to the call and passed to the
5736     // new function version.
5737     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5738         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5739                       AbstractCallSite ACS,
5740                       SmallVectorImpl<Value *> &NewArgOperands) {
5741           // When no alignment is specified for the load instruction,
5742           // natural alignment is assumed.
5743           createReplacementValues(
5744               assumeAligned(AlignAA.getAssumedAlign()),
5745               PrivatizableType.getValue(), ACS,
5746               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5747               NewArgOperands);
5748         };
5749 
5750     // Collect the types that will replace the privatizable type in the function
5751     // signature.
5752     SmallVector<Type *, 16> ReplacementTypes;
5753     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5754 
5755     // Register a rewrite of the argument.
5756     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5757                                            std::move(FnRepairCB),
5758                                            std::move(ACSRepairCB)))
5759       return ChangeStatus::CHANGED;
5760     return ChangeStatus::UNCHANGED;
5761   }
5762 
5763   /// See AbstractAttribute::trackStatistics()
5764   void trackStatistics() const override {
5765     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5766   }
5767 };
5768 
5769 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5770   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5771       : AAPrivatizablePtrImpl(IRP, A) {}
5772 
5773   /// See AbstractAttribute::initialize(...).
5774   virtual void initialize(Attributor &A) override {
5775     // TODO: We can privatize more than arguments.
5776     indicatePessimisticFixpoint();
5777   }
5778 
5779   ChangeStatus updateImpl(Attributor &A) override {
5780     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5781                      "updateImpl will not be called");
5782   }
5783 
5784   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5785   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5786     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5787     if (!Obj) {
5788       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5789       return nullptr;
5790     }
5791 
5792     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5793       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5794         if (CI->isOne())
5795           return Obj->getType()->getPointerElementType();
5796     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5797       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5798           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5799       if (PrivArgAA.isAssumedPrivatizablePtr())
5800         return Obj->getType()->getPointerElementType();
5801     }
5802 
5803     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5804                          "alloca nor privatizable argument: "
5805                       << *Obj << "!\n");
5806     return nullptr;
5807   }
5808 
5809   /// See AbstractAttribute::trackStatistics()
5810   void trackStatistics() const override {
5811     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5812   }
5813 };
5814 
5815 struct AAPrivatizablePtrCallSiteArgument final
5816     : public AAPrivatizablePtrFloating {
5817   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5818       : AAPrivatizablePtrFloating(IRP, A) {}
5819 
5820   /// See AbstractAttribute::initialize(...).
5821   void initialize(Attributor &A) override {
5822     if (getIRPosition().hasAttr(Attribute::ByVal))
5823       indicateOptimisticFixpoint();
5824   }
5825 
5826   /// See AbstractAttribute::updateImpl(...).
5827   ChangeStatus updateImpl(Attributor &A) override {
5828     PrivatizableType = identifyPrivatizableType(A);
5829     if (!PrivatizableType.hasValue())
5830       return ChangeStatus::UNCHANGED;
5831     if (!PrivatizableType.getValue())
5832       return indicatePessimisticFixpoint();
5833 
5834     const IRPosition &IRP = getIRPosition();
5835     auto &NoCaptureAA =
5836         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5837     if (!NoCaptureAA.isAssumedNoCapture()) {
5838       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5839       return indicatePessimisticFixpoint();
5840     }
5841 
5842     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5843     if (!NoAliasAA.isAssumedNoAlias()) {
5844       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5845       return indicatePessimisticFixpoint();
5846     }
5847 
5848     const auto &MemBehaviorAA =
5849         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5850     if (!MemBehaviorAA.isAssumedReadOnly()) {
5851       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5852       return indicatePessimisticFixpoint();
5853     }
5854 
5855     return ChangeStatus::UNCHANGED;
5856   }
5857 
5858   /// See AbstractAttribute::trackStatistics()
5859   void trackStatistics() const override {
5860     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5861   }
5862 };
5863 
5864 struct AAPrivatizablePtrCallSiteReturned final
5865     : public AAPrivatizablePtrFloating {
5866   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5867       : AAPrivatizablePtrFloating(IRP, A) {}
5868 
5869   /// See AbstractAttribute::initialize(...).
5870   void initialize(Attributor &A) override {
5871     // TODO: We can privatize more than arguments.
5872     indicatePessimisticFixpoint();
5873   }
5874 
5875   /// See AbstractAttribute::trackStatistics()
5876   void trackStatistics() const override {
5877     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5878   }
5879 };
5880 
5881 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5882   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5883       : AAPrivatizablePtrFloating(IRP, A) {}
5884 
5885   /// See AbstractAttribute::initialize(...).
5886   void initialize(Attributor &A) override {
5887     // TODO: We can privatize more than arguments.
5888     indicatePessimisticFixpoint();
5889   }
5890 
5891   /// See AbstractAttribute::trackStatistics()
5892   void trackStatistics() const override {
5893     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5894   }
5895 };
5896 
5897 /// -------------------- Memory Behavior Attributes ----------------------------
5898 /// Includes read-none, read-only, and write-only.
5899 /// ----------------------------------------------------------------------------
5900 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5901   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5902       : AAMemoryBehavior(IRP, A) {}
5903 
5904   /// See AbstractAttribute::initialize(...).
5905   void initialize(Attributor &A) override {
5906     intersectAssumedBits(BEST_STATE);
5907     getKnownStateFromValue(getIRPosition(), getState());
5908     AAMemoryBehavior::initialize(A);
5909   }
5910 
5911   /// Return the memory behavior information encoded in the IR for \p IRP.
5912   static void getKnownStateFromValue(const IRPosition &IRP,
5913                                      BitIntegerState &State,
5914                                      bool IgnoreSubsumingPositions = false) {
5915     SmallVector<Attribute, 2> Attrs;
5916     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5917     for (const Attribute &Attr : Attrs) {
5918       switch (Attr.getKindAsEnum()) {
5919       case Attribute::ReadNone:
5920         State.addKnownBits(NO_ACCESSES);
5921         break;
5922       case Attribute::ReadOnly:
5923         State.addKnownBits(NO_WRITES);
5924         break;
5925       case Attribute::WriteOnly:
5926         State.addKnownBits(NO_READS);
5927         break;
5928       default:
5929         llvm_unreachable("Unexpected attribute!");
5930       }
5931     }
5932 
5933     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5934       if (!I->mayReadFromMemory())
5935         State.addKnownBits(NO_READS);
5936       if (!I->mayWriteToMemory())
5937         State.addKnownBits(NO_WRITES);
5938     }
5939   }
5940 
5941   /// See AbstractAttribute::getDeducedAttributes(...).
5942   void getDeducedAttributes(LLVMContext &Ctx,
5943                             SmallVectorImpl<Attribute> &Attrs) const override {
5944     assert(Attrs.size() == 0);
5945     if (isAssumedReadNone())
5946       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5947     else if (isAssumedReadOnly())
5948       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5949     else if (isAssumedWriteOnly())
5950       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5951     assert(Attrs.size() <= 1);
5952   }
5953 
5954   /// See AbstractAttribute::manifest(...).
5955   ChangeStatus manifest(Attributor &A) override {
5956     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5957       return ChangeStatus::UNCHANGED;
5958 
5959     const IRPosition &IRP = getIRPosition();
5960 
5961     // Check if we would improve the existing attributes first.
5962     SmallVector<Attribute, 4> DeducedAttrs;
5963     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5964     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5965           return IRP.hasAttr(Attr.getKindAsEnum(),
5966                              /* IgnoreSubsumingPositions */ true);
5967         }))
5968       return ChangeStatus::UNCHANGED;
5969 
5970     // Clear existing attributes.
5971     IRP.removeAttrs(AttrKinds);
5972 
5973     // Use the generic manifest method.
5974     return IRAttribute::manifest(A);
5975   }
5976 
5977   /// See AbstractState::getAsStr().
5978   const std::string getAsStr() const override {
5979     if (isAssumedReadNone())
5980       return "readnone";
5981     if (isAssumedReadOnly())
5982       return "readonly";
5983     if (isAssumedWriteOnly())
5984       return "writeonly";
5985     return "may-read/write";
5986   }
5987 
5988   /// The set of IR attributes AAMemoryBehavior deals with.
5989   static const Attribute::AttrKind AttrKinds[3];
5990 };
5991 
5992 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5993     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5994 
5995 /// Memory behavior attribute for a floating value.
5996 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5997   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5998       : AAMemoryBehaviorImpl(IRP, A) {}
5999 
6000   /// See AbstractAttribute::initialize(...).
6001   void initialize(Attributor &A) override {
6002     AAMemoryBehaviorImpl::initialize(A);
6003     addUsesOf(A, getAssociatedValue());
6004   }
6005 
6006   /// See AbstractAttribute::updateImpl(...).
6007   ChangeStatus updateImpl(Attributor &A) override;
6008 
6009   /// See AbstractAttribute::trackStatistics()
6010   void trackStatistics() const override {
6011     if (isAssumedReadNone())
6012       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6013     else if (isAssumedReadOnly())
6014       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6015     else if (isAssumedWriteOnly())
6016       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6017   }
6018 
6019 private:
6020   /// Return true if users of \p UserI might access the underlying
6021   /// variable/location described by \p U and should therefore be analyzed.
6022   bool followUsersOfUseIn(Attributor &A, const Use *U,
6023                           const Instruction *UserI);
6024 
6025   /// Update the state according to the effect of use \p U in \p UserI.
6026   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6027 
6028 protected:
6029   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6030   void addUsesOf(Attributor &A, const Value &V);
6031 
6032   /// Container for (transitive) uses of the associated argument.
6033   SmallVector<const Use *, 8> Uses;
6034 
6035   /// Set to remember the uses we already traversed.
6036   SmallPtrSet<const Use *, 8> Visited;
6037 };
6038 
6039 /// Memory behavior attribute for function argument.
6040 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6041   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6042       : AAMemoryBehaviorFloating(IRP, A) {}
6043 
6044   /// See AbstractAttribute::initialize(...).
6045   void initialize(Attributor &A) override {
6046     intersectAssumedBits(BEST_STATE);
6047     const IRPosition &IRP = getIRPosition();
6048     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6049     // can query it when we use has/getAttr. That would allow us to reuse the
6050     // initialize of the base class here.
6051     bool HasByVal =
6052         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6053     getKnownStateFromValue(IRP, getState(),
6054                            /* IgnoreSubsumingPositions */ HasByVal);
6055 
6056     // Initialize the use vector with all direct uses of the associated value.
6057     Argument *Arg = getAssociatedArgument();
6058     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6059       indicatePessimisticFixpoint();
6060     } else {
6061       addUsesOf(A, *Arg);
6062     }
6063   }
6064 
6065   ChangeStatus manifest(Attributor &A) override {
6066     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6067     if (!getAssociatedValue().getType()->isPointerTy())
6068       return ChangeStatus::UNCHANGED;
6069 
6070     // TODO: From readattrs.ll: "inalloca parameters are always
6071     //                           considered written"
6072     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6073       removeKnownBits(NO_WRITES);
6074       removeAssumedBits(NO_WRITES);
6075     }
6076     return AAMemoryBehaviorFloating::manifest(A);
6077   }
6078 
6079   /// See AbstractAttribute::trackStatistics()
6080   void trackStatistics() const override {
6081     if (isAssumedReadNone())
6082       STATS_DECLTRACK_ARG_ATTR(readnone)
6083     else if (isAssumedReadOnly())
6084       STATS_DECLTRACK_ARG_ATTR(readonly)
6085     else if (isAssumedWriteOnly())
6086       STATS_DECLTRACK_ARG_ATTR(writeonly)
6087   }
6088 };
6089 
6090 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6091   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6092       : AAMemoryBehaviorArgument(IRP, A) {}
6093 
6094   /// See AbstractAttribute::initialize(...).
6095   void initialize(Attributor &A) override {
6096     // If we don't have an associated attribute this is either a variadic call
6097     // or an indirect call, either way, nothing to do here.
6098     Argument *Arg = getAssociatedArgument();
6099     if (!Arg) {
6100       indicatePessimisticFixpoint();
6101       return;
6102     }
6103     if (Arg->hasByValAttr()) {
6104       addKnownBits(NO_WRITES);
6105       removeKnownBits(NO_READS);
6106       removeAssumedBits(NO_READS);
6107     }
6108     AAMemoryBehaviorArgument::initialize(A);
6109     if (getAssociatedFunction()->isDeclaration())
6110       indicatePessimisticFixpoint();
6111   }
6112 
6113   /// See AbstractAttribute::updateImpl(...).
6114   ChangeStatus updateImpl(Attributor &A) override {
6115     // TODO: Once we have call site specific value information we can provide
6116     //       call site specific liveness liveness information and then it makes
6117     //       sense to specialize attributes for call sites arguments instead of
6118     //       redirecting requests to the callee argument.
6119     Argument *Arg = getAssociatedArgument();
6120     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6121     auto &ArgAA =
6122         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6123     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6124   }
6125 
6126   /// See AbstractAttribute::trackStatistics()
6127   void trackStatistics() const override {
6128     if (isAssumedReadNone())
6129       STATS_DECLTRACK_CSARG_ATTR(readnone)
6130     else if (isAssumedReadOnly())
6131       STATS_DECLTRACK_CSARG_ATTR(readonly)
6132     else if (isAssumedWriteOnly())
6133       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6134   }
6135 };
6136 
6137 /// Memory behavior attribute for a call site return position.
6138 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6139   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6140       : AAMemoryBehaviorFloating(IRP, A) {}
6141 
6142   /// See AbstractAttribute::initialize(...).
6143   void initialize(Attributor &A) override {
6144     AAMemoryBehaviorImpl::initialize(A);
6145     Function *F = getAssociatedFunction();
6146     if (!F || F->isDeclaration())
6147       indicatePessimisticFixpoint();
6148   }
6149 
6150   /// See AbstractAttribute::manifest(...).
6151   ChangeStatus manifest(Attributor &A) override {
6152     // We do not annotate returned values.
6153     return ChangeStatus::UNCHANGED;
6154   }
6155 
6156   /// See AbstractAttribute::trackStatistics()
6157   void trackStatistics() const override {}
6158 };
6159 
6160 /// An AA to represent the memory behavior function attributes.
6161 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6162   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6163       : AAMemoryBehaviorImpl(IRP, A) {}
6164 
6165   /// See AbstractAttribute::updateImpl(Attributor &A).
6166   virtual ChangeStatus updateImpl(Attributor &A) override;
6167 
6168   /// See AbstractAttribute::manifest(...).
6169   ChangeStatus manifest(Attributor &A) override {
6170     Function &F = cast<Function>(getAnchorValue());
6171     if (isAssumedReadNone()) {
6172       F.removeFnAttr(Attribute::ArgMemOnly);
6173       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6174       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6175     }
6176     return AAMemoryBehaviorImpl::manifest(A);
6177   }
6178 
6179   /// See AbstractAttribute::trackStatistics()
6180   void trackStatistics() const override {
6181     if (isAssumedReadNone())
6182       STATS_DECLTRACK_FN_ATTR(readnone)
6183     else if (isAssumedReadOnly())
6184       STATS_DECLTRACK_FN_ATTR(readonly)
6185     else if (isAssumedWriteOnly())
6186       STATS_DECLTRACK_FN_ATTR(writeonly)
6187   }
6188 };
6189 
6190 /// AAMemoryBehavior attribute for call sites.
6191 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6192   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6193       : AAMemoryBehaviorImpl(IRP, A) {}
6194 
6195   /// See AbstractAttribute::initialize(...).
6196   void initialize(Attributor &A) override {
6197     AAMemoryBehaviorImpl::initialize(A);
6198     Function *F = getAssociatedFunction();
6199     if (!F || F->isDeclaration())
6200       indicatePessimisticFixpoint();
6201   }
6202 
6203   /// See AbstractAttribute::updateImpl(...).
6204   ChangeStatus updateImpl(Attributor &A) override {
6205     // TODO: Once we have call site specific value information we can provide
6206     //       call site specific liveness liveness information and then it makes
6207     //       sense to specialize attributes for call sites arguments instead of
6208     //       redirecting requests to the callee argument.
6209     Function *F = getAssociatedFunction();
6210     const IRPosition &FnPos = IRPosition::function(*F);
6211     auto &FnAA =
6212         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6213     return clampStateAndIndicateChange(getState(), FnAA.getState());
6214   }
6215 
6216   /// See AbstractAttribute::trackStatistics()
6217   void trackStatistics() const override {
6218     if (isAssumedReadNone())
6219       STATS_DECLTRACK_CS_ATTR(readnone)
6220     else if (isAssumedReadOnly())
6221       STATS_DECLTRACK_CS_ATTR(readonly)
6222     else if (isAssumedWriteOnly())
6223       STATS_DECLTRACK_CS_ATTR(writeonly)
6224   }
6225 };
6226 
6227 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6228 
6229   // The current assumed state used to determine a change.
6230   auto AssumedState = getAssumed();
6231 
6232   auto CheckRWInst = [&](Instruction &I) {
6233     // If the instruction has an own memory behavior state, use it to restrict
6234     // the local state. No further analysis is required as the other memory
6235     // state is as optimistic as it gets.
6236     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6237       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6238           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6239       intersectAssumedBits(MemBehaviorAA.getAssumed());
6240       return !isAtFixpoint();
6241     }
6242 
6243     // Remove access kind modifiers if necessary.
6244     if (I.mayReadFromMemory())
6245       removeAssumedBits(NO_READS);
6246     if (I.mayWriteToMemory())
6247       removeAssumedBits(NO_WRITES);
6248     return !isAtFixpoint();
6249   };
6250 
6251   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6252     return indicatePessimisticFixpoint();
6253 
6254   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6255                                         : ChangeStatus::UNCHANGED;
6256 }
6257 
6258 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6259 
6260   const IRPosition &IRP = getIRPosition();
6261   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6262   AAMemoryBehavior::StateType &S = getState();
6263 
6264   // First, check the function scope. We take the known information and we avoid
6265   // work if the assumed information implies the current assumed information for
6266   // this attribute. This is a valid for all but byval arguments.
6267   Argument *Arg = IRP.getAssociatedArgument();
6268   AAMemoryBehavior::base_t FnMemAssumedState =
6269       AAMemoryBehavior::StateType::getWorstState();
6270   if (!Arg || !Arg->hasByValAttr()) {
6271     const auto &FnMemAA =
6272         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6273     FnMemAssumedState = FnMemAA.getAssumed();
6274     S.addKnownBits(FnMemAA.getKnown());
6275     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6276       return ChangeStatus::UNCHANGED;
6277   }
6278 
6279   // Make sure the value is not captured (except through "return"), if
6280   // it is, any information derived would be irrelevant anyway as we cannot
6281   // check the potential aliases introduced by the capture. However, no need
6282   // to fall back to anythign less optimistic than the function state.
6283   const auto &ArgNoCaptureAA =
6284       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6285   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6286     S.intersectAssumedBits(FnMemAssumedState);
6287     return ChangeStatus::CHANGED;
6288   }
6289 
6290   // The current assumed state used to determine a change.
6291   auto AssumedState = S.getAssumed();
6292 
6293   // Liveness information to exclude dead users.
6294   // TODO: Take the FnPos once we have call site specific liveness information.
6295   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6296       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6297       DepClassTy::NONE);
6298 
6299   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6300   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6301     const Use *U = Uses[i];
6302     Instruction *UserI = cast<Instruction>(U->getUser());
6303     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6304                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6305                       << "]\n");
6306     if (A.isAssumedDead(*U, this, &LivenessAA))
6307       continue;
6308 
6309     // Droppable users, e.g., llvm::assume does not actually perform any action.
6310     if (UserI->isDroppable())
6311       continue;
6312 
6313     // Check if the users of UserI should also be visited.
6314     if (followUsersOfUseIn(A, U, UserI))
6315       addUsesOf(A, *UserI);
6316 
6317     // If UserI might touch memory we analyze the use in detail.
6318     if (UserI->mayReadOrWriteMemory())
6319       analyzeUseIn(A, U, UserI);
6320   }
6321 
6322   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6323                                         : ChangeStatus::UNCHANGED;
6324 }
6325 
6326 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6327   SmallVector<const Use *, 8> WL;
6328   for (const Use &U : V.uses())
6329     WL.push_back(&U);
6330 
6331   while (!WL.empty()) {
6332     const Use *U = WL.pop_back_val();
6333     if (!Visited.insert(U).second)
6334       continue;
6335 
6336     const Instruction *UserI = cast<Instruction>(U->getUser());
6337     if (UserI->mayReadOrWriteMemory()) {
6338       Uses.push_back(U);
6339       continue;
6340     }
6341     if (!followUsersOfUseIn(A, U, UserI))
6342       continue;
6343     for (const Use &UU : UserI->uses())
6344       WL.push_back(&UU);
6345   }
6346 }
6347 
6348 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6349                                                   const Instruction *UserI) {
6350   // The loaded value is unrelated to the pointer argument, no need to
6351   // follow the users of the load.
6352   if (isa<LoadInst>(UserI))
6353     return false;
6354 
6355   // By default we follow all uses assuming UserI might leak information on U,
6356   // we have special handling for call sites operands though.
6357   const auto *CB = dyn_cast<CallBase>(UserI);
6358   if (!CB || !CB->isArgOperand(U))
6359     return true;
6360 
6361   // If the use is a call argument known not to be captured, the users of
6362   // the call do not need to be visited because they have to be unrelated to
6363   // the input. Note that this check is not trivial even though we disallow
6364   // general capturing of the underlying argument. The reason is that the
6365   // call might the argument "through return", which we allow and for which we
6366   // need to check call users.
6367   if (U->get()->getType()->isPointerTy()) {
6368     unsigned ArgNo = CB->getArgOperandNo(U);
6369     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6370         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6371     return !ArgNoCaptureAA.isAssumedNoCapture();
6372   }
6373 
6374   return true;
6375 }
6376 
6377 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6378                                             const Instruction *UserI) {
6379   assert(UserI->mayReadOrWriteMemory());
6380 
6381   switch (UserI->getOpcode()) {
6382   default:
6383     // TODO: Handle all atomics and other side-effect operations we know of.
6384     break;
6385   case Instruction::Load:
6386     // Loads cause the NO_READS property to disappear.
6387     removeAssumedBits(NO_READS);
6388     return;
6389 
6390   case Instruction::Store:
6391     // Stores cause the NO_WRITES property to disappear if the use is the
6392     // pointer operand. Note that we do assume that capturing was taken care of
6393     // somewhere else.
6394     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6395       removeAssumedBits(NO_WRITES);
6396     return;
6397 
6398   case Instruction::Call:
6399   case Instruction::CallBr:
6400   case Instruction::Invoke: {
6401     // For call sites we look at the argument memory behavior attribute (this
6402     // could be recursive!) in order to restrict our own state.
6403     const auto *CB = cast<CallBase>(UserI);
6404 
6405     // Give up on operand bundles.
6406     if (CB->isBundleOperand(U)) {
6407       indicatePessimisticFixpoint();
6408       return;
6409     }
6410 
6411     // Calling a function does read the function pointer, maybe write it if the
6412     // function is self-modifying.
6413     if (CB->isCallee(U)) {
6414       removeAssumedBits(NO_READS);
6415       break;
6416     }
6417 
6418     // Adjust the possible access behavior based on the information on the
6419     // argument.
6420     IRPosition Pos;
6421     if (U->get()->getType()->isPointerTy())
6422       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6423     else
6424       Pos = IRPosition::callsite_function(*CB);
6425     const auto &MemBehaviorAA =
6426         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6427     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6428     // and at least "known".
6429     intersectAssumedBits(MemBehaviorAA.getAssumed());
6430     return;
6431   }
6432   };
6433 
6434   // Generally, look at the "may-properties" and adjust the assumed state if we
6435   // did not trigger special handling before.
6436   if (UserI->mayReadFromMemory())
6437     removeAssumedBits(NO_READS);
6438   if (UserI->mayWriteToMemory())
6439     removeAssumedBits(NO_WRITES);
6440 }
6441 
6442 /// -------------------- Memory Locations Attributes ---------------------------
6443 /// Includes read-none, argmemonly, inaccessiblememonly,
6444 /// inaccessiblememorargmemonly
6445 /// ----------------------------------------------------------------------------
6446 
6447 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6448     AAMemoryLocation::MemoryLocationsKind MLK) {
6449   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6450     return "all memory";
6451   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6452     return "no memory";
6453   std::string S = "memory:";
6454   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6455     S += "stack,";
6456   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6457     S += "constant,";
6458   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6459     S += "internal global,";
6460   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6461     S += "external global,";
6462   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6463     S += "argument,";
6464   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6465     S += "inaccessible,";
6466   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6467     S += "malloced,";
6468   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6469     S += "unknown,";
6470   S.pop_back();
6471   return S;
6472 }
6473 
6474 namespace {
6475 struct AAMemoryLocationImpl : public AAMemoryLocation {
6476 
6477   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6478       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6479     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6480       AccessKind2Accesses[u] = nullptr;
6481   }
6482 
6483   ~AAMemoryLocationImpl() {
6484     // The AccessSets are allocated via a BumpPtrAllocator, we call
6485     // the destructor manually.
6486     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6487       if (AccessKind2Accesses[u])
6488         AccessKind2Accesses[u]->~AccessSet();
6489   }
6490 
6491   /// See AbstractAttribute::initialize(...).
6492   void initialize(Attributor &A) override {
6493     intersectAssumedBits(BEST_STATE);
6494     getKnownStateFromValue(A, getIRPosition(), getState());
6495     AAMemoryLocation::initialize(A);
6496   }
6497 
6498   /// Return the memory behavior information encoded in the IR for \p IRP.
6499   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6500                                      BitIntegerState &State,
6501                                      bool IgnoreSubsumingPositions = false) {
6502     // For internal functions we ignore `argmemonly` and
6503     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6504     // constant propagation. It is unclear if this is the best way but it is
6505     // unlikely this will cause real performance problems. If we are deriving
6506     // attributes for the anchor function we even remove the attribute in
6507     // addition to ignoring it.
6508     bool UseArgMemOnly = true;
6509     Function *AnchorFn = IRP.getAnchorScope();
6510     if (AnchorFn && A.isRunOn(*AnchorFn))
6511       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6512 
6513     SmallVector<Attribute, 2> Attrs;
6514     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6515     for (const Attribute &Attr : Attrs) {
6516       switch (Attr.getKindAsEnum()) {
6517       case Attribute::ReadNone:
6518         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6519         break;
6520       case Attribute::InaccessibleMemOnly:
6521         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6522         break;
6523       case Attribute::ArgMemOnly:
6524         if (UseArgMemOnly)
6525           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6526         else
6527           IRP.removeAttrs({Attribute::ArgMemOnly});
6528         break;
6529       case Attribute::InaccessibleMemOrArgMemOnly:
6530         if (UseArgMemOnly)
6531           State.addKnownBits(inverseLocation(
6532               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6533         else
6534           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6535         break;
6536       default:
6537         llvm_unreachable("Unexpected attribute!");
6538       }
6539     }
6540   }
6541 
6542   /// See AbstractAttribute::getDeducedAttributes(...).
6543   void getDeducedAttributes(LLVMContext &Ctx,
6544                             SmallVectorImpl<Attribute> &Attrs) const override {
6545     assert(Attrs.size() == 0);
6546     if (isAssumedReadNone()) {
6547       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6548     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6549       if (isAssumedInaccessibleMemOnly())
6550         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6551       else if (isAssumedArgMemOnly())
6552         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6553       else if (isAssumedInaccessibleOrArgMemOnly())
6554         Attrs.push_back(
6555             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6556     }
6557     assert(Attrs.size() <= 1);
6558   }
6559 
6560   /// See AbstractAttribute::manifest(...).
6561   ChangeStatus manifest(Attributor &A) override {
6562     const IRPosition &IRP = getIRPosition();
6563 
6564     // Check if we would improve the existing attributes first.
6565     SmallVector<Attribute, 4> DeducedAttrs;
6566     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6567     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6568           return IRP.hasAttr(Attr.getKindAsEnum(),
6569                              /* IgnoreSubsumingPositions */ true);
6570         }))
6571       return ChangeStatus::UNCHANGED;
6572 
6573     // Clear existing attributes.
6574     IRP.removeAttrs(AttrKinds);
6575     if (isAssumedReadNone())
6576       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6577 
6578     // Use the generic manifest method.
6579     return IRAttribute::manifest(A);
6580   }
6581 
6582   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6583   bool checkForAllAccessesToMemoryKind(
6584       function_ref<bool(const Instruction *, const Value *, AccessKind,
6585                         MemoryLocationsKind)>
6586           Pred,
6587       MemoryLocationsKind RequestedMLK) const override {
6588     if (!isValidState())
6589       return false;
6590 
6591     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6592     if (AssumedMLK == NO_LOCATIONS)
6593       return true;
6594 
6595     unsigned Idx = 0;
6596     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6597          CurMLK *= 2, ++Idx) {
6598       if (CurMLK & RequestedMLK)
6599         continue;
6600 
6601       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6602         for (const AccessInfo &AI : *Accesses)
6603           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6604             return false;
6605     }
6606 
6607     return true;
6608   }
6609 
6610   ChangeStatus indicatePessimisticFixpoint() override {
6611     // If we give up and indicate a pessimistic fixpoint this instruction will
6612     // become an access for all potential access kinds:
6613     // TODO: Add pointers for argmemonly and globals to improve the results of
6614     //       checkForAllAccessesToMemoryKind.
6615     bool Changed = false;
6616     MemoryLocationsKind KnownMLK = getKnown();
6617     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6618     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6619       if (!(CurMLK & KnownMLK))
6620         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6621                                   getAccessKindFromInst(I));
6622     return AAMemoryLocation::indicatePessimisticFixpoint();
6623   }
6624 
6625 protected:
6626   /// Helper struct to tie together an instruction that has a read or write
6627   /// effect with the pointer it accesses (if any).
6628   struct AccessInfo {
6629 
6630     /// The instruction that caused the access.
6631     const Instruction *I;
6632 
6633     /// The base pointer that is accessed, or null if unknown.
6634     const Value *Ptr;
6635 
6636     /// The kind of access (read/write/read+write).
6637     AccessKind Kind;
6638 
6639     bool operator==(const AccessInfo &RHS) const {
6640       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6641     }
6642     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6643       if (LHS.I != RHS.I)
6644         return LHS.I < RHS.I;
6645       if (LHS.Ptr != RHS.Ptr)
6646         return LHS.Ptr < RHS.Ptr;
6647       if (LHS.Kind != RHS.Kind)
6648         return LHS.Kind < RHS.Kind;
6649       return false;
6650     }
6651   };
6652 
6653   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6654   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6655   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6656   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6657 
6658   /// Categorize the pointer arguments of CB that might access memory in
6659   /// AccessedLoc and update the state and access map accordingly.
6660   void
6661   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6662                                      AAMemoryLocation::StateType &AccessedLocs,
6663                                      bool &Changed);
6664 
6665   /// Return the kind(s) of location that may be accessed by \p V.
6666   AAMemoryLocation::MemoryLocationsKind
6667   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6668 
6669   /// Return the access kind as determined by \p I.
6670   AccessKind getAccessKindFromInst(const Instruction *I) {
6671     AccessKind AK = READ_WRITE;
6672     if (I) {
6673       AK = I->mayReadFromMemory() ? READ : NONE;
6674       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6675     }
6676     return AK;
6677   }
6678 
6679   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6680   /// an access of kind \p AK to a \p MLK memory location with the access
6681   /// pointer \p Ptr.
6682   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6683                                  MemoryLocationsKind MLK, const Instruction *I,
6684                                  const Value *Ptr, bool &Changed,
6685                                  AccessKind AK = READ_WRITE) {
6686 
6687     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6688     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6689     if (!Accesses)
6690       Accesses = new (Allocator) AccessSet();
6691     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6692     State.removeAssumedBits(MLK);
6693   }
6694 
6695   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6696   /// arguments, and update the state and access map accordingly.
6697   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6698                           AAMemoryLocation::StateType &State, bool &Changed);
6699 
6700   /// Used to allocate access sets.
6701   BumpPtrAllocator &Allocator;
6702 
6703   /// The set of IR attributes AAMemoryLocation deals with.
6704   static const Attribute::AttrKind AttrKinds[4];
6705 };
6706 
6707 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6708     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6709     Attribute::InaccessibleMemOrArgMemOnly};
6710 
6711 void AAMemoryLocationImpl::categorizePtrValue(
6712     Attributor &A, const Instruction &I, const Value &Ptr,
6713     AAMemoryLocation::StateType &State, bool &Changed) {
6714   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6715                     << Ptr << " ["
6716                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6717 
6718   auto StripGEPCB = [](Value *V) -> Value * {
6719     auto *GEP = dyn_cast<GEPOperator>(V);
6720     while (GEP) {
6721       V = GEP->getPointerOperand();
6722       GEP = dyn_cast<GEPOperator>(V);
6723     }
6724     return V;
6725   };
6726 
6727   auto VisitValueCB = [&](Value &V, const Instruction *,
6728                           AAMemoryLocation::StateType &T,
6729                           bool Stripped) -> bool {
6730     // TODO: recognize the TBAA used for constant accesses.
6731     MemoryLocationsKind MLK = NO_LOCATIONS;
6732     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6733     if (isa<UndefValue>(V))
6734       return true;
6735     if (auto *Arg = dyn_cast<Argument>(&V)) {
6736       if (Arg->hasByValAttr())
6737         MLK = NO_LOCAL_MEM;
6738       else
6739         MLK = NO_ARGUMENT_MEM;
6740     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6741       // Reading constant memory is not treated as a read "effect" by the
6742       // function attr pass so we won't neither. Constants defined by TBAA are
6743       // similar. (We know we do not write it because it is constant.)
6744       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6745         if (GVar->isConstant())
6746           return true;
6747 
6748       if (GV->hasLocalLinkage())
6749         MLK = NO_GLOBAL_INTERNAL_MEM;
6750       else
6751         MLK = NO_GLOBAL_EXTERNAL_MEM;
6752     } else if (isa<ConstantPointerNull>(V) &&
6753                !NullPointerIsDefined(getAssociatedFunction(),
6754                                      V.getType()->getPointerAddressSpace())) {
6755       return true;
6756     } else if (isa<AllocaInst>(V)) {
6757       MLK = NO_LOCAL_MEM;
6758     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6759       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6760           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6761       if (NoAliasAA.isAssumedNoAlias())
6762         MLK = NO_MALLOCED_MEM;
6763       else
6764         MLK = NO_UNKOWN_MEM;
6765     } else {
6766       MLK = NO_UNKOWN_MEM;
6767     }
6768 
6769     assert(MLK != NO_LOCATIONS && "No location specified!");
6770     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6771                               getAccessKindFromInst(&I));
6772     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6773                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6774                       << "\n");
6775     return true;
6776   };
6777 
6778   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6779           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6780           /* UseValueSimplify */ true,
6781           /* MaxValues */ 32, StripGEPCB)) {
6782     LLVM_DEBUG(
6783         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6784     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6785                               getAccessKindFromInst(&I));
6786   } else {
6787     LLVM_DEBUG(
6788         dbgs()
6789         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6790         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6791   }
6792 }
6793 
6794 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6795     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6796     bool &Changed) {
6797   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6798 
6799     // Skip non-pointer arguments.
6800     const Value *ArgOp = CB.getArgOperand(ArgNo);
6801     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6802       continue;
6803 
6804     // Skip readnone arguments.
6805     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6806     const auto &ArgOpMemLocationAA =
6807         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6808 
6809     if (ArgOpMemLocationAA.isAssumedReadNone())
6810       continue;
6811 
6812     // Categorize potentially accessed pointer arguments as if there was an
6813     // access instruction with them as pointer.
6814     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6815   }
6816 }
6817 
6818 AAMemoryLocation::MemoryLocationsKind
6819 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6820                                                   bool &Changed) {
6821   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6822                     << I << "\n");
6823 
6824   AAMemoryLocation::StateType AccessedLocs;
6825   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6826 
6827   if (auto *CB = dyn_cast<CallBase>(&I)) {
6828 
6829     // First check if we assume any memory is access is visible.
6830     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6831         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6832     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6833                       << " [" << CBMemLocationAA << "]\n");
6834 
6835     if (CBMemLocationAA.isAssumedReadNone())
6836       return NO_LOCATIONS;
6837 
6838     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6839       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6840                                 Changed, getAccessKindFromInst(&I));
6841       return AccessedLocs.getAssumed();
6842     }
6843 
6844     uint32_t CBAssumedNotAccessedLocs =
6845         CBMemLocationAA.getAssumedNotAccessedLocation();
6846 
6847     // Set the argmemonly and global bit as we handle them separately below.
6848     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6849         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6850 
6851     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6852       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6853         continue;
6854       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6855                                 getAccessKindFromInst(&I));
6856     }
6857 
6858     // Now handle global memory if it might be accessed. This is slightly tricky
6859     // as NO_GLOBAL_MEM has multiple bits set.
6860     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6861     if (HasGlobalAccesses) {
6862       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6863                             AccessKind Kind, MemoryLocationsKind MLK) {
6864         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6865                                   getAccessKindFromInst(&I));
6866         return true;
6867       };
6868       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6869               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6870         return AccessedLocs.getWorstState();
6871     }
6872 
6873     LLVM_DEBUG(
6874         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6875                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6876 
6877     // Now handle argument memory if it might be accessed.
6878     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6879     if (HasArgAccesses)
6880       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6881 
6882     LLVM_DEBUG(
6883         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6884                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6885 
6886     return AccessedLocs.getAssumed();
6887   }
6888 
6889   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6890     LLVM_DEBUG(
6891         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6892                << I << " [" << *Ptr << "]\n");
6893     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6894     return AccessedLocs.getAssumed();
6895   }
6896 
6897   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6898                     << I << "\n");
6899   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6900                             getAccessKindFromInst(&I));
6901   return AccessedLocs.getAssumed();
6902 }
6903 
6904 /// An AA to represent the memory behavior function attributes.
6905 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6906   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6907       : AAMemoryLocationImpl(IRP, A) {}
6908 
6909   /// See AbstractAttribute::updateImpl(Attributor &A).
6910   virtual ChangeStatus updateImpl(Attributor &A) override {
6911 
6912     const auto &MemBehaviorAA =
6913         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6914     if (MemBehaviorAA.isAssumedReadNone()) {
6915       if (MemBehaviorAA.isKnownReadNone())
6916         return indicateOptimisticFixpoint();
6917       assert(isAssumedReadNone() &&
6918              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6919       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6920       return ChangeStatus::UNCHANGED;
6921     }
6922 
6923     // The current assumed state used to determine a change.
6924     auto AssumedState = getAssumed();
6925     bool Changed = false;
6926 
6927     auto CheckRWInst = [&](Instruction &I) {
6928       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6929       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6930                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6931       removeAssumedBits(inverseLocation(MLK, false, false));
6932       // Stop once only the valid bit set in the *not assumed location*, thus
6933       // once we don't actually exclude any memory locations in the state.
6934       return getAssumedNotAccessedLocation() != VALID_STATE;
6935     };
6936 
6937     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6938       return indicatePessimisticFixpoint();
6939 
6940     Changed |= AssumedState != getAssumed();
6941     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6942   }
6943 
6944   /// See AbstractAttribute::trackStatistics()
6945   void trackStatistics() const override {
6946     if (isAssumedReadNone())
6947       STATS_DECLTRACK_FN_ATTR(readnone)
6948     else if (isAssumedArgMemOnly())
6949       STATS_DECLTRACK_FN_ATTR(argmemonly)
6950     else if (isAssumedInaccessibleMemOnly())
6951       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6952     else if (isAssumedInaccessibleOrArgMemOnly())
6953       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6954   }
6955 };
6956 
6957 /// AAMemoryLocation attribute for call sites.
6958 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6959   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6960       : AAMemoryLocationImpl(IRP, A) {}
6961 
6962   /// See AbstractAttribute::initialize(...).
6963   void initialize(Attributor &A) override {
6964     AAMemoryLocationImpl::initialize(A);
6965     Function *F = getAssociatedFunction();
6966     if (!F || F->isDeclaration())
6967       indicatePessimisticFixpoint();
6968   }
6969 
6970   /// See AbstractAttribute::updateImpl(...).
6971   ChangeStatus updateImpl(Attributor &A) override {
6972     // TODO: Once we have call site specific value information we can provide
6973     //       call site specific liveness liveness information and then it makes
6974     //       sense to specialize attributes for call sites arguments instead of
6975     //       redirecting requests to the callee argument.
6976     Function *F = getAssociatedFunction();
6977     const IRPosition &FnPos = IRPosition::function(*F);
6978     auto &FnAA =
6979         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6980     bool Changed = false;
6981     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6982                           AccessKind Kind, MemoryLocationsKind MLK) {
6983       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6984                                 getAccessKindFromInst(I));
6985       return true;
6986     };
6987     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6988       return indicatePessimisticFixpoint();
6989     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6990   }
6991 
6992   /// See AbstractAttribute::trackStatistics()
6993   void trackStatistics() const override {
6994     if (isAssumedReadNone())
6995       STATS_DECLTRACK_CS_ATTR(readnone)
6996   }
6997 };
6998 
6999 /// ------------------ Value Constant Range Attribute -------------------------
7000 
7001 struct AAValueConstantRangeImpl : AAValueConstantRange {
7002   using StateType = IntegerRangeState;
7003   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7004       : AAValueConstantRange(IRP, A) {}
7005 
7006   /// See AbstractAttribute::getAsStr().
7007   const std::string getAsStr() const override {
7008     std::string Str;
7009     llvm::raw_string_ostream OS(Str);
7010     OS << "range(" << getBitWidth() << ")<";
7011     getKnown().print(OS);
7012     OS << " / ";
7013     getAssumed().print(OS);
7014     OS << ">";
7015     return OS.str();
7016   }
7017 
7018   /// Helper function to get a SCEV expr for the associated value at program
7019   /// point \p I.
7020   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7021     if (!getAnchorScope())
7022       return nullptr;
7023 
7024     ScalarEvolution *SE =
7025         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7026             *getAnchorScope());
7027 
7028     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7029         *getAnchorScope());
7030 
7031     if (!SE || !LI)
7032       return nullptr;
7033 
7034     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7035     if (!I)
7036       return S;
7037 
7038     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7039   }
7040 
7041   /// Helper function to get a range from SCEV for the associated value at
7042   /// program point \p I.
7043   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7044                                          const Instruction *I = nullptr) const {
7045     if (!getAnchorScope())
7046       return getWorstState(getBitWidth());
7047 
7048     ScalarEvolution *SE =
7049         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7050             *getAnchorScope());
7051 
7052     const SCEV *S = getSCEV(A, I);
7053     if (!SE || !S)
7054       return getWorstState(getBitWidth());
7055 
7056     return SE->getUnsignedRange(S);
7057   }
7058 
7059   /// Helper function to get a range from LVI for the associated value at
7060   /// program point \p I.
7061   ConstantRange
7062   getConstantRangeFromLVI(Attributor &A,
7063                           const Instruction *CtxI = nullptr) const {
7064     if (!getAnchorScope())
7065       return getWorstState(getBitWidth());
7066 
7067     LazyValueInfo *LVI =
7068         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7069             *getAnchorScope());
7070 
7071     if (!LVI || !CtxI)
7072       return getWorstState(getBitWidth());
7073     return LVI->getConstantRange(&getAssociatedValue(),
7074                                  const_cast<Instruction *>(CtxI));
7075   }
7076 
7077   /// See AAValueConstantRange::getKnownConstantRange(..).
7078   ConstantRange
7079   getKnownConstantRange(Attributor &A,
7080                         const Instruction *CtxI = nullptr) const override {
7081     if (!CtxI || CtxI == getCtxI())
7082       return getKnown();
7083 
7084     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7085     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7086     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7087   }
7088 
7089   /// See AAValueConstantRange::getAssumedConstantRange(..).
7090   ConstantRange
7091   getAssumedConstantRange(Attributor &A,
7092                           const Instruction *CtxI = nullptr) const override {
7093     // TODO: Make SCEV use Attributor assumption.
7094     //       We may be able to bound a variable range via assumptions in
7095     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7096     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7097 
7098     if (!CtxI || CtxI == getCtxI() ||
7099         !AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
7100       return getAssumed();
7101 
7102     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7103     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7104     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7105   }
7106 
7107   /// See AbstractAttribute::initialize(..).
7108   void initialize(Attributor &A) override {
7109     // Intersect a range given by SCEV.
7110     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7111 
7112     // Intersect a range given by LVI.
7113     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7114   }
7115 
7116   /// Helper function to create MDNode for range metadata.
7117   static MDNode *
7118   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7119                             const ConstantRange &AssumedConstantRange) {
7120     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7121                                   Ty, AssumedConstantRange.getLower())),
7122                               ConstantAsMetadata::get(ConstantInt::get(
7123                                   Ty, AssumedConstantRange.getUpper()))};
7124     return MDNode::get(Ctx, LowAndHigh);
7125   }
7126 
7127   /// Return true if \p Assumed is included in \p KnownRanges.
7128   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7129 
7130     if (Assumed.isFullSet())
7131       return false;
7132 
7133     if (!KnownRanges)
7134       return true;
7135 
7136     // If multiple ranges are annotated in IR, we give up to annotate assumed
7137     // range for now.
7138 
7139     // TODO:  If there exists a known range which containts assumed range, we
7140     // can say assumed range is better.
7141     if (KnownRanges->getNumOperands() > 2)
7142       return false;
7143 
7144     ConstantInt *Lower =
7145         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7146     ConstantInt *Upper =
7147         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7148 
7149     ConstantRange Known(Lower->getValue(), Upper->getValue());
7150     return Known.contains(Assumed) && Known != Assumed;
7151   }
7152 
7153   /// Helper function to set range metadata.
7154   static bool
7155   setRangeMetadataIfisBetterRange(Instruction *I,
7156                                   const ConstantRange &AssumedConstantRange) {
7157     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7158     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7159       if (!AssumedConstantRange.isEmptySet()) {
7160         I->setMetadata(LLVMContext::MD_range,
7161                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7162                                                  AssumedConstantRange));
7163         return true;
7164       }
7165     }
7166     return false;
7167   }
7168 
7169   /// See AbstractAttribute::manifest()
7170   ChangeStatus manifest(Attributor &A) override {
7171     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7172     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7173     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7174 
7175     auto &V = getAssociatedValue();
7176     if (!AssumedConstantRange.isEmptySet() &&
7177         !AssumedConstantRange.isSingleElement()) {
7178       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7179         assert(I == getCtxI() && "Should not annotate an instruction which is "
7180                                  "not the context instruction");
7181         if (isa<CallInst>(I) || isa<LoadInst>(I))
7182           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7183             Changed = ChangeStatus::CHANGED;
7184       }
7185     }
7186 
7187     return Changed;
7188   }
7189 };
7190 
7191 struct AAValueConstantRangeArgument final
7192     : AAArgumentFromCallSiteArguments<
7193           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7194           true /* BridgeCallBaseContext */> {
7195   using Base = AAArgumentFromCallSiteArguments<
7196       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7197       true /* BridgeCallBaseContext */>;
7198   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7199       : Base(IRP, A) {}
7200 
7201   /// See AbstractAttribute::initialize(..).
7202   void initialize(Attributor &A) override {
7203     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7204       indicatePessimisticFixpoint();
7205     } else {
7206       Base::initialize(A);
7207     }
7208   }
7209 
7210   /// See AbstractAttribute::trackStatistics()
7211   void trackStatistics() const override {
7212     STATS_DECLTRACK_ARG_ATTR(value_range)
7213   }
7214 };
7215 
7216 struct AAValueConstantRangeReturned
7217     : AAReturnedFromReturnedValues<AAValueConstantRange,
7218                                    AAValueConstantRangeImpl,
7219                                    AAValueConstantRangeImpl::StateType,
7220                                    /* PropogateCallBaseContext */ true> {
7221   using Base =
7222       AAReturnedFromReturnedValues<AAValueConstantRange,
7223                                    AAValueConstantRangeImpl,
7224                                    AAValueConstantRangeImpl::StateType,
7225                                    /* PropogateCallBaseContext */ true>;
7226   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7227       : Base(IRP, A) {}
7228 
7229   /// See AbstractAttribute::initialize(...).
7230   void initialize(Attributor &A) override {}
7231 
7232   /// See AbstractAttribute::trackStatistics()
7233   void trackStatistics() const override {
7234     STATS_DECLTRACK_FNRET_ATTR(value_range)
7235   }
7236 };
7237 
7238 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7239   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7240       : AAValueConstantRangeImpl(IRP, A) {}
7241 
7242   /// See AbstractAttribute::initialize(...).
7243   void initialize(Attributor &A) override {
7244     AAValueConstantRangeImpl::initialize(A);
7245     Value &V = getAssociatedValue();
7246 
7247     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7248       unionAssumed(ConstantRange(C->getValue()));
7249       indicateOptimisticFixpoint();
7250       return;
7251     }
7252 
7253     if (isa<UndefValue>(&V)) {
7254       // Collapse the undef state to 0.
7255       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7256       indicateOptimisticFixpoint();
7257       return;
7258     }
7259 
7260     if (isa<CallBase>(&V))
7261       return;
7262 
7263     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7264       return;
7265     // If it is a load instruction with range metadata, use it.
7266     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7267       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7268         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7269         return;
7270       }
7271 
7272     // We can work with PHI and select instruction as we traverse their operands
7273     // during update.
7274     if (isa<SelectInst>(V) || isa<PHINode>(V))
7275       return;
7276 
7277     // Otherwise we give up.
7278     indicatePessimisticFixpoint();
7279 
7280     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7281                       << getAssociatedValue() << "\n");
7282   }
7283 
7284   bool calculateBinaryOperator(
7285       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7286       const Instruction *CtxI,
7287       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7288     Value *LHS = BinOp->getOperand(0);
7289     Value *RHS = BinOp->getOperand(1);
7290 
7291     // Simplify the operands first.
7292     bool UsedAssumedInformation = false;
7293     const auto &SimplifiedLHS =
7294         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
7295                                *this, UsedAssumedInformation);
7296     if (!SimplifiedLHS.hasValue())
7297       return true;
7298     if (SimplifiedLHS.getValue())
7299       LHS = *SimplifiedLHS;
7300 
7301     const auto &SimplifiedRHS =
7302         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
7303                                *this, UsedAssumedInformation);
7304     if (!SimplifiedRHS.hasValue())
7305       return true;
7306     if (SimplifiedRHS.getValue())
7307       RHS = *SimplifiedRHS;
7308 
7309     // TODO: Allow non integers as well.
7310     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7311       return false;
7312 
7313     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7314         *this, IRPosition::value(*LHS, getCallBaseContext()),
7315         DepClassTy::REQUIRED);
7316     QuerriedAAs.push_back(&LHSAA);
7317     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7318 
7319     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7320         *this, IRPosition::value(*RHS, getCallBaseContext()),
7321         DepClassTy::REQUIRED);
7322     QuerriedAAs.push_back(&RHSAA);
7323     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7324 
7325     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7326 
7327     T.unionAssumed(AssumedRange);
7328 
7329     // TODO: Track a known state too.
7330 
7331     return T.isValidState();
7332   }
7333 
7334   bool calculateCastInst(
7335       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7336       const Instruction *CtxI,
7337       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7338     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7339     // TODO: Allow non integers as well.
7340     Value *OpV = CastI->getOperand(0);
7341 
7342     // Simplify the operand first.
7343     bool UsedAssumedInformation = false;
7344     const auto &SimplifiedOpV =
7345         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
7346                                *this, UsedAssumedInformation);
7347     if (!SimplifiedOpV.hasValue())
7348       return true;
7349     if (SimplifiedOpV.getValue())
7350       OpV = *SimplifiedOpV;
7351 
7352     if (!OpV->getType()->isIntegerTy())
7353       return false;
7354 
7355     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7356         *this, IRPosition::value(*OpV, getCallBaseContext()),
7357         DepClassTy::REQUIRED);
7358     QuerriedAAs.push_back(&OpAA);
7359     T.unionAssumed(
7360         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7361     return T.isValidState();
7362   }
7363 
7364   bool
7365   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7366                    const Instruction *CtxI,
7367                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7368     Value *LHS = CmpI->getOperand(0);
7369     Value *RHS = CmpI->getOperand(1);
7370 
7371     // Simplify the operands first.
7372     bool UsedAssumedInformation = false;
7373     const auto &SimplifiedLHS =
7374         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
7375                                *this, UsedAssumedInformation);
7376     if (!SimplifiedLHS.hasValue())
7377       return true;
7378     if (SimplifiedLHS.getValue())
7379       LHS = *SimplifiedLHS;
7380 
7381     const auto &SimplifiedRHS =
7382         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
7383                                *this, UsedAssumedInformation);
7384     if (!SimplifiedRHS.hasValue())
7385       return true;
7386     if (SimplifiedRHS.getValue())
7387       RHS = *SimplifiedRHS;
7388 
7389     // TODO: Allow non integers as well.
7390     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7391       return false;
7392 
7393     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7394         *this, IRPosition::value(*LHS, getCallBaseContext()),
7395         DepClassTy::REQUIRED);
7396     QuerriedAAs.push_back(&LHSAA);
7397     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7398         *this, IRPosition::value(*RHS, getCallBaseContext()),
7399         DepClassTy::REQUIRED);
7400     QuerriedAAs.push_back(&RHSAA);
7401     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7402     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7403 
7404     // If one of them is empty set, we can't decide.
7405     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7406       return true;
7407 
7408     bool MustTrue = false, MustFalse = false;
7409 
7410     auto AllowedRegion =
7411         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7412 
7413     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7414       MustFalse = true;
7415 
7416     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7417       MustTrue = true;
7418 
7419     assert((!MustTrue || !MustFalse) &&
7420            "Either MustTrue or MustFalse should be false!");
7421 
7422     if (MustTrue)
7423       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7424     else if (MustFalse)
7425       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7426     else
7427       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7428 
7429     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7430                       << " " << RHSAA << "\n");
7431 
7432     // TODO: Track a known state too.
7433     return T.isValidState();
7434   }
7435 
7436   /// See AbstractAttribute::updateImpl(...).
7437   ChangeStatus updateImpl(Attributor &A) override {
7438     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7439                             IntegerRangeState &T, bool Stripped) -> bool {
7440       Instruction *I = dyn_cast<Instruction>(&V);
7441       if (!I || isa<CallBase>(I)) {
7442 
7443         // If the value is not instruction, we query AA to Attributor.
7444         const auto &AA = A.getAAFor<AAValueConstantRange>(
7445             *this, IRPosition::value(V, getCallBaseContext()),
7446             DepClassTy::REQUIRED);
7447 
7448         // Clamp operator is not used to utilize a program point CtxI.
7449         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7450 
7451         return T.isValidState();
7452       }
7453 
7454       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7455       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7456         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7457           return false;
7458       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7459         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7460           return false;
7461       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7462         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7463           return false;
7464       } else {
7465         // Give up with other instructions.
7466         // TODO: Add other instructions
7467 
7468         T.indicatePessimisticFixpoint();
7469         return false;
7470       }
7471 
7472       // Catch circular reasoning in a pessimistic way for now.
7473       // TODO: Check how the range evolves and if we stripped anything, see also
7474       //       AADereferenceable or AAAlign for similar situations.
7475       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7476         if (QueriedAA != this)
7477           continue;
7478         // If we are in a stady state we do not need to worry.
7479         if (T.getAssumed() == getState().getAssumed())
7480           continue;
7481         T.indicatePessimisticFixpoint();
7482       }
7483 
7484       return T.isValidState();
7485     };
7486 
7487     IntegerRangeState T(getBitWidth());
7488 
7489     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7490             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7491             /* UseValueSimplify */ false))
7492       return indicatePessimisticFixpoint();
7493 
7494     return clampStateAndIndicateChange(getState(), T);
7495   }
7496 
7497   /// See AbstractAttribute::trackStatistics()
7498   void trackStatistics() const override {
7499     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7500   }
7501 };
7502 
7503 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7504   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7505       : AAValueConstantRangeImpl(IRP, A) {}
7506 
7507   /// See AbstractAttribute::initialize(...).
7508   ChangeStatus updateImpl(Attributor &A) override {
7509     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7510                      "not be called");
7511   }
7512 
7513   /// See AbstractAttribute::trackStatistics()
7514   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7515 };
7516 
7517 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7518   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7519       : AAValueConstantRangeFunction(IRP, A) {}
7520 
7521   /// See AbstractAttribute::trackStatistics()
7522   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7523 };
7524 
7525 struct AAValueConstantRangeCallSiteReturned
7526     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7527                                      AAValueConstantRangeImpl,
7528                                      AAValueConstantRangeImpl::StateType,
7529                                      /* IntroduceCallBaseContext */ true> {
7530   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7531       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7532                                        AAValueConstantRangeImpl,
7533                                        AAValueConstantRangeImpl::StateType,
7534                                        /* IntroduceCallBaseContext */ true>(IRP,
7535                                                                             A) {
7536   }
7537 
7538   /// See AbstractAttribute::initialize(...).
7539   void initialize(Attributor &A) override {
7540     // If it is a load instruction with range metadata, use the metadata.
7541     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7542       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7543         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7544 
7545     AAValueConstantRangeImpl::initialize(A);
7546   }
7547 
7548   /// See AbstractAttribute::trackStatistics()
7549   void trackStatistics() const override {
7550     STATS_DECLTRACK_CSRET_ATTR(value_range)
7551   }
7552 };
7553 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7554   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7555       : AAValueConstantRangeFloating(IRP, A) {}
7556 
7557   /// See AbstractAttribute::manifest()
7558   ChangeStatus manifest(Attributor &A) override {
7559     return ChangeStatus::UNCHANGED;
7560   }
7561 
7562   /// See AbstractAttribute::trackStatistics()
7563   void trackStatistics() const override {
7564     STATS_DECLTRACK_CSARG_ATTR(value_range)
7565   }
7566 };
7567 
7568 /// ------------------ Potential Values Attribute -------------------------
7569 
7570 struct AAPotentialValuesImpl : AAPotentialValues {
7571   using StateType = PotentialConstantIntValuesState;
7572 
7573   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7574       : AAPotentialValues(IRP, A) {}
7575 
7576   /// See AbstractAttribute::getAsStr().
7577   const std::string getAsStr() const override {
7578     std::string Str;
7579     llvm::raw_string_ostream OS(Str);
7580     OS << getState();
7581     return OS.str();
7582   }
7583 
7584   /// See AbstractAttribute::updateImpl(...).
7585   ChangeStatus updateImpl(Attributor &A) override {
7586     return indicatePessimisticFixpoint();
7587   }
7588 };
7589 
7590 struct AAPotentialValuesArgument final
7591     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7592                                       PotentialConstantIntValuesState> {
7593   using Base =
7594       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7595                                       PotentialConstantIntValuesState>;
7596   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7597       : Base(IRP, A) {}
7598 
7599   /// See AbstractAttribute::initialize(..).
7600   void initialize(Attributor &A) override {
7601     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7602       indicatePessimisticFixpoint();
7603     } else {
7604       Base::initialize(A);
7605     }
7606   }
7607 
7608   /// See AbstractAttribute::trackStatistics()
7609   void trackStatistics() const override {
7610     STATS_DECLTRACK_ARG_ATTR(potential_values)
7611   }
7612 };
7613 
7614 struct AAPotentialValuesReturned
7615     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7616   using Base =
7617       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7618   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7619       : Base(IRP, A) {}
7620 
7621   /// See AbstractAttribute::trackStatistics()
7622   void trackStatistics() const override {
7623     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7624   }
7625 };
7626 
7627 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7628   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7629       : AAPotentialValuesImpl(IRP, A) {}
7630 
7631   /// See AbstractAttribute::initialize(..).
7632   void initialize(Attributor &A) override {
7633     Value &V = getAssociatedValue();
7634 
7635     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7636       unionAssumed(C->getValue());
7637       indicateOptimisticFixpoint();
7638       return;
7639     }
7640 
7641     if (isa<UndefValue>(&V)) {
7642       unionAssumedWithUndef();
7643       indicateOptimisticFixpoint();
7644       return;
7645     }
7646 
7647     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7648       return;
7649 
7650     if (isa<SelectInst>(V) || isa<PHINode>(V))
7651       return;
7652 
7653     indicatePessimisticFixpoint();
7654 
7655     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7656                       << getAssociatedValue() << "\n");
7657   }
7658 
7659   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7660                                 const APInt &RHS) {
7661     ICmpInst::Predicate Pred = ICI->getPredicate();
7662     switch (Pred) {
7663     case ICmpInst::ICMP_UGT:
7664       return LHS.ugt(RHS);
7665     case ICmpInst::ICMP_SGT:
7666       return LHS.sgt(RHS);
7667     case ICmpInst::ICMP_EQ:
7668       return LHS.eq(RHS);
7669     case ICmpInst::ICMP_UGE:
7670       return LHS.uge(RHS);
7671     case ICmpInst::ICMP_SGE:
7672       return LHS.sge(RHS);
7673     case ICmpInst::ICMP_ULT:
7674       return LHS.ult(RHS);
7675     case ICmpInst::ICMP_SLT:
7676       return LHS.slt(RHS);
7677     case ICmpInst::ICMP_NE:
7678       return LHS.ne(RHS);
7679     case ICmpInst::ICMP_ULE:
7680       return LHS.ule(RHS);
7681     case ICmpInst::ICMP_SLE:
7682       return LHS.sle(RHS);
7683     default:
7684       llvm_unreachable("Invalid ICmp predicate!");
7685     }
7686   }
7687 
7688   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7689                                  uint32_t ResultBitWidth) {
7690     Instruction::CastOps CastOp = CI->getOpcode();
7691     switch (CastOp) {
7692     default:
7693       llvm_unreachable("unsupported or not integer cast");
7694     case Instruction::Trunc:
7695       return Src.trunc(ResultBitWidth);
7696     case Instruction::SExt:
7697       return Src.sext(ResultBitWidth);
7698     case Instruction::ZExt:
7699       return Src.zext(ResultBitWidth);
7700     case Instruction::BitCast:
7701       return Src;
7702     }
7703   }
7704 
7705   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7706                                        const APInt &LHS, const APInt &RHS,
7707                                        bool &SkipOperation, bool &Unsupported) {
7708     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7709     // Unsupported is set to true when the binary operator is not supported.
7710     // SkipOperation is set to true when UB occur with the given operand pair
7711     // (LHS, RHS).
7712     // TODO: we should look at nsw and nuw keywords to handle operations
7713     //       that create poison or undef value.
7714     switch (BinOpcode) {
7715     default:
7716       Unsupported = true;
7717       return LHS;
7718     case Instruction::Add:
7719       return LHS + RHS;
7720     case Instruction::Sub:
7721       return LHS - RHS;
7722     case Instruction::Mul:
7723       return LHS * RHS;
7724     case Instruction::UDiv:
7725       if (RHS.isNullValue()) {
7726         SkipOperation = true;
7727         return LHS;
7728       }
7729       return LHS.udiv(RHS);
7730     case Instruction::SDiv:
7731       if (RHS.isNullValue()) {
7732         SkipOperation = true;
7733         return LHS;
7734       }
7735       return LHS.sdiv(RHS);
7736     case Instruction::URem:
7737       if (RHS.isNullValue()) {
7738         SkipOperation = true;
7739         return LHS;
7740       }
7741       return LHS.urem(RHS);
7742     case Instruction::SRem:
7743       if (RHS.isNullValue()) {
7744         SkipOperation = true;
7745         return LHS;
7746       }
7747       return LHS.srem(RHS);
7748     case Instruction::Shl:
7749       return LHS.shl(RHS);
7750     case Instruction::LShr:
7751       return LHS.lshr(RHS);
7752     case Instruction::AShr:
7753       return LHS.ashr(RHS);
7754     case Instruction::And:
7755       return LHS & RHS;
7756     case Instruction::Or:
7757       return LHS | RHS;
7758     case Instruction::Xor:
7759       return LHS ^ RHS;
7760     }
7761   }
7762 
7763   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7764                                            const APInt &LHS, const APInt &RHS) {
7765     bool SkipOperation = false;
7766     bool Unsupported = false;
7767     APInt Result =
7768         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7769     if (Unsupported)
7770       return false;
7771     // If SkipOperation is true, we can ignore this operand pair (L, R).
7772     if (!SkipOperation)
7773       unionAssumed(Result);
7774     return isValidState();
7775   }
7776 
7777   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7778     auto AssumedBefore = getAssumed();
7779     Value *LHS = ICI->getOperand(0);
7780     Value *RHS = ICI->getOperand(1);
7781 
7782     // Simplify the operands first.
7783     bool UsedAssumedInformation = false;
7784     const auto &SimplifiedLHS =
7785         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
7786                                *this, UsedAssumedInformation);
7787     if (!SimplifiedLHS.hasValue())
7788       return ChangeStatus::UNCHANGED;
7789     if (SimplifiedLHS.getValue())
7790       LHS = *SimplifiedLHS;
7791 
7792     const auto &SimplifiedRHS =
7793         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
7794                                *this, UsedAssumedInformation);
7795     if (!SimplifiedRHS.hasValue())
7796       return ChangeStatus::UNCHANGED;
7797     if (SimplifiedRHS.getValue())
7798       RHS = *SimplifiedRHS;
7799 
7800     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7801       return indicatePessimisticFixpoint();
7802 
7803     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7804                                                 DepClassTy::REQUIRED);
7805     if (!LHSAA.isValidState())
7806       return indicatePessimisticFixpoint();
7807 
7808     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7809                                                 DepClassTy::REQUIRED);
7810     if (!RHSAA.isValidState())
7811       return indicatePessimisticFixpoint();
7812 
7813     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7814     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7815 
7816     // TODO: make use of undef flag to limit potential values aggressively.
7817     bool MaybeTrue = false, MaybeFalse = false;
7818     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7819     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7820       // The result of any comparison between undefs can be soundly replaced
7821       // with undef.
7822       unionAssumedWithUndef();
7823     } else if (LHSAA.undefIsContained()) {
7824       for (const APInt &R : RHSAAPVS) {
7825         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7826         MaybeTrue |= CmpResult;
7827         MaybeFalse |= !CmpResult;
7828         if (MaybeTrue & MaybeFalse)
7829           return indicatePessimisticFixpoint();
7830       }
7831     } else if (RHSAA.undefIsContained()) {
7832       for (const APInt &L : LHSAAPVS) {
7833         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7834         MaybeTrue |= CmpResult;
7835         MaybeFalse |= !CmpResult;
7836         if (MaybeTrue & MaybeFalse)
7837           return indicatePessimisticFixpoint();
7838       }
7839     } else {
7840       for (const APInt &L : LHSAAPVS) {
7841         for (const APInt &R : RHSAAPVS) {
7842           bool CmpResult = calculateICmpInst(ICI, L, R);
7843           MaybeTrue |= CmpResult;
7844           MaybeFalse |= !CmpResult;
7845           if (MaybeTrue & MaybeFalse)
7846             return indicatePessimisticFixpoint();
7847         }
7848       }
7849     }
7850     if (MaybeTrue)
7851       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7852     if (MaybeFalse)
7853       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7854     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7855                                          : ChangeStatus::CHANGED;
7856   }
7857 
7858   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7859     auto AssumedBefore = getAssumed();
7860     Value *LHS = SI->getTrueValue();
7861     Value *RHS = SI->getFalseValue();
7862 
7863     // Simplify the operands first.
7864     bool UsedAssumedInformation = false;
7865     const auto &SimplifiedLHS =
7866         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
7867                                *this, UsedAssumedInformation);
7868     if (!SimplifiedLHS.hasValue())
7869       return ChangeStatus::UNCHANGED;
7870     if (SimplifiedLHS.getValue())
7871       LHS = *SimplifiedLHS;
7872 
7873     const auto &SimplifiedRHS =
7874         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
7875                                *this, UsedAssumedInformation);
7876     if (!SimplifiedRHS.hasValue())
7877       return ChangeStatus::UNCHANGED;
7878     if (SimplifiedRHS.getValue())
7879       RHS = *SimplifiedRHS;
7880 
7881     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7882       return indicatePessimisticFixpoint();
7883 
7884     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
7885                                                   UsedAssumedInformation);
7886 
7887     // Check if we only need one operand.
7888     bool OnlyLeft = false, OnlyRight = false;
7889     if (C.hasValue() && *C && (*C)->isOneValue())
7890       OnlyLeft = true;
7891     else if (C.hasValue() && *C && (*C)->isZeroValue())
7892       OnlyRight = true;
7893 
7894     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
7895     if (!OnlyRight) {
7896       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7897                                              DepClassTy::REQUIRED);
7898       if (!LHSAA->isValidState())
7899         return indicatePessimisticFixpoint();
7900     }
7901     if (!OnlyLeft) {
7902       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7903                                              DepClassTy::REQUIRED);
7904       if (!RHSAA->isValidState())
7905         return indicatePessimisticFixpoint();
7906     }
7907 
7908     if (!LHSAA || !RHSAA) {
7909       // select (true/false), lhs, rhs
7910       auto *OpAA = LHSAA ? LHSAA : RHSAA;
7911 
7912       if (OpAA->undefIsContained())
7913         unionAssumedWithUndef();
7914       else
7915         unionAssumed(*OpAA);
7916 
7917     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
7918       // select i1 *, undef , undef => undef
7919       unionAssumedWithUndef();
7920     } else {
7921       unionAssumed(*LHSAA);
7922       unionAssumed(*RHSAA);
7923     }
7924     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7925                                          : ChangeStatus::CHANGED;
7926   }
7927 
7928   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7929     auto AssumedBefore = getAssumed();
7930     if (!CI->isIntegerCast())
7931       return indicatePessimisticFixpoint();
7932     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7933     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7934     Value *Src = CI->getOperand(0);
7935 
7936     // Simplify the operand first.
7937     bool UsedAssumedInformation = false;
7938     const auto &SimplifiedSrc =
7939         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
7940                                *this, UsedAssumedInformation);
7941     if (!SimplifiedSrc.hasValue())
7942       return ChangeStatus::UNCHANGED;
7943     if (SimplifiedSrc.getValue())
7944       Src = *SimplifiedSrc;
7945 
7946     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7947                                                 DepClassTy::REQUIRED);
7948     if (!SrcAA.isValidState())
7949       return indicatePessimisticFixpoint();
7950     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7951     if (SrcAA.undefIsContained())
7952       unionAssumedWithUndef();
7953     else {
7954       for (const APInt &S : SrcAAPVS) {
7955         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7956         unionAssumed(T);
7957       }
7958     }
7959     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7960                                          : ChangeStatus::CHANGED;
7961   }
7962 
7963   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7964     auto AssumedBefore = getAssumed();
7965     Value *LHS = BinOp->getOperand(0);
7966     Value *RHS = BinOp->getOperand(1);
7967 
7968     // Simplify the operands first.
7969     bool UsedAssumedInformation = false;
7970     const auto &SimplifiedLHS =
7971         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
7972                                *this, UsedAssumedInformation);
7973     if (!SimplifiedLHS.hasValue())
7974       return ChangeStatus::UNCHANGED;
7975     if (SimplifiedLHS.getValue())
7976       LHS = *SimplifiedLHS;
7977 
7978     const auto &SimplifiedRHS =
7979         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
7980                                *this, UsedAssumedInformation);
7981     if (!SimplifiedRHS.hasValue())
7982       return ChangeStatus::UNCHANGED;
7983     if (SimplifiedRHS.getValue())
7984       RHS = *SimplifiedRHS;
7985 
7986     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7987       return indicatePessimisticFixpoint();
7988 
7989     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7990                                                 DepClassTy::REQUIRED);
7991     if (!LHSAA.isValidState())
7992       return indicatePessimisticFixpoint();
7993 
7994     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7995                                                 DepClassTy::REQUIRED);
7996     if (!RHSAA.isValidState())
7997       return indicatePessimisticFixpoint();
7998 
7999     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8000     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8001     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
8002 
8003     // TODO: make use of undef flag to limit potential values aggressively.
8004     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8005       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
8006         return indicatePessimisticFixpoint();
8007     } else if (LHSAA.undefIsContained()) {
8008       for (const APInt &R : RHSAAPVS) {
8009         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
8010           return indicatePessimisticFixpoint();
8011       }
8012     } else if (RHSAA.undefIsContained()) {
8013       for (const APInt &L : LHSAAPVS) {
8014         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
8015           return indicatePessimisticFixpoint();
8016       }
8017     } else {
8018       for (const APInt &L : LHSAAPVS) {
8019         for (const APInt &R : RHSAAPVS) {
8020           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
8021             return indicatePessimisticFixpoint();
8022         }
8023       }
8024     }
8025     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8026                                          : ChangeStatus::CHANGED;
8027   }
8028 
8029   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
8030     auto AssumedBefore = getAssumed();
8031     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
8032       Value *IncomingValue = PHI->getIncomingValue(u);
8033 
8034       // Simplify the operand first.
8035       bool UsedAssumedInformation = false;
8036       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
8037           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
8038           UsedAssumedInformation);
8039       if (!SimplifiedIncomingValue.hasValue())
8040         continue;
8041       if (SimplifiedIncomingValue.getValue())
8042         IncomingValue = *SimplifiedIncomingValue;
8043 
8044       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
8045           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
8046       if (!PotentialValuesAA.isValidState())
8047         return indicatePessimisticFixpoint();
8048       if (PotentialValuesAA.undefIsContained())
8049         unionAssumedWithUndef();
8050       else
8051         unionAssumed(PotentialValuesAA.getAssumed());
8052     }
8053     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8054                                          : ChangeStatus::CHANGED;
8055   }
8056 
8057   /// See AbstractAttribute::updateImpl(...).
8058   ChangeStatus updateImpl(Attributor &A) override {
8059     Value &V = getAssociatedValue();
8060     Instruction *I = dyn_cast<Instruction>(&V);
8061 
8062     if (auto *ICI = dyn_cast<ICmpInst>(I))
8063       return updateWithICmpInst(A, ICI);
8064 
8065     if (auto *SI = dyn_cast<SelectInst>(I))
8066       return updateWithSelectInst(A, SI);
8067 
8068     if (auto *CI = dyn_cast<CastInst>(I))
8069       return updateWithCastInst(A, CI);
8070 
8071     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
8072       return updateWithBinaryOperator(A, BinOp);
8073 
8074     if (auto *PHI = dyn_cast<PHINode>(I))
8075       return updateWithPHINode(A, PHI);
8076 
8077     return indicatePessimisticFixpoint();
8078   }
8079 
8080   /// See AbstractAttribute::trackStatistics()
8081   void trackStatistics() const override {
8082     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
8083   }
8084 };
8085 
8086 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
8087   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
8088       : AAPotentialValuesImpl(IRP, A) {}
8089 
8090   /// See AbstractAttribute::initialize(...).
8091   ChangeStatus updateImpl(Attributor &A) override {
8092     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
8093                      "not be called");
8094   }
8095 
8096   /// See AbstractAttribute::trackStatistics()
8097   void trackStatistics() const override {
8098     STATS_DECLTRACK_FN_ATTR(potential_values)
8099   }
8100 };
8101 
8102 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
8103   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
8104       : AAPotentialValuesFunction(IRP, A) {}
8105 
8106   /// See AbstractAttribute::trackStatistics()
8107   void trackStatistics() const override {
8108     STATS_DECLTRACK_CS_ATTR(potential_values)
8109   }
8110 };
8111 
8112 struct AAPotentialValuesCallSiteReturned
8113     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
8114   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
8115       : AACallSiteReturnedFromReturned<AAPotentialValues,
8116                                        AAPotentialValuesImpl>(IRP, A) {}
8117 
8118   /// See AbstractAttribute::trackStatistics()
8119   void trackStatistics() const override {
8120     STATS_DECLTRACK_CSRET_ATTR(potential_values)
8121   }
8122 };
8123 
8124 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
8125   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
8126       : AAPotentialValuesFloating(IRP, A) {}
8127 
8128   /// See AbstractAttribute::initialize(..).
8129   void initialize(Attributor &A) override {
8130     Value &V = getAssociatedValue();
8131 
8132     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8133       unionAssumed(C->getValue());
8134       indicateOptimisticFixpoint();
8135       return;
8136     }
8137 
8138     if (isa<UndefValue>(&V)) {
8139       unionAssumedWithUndef();
8140       indicateOptimisticFixpoint();
8141       return;
8142     }
8143   }
8144 
8145   /// See AbstractAttribute::updateImpl(...).
8146   ChangeStatus updateImpl(Attributor &A) override {
8147     Value &V = getAssociatedValue();
8148     auto AssumedBefore = getAssumed();
8149     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
8150                                              DepClassTy::REQUIRED);
8151     const auto &S = AA.getAssumed();
8152     unionAssumed(S);
8153     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8154                                          : ChangeStatus::CHANGED;
8155   }
8156 
8157   /// See AbstractAttribute::trackStatistics()
8158   void trackStatistics() const override {
8159     STATS_DECLTRACK_CSARG_ATTR(potential_values)
8160   }
8161 };
8162 
8163 /// ------------------------ NoUndef Attribute ---------------------------------
8164 struct AANoUndefImpl : AANoUndef {
8165   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
8166 
8167   /// See AbstractAttribute::initialize(...).
8168   void initialize(Attributor &A) override {
8169     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
8170       indicateOptimisticFixpoint();
8171       return;
8172     }
8173     Value &V = getAssociatedValue();
8174     if (isa<UndefValue>(V))
8175       indicatePessimisticFixpoint();
8176     else if (isa<FreezeInst>(V))
8177       indicateOptimisticFixpoint();
8178     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8179              isGuaranteedNotToBeUndefOrPoison(&V))
8180       indicateOptimisticFixpoint();
8181     else
8182       AANoUndef::initialize(A);
8183   }
8184 
8185   /// See followUsesInMBEC
8186   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
8187                        AANoUndef::StateType &State) {
8188     const Value *UseV = U->get();
8189     const DominatorTree *DT = nullptr;
8190     AssumptionCache *AC = nullptr;
8191     InformationCache &InfoCache = A.getInfoCache();
8192     if (Function *F = getAnchorScope()) {
8193       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
8194       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
8195     }
8196     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
8197     bool TrackUse = false;
8198     // Track use for instructions which must produce undef or poison bits when
8199     // at least one operand contains such bits.
8200     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
8201       TrackUse = true;
8202     return TrackUse;
8203   }
8204 
8205   /// See AbstractAttribute::getAsStr().
8206   const std::string getAsStr() const override {
8207     return getAssumed() ? "noundef" : "may-undef-or-poison";
8208   }
8209 
8210   ChangeStatus manifest(Attributor &A) override {
8211     // We don't manifest noundef attribute for dead positions because the
8212     // associated values with dead positions would be replaced with undef
8213     // values.
8214     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
8215       return ChangeStatus::UNCHANGED;
8216     // A position whose simplified value does not have any value is
8217     // considered to be dead. We don't manifest noundef in such positions for
8218     // the same reason above.
8219     bool UsedAssumedInformation = false;
8220     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
8221              .hasValue())
8222       return ChangeStatus::UNCHANGED;
8223     return AANoUndef::manifest(A);
8224   }
8225 };
8226 
8227 struct AANoUndefFloating : public AANoUndefImpl {
8228   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
8229       : AANoUndefImpl(IRP, A) {}
8230 
8231   /// See AbstractAttribute::initialize(...).
8232   void initialize(Attributor &A) override {
8233     AANoUndefImpl::initialize(A);
8234     if (!getState().isAtFixpoint())
8235       if (Instruction *CtxI = getCtxI())
8236         followUsesInMBEC(*this, A, getState(), *CtxI);
8237   }
8238 
8239   /// See AbstractAttribute::updateImpl(...).
8240   ChangeStatus updateImpl(Attributor &A) override {
8241     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8242                             AANoUndef::StateType &T, bool Stripped) -> bool {
8243       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
8244                                              DepClassTy::REQUIRED);
8245       if (!Stripped && this == &AA) {
8246         T.indicatePessimisticFixpoint();
8247       } else {
8248         const AANoUndef::StateType &S =
8249             static_cast<const AANoUndef::StateType &>(AA.getState());
8250         T ^= S;
8251       }
8252       return T.isValidState();
8253     };
8254 
8255     StateType T;
8256     if (!genericValueTraversal<AANoUndef, StateType>(
8257             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
8258       return indicatePessimisticFixpoint();
8259 
8260     return clampStateAndIndicateChange(getState(), T);
8261   }
8262 
8263   /// See AbstractAttribute::trackStatistics()
8264   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8265 };
8266 
8267 struct AANoUndefReturned final
8268     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
8269   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
8270       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
8271 
8272   /// See AbstractAttribute::trackStatistics()
8273   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8274 };
8275 
8276 struct AANoUndefArgument final
8277     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8278   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8279       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8280 
8281   /// See AbstractAttribute::trackStatistics()
8282   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8283 };
8284 
8285 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8286   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8287       : AANoUndefFloating(IRP, A) {}
8288 
8289   /// See AbstractAttribute::trackStatistics()
8290   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8291 };
8292 
8293 struct AANoUndefCallSiteReturned final
8294     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8295   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8296       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8297 
8298   /// See AbstractAttribute::trackStatistics()
8299   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8300 };
8301 
8302 struct AACallEdgesFunction : public AACallEdges {
8303   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
8304       : AACallEdges(IRP, A) {}
8305 
8306   /// See AbstractAttribute::updateImpl(...).
8307   ChangeStatus updateImpl(Attributor &A) override {
8308     ChangeStatus Change = ChangeStatus::UNCHANGED;
8309     bool OldHasUnknownCallee = HasUnknownCallee;
8310 
8311     auto AddCalledFunction = [&](Function *Fn) {
8312       if (CalledFunctions.insert(Fn)) {
8313         Change = ChangeStatus::CHANGED;
8314         LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
8315                           << "\n");
8316       }
8317     };
8318 
8319     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
8320                           bool Stripped) -> bool {
8321       if (Function *Fn = dyn_cast<Function>(&V)) {
8322         AddCalledFunction(Fn);
8323       } else {
8324         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
8325         HasUnknown = true;
8326       }
8327 
8328       // Explore all values.
8329       return true;
8330     };
8331 
8332     // Process any value that we might call.
8333     auto ProcessCalledOperand = [&](Value *V, Instruction *Ctx) {
8334       if (!genericValueTraversal<AACallEdges, bool>(A, IRPosition::value(*V),
8335                                                     *this, HasUnknownCallee,
8336                                                     VisitValue, nullptr, false))
8337         // If we haven't gone through all values, assume that there are unknown
8338         // callees.
8339         HasUnknownCallee = true;
8340     };
8341 
8342     auto ProcessCallInst = [&](Instruction &Inst) {
8343       CallBase &CB = static_cast<CallBase &>(Inst);
8344 
8345       // Process callee metadata if available.
8346       if (auto *MD = Inst.getMetadata(LLVMContext::MD_callees)) {
8347         for (auto &Op : MD->operands()) {
8348           Function *Callee = mdconst::extract_or_null<Function>(Op);
8349           if (Callee)
8350             AddCalledFunction(Callee);
8351         }
8352         // Callees metadata grantees that the called function is one of its
8353         // operands, So we are done.
8354         return true;
8355       }
8356 
8357       // The most simple case.
8358       ProcessCalledOperand(CB.getCalledOperand(), &Inst);
8359 
8360       // Process callback functions.
8361       SmallVector<const Use *, 4u> CallbackUses;
8362       AbstractCallSite::getCallbackUses(CB, CallbackUses);
8363       for (const Use *U : CallbackUses)
8364         ProcessCalledOperand(U->get(), &Inst);
8365 
8366       return true;
8367     };
8368 
8369     // Visit all callable instructions.
8370     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this))
8371       // If we haven't looked at all call like instructions, assume that there
8372       // are unknown callees.
8373       HasUnknownCallee = true;
8374     // Track changes.
8375     if (OldHasUnknownCallee != HasUnknownCallee)
8376       Change = ChangeStatus::CHANGED;
8377 
8378     return Change;
8379   }
8380 
8381   virtual const SetVector<Function *> &getOptimisticEdges() const override {
8382     return CalledFunctions;
8383   };
8384 
8385   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
8386 
8387   const std::string getAsStr() const override {
8388     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
8389            std::to_string(CalledFunctions.size()) + "]";
8390   }
8391 
8392   void trackStatistics() const override {}
8393 
8394   /// Optimistic set of functions that might be called by this function.
8395   SetVector<Function *> CalledFunctions;
8396 
8397   /// Does this function have a call to a function that we don't know about.
8398   bool HasUnknownCallee = false;
8399 };
8400 
8401 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
8402   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
8403       : AAFunctionReachability(IRP, A) {}
8404 
8405   bool canReach(Attributor &A, Function *Fn) const override {
8406     // Assume that we can reach any function if we can reach a call with
8407     // unknown callee.
8408     if (CanReachUnknownCallee)
8409       return true;
8410 
8411     if (ReachableQueries.count(Fn))
8412       return true;
8413 
8414     if (UnreachableQueries.count(Fn))
8415       return false;
8416 
8417     const AACallEdges &AAEdges =
8418         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
8419 
8420     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
8421     bool Result = checkIfReachable(A, Edges, Fn);
8422 
8423     // Attributor returns attributes as const, so this function has to be
8424     // const for users of this attribute to use it without having to do
8425     // a const_cast.
8426     // This is a hack for us to be able to cache queries.
8427     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
8428 
8429     if (Result)
8430       NonConstThis->ReachableQueries.insert(Fn);
8431     else
8432       NonConstThis->UnreachableQueries.insert(Fn);
8433 
8434     return Result;
8435   }
8436 
8437   /// See AbstractAttribute::updateImpl(...).
8438   ChangeStatus updateImpl(Attributor &A) override {
8439     if (CanReachUnknownCallee)
8440       return ChangeStatus::UNCHANGED;
8441 
8442     const AACallEdges &AAEdges =
8443         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
8444     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
8445     ChangeStatus Change = ChangeStatus::UNCHANGED;
8446 
8447     if (AAEdges.hasUnknownCallee()) {
8448       bool OldCanReachUnknown = CanReachUnknownCallee;
8449       CanReachUnknownCallee = true;
8450       return OldCanReachUnknown ? ChangeStatus::UNCHANGED
8451                                 : ChangeStatus::CHANGED;
8452     }
8453 
8454     // Check if any of the unreachable functions become reachable.
8455     for (auto Current = UnreachableQueries.begin();
8456          Current != UnreachableQueries.end();) {
8457       if (!checkIfReachable(A, Edges, *Current)) {
8458         Current++;
8459         continue;
8460       }
8461       ReachableQueries.insert(*Current);
8462       UnreachableQueries.erase(*Current++);
8463       Change = ChangeStatus::CHANGED;
8464     }
8465 
8466     return Change;
8467   }
8468 
8469   const std::string getAsStr() const override {
8470     size_t QueryCount = ReachableQueries.size() + UnreachableQueries.size();
8471 
8472     return "FunctionReachability [" + std::to_string(ReachableQueries.size()) +
8473            "," + std::to_string(QueryCount) + "]";
8474   }
8475 
8476   void trackStatistics() const override {}
8477 
8478 private:
8479   bool canReachUnknownCallee() const override { return CanReachUnknownCallee; }
8480 
8481   bool checkIfReachable(Attributor &A, const SetVector<Function *> &Edges,
8482                         Function *Fn) const {
8483     if (Edges.count(Fn))
8484       return true;
8485 
8486     for (Function *Edge : Edges) {
8487       // We don't need a dependency if the result is reachable.
8488       const AAFunctionReachability &EdgeReachability =
8489           A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Edge),
8490                                              DepClassTy::NONE);
8491 
8492       if (EdgeReachability.canReach(A, Fn))
8493         return true;
8494     }
8495     for (Function *Fn : Edges)
8496       A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Fn),
8497                                          DepClassTy::REQUIRED);
8498 
8499     return false;
8500   }
8501 
8502   /// Set of functions that we know for sure is reachable.
8503   SmallPtrSet<Function *, 8> ReachableQueries;
8504 
8505   /// Set of functions that are unreachable, but might become reachable.
8506   SmallPtrSet<Function *, 8> UnreachableQueries;
8507 
8508   /// If we can reach a function with a call to a unknown function we assume
8509   /// that we can reach any function.
8510   bool CanReachUnknownCallee = false;
8511 };
8512 
8513 } // namespace
8514 
8515 AACallGraphNode *AACallEdgeIterator::operator*() const {
8516   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
8517       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
8518 }
8519 
8520 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
8521 
8522 const char AAReturnedValues::ID = 0;
8523 const char AANoUnwind::ID = 0;
8524 const char AANoSync::ID = 0;
8525 const char AANoFree::ID = 0;
8526 const char AANonNull::ID = 0;
8527 const char AANoRecurse::ID = 0;
8528 const char AAWillReturn::ID = 0;
8529 const char AAUndefinedBehavior::ID = 0;
8530 const char AANoAlias::ID = 0;
8531 const char AAReachability::ID = 0;
8532 const char AANoReturn::ID = 0;
8533 const char AAIsDead::ID = 0;
8534 const char AADereferenceable::ID = 0;
8535 const char AAAlign::ID = 0;
8536 const char AANoCapture::ID = 0;
8537 const char AAValueSimplify::ID = 0;
8538 const char AAHeapToStack::ID = 0;
8539 const char AAPrivatizablePtr::ID = 0;
8540 const char AAMemoryBehavior::ID = 0;
8541 const char AAMemoryLocation::ID = 0;
8542 const char AAValueConstantRange::ID = 0;
8543 const char AAPotentialValues::ID = 0;
8544 const char AANoUndef::ID = 0;
8545 const char AACallEdges::ID = 0;
8546 const char AAFunctionReachability::ID = 0;
8547 
8548 // Macro magic to create the static generator function for attributes that
8549 // follow the naming scheme.
8550 
8551 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8552   case IRPosition::PK:                                                         \
8553     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8554 
8555 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8556   case IRPosition::PK:                                                         \
8557     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8558     ++NumAAs;                                                                  \
8559     break;
8560 
8561 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8562   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8563     CLASS *AA = nullptr;                                                       \
8564     switch (IRP.getPositionKind()) {                                           \
8565       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8566       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8567       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8568       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8569       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8570       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8571       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8572       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8573     }                                                                          \
8574     return *AA;                                                                \
8575   }
8576 
8577 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8578   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8579     CLASS *AA = nullptr;                                                       \
8580     switch (IRP.getPositionKind()) {                                           \
8581       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8582       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8583       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8584       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8585       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8586       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8587       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8588       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8589     }                                                                          \
8590     return *AA;                                                                \
8591   }
8592 
8593 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8594   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8595     CLASS *AA = nullptr;                                                       \
8596     switch (IRP.getPositionKind()) {                                           \
8597       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8598       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8599       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8600       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8601       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8602       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8603       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8604       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8605     }                                                                          \
8606     return *AA;                                                                \
8607   }
8608 
8609 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8610   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8611     CLASS *AA = nullptr;                                                       \
8612     switch (IRP.getPositionKind()) {                                           \
8613       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8614       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8615       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8616       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8617       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8618       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8619       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8620       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8621     }                                                                          \
8622     return *AA;                                                                \
8623   }
8624 
8625 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8626   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8627     CLASS *AA = nullptr;                                                       \
8628     switch (IRP.getPositionKind()) {                                           \
8629       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8630       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8631       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8632       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8633       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8634       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8635       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8636       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8637     }                                                                          \
8638     return *AA;                                                                \
8639   }
8640 
8641 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8642 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8643 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8644 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8645 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8646 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8647 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8648 
8649 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8650 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8651 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8652 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8653 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8654 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8655 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8656 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8657 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8658 
8659 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8660 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8661 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8662 
8663 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8664 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8665 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8666 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
8667 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
8668 
8669 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8670 
8671 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8672 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8673 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8674 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8675 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8676 #undef SWITCH_PK_CREATE
8677 #undef SWITCH_PK_INV
8678