1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/FileSystem.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include <cassert>
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "attributor"
42 
43 static cl::opt<bool> ManifestInternal(
44     "attributor-manifest-internal", cl::Hidden,
45     cl::desc("Manifest Attributor internal string attributes."),
46     cl::init(false));
47 
48 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
49                                        cl::Hidden);
50 
51 template <>
52 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
53 
54 static cl::opt<unsigned, true> MaxPotentialValues(
55     "attributor-max-potential-values", cl::Hidden,
56     cl::desc("Maximum number of potential values to be "
57              "tracked for each position."),
58     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
59     cl::init(7));
60 
61 STATISTIC(NumAAs, "Number of abstract attributes created");
62 
63 // Some helper macros to deal with statistics tracking.
64 //
65 // Usage:
66 // For simple IR attribute tracking overload trackStatistics in the abstract
67 // attribute and choose the right STATS_DECLTRACK_********* macro,
68 // e.g.,:
69 //  void trackStatistics() const override {
70 //    STATS_DECLTRACK_ARG_ATTR(returned)
71 //  }
72 // If there is a single "increment" side one can use the macro
73 // STATS_DECLTRACK with a custom message. If there are multiple increment
74 // sides, STATS_DECL and STATS_TRACK can also be used separately.
75 //
76 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
77   ("Number of " #TYPE " marked '" #NAME "'")
78 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
79 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
80 #define STATS_DECL(NAME, TYPE, MSG)                                            \
81   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
82 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
83 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
84   {                                                                            \
85     STATS_DECL(NAME, TYPE, MSG)                                                \
86     STATS_TRACK(NAME, TYPE)                                                    \
87   }
88 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
89   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
90 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
91   STATS_DECLTRACK(NAME, CSArguments,                                           \
92                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
93 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
94   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
95 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
96   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
97 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
99                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
100 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
101   STATS_DECLTRACK(NAME, CSReturn,                                              \
102                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
103 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
104   STATS_DECLTRACK(NAME, Floating,                                              \
105                   ("Number of floating values known to be '" #NAME "'"))
106 
107 // Specialization of the operator<< for abstract attributes subclasses. This
108 // disambiguates situations where multiple operators are applicable.
109 namespace llvm {
110 #define PIPE_OPERATOR(CLASS)                                                   \
111   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
112     return OS << static_cast<const AbstractAttribute &>(AA);                   \
113   }
114 
115 PIPE_OPERATOR(AAIsDead)
116 PIPE_OPERATOR(AANoUnwind)
117 PIPE_OPERATOR(AANoSync)
118 PIPE_OPERATOR(AANoRecurse)
119 PIPE_OPERATOR(AAWillReturn)
120 PIPE_OPERATOR(AANoReturn)
121 PIPE_OPERATOR(AAReturnedValues)
122 PIPE_OPERATOR(AANonNull)
123 PIPE_OPERATOR(AANoAlias)
124 PIPE_OPERATOR(AADereferenceable)
125 PIPE_OPERATOR(AAAlign)
126 PIPE_OPERATOR(AANoCapture)
127 PIPE_OPERATOR(AAValueSimplify)
128 PIPE_OPERATOR(AANoFree)
129 PIPE_OPERATOR(AAHeapToStack)
130 PIPE_OPERATOR(AAReachability)
131 PIPE_OPERATOR(AAMemoryBehavior)
132 PIPE_OPERATOR(AAMemoryLocation)
133 PIPE_OPERATOR(AAValueConstantRange)
134 PIPE_OPERATOR(AAPrivatizablePtr)
135 PIPE_OPERATOR(AAUndefinedBehavior)
136 PIPE_OPERATOR(AAPotentialValues)
137 PIPE_OPERATOR(AANoUndef)
138 PIPE_OPERATOR(AACallEdges)
139 PIPE_OPERATOR(AAFunctionReachability)
140 
141 #undef PIPE_OPERATOR
142 } // namespace llvm
143 
144 namespace {
145 
146 /// Get pointer operand of memory accessing instruction. If \p I is
147 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
148 /// is set to false and the instruction is volatile, return nullptr.
149 static const Value *getPointerOperand(const Instruction *I,
150                                       bool AllowVolatile) {
151   if (!AllowVolatile && I->isVolatile())
152     return nullptr;
153 
154   if (auto *LI = dyn_cast<LoadInst>(I)) {
155     return LI->getPointerOperand();
156   }
157 
158   if (auto *SI = dyn_cast<StoreInst>(I)) {
159     return SI->getPointerOperand();
160   }
161 
162   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
163     return CXI->getPointerOperand();
164   }
165 
166   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
167     return RMWI->getPointerOperand();
168   }
169 
170   return nullptr;
171 }
172 
173 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
174 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
175 /// getelement pointer instructions that traverse the natural type of \p Ptr if
176 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
177 /// through a cast to i8*.
178 ///
179 /// TODO: This could probably live somewhere more prominantly if it doesn't
180 ///       already exist.
181 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
182                                int64_t Offset, IRBuilder<NoFolder> &IRB,
183                                const DataLayout &DL) {
184   assert(Offset >= 0 && "Negative offset not supported yet!");
185   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
186                     << "-bytes as " << *ResTy << "\n");
187 
188   if (Offset) {
189     SmallVector<Value *, 4> Indices;
190     std::string GEPName = Ptr->getName().str() + ".0";
191 
192     // Add 0 index to look through the pointer.
193     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
194            "Offset out of bounds");
195     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
196 
197     Type *Ty = PtrElemTy;
198     do {
199       auto *STy = dyn_cast<StructType>(Ty);
200       if (!STy)
201         // Non-aggregate type, we cast and make byte-wise progress now.
202         break;
203 
204       const StructLayout *SL = DL.getStructLayout(STy);
205       if (int64_t(SL->getSizeInBytes()) < Offset)
206         break;
207 
208       uint64_t Idx = SL->getElementContainingOffset(Offset);
209       assert(Idx < STy->getNumElements() && "Offset calculation error!");
210       uint64_t Rem = Offset - SL->getElementOffset(Idx);
211       Ty = STy->getElementType(Idx);
212 
213       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
214                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
215 
216       GEPName += "." + std::to_string(Idx);
217       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
218       Offset = Rem;
219     } while (Offset);
220 
221     // Create a GEP for the indices collected above.
222     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
223 
224     // If an offset is left we use byte-wise adjustment.
225     if (Offset) {
226       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
227       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
228                           GEPName + ".b" + Twine(Offset));
229     }
230   }
231 
232   // Ensure the result has the requested type.
233   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
234 
235   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
236   return Ptr;
237 }
238 
239 /// Recursively visit all values that might become \p IRP at some point. This
240 /// will be done by looking through cast instructions, selects, phis, and calls
241 /// with the "returned" attribute. Once we cannot look through the value any
242 /// further, the callback \p VisitValueCB is invoked and passed the current
243 /// value, the \p State, and a flag to indicate if we stripped anything.
244 /// Stripped means that we unpacked the value associated with \p IRP at least
245 /// once. Note that the value used for the callback may still be the value
246 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
247 /// we will never visit more values than specified by \p MaxValues.
248 template <typename AAType, typename StateTy>
249 static bool genericValueTraversal(
250     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
251     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
252         VisitValueCB,
253     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
254     function_ref<Value *(Value *)> StripCB = nullptr) {
255 
256   const AAIsDead *LivenessAA = nullptr;
257   if (IRP.getAnchorScope())
258     LivenessAA = &A.getAAFor<AAIsDead>(
259         QueryingAA,
260         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
261         DepClassTy::NONE);
262   bool AnyDead = false;
263 
264   using Item = std::pair<Value *, const Instruction *>;
265   SmallSet<Item, 16> Visited;
266   SmallVector<Item, 16> Worklist;
267   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
268 
269   int Iteration = 0;
270   do {
271     Item I = Worklist.pop_back_val();
272     Value *V = I.first;
273     CtxI = I.second;
274     if (StripCB)
275       V = StripCB(V);
276 
277     // Check if we should process the current value. To prevent endless
278     // recursion keep a record of the values we followed!
279     if (!Visited.insert(I).second)
280       continue;
281 
282     // Make sure we limit the compile time for complex expressions.
283     if (Iteration++ >= MaxValues)
284       return false;
285 
286     // Explicitly look through calls with a "returned" attribute if we do
287     // not have a pointer as stripPointerCasts only works on them.
288     Value *NewV = nullptr;
289     if (V->getType()->isPointerTy()) {
290       NewV = V->stripPointerCasts();
291     } else {
292       auto *CB = dyn_cast<CallBase>(V);
293       if (CB && CB->getCalledFunction()) {
294         for (Argument &Arg : CB->getCalledFunction()->args())
295           if (Arg.hasReturnedAttr()) {
296             NewV = CB->getArgOperand(Arg.getArgNo());
297             break;
298           }
299       }
300     }
301     if (NewV && NewV != V) {
302       Worklist.push_back({NewV, CtxI});
303       continue;
304     }
305 
306     // Look through select instructions, visit both potential values.
307     if (auto *SI = dyn_cast<SelectInst>(V)) {
308       Worklist.push_back({SI->getTrueValue(), CtxI});
309       Worklist.push_back({SI->getFalseValue(), CtxI});
310       continue;
311     }
312 
313     // Look through phi nodes, visit all live operands.
314     if (auto *PHI = dyn_cast<PHINode>(V)) {
315       assert(LivenessAA &&
316              "Expected liveness in the presence of instructions!");
317       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
318         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
319         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
320                             LivenessAA,
321                             /* CheckBBLivenessOnly */ true)) {
322           AnyDead = true;
323           continue;
324         }
325         Worklist.push_back(
326             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
327       }
328       continue;
329     }
330 
331     if (UseValueSimplify && !isa<Constant>(V)) {
332       bool UsedAssumedInformation = false;
333       Optional<Constant *> C =
334           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
335       if (!C.hasValue())
336         continue;
337       if (Value *NewV = C.getValue()) {
338         Worklist.push_back({NewV, CtxI});
339         continue;
340       }
341     }
342 
343     // Once a leaf is reached we inform the user through the callback.
344     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
345       return false;
346   } while (!Worklist.empty());
347 
348   // If we actually used liveness information so we have to record a dependence.
349   if (AnyDead)
350     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
351 
352   // All values have been visited.
353   return true;
354 }
355 
356 const Value *stripAndAccumulateMinimalOffsets(
357     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
358     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
359     bool UseAssumed = false) {
360 
361   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
362     const IRPosition &Pos = IRPosition::value(V);
363     // Only track dependence if we are going to use the assumed info.
364     const AAValueConstantRange &ValueConstantRangeAA =
365         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
366                                          UseAssumed ? DepClassTy::OPTIONAL
367                                                     : DepClassTy::NONE);
368     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
369                                      : ValueConstantRangeAA.getKnown();
370     // We can only use the lower part of the range because the upper part can
371     // be higher than what the value can really be.
372     ROffset = Range.getSignedMin();
373     return true;
374   };
375 
376   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
377                                                 AttributorAnalysis);
378 }
379 
380 static const Value *getMinimalBaseOfAccsesPointerOperand(
381     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
382     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
383   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
384   if (!Ptr)
385     return nullptr;
386   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
387   const Value *Base = stripAndAccumulateMinimalOffsets(
388       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
389 
390   BytesOffset = OffsetAPInt.getSExtValue();
391   return Base;
392 }
393 
394 static const Value *
395 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
396                                      const DataLayout &DL,
397                                      bool AllowNonInbounds = false) {
398   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
399   if (!Ptr)
400     return nullptr;
401 
402   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
403                                           AllowNonInbounds);
404 }
405 
406 /// Helper function to clamp a state \p S of type \p StateType with the
407 /// information in \p R and indicate/return if \p S did change (as-in update is
408 /// required to be run again).
409 template <typename StateType>
410 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
411   auto Assumed = S.getAssumed();
412   S ^= R;
413   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
414                                    : ChangeStatus::CHANGED;
415 }
416 
417 /// Clamp the information known for all returned values of a function
418 /// (identified by \p QueryingAA) into \p S.
419 template <typename AAType, typename StateType = typename AAType::StateType>
420 static void clampReturnedValueStates(
421     Attributor &A, const AAType &QueryingAA, StateType &S,
422     const IRPosition::CallBaseContext *CBContext = nullptr) {
423   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
424                     << QueryingAA << " into " << S << "\n");
425 
426   assert((QueryingAA.getIRPosition().getPositionKind() ==
427               IRPosition::IRP_RETURNED ||
428           QueryingAA.getIRPosition().getPositionKind() ==
429               IRPosition::IRP_CALL_SITE_RETURNED) &&
430          "Can only clamp returned value states for a function returned or call "
431          "site returned position!");
432 
433   // Use an optional state as there might not be any return values and we want
434   // to join (IntegerState::operator&) the state of all there are.
435   Optional<StateType> T;
436 
437   // Callback for each possibly returned value.
438   auto CheckReturnValue = [&](Value &RV) -> bool {
439     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
440     const AAType &AA =
441         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
442     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
443                       << " @ " << RVPos << "\n");
444     const StateType &AAS = AA.getState();
445     if (T.hasValue())
446       *T &= AAS;
447     else
448       T = AAS;
449     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
450                       << "\n");
451     return T->isValidState();
452   };
453 
454   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
455     S.indicatePessimisticFixpoint();
456   else if (T.hasValue())
457     S ^= *T;
458 }
459 
460 /// Helper class for generic deduction: return value -> returned position.
461 template <typename AAType, typename BaseType,
462           typename StateType = typename BaseType::StateType,
463           bool PropagateCallBaseContext = false>
464 struct AAReturnedFromReturnedValues : public BaseType {
465   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
466       : BaseType(IRP, A) {}
467 
468   /// See AbstractAttribute::updateImpl(...).
469   ChangeStatus updateImpl(Attributor &A) override {
470     StateType S(StateType::getBestState(this->getState()));
471     clampReturnedValueStates<AAType, StateType>(
472         A, *this, S,
473         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
474     // TODO: If we know we visited all returned values, thus no are assumed
475     // dead, we can take the known information from the state T.
476     return clampStateAndIndicateChange<StateType>(this->getState(), S);
477   }
478 };
479 
480 /// Clamp the information known at all call sites for a given argument
481 /// (identified by \p QueryingAA) into \p S.
482 template <typename AAType, typename StateType = typename AAType::StateType>
483 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
484                                         StateType &S) {
485   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
486                     << QueryingAA << " into " << S << "\n");
487 
488   assert(QueryingAA.getIRPosition().getPositionKind() ==
489              IRPosition::IRP_ARGUMENT &&
490          "Can only clamp call site argument states for an argument position!");
491 
492   // Use an optional state as there might not be any return values and we want
493   // to join (IntegerState::operator&) the state of all there are.
494   Optional<StateType> T;
495 
496   // The argument number which is also the call site argument number.
497   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
498 
499   auto CallSiteCheck = [&](AbstractCallSite ACS) {
500     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
501     // Check if a coresponding argument was found or if it is on not associated
502     // (which can happen for callback calls).
503     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
504       return false;
505 
506     const AAType &AA =
507         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
508     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
509                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
510     const StateType &AAS = AA.getState();
511     if (T.hasValue())
512       *T &= AAS;
513     else
514       T = AAS;
515     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
516                       << "\n");
517     return T->isValidState();
518   };
519 
520   bool AllCallSitesKnown;
521   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
522                               AllCallSitesKnown))
523     S.indicatePessimisticFixpoint();
524   else if (T.hasValue())
525     S ^= *T;
526 }
527 
528 /// This function is the bridge between argument position and the call base
529 /// context.
530 template <typename AAType, typename BaseType,
531           typename StateType = typename AAType::StateType>
532 bool getArgumentStateFromCallBaseContext(Attributor &A,
533                                          BaseType &QueryingAttribute,
534                                          IRPosition &Pos, StateType &State) {
535   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
536          "Expected an 'argument' position !");
537   const CallBase *CBContext = Pos.getCallBaseContext();
538   if (!CBContext)
539     return false;
540 
541   int ArgNo = Pos.getCallSiteArgNo();
542   assert(ArgNo >= 0 && "Invalid Arg No!");
543 
544   const auto &AA = A.getAAFor<AAType>(
545       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
546       DepClassTy::REQUIRED);
547   const StateType &CBArgumentState =
548       static_cast<const StateType &>(AA.getState());
549 
550   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
551                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
552                     << "\n");
553 
554   // NOTE: If we want to do call site grouping it should happen here.
555   State ^= CBArgumentState;
556   return true;
557 }
558 
559 /// Helper class for generic deduction: call site argument -> argument position.
560 template <typename AAType, typename BaseType,
561           typename StateType = typename AAType::StateType,
562           bool BridgeCallBaseContext = false>
563 struct AAArgumentFromCallSiteArguments : public BaseType {
564   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
565       : BaseType(IRP, A) {}
566 
567   /// See AbstractAttribute::updateImpl(...).
568   ChangeStatus updateImpl(Attributor &A) override {
569     StateType S = StateType::getBestState(this->getState());
570 
571     if (BridgeCallBaseContext) {
572       bool Success =
573           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
574               A, *this, this->getIRPosition(), S);
575       if (Success)
576         return clampStateAndIndicateChange<StateType>(this->getState(), S);
577     }
578     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
579 
580     // TODO: If we know we visited all incoming values, thus no are assumed
581     // dead, we can take the known information from the state T.
582     return clampStateAndIndicateChange<StateType>(this->getState(), S);
583   }
584 };
585 
586 /// Helper class for generic replication: function returned -> cs returned.
587 template <typename AAType, typename BaseType,
588           typename StateType = typename BaseType::StateType,
589           bool IntroduceCallBaseContext = false>
590 struct AACallSiteReturnedFromReturned : public BaseType {
591   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
592       : BaseType(IRP, A) {}
593 
594   /// See AbstractAttribute::updateImpl(...).
595   ChangeStatus updateImpl(Attributor &A) override {
596     assert(this->getIRPosition().getPositionKind() ==
597                IRPosition::IRP_CALL_SITE_RETURNED &&
598            "Can only wrap function returned positions for call site returned "
599            "positions!");
600     auto &S = this->getState();
601 
602     const Function *AssociatedFunction =
603         this->getIRPosition().getAssociatedFunction();
604     if (!AssociatedFunction)
605       return S.indicatePessimisticFixpoint();
606 
607     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
608     if (IntroduceCallBaseContext)
609       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
610                         << CBContext << "\n");
611 
612     IRPosition FnPos = IRPosition::returned(
613         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
614     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
615     return clampStateAndIndicateChange(S, AA.getState());
616   }
617 };
618 
619 /// Helper function to accumulate uses.
620 template <class AAType, typename StateType = typename AAType::StateType>
621 static void followUsesInContext(AAType &AA, Attributor &A,
622                                 MustBeExecutedContextExplorer &Explorer,
623                                 const Instruction *CtxI,
624                                 SetVector<const Use *> &Uses,
625                                 StateType &State) {
626   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
627   for (unsigned u = 0; u < Uses.size(); ++u) {
628     const Use *U = Uses[u];
629     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
630       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
631       if (Found && AA.followUseInMBEC(A, U, UserI, State))
632         for (const Use &Us : UserI->uses())
633           Uses.insert(&Us);
634     }
635   }
636 }
637 
638 /// Use the must-be-executed-context around \p I to add information into \p S.
639 /// The AAType class is required to have `followUseInMBEC` method with the
640 /// following signature and behaviour:
641 ///
642 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
643 /// U - Underlying use.
644 /// I - The user of the \p U.
645 /// Returns true if the value should be tracked transitively.
646 ///
647 template <class AAType, typename StateType = typename AAType::StateType>
648 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
649                              Instruction &CtxI) {
650 
651   // Container for (transitive) uses of the associated value.
652   SetVector<const Use *> Uses;
653   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
654     Uses.insert(&U);
655 
656   MustBeExecutedContextExplorer &Explorer =
657       A.getInfoCache().getMustBeExecutedContextExplorer();
658 
659   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
660 
661   if (S.isAtFixpoint())
662     return;
663 
664   SmallVector<const BranchInst *, 4> BrInsts;
665   auto Pred = [&](const Instruction *I) {
666     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
667       if (Br->isConditional())
668         BrInsts.push_back(Br);
669     return true;
670   };
671 
672   // Here, accumulate conditional branch instructions in the context. We
673   // explore the child paths and collect the known states. The disjunction of
674   // those states can be merged to its own state. Let ParentState_i be a state
675   // to indicate the known information for an i-th branch instruction in the
676   // context. ChildStates are created for its successors respectively.
677   //
678   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
679   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
680   //      ...
681   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
682   //
683   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
684   //
685   // FIXME: Currently, recursive branches are not handled. For example, we
686   // can't deduce that ptr must be dereferenced in below function.
687   //
688   // void f(int a, int c, int *ptr) {
689   //    if(a)
690   //      if (b) {
691   //        *ptr = 0;
692   //      } else {
693   //        *ptr = 1;
694   //      }
695   //    else {
696   //      if (b) {
697   //        *ptr = 0;
698   //      } else {
699   //        *ptr = 1;
700   //      }
701   //    }
702   // }
703 
704   Explorer.checkForAllContext(&CtxI, Pred);
705   for (const BranchInst *Br : BrInsts) {
706     StateType ParentState;
707 
708     // The known state of the parent state is a conjunction of children's
709     // known states so it is initialized with a best state.
710     ParentState.indicateOptimisticFixpoint();
711 
712     for (const BasicBlock *BB : Br->successors()) {
713       StateType ChildState;
714 
715       size_t BeforeSize = Uses.size();
716       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
717 
718       // Erase uses which only appear in the child.
719       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
720         It = Uses.erase(It);
721 
722       ParentState &= ChildState;
723     }
724 
725     // Use only known state.
726     S += ParentState;
727   }
728 }
729 
730 /// -----------------------NoUnwind Function Attribute--------------------------
731 
732 struct AANoUnwindImpl : AANoUnwind {
733   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
734 
735   const std::string getAsStr() const override {
736     return getAssumed() ? "nounwind" : "may-unwind";
737   }
738 
739   /// See AbstractAttribute::updateImpl(...).
740   ChangeStatus updateImpl(Attributor &A) override {
741     auto Opcodes = {
742         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
743         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
744         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
745 
746     auto CheckForNoUnwind = [&](Instruction &I) {
747       if (!I.mayThrow())
748         return true;
749 
750       if (const auto *CB = dyn_cast<CallBase>(&I)) {
751         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
752             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
753         return NoUnwindAA.isAssumedNoUnwind();
754       }
755       return false;
756     };
757 
758     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
759       return indicatePessimisticFixpoint();
760 
761     return ChangeStatus::UNCHANGED;
762   }
763 };
764 
765 struct AANoUnwindFunction final : public AANoUnwindImpl {
766   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
767       : AANoUnwindImpl(IRP, A) {}
768 
769   /// See AbstractAttribute::trackStatistics()
770   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
771 };
772 
773 /// NoUnwind attribute deduction for a call sites.
774 struct AANoUnwindCallSite final : AANoUnwindImpl {
775   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
776       : AANoUnwindImpl(IRP, A) {}
777 
778   /// See AbstractAttribute::initialize(...).
779   void initialize(Attributor &A) override {
780     AANoUnwindImpl::initialize(A);
781     Function *F = getAssociatedFunction();
782     if (!F || F->isDeclaration())
783       indicatePessimisticFixpoint();
784   }
785 
786   /// See AbstractAttribute::updateImpl(...).
787   ChangeStatus updateImpl(Attributor &A) override {
788     // TODO: Once we have call site specific value information we can provide
789     //       call site specific liveness information and then it makes
790     //       sense to specialize attributes for call sites arguments instead of
791     //       redirecting requests to the callee argument.
792     Function *F = getAssociatedFunction();
793     const IRPosition &FnPos = IRPosition::function(*F);
794     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
795     return clampStateAndIndicateChange(getState(), FnAA.getState());
796   }
797 
798   /// See AbstractAttribute::trackStatistics()
799   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
800 };
801 
802 /// --------------------- Function Return Values -------------------------------
803 
804 /// "Attribute" that collects all potential returned values and the return
805 /// instructions that they arise from.
806 ///
807 /// If there is a unique returned value R, the manifest method will:
808 ///   - mark R with the "returned" attribute, if R is an argument.
809 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
810 
811   /// Mapping of values potentially returned by the associated function to the
812   /// return instructions that might return them.
813   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
814 
815   /// Mapping to remember the number of returned values for a call site such
816   /// that we can avoid updates if nothing changed.
817   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
818 
819   /// Set of unresolved calls returned by the associated function.
820   SmallSetVector<CallBase *, 4> UnresolvedCalls;
821 
822   /// State flags
823   ///
824   ///{
825   bool IsFixed = false;
826   bool IsValidState = true;
827   ///}
828 
829 public:
830   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
831       : AAReturnedValues(IRP, A) {}
832 
833   /// See AbstractAttribute::initialize(...).
834   void initialize(Attributor &A) override {
835     // Reset the state.
836     IsFixed = false;
837     IsValidState = true;
838     ReturnedValues.clear();
839 
840     Function *F = getAssociatedFunction();
841     if (!F || F->isDeclaration()) {
842       indicatePessimisticFixpoint();
843       return;
844     }
845     assert(!F->getReturnType()->isVoidTy() &&
846            "Did not expect a void return type!");
847 
848     // The map from instruction opcodes to those instructions in the function.
849     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
850 
851     // Look through all arguments, if one is marked as returned we are done.
852     for (Argument &Arg : F->args()) {
853       if (Arg.hasReturnedAttr()) {
854         auto &ReturnInstSet = ReturnedValues[&Arg];
855         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
856           for (Instruction *RI : *Insts)
857             ReturnInstSet.insert(cast<ReturnInst>(RI));
858 
859         indicateOptimisticFixpoint();
860         return;
861       }
862     }
863 
864     if (!A.isFunctionIPOAmendable(*F))
865       indicatePessimisticFixpoint();
866   }
867 
868   /// See AbstractAttribute::manifest(...).
869   ChangeStatus manifest(Attributor &A) override;
870 
871   /// See AbstractAttribute::getState(...).
872   AbstractState &getState() override { return *this; }
873 
874   /// See AbstractAttribute::getState(...).
875   const AbstractState &getState() const override { return *this; }
876 
877   /// See AbstractAttribute::updateImpl(Attributor &A).
878   ChangeStatus updateImpl(Attributor &A) override;
879 
880   llvm::iterator_range<iterator> returned_values() override {
881     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
882   }
883 
884   llvm::iterator_range<const_iterator> returned_values() const override {
885     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
886   }
887 
888   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
889     return UnresolvedCalls;
890   }
891 
892   /// Return the number of potential return values, -1 if unknown.
893   size_t getNumReturnValues() const override {
894     return isValidState() ? ReturnedValues.size() : -1;
895   }
896 
897   /// Return an assumed unique return value if a single candidate is found. If
898   /// there cannot be one, return a nullptr. If it is not clear yet, return the
899   /// Optional::NoneType.
900   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
901 
902   /// See AbstractState::checkForAllReturnedValues(...).
903   bool checkForAllReturnedValuesAndReturnInsts(
904       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
905       const override;
906 
907   /// Pretty print the attribute similar to the IR representation.
908   const std::string getAsStr() const override;
909 
910   /// See AbstractState::isAtFixpoint().
911   bool isAtFixpoint() const override { return IsFixed; }
912 
913   /// See AbstractState::isValidState().
914   bool isValidState() const override { return IsValidState; }
915 
916   /// See AbstractState::indicateOptimisticFixpoint(...).
917   ChangeStatus indicateOptimisticFixpoint() override {
918     IsFixed = true;
919     return ChangeStatus::UNCHANGED;
920   }
921 
922   ChangeStatus indicatePessimisticFixpoint() override {
923     IsFixed = true;
924     IsValidState = false;
925     return ChangeStatus::CHANGED;
926   }
927 };
928 
929 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
930   ChangeStatus Changed = ChangeStatus::UNCHANGED;
931 
932   // Bookkeeping.
933   assert(isValidState());
934   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
935                   "Number of function with known return values");
936 
937   // Check if we have an assumed unique return value that we could manifest.
938   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
939 
940   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
941     return Changed;
942 
943   // Bookkeeping.
944   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
945                   "Number of function with unique return");
946 
947   // Callback to replace the uses of CB with the constant C.
948   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
949     if (CB.use_empty())
950       return ChangeStatus::UNCHANGED;
951     if (A.changeValueAfterManifest(CB, C))
952       return ChangeStatus::CHANGED;
953     return ChangeStatus::UNCHANGED;
954   };
955 
956   // If the assumed unique return value is an argument, annotate it.
957   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
958     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
959             getAssociatedFunction()->getReturnType())) {
960       getIRPosition() = IRPosition::argument(*UniqueRVArg);
961       Changed = IRAttribute::manifest(A);
962     }
963   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
964     // We can replace the returned value with the unique returned constant.
965     Value &AnchorValue = getAnchorValue();
966     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
967       for (const Use &U : F->uses())
968         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
969           if (CB->isCallee(&U)) {
970             Constant *RVCCast =
971                 CB->getType() == RVC->getType()
972                     ? RVC
973                     : ConstantExpr::getPointerCast(RVC, CB->getType());
974             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
975           }
976     } else {
977       assert(isa<CallBase>(AnchorValue) &&
978              "Expcected a function or call base anchor!");
979       Constant *RVCCast =
980           AnchorValue.getType() == RVC->getType()
981               ? RVC
982               : ConstantExpr::getPointerCast(RVC, AnchorValue.getType());
983       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
984     }
985     if (Changed == ChangeStatus::CHANGED)
986       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
987                       "Number of function returns replaced by constant return");
988   }
989 
990   return Changed;
991 }
992 
993 const std::string AAReturnedValuesImpl::getAsStr() const {
994   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
995          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
996          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
997 }
998 
999 Optional<Value *>
1000 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1001   // If checkForAllReturnedValues provides a unique value, ignoring potential
1002   // undef values that can also be present, it is assumed to be the actual
1003   // return value and forwarded to the caller of this method. If there are
1004   // multiple, a nullptr is returned indicating there cannot be a unique
1005   // returned value.
1006   Optional<Value *> UniqueRV;
1007 
1008   auto Pred = [&](Value &RV) -> bool {
1009     // If we found a second returned value and neither the current nor the saved
1010     // one is an undef, there is no unique returned value. Undefs are special
1011     // since we can pretend they have any value.
1012     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1013         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1014       UniqueRV = nullptr;
1015       return false;
1016     }
1017 
1018     // Do not overwrite a value with an undef.
1019     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1020       UniqueRV = &RV;
1021 
1022     return true;
1023   };
1024 
1025   if (!A.checkForAllReturnedValues(Pred, *this))
1026     UniqueRV = nullptr;
1027 
1028   return UniqueRV;
1029 }
1030 
1031 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1032     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1033     const {
1034   if (!isValidState())
1035     return false;
1036 
1037   // Check all returned values but ignore call sites as long as we have not
1038   // encountered an overdefined one during an update.
1039   for (auto &It : ReturnedValues) {
1040     Value *RV = It.first;
1041 
1042     CallBase *CB = dyn_cast<CallBase>(RV);
1043     if (CB && !UnresolvedCalls.count(CB))
1044       continue;
1045 
1046     if (!Pred(*RV, It.second))
1047       return false;
1048   }
1049 
1050   return true;
1051 }
1052 
1053 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1054   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1055   bool Changed = false;
1056 
1057   // State used in the value traversals starting in returned values.
1058   struct RVState {
1059     // The map in which we collect return values -> return instrs.
1060     decltype(ReturnedValues) &RetValsMap;
1061     // The flag to indicate a change.
1062     bool &Changed;
1063     // The return instrs we come from.
1064     SmallSetVector<ReturnInst *, 4> RetInsts;
1065   };
1066 
1067   // Callback for a leaf value returned by the associated function.
1068   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1069                          bool) -> bool {
1070     auto Size = RVS.RetValsMap[&Val].size();
1071     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1072     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1073     RVS.Changed |= Inserted;
1074     LLVM_DEBUG({
1075       if (Inserted)
1076         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1077                << " => " << RVS.RetInsts.size() << "\n";
1078     });
1079     return true;
1080   };
1081 
1082   // Helper method to invoke the generic value traversal.
1083   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1084                                 const Instruction *CtxI) {
1085     IRPosition RetValPos = IRPosition::value(RV, getCallBaseContext());
1086     return genericValueTraversal<AAReturnedValues, RVState>(
1087         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1088         /* UseValueSimplify */ false);
1089   };
1090 
1091   // Callback for all "return intructions" live in the associated function.
1092   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1093     ReturnInst &Ret = cast<ReturnInst>(I);
1094     RVState RVS({ReturnedValues, Changed, {}});
1095     RVS.RetInsts.insert(&Ret);
1096     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1097   };
1098 
1099   // Start by discovering returned values from all live returned instructions in
1100   // the associated function.
1101   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1102     return indicatePessimisticFixpoint();
1103 
1104   // Once returned values "directly" present in the code are handled we try to
1105   // resolve returned calls. To avoid modifications to the ReturnedValues map
1106   // while we iterate over it we kept record of potential new entries in a copy
1107   // map, NewRVsMap.
1108   decltype(ReturnedValues) NewRVsMap;
1109 
1110   auto HandleReturnValue = [&](Value *RV,
1111                                SmallSetVector<ReturnInst *, 4> &RIs) {
1112     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1113                       << RIs.size() << " RIs\n");
1114     CallBase *CB = dyn_cast<CallBase>(RV);
1115     if (!CB || UnresolvedCalls.count(CB))
1116       return;
1117 
1118     if (!CB->getCalledFunction()) {
1119       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1120                         << "\n");
1121       UnresolvedCalls.insert(CB);
1122       return;
1123     }
1124 
1125     // TODO: use the function scope once we have call site AAReturnedValues.
1126     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1127         *this, IRPosition::function(*CB->getCalledFunction()),
1128         DepClassTy::REQUIRED);
1129     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1130                       << RetValAA << "\n");
1131 
1132     // Skip dead ends, thus if we do not know anything about the returned
1133     // call we mark it as unresolved and it will stay that way.
1134     if (!RetValAA.getState().isValidState()) {
1135       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1136                         << "\n");
1137       UnresolvedCalls.insert(CB);
1138       return;
1139     }
1140 
1141     // Do not try to learn partial information. If the callee has unresolved
1142     // return values we will treat the call as unresolved/opaque.
1143     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1144     if (!RetValAAUnresolvedCalls.empty()) {
1145       UnresolvedCalls.insert(CB);
1146       return;
1147     }
1148 
1149     // Now check if we can track transitively returned values. If possible, thus
1150     // if all return value can be represented in the current scope, do so.
1151     bool Unresolved = false;
1152     for (auto &RetValAAIt : RetValAA.returned_values()) {
1153       Value *RetVal = RetValAAIt.first;
1154       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1155           isa<Constant>(RetVal))
1156         continue;
1157       // Anything that did not fit in the above categories cannot be resolved,
1158       // mark the call as unresolved.
1159       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1160                            "cannot be translated: "
1161                         << *RetVal << "\n");
1162       UnresolvedCalls.insert(CB);
1163       Unresolved = true;
1164       break;
1165     }
1166 
1167     if (Unresolved)
1168       return;
1169 
1170     // Now track transitively returned values.
1171     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1172     if (NumRetAA == RetValAA.getNumReturnValues()) {
1173       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1174                            "changed since it was seen last\n");
1175       return;
1176     }
1177     NumRetAA = RetValAA.getNumReturnValues();
1178 
1179     for (auto &RetValAAIt : RetValAA.returned_values()) {
1180       Value *RetVal = RetValAAIt.first;
1181       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1182         // Arguments are mapped to call site operands and we begin the traversal
1183         // again.
1184         bool Unused = false;
1185         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1186         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1187         continue;
1188       }
1189       if (isa<CallBase>(RetVal)) {
1190         // Call sites are resolved by the callee attribute over time, no need to
1191         // do anything for us.
1192         continue;
1193       }
1194       if (isa<Constant>(RetVal)) {
1195         // Constants are valid everywhere, we can simply take them.
1196         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1197         continue;
1198       }
1199     }
1200   };
1201 
1202   for (auto &It : ReturnedValues)
1203     HandleReturnValue(It.first, It.second);
1204 
1205   // Because processing the new information can again lead to new return values
1206   // we have to be careful and iterate until this iteration is complete. The
1207   // idea is that we are in a stable state at the end of an update. All return
1208   // values have been handled and properly categorized. We might not update
1209   // again if we have not requested a non-fix attribute so we cannot "wait" for
1210   // the next update to analyze a new return value.
1211   while (!NewRVsMap.empty()) {
1212     auto It = std::move(NewRVsMap.back());
1213     NewRVsMap.pop_back();
1214 
1215     assert(!It.second.empty() && "Entry does not add anything.");
1216     auto &ReturnInsts = ReturnedValues[It.first];
1217     for (ReturnInst *RI : It.second)
1218       if (ReturnInsts.insert(RI)) {
1219         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1220                           << *It.first << " => " << *RI << "\n");
1221         HandleReturnValue(It.first, ReturnInsts);
1222         Changed = true;
1223       }
1224   }
1225 
1226   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1227   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1228 }
1229 
1230 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1231   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1232       : AAReturnedValuesImpl(IRP, A) {}
1233 
1234   /// See AbstractAttribute::trackStatistics()
1235   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1236 };
1237 
1238 /// Returned values information for a call sites.
1239 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1240   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1241       : AAReturnedValuesImpl(IRP, A) {}
1242 
1243   /// See AbstractAttribute::initialize(...).
1244   void initialize(Attributor &A) override {
1245     // TODO: Once we have call site specific value information we can provide
1246     //       call site specific liveness information and then it makes
1247     //       sense to specialize attributes for call sites instead of
1248     //       redirecting requests to the callee.
1249     llvm_unreachable("Abstract attributes for returned values are not "
1250                      "supported for call sites yet!");
1251   }
1252 
1253   /// See AbstractAttribute::updateImpl(...).
1254   ChangeStatus updateImpl(Attributor &A) override {
1255     return indicatePessimisticFixpoint();
1256   }
1257 
1258   /// See AbstractAttribute::trackStatistics()
1259   void trackStatistics() const override {}
1260 };
1261 
1262 /// ------------------------ NoSync Function Attribute -------------------------
1263 
1264 struct AANoSyncImpl : AANoSync {
1265   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1266 
1267   const std::string getAsStr() const override {
1268     return getAssumed() ? "nosync" : "may-sync";
1269   }
1270 
1271   /// See AbstractAttribute::updateImpl(...).
1272   ChangeStatus updateImpl(Attributor &A) override;
1273 
1274   /// Helper function used to determine whether an instruction is non-relaxed
1275   /// atomic. In other words, if an atomic instruction does not have unordered
1276   /// or monotonic ordering
1277   static bool isNonRelaxedAtomic(Instruction *I);
1278 
1279   /// Helper function specific for intrinsics which are potentially volatile
1280   static bool isNoSyncIntrinsic(Instruction *I);
1281 };
1282 
1283 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1284   if (!I->isAtomic())
1285     return false;
1286 
1287   if (auto *FI = dyn_cast<FenceInst>(I))
1288     // All legal orderings for fence are stronger than monotonic.
1289     return FI->getSyncScopeID() != SyncScope::SingleThread;
1290   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1291     // Unordered is not a legal ordering for cmpxchg.
1292     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1293             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1294   }
1295 
1296   AtomicOrdering Ordering;
1297   switch (I->getOpcode()) {
1298   case Instruction::AtomicRMW:
1299     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1300     break;
1301   case Instruction::Store:
1302     Ordering = cast<StoreInst>(I)->getOrdering();
1303     break;
1304   case Instruction::Load:
1305     Ordering = cast<LoadInst>(I)->getOrdering();
1306     break;
1307   default:
1308     llvm_unreachable(
1309         "New atomic operations need to be known in the attributor.");
1310   }
1311 
1312   return (Ordering != AtomicOrdering::Unordered &&
1313           Ordering != AtomicOrdering::Monotonic);
1314 }
1315 
1316 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1317 /// which would be nosync except that they have a volatile flag.  All other
1318 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1319 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1320   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1321     return !MI->isVolatile();
1322   return false;
1323 }
1324 
1325 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1326 
1327   auto CheckRWInstForNoSync = [&](Instruction &I) {
1328     /// We are looking for volatile instructions or Non-Relaxed atomics.
1329 
1330     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1331       if (CB->hasFnAttr(Attribute::NoSync))
1332         return true;
1333 
1334       if (isNoSyncIntrinsic(&I))
1335         return true;
1336 
1337       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1338           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1339       return NoSyncAA.isAssumedNoSync();
1340     }
1341 
1342     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1343       return true;
1344 
1345     return false;
1346   };
1347 
1348   auto CheckForNoSync = [&](Instruction &I) {
1349     // At this point we handled all read/write effects and they are all
1350     // nosync, so they can be skipped.
1351     if (I.mayReadOrWriteMemory())
1352       return true;
1353 
1354     // non-convergent and readnone imply nosync.
1355     return !cast<CallBase>(I).isConvergent();
1356   };
1357 
1358   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1359       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1360     return indicatePessimisticFixpoint();
1361 
1362   return ChangeStatus::UNCHANGED;
1363 }
1364 
1365 struct AANoSyncFunction final : public AANoSyncImpl {
1366   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1367       : AANoSyncImpl(IRP, A) {}
1368 
1369   /// See AbstractAttribute::trackStatistics()
1370   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1371 };
1372 
1373 /// NoSync attribute deduction for a call sites.
1374 struct AANoSyncCallSite final : AANoSyncImpl {
1375   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1376       : AANoSyncImpl(IRP, A) {}
1377 
1378   /// See AbstractAttribute::initialize(...).
1379   void initialize(Attributor &A) override {
1380     AANoSyncImpl::initialize(A);
1381     Function *F = getAssociatedFunction();
1382     if (!F || F->isDeclaration())
1383       indicatePessimisticFixpoint();
1384   }
1385 
1386   /// See AbstractAttribute::updateImpl(...).
1387   ChangeStatus updateImpl(Attributor &A) override {
1388     // TODO: Once we have call site specific value information we can provide
1389     //       call site specific liveness information and then it makes
1390     //       sense to specialize attributes for call sites arguments instead of
1391     //       redirecting requests to the callee argument.
1392     Function *F = getAssociatedFunction();
1393     const IRPosition &FnPos = IRPosition::function(*F);
1394     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1395     return clampStateAndIndicateChange(getState(), FnAA.getState());
1396   }
1397 
1398   /// See AbstractAttribute::trackStatistics()
1399   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1400 };
1401 
1402 /// ------------------------ No-Free Attributes ----------------------------
1403 
1404 struct AANoFreeImpl : public AANoFree {
1405   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1406 
1407   /// See AbstractAttribute::updateImpl(...).
1408   ChangeStatus updateImpl(Attributor &A) override {
1409     auto CheckForNoFree = [&](Instruction &I) {
1410       const auto &CB = cast<CallBase>(I);
1411       if (CB.hasFnAttr(Attribute::NoFree))
1412         return true;
1413 
1414       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1415           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1416       return NoFreeAA.isAssumedNoFree();
1417     };
1418 
1419     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1420       return indicatePessimisticFixpoint();
1421     return ChangeStatus::UNCHANGED;
1422   }
1423 
1424   /// See AbstractAttribute::getAsStr().
1425   const std::string getAsStr() const override {
1426     return getAssumed() ? "nofree" : "may-free";
1427   }
1428 };
1429 
1430 struct AANoFreeFunction final : public AANoFreeImpl {
1431   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1432       : AANoFreeImpl(IRP, A) {}
1433 
1434   /// See AbstractAttribute::trackStatistics()
1435   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1436 };
1437 
1438 /// NoFree attribute deduction for a call sites.
1439 struct AANoFreeCallSite final : AANoFreeImpl {
1440   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1441       : AANoFreeImpl(IRP, A) {}
1442 
1443   /// See AbstractAttribute::initialize(...).
1444   void initialize(Attributor &A) override {
1445     AANoFreeImpl::initialize(A);
1446     Function *F = getAssociatedFunction();
1447     if (!F || F->isDeclaration())
1448       indicatePessimisticFixpoint();
1449   }
1450 
1451   /// See AbstractAttribute::updateImpl(...).
1452   ChangeStatus updateImpl(Attributor &A) override {
1453     // TODO: Once we have call site specific value information we can provide
1454     //       call site specific liveness information and then it makes
1455     //       sense to specialize attributes for call sites arguments instead of
1456     //       redirecting requests to the callee argument.
1457     Function *F = getAssociatedFunction();
1458     const IRPosition &FnPos = IRPosition::function(*F);
1459     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1460     return clampStateAndIndicateChange(getState(), FnAA.getState());
1461   }
1462 
1463   /// See AbstractAttribute::trackStatistics()
1464   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1465 };
1466 
1467 /// NoFree attribute for floating values.
1468 struct AANoFreeFloating : AANoFreeImpl {
1469   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1470       : AANoFreeImpl(IRP, A) {}
1471 
1472   /// See AbstractAttribute::trackStatistics()
1473   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1474 
1475   /// See Abstract Attribute::updateImpl(...).
1476   ChangeStatus updateImpl(Attributor &A) override {
1477     const IRPosition &IRP = getIRPosition();
1478 
1479     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1480         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1481     if (NoFreeAA.isAssumedNoFree())
1482       return ChangeStatus::UNCHANGED;
1483 
1484     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1485     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1486       Instruction *UserI = cast<Instruction>(U.getUser());
1487       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1488         if (CB->isBundleOperand(&U))
1489           return false;
1490         if (!CB->isArgOperand(&U))
1491           return true;
1492         unsigned ArgNo = CB->getArgOperandNo(&U);
1493 
1494         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1495             *this, IRPosition::callsite_argument(*CB, ArgNo),
1496             DepClassTy::REQUIRED);
1497         return NoFreeArg.isAssumedNoFree();
1498       }
1499 
1500       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1501           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1502         Follow = true;
1503         return true;
1504       }
1505       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1506           isa<ReturnInst>(UserI))
1507         return true;
1508 
1509       // Unknown user.
1510       return false;
1511     };
1512     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1513       return indicatePessimisticFixpoint();
1514 
1515     return ChangeStatus::UNCHANGED;
1516   }
1517 };
1518 
1519 /// NoFree attribute for a call site argument.
1520 struct AANoFreeArgument final : AANoFreeFloating {
1521   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1522       : AANoFreeFloating(IRP, A) {}
1523 
1524   /// See AbstractAttribute::trackStatistics()
1525   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1526 };
1527 
1528 /// NoFree attribute for call site arguments.
1529 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1530   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1531       : AANoFreeFloating(IRP, A) {}
1532 
1533   /// See AbstractAttribute::updateImpl(...).
1534   ChangeStatus updateImpl(Attributor &A) override {
1535     // TODO: Once we have call site specific value information we can provide
1536     //       call site specific liveness information and then it makes
1537     //       sense to specialize attributes for call sites arguments instead of
1538     //       redirecting requests to the callee argument.
1539     Argument *Arg = getAssociatedArgument();
1540     if (!Arg)
1541       return indicatePessimisticFixpoint();
1542     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1543     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1544     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1545   }
1546 
1547   /// See AbstractAttribute::trackStatistics()
1548   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1549 };
1550 
1551 /// NoFree attribute for function return value.
1552 struct AANoFreeReturned final : AANoFreeFloating {
1553   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1554       : AANoFreeFloating(IRP, A) {
1555     llvm_unreachable("NoFree is not applicable to function returns!");
1556   }
1557 
1558   /// See AbstractAttribute::initialize(...).
1559   void initialize(Attributor &A) override {
1560     llvm_unreachable("NoFree is not applicable to function returns!");
1561   }
1562 
1563   /// See AbstractAttribute::updateImpl(...).
1564   ChangeStatus updateImpl(Attributor &A) override {
1565     llvm_unreachable("NoFree is not applicable to function returns!");
1566   }
1567 
1568   /// See AbstractAttribute::trackStatistics()
1569   void trackStatistics() const override {}
1570 };
1571 
1572 /// NoFree attribute deduction for a call site return value.
1573 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1574   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1575       : AANoFreeFloating(IRP, A) {}
1576 
1577   ChangeStatus manifest(Attributor &A) override {
1578     return ChangeStatus::UNCHANGED;
1579   }
1580   /// See AbstractAttribute::trackStatistics()
1581   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1582 };
1583 
1584 /// ------------------------ NonNull Argument Attribute ------------------------
1585 static int64_t getKnownNonNullAndDerefBytesForUse(
1586     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1587     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1588   TrackUse = false;
1589 
1590   const Value *UseV = U->get();
1591   if (!UseV->getType()->isPointerTy())
1592     return 0;
1593 
1594   // We need to follow common pointer manipulation uses to the accesses they
1595   // feed into. We can try to be smart to avoid looking through things we do not
1596   // like for now, e.g., non-inbounds GEPs.
1597   if (isa<CastInst>(I)) {
1598     TrackUse = true;
1599     return 0;
1600   }
1601 
1602   if (isa<GetElementPtrInst>(I)) {
1603     TrackUse = true;
1604     return 0;
1605   }
1606 
1607   Type *PtrTy = UseV->getType();
1608   const Function *F = I->getFunction();
1609   bool NullPointerIsDefined =
1610       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1611   const DataLayout &DL = A.getInfoCache().getDL();
1612   if (const auto *CB = dyn_cast<CallBase>(I)) {
1613     if (CB->isBundleOperand(U)) {
1614       if (RetainedKnowledge RK = getKnowledgeFromUse(
1615               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1616         IsNonNull |=
1617             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1618         return RK.ArgValue;
1619       }
1620       return 0;
1621     }
1622 
1623     if (CB->isCallee(U)) {
1624       IsNonNull |= !NullPointerIsDefined;
1625       return 0;
1626     }
1627 
1628     unsigned ArgNo = CB->getArgOperandNo(U);
1629     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1630     // As long as we only use known information there is no need to track
1631     // dependences here.
1632     auto &DerefAA =
1633         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1634     IsNonNull |= DerefAA.isKnownNonNull();
1635     return DerefAA.getKnownDereferenceableBytes();
1636   }
1637 
1638   int64_t Offset;
1639   const Value *Base =
1640       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1641   if (Base) {
1642     if (Base == &AssociatedValue &&
1643         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1644       int64_t DerefBytes =
1645           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1646 
1647       IsNonNull |= !NullPointerIsDefined;
1648       return std::max(int64_t(0), DerefBytes);
1649     }
1650   }
1651 
1652   /// Corner case when an offset is 0.
1653   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1654                                               /*AllowNonInbounds*/ true);
1655   if (Base) {
1656     if (Offset == 0 && Base == &AssociatedValue &&
1657         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1658       int64_t DerefBytes =
1659           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1660       IsNonNull |= !NullPointerIsDefined;
1661       return std::max(int64_t(0), DerefBytes);
1662     }
1663   }
1664 
1665   return 0;
1666 }
1667 
1668 struct AANonNullImpl : AANonNull {
1669   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1670       : AANonNull(IRP, A),
1671         NullIsDefined(NullPointerIsDefined(
1672             getAnchorScope(),
1673             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1674 
1675   /// See AbstractAttribute::initialize(...).
1676   void initialize(Attributor &A) override {
1677     Value &V = getAssociatedValue();
1678     if (!NullIsDefined &&
1679         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1680                 /* IgnoreSubsumingPositions */ false, &A)) {
1681       indicateOptimisticFixpoint();
1682       return;
1683     }
1684 
1685     if (isa<ConstantPointerNull>(V)) {
1686       indicatePessimisticFixpoint();
1687       return;
1688     }
1689 
1690     AANonNull::initialize(A);
1691 
1692     bool CanBeNull, CanBeFreed;
1693     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1694                                          CanBeFreed)) {
1695       if (!CanBeNull) {
1696         indicateOptimisticFixpoint();
1697         return;
1698       }
1699     }
1700 
1701     if (isa<GlobalValue>(&getAssociatedValue())) {
1702       indicatePessimisticFixpoint();
1703       return;
1704     }
1705 
1706     if (Instruction *CtxI = getCtxI())
1707       followUsesInMBEC(*this, A, getState(), *CtxI);
1708   }
1709 
1710   /// See followUsesInMBEC
1711   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1712                        AANonNull::StateType &State) {
1713     bool IsNonNull = false;
1714     bool TrackUse = false;
1715     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1716                                        IsNonNull, TrackUse);
1717     State.setKnown(IsNonNull);
1718     return TrackUse;
1719   }
1720 
1721   /// See AbstractAttribute::getAsStr().
1722   const std::string getAsStr() const override {
1723     return getAssumed() ? "nonnull" : "may-null";
1724   }
1725 
1726   /// Flag to determine if the underlying value can be null and still allow
1727   /// valid accesses.
1728   const bool NullIsDefined;
1729 };
1730 
1731 /// NonNull attribute for a floating value.
1732 struct AANonNullFloating : public AANonNullImpl {
1733   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1734       : AANonNullImpl(IRP, A) {}
1735 
1736   /// See AbstractAttribute::updateImpl(...).
1737   ChangeStatus updateImpl(Attributor &A) override {
1738     const DataLayout &DL = A.getDataLayout();
1739 
1740     DominatorTree *DT = nullptr;
1741     AssumptionCache *AC = nullptr;
1742     InformationCache &InfoCache = A.getInfoCache();
1743     if (const Function *Fn = getAnchorScope()) {
1744       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1745       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1746     }
1747 
1748     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1749                             AANonNull::StateType &T, bool Stripped) -> bool {
1750       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1751                                              DepClassTy::REQUIRED);
1752       if (!Stripped && this == &AA) {
1753         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1754           T.indicatePessimisticFixpoint();
1755       } else {
1756         // Use abstract attribute information.
1757         const AANonNull::StateType &NS = AA.getState();
1758         T ^= NS;
1759       }
1760       return T.isValidState();
1761     };
1762 
1763     StateType T;
1764     if (!genericValueTraversal<AANonNull, StateType>(
1765             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1766       return indicatePessimisticFixpoint();
1767 
1768     return clampStateAndIndicateChange(getState(), T);
1769   }
1770 
1771   /// See AbstractAttribute::trackStatistics()
1772   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1773 };
1774 
1775 /// NonNull attribute for function return value.
1776 struct AANonNullReturned final
1777     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1778   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1779       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1780 
1781   /// See AbstractAttribute::getAsStr().
1782   const std::string getAsStr() const override {
1783     return getAssumed() ? "nonnull" : "may-null";
1784   }
1785 
1786   /// See AbstractAttribute::trackStatistics()
1787   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1788 };
1789 
1790 /// NonNull attribute for function argument.
1791 struct AANonNullArgument final
1792     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1793   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1794       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1795 
1796   /// See AbstractAttribute::trackStatistics()
1797   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1798 };
1799 
1800 struct AANonNullCallSiteArgument final : AANonNullFloating {
1801   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1802       : AANonNullFloating(IRP, A) {}
1803 
1804   /// See AbstractAttribute::trackStatistics()
1805   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1806 };
1807 
1808 /// NonNull attribute for a call site return position.
1809 struct AANonNullCallSiteReturned final
1810     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1811   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1812       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1813 
1814   /// See AbstractAttribute::trackStatistics()
1815   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1816 };
1817 
1818 /// ------------------------ No-Recurse Attributes ----------------------------
1819 
1820 struct AANoRecurseImpl : public AANoRecurse {
1821   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1822 
1823   /// See AbstractAttribute::getAsStr()
1824   const std::string getAsStr() const override {
1825     return getAssumed() ? "norecurse" : "may-recurse";
1826   }
1827 };
1828 
1829 struct AANoRecurseFunction final : AANoRecurseImpl {
1830   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1831       : AANoRecurseImpl(IRP, A) {}
1832 
1833   /// See AbstractAttribute::initialize(...).
1834   void initialize(Attributor &A) override {
1835     AANoRecurseImpl::initialize(A);
1836     if (const Function *F = getAnchorScope())
1837       if (A.getInfoCache().getSccSize(*F) != 1)
1838         indicatePessimisticFixpoint();
1839   }
1840 
1841   /// See AbstractAttribute::updateImpl(...).
1842   ChangeStatus updateImpl(Attributor &A) override {
1843 
1844     // If all live call sites are known to be no-recurse, we are as well.
1845     auto CallSitePred = [&](AbstractCallSite ACS) {
1846       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1847           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1848           DepClassTy::NONE);
1849       return NoRecurseAA.isKnownNoRecurse();
1850     };
1851     bool AllCallSitesKnown;
1852     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1853       // If we know all call sites and all are known no-recurse, we are done.
1854       // If all known call sites, which might not be all that exist, are known
1855       // to be no-recurse, we are not done but we can continue to assume
1856       // no-recurse. If one of the call sites we have not visited will become
1857       // live, another update is triggered.
1858       if (AllCallSitesKnown)
1859         indicateOptimisticFixpoint();
1860       return ChangeStatus::UNCHANGED;
1861     }
1862 
1863     // If the above check does not hold anymore we look at the calls.
1864     auto CheckForNoRecurse = [&](Instruction &I) {
1865       const auto &CB = cast<CallBase>(I);
1866       if (CB.hasFnAttr(Attribute::NoRecurse))
1867         return true;
1868 
1869       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1870           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1871       if (!NoRecurseAA.isAssumedNoRecurse())
1872         return false;
1873 
1874       // Recursion to the same function
1875       if (CB.getCalledFunction() == getAnchorScope())
1876         return false;
1877 
1878       return true;
1879     };
1880 
1881     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1882       return indicatePessimisticFixpoint();
1883     return ChangeStatus::UNCHANGED;
1884   }
1885 
1886   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1887 };
1888 
1889 /// NoRecurse attribute deduction for a call sites.
1890 struct AANoRecurseCallSite final : AANoRecurseImpl {
1891   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1892       : AANoRecurseImpl(IRP, A) {}
1893 
1894   /// See AbstractAttribute::initialize(...).
1895   void initialize(Attributor &A) override {
1896     AANoRecurseImpl::initialize(A);
1897     Function *F = getAssociatedFunction();
1898     if (!F || F->isDeclaration())
1899       indicatePessimisticFixpoint();
1900   }
1901 
1902   /// See AbstractAttribute::updateImpl(...).
1903   ChangeStatus updateImpl(Attributor &A) override {
1904     // TODO: Once we have call site specific value information we can provide
1905     //       call site specific liveness information and then it makes
1906     //       sense to specialize attributes for call sites arguments instead of
1907     //       redirecting requests to the callee argument.
1908     Function *F = getAssociatedFunction();
1909     const IRPosition &FnPos = IRPosition::function(*F);
1910     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1911     return clampStateAndIndicateChange(getState(), FnAA.getState());
1912   }
1913 
1914   /// See AbstractAttribute::trackStatistics()
1915   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1916 };
1917 
1918 /// -------------------- Undefined-Behavior Attributes ------------------------
1919 
1920 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1921   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1922       : AAUndefinedBehavior(IRP, A) {}
1923 
1924   /// See AbstractAttribute::updateImpl(...).
1925   // through a pointer (i.e. also branches etc.)
1926   ChangeStatus updateImpl(Attributor &A) override {
1927     const size_t UBPrevSize = KnownUBInsts.size();
1928     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1929 
1930     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1931       // Skip instructions that are already saved.
1932       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1933         return true;
1934 
1935       // If we reach here, we know we have an instruction
1936       // that accesses memory through a pointer operand,
1937       // for which getPointerOperand() should give it to us.
1938       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1939       assert(PtrOp &&
1940              "Expected pointer operand of memory accessing instruction");
1941 
1942       // Either we stopped and the appropriate action was taken,
1943       // or we got back a simplified value to continue.
1944       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1945       if (!SimplifiedPtrOp.hasValue())
1946         return true;
1947       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1948 
1949       // A memory access through a pointer is considered UB
1950       // only if the pointer has constant null value.
1951       // TODO: Expand it to not only check constant values.
1952       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1953         AssumedNoUBInsts.insert(&I);
1954         return true;
1955       }
1956       const Type *PtrTy = PtrOpVal->getType();
1957 
1958       // Because we only consider instructions inside functions,
1959       // assume that a parent function exists.
1960       const Function *F = I.getFunction();
1961 
1962       // A memory access using constant null pointer is only considered UB
1963       // if null pointer is _not_ defined for the target platform.
1964       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1965         AssumedNoUBInsts.insert(&I);
1966       else
1967         KnownUBInsts.insert(&I);
1968       return true;
1969     };
1970 
1971     auto InspectBrInstForUB = [&](Instruction &I) {
1972       // A conditional branch instruction is considered UB if it has `undef`
1973       // condition.
1974 
1975       // Skip instructions that are already saved.
1976       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1977         return true;
1978 
1979       // We know we have a branch instruction.
1980       auto BrInst = cast<BranchInst>(&I);
1981 
1982       // Unconditional branches are never considered UB.
1983       if (BrInst->isUnconditional())
1984         return true;
1985 
1986       // Either we stopped and the appropriate action was taken,
1987       // or we got back a simplified value to continue.
1988       Optional<Value *> SimplifiedCond =
1989           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1990       if (!SimplifiedCond.hasValue())
1991         return true;
1992       AssumedNoUBInsts.insert(&I);
1993       return true;
1994     };
1995 
1996     auto InspectCallSiteForUB = [&](Instruction &I) {
1997       // Check whether a callsite always cause UB or not
1998 
1999       // Skip instructions that are already saved.
2000       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2001         return true;
2002 
2003       // Check nonnull and noundef argument attribute violation for each
2004       // callsite.
2005       CallBase &CB = cast<CallBase>(I);
2006       Function *Callee = CB.getCalledFunction();
2007       if (!Callee)
2008         return true;
2009       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2010         // If current argument is known to be simplified to null pointer and the
2011         // corresponding argument position is known to have nonnull attribute,
2012         // the argument is poison. Furthermore, if the argument is poison and
2013         // the position is known to have noundef attriubte, this callsite is
2014         // considered UB.
2015         if (idx >= Callee->arg_size())
2016           break;
2017         Value *ArgVal = CB.getArgOperand(idx);
2018         if (!ArgVal)
2019           continue;
2020         // Here, we handle three cases.
2021         //   (1) Not having a value means it is dead. (we can replace the value
2022         //       with undef)
2023         //   (2) Simplified to undef. The argument violate noundef attriubte.
2024         //   (3) Simplified to null pointer where known to be nonnull.
2025         //       The argument is a poison value and violate noundef attribute.
2026         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2027         auto &NoUndefAA =
2028             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2029         if (!NoUndefAA.isKnownNoUndef())
2030           continue;
2031         bool UsedAssumedInformation = false;
2032         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2033             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2034         if (UsedAssumedInformation)
2035           continue;
2036         if (!SimplifiedVal.hasValue() ||
2037             isa<UndefValue>(*SimplifiedVal.getValue())) {
2038           KnownUBInsts.insert(&I);
2039           continue;
2040         }
2041         if (!ArgVal->getType()->isPointerTy() ||
2042             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2043           continue;
2044         auto &NonNullAA =
2045             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2046         if (NonNullAA.isKnownNonNull())
2047           KnownUBInsts.insert(&I);
2048       }
2049       return true;
2050     };
2051 
2052     auto InspectReturnInstForUB =
2053         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2054           // Check if a return instruction always cause UB or not
2055           // Note: It is guaranteed that the returned position of the anchor
2056           //       scope has noundef attribute when this is called.
2057           //       We also ensure the return position is not "assumed dead"
2058           //       because the returned value was then potentially simplified to
2059           //       `undef` in AAReturnedValues without removing the `noundef`
2060           //       attribute yet.
2061 
2062           // When the returned position has noundef attriubte, UB occur in the
2063           // following cases.
2064           //   (1) Returned value is known to be undef.
2065           //   (2) The value is known to be a null pointer and the returned
2066           //       position has nonnull attribute (because the returned value is
2067           //       poison).
2068           bool FoundUB = false;
2069           if (isa<UndefValue>(V)) {
2070             FoundUB = true;
2071           } else {
2072             if (isa<ConstantPointerNull>(V)) {
2073               auto &NonNullAA = A.getAAFor<AANonNull>(
2074                   *this, IRPosition::returned(*getAnchorScope()),
2075                   DepClassTy::NONE);
2076               if (NonNullAA.isKnownNonNull())
2077                 FoundUB = true;
2078             }
2079           }
2080 
2081           if (FoundUB)
2082             for (ReturnInst *RI : RetInsts)
2083               KnownUBInsts.insert(RI);
2084           return true;
2085         };
2086 
2087     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2088                               {Instruction::Load, Instruction::Store,
2089                                Instruction::AtomicCmpXchg,
2090                                Instruction::AtomicRMW},
2091                               /* CheckBBLivenessOnly */ true);
2092     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2093                               /* CheckBBLivenessOnly */ true);
2094     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2095 
2096     // If the returned position of the anchor scope has noundef attriubte, check
2097     // all returned instructions.
2098     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2099       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2100       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2101         auto &RetPosNoUndefAA =
2102             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2103         if (RetPosNoUndefAA.isKnownNoUndef())
2104           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2105                                                     *this);
2106       }
2107     }
2108 
2109     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2110         UBPrevSize != KnownUBInsts.size())
2111       return ChangeStatus::CHANGED;
2112     return ChangeStatus::UNCHANGED;
2113   }
2114 
2115   bool isKnownToCauseUB(Instruction *I) const override {
2116     return KnownUBInsts.count(I);
2117   }
2118 
2119   bool isAssumedToCauseUB(Instruction *I) const override {
2120     // In simple words, if an instruction is not in the assumed to _not_
2121     // cause UB, then it is assumed UB (that includes those
2122     // in the KnownUBInsts set). The rest is boilerplate
2123     // is to ensure that it is one of the instructions we test
2124     // for UB.
2125 
2126     switch (I->getOpcode()) {
2127     case Instruction::Load:
2128     case Instruction::Store:
2129     case Instruction::AtomicCmpXchg:
2130     case Instruction::AtomicRMW:
2131       return !AssumedNoUBInsts.count(I);
2132     case Instruction::Br: {
2133       auto BrInst = cast<BranchInst>(I);
2134       if (BrInst->isUnconditional())
2135         return false;
2136       return !AssumedNoUBInsts.count(I);
2137     } break;
2138     default:
2139       return false;
2140     }
2141     return false;
2142   }
2143 
2144   ChangeStatus manifest(Attributor &A) override {
2145     if (KnownUBInsts.empty())
2146       return ChangeStatus::UNCHANGED;
2147     for (Instruction *I : KnownUBInsts)
2148       A.changeToUnreachableAfterManifest(I);
2149     return ChangeStatus::CHANGED;
2150   }
2151 
2152   /// See AbstractAttribute::getAsStr()
2153   const std::string getAsStr() const override {
2154     return getAssumed() ? "undefined-behavior" : "no-ub";
2155   }
2156 
2157   /// Note: The correctness of this analysis depends on the fact that the
2158   /// following 2 sets will stop changing after some point.
2159   /// "Change" here means that their size changes.
2160   /// The size of each set is monotonically increasing
2161   /// (we only add items to them) and it is upper bounded by the number of
2162   /// instructions in the processed function (we can never save more
2163   /// elements in either set than this number). Hence, at some point,
2164   /// they will stop increasing.
2165   /// Consequently, at some point, both sets will have stopped
2166   /// changing, effectively making the analysis reach a fixpoint.
2167 
2168   /// Note: These 2 sets are disjoint and an instruction can be considered
2169   /// one of 3 things:
2170   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2171   ///    the KnownUBInsts set.
2172   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2173   ///    has a reason to assume it).
2174   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2175   ///    could not find a reason to assume or prove that it can cause UB,
2176   ///    hence it assumes it doesn't. We have a set for these instructions
2177   ///    so that we don't reprocess them in every update.
2178   ///    Note however that instructions in this set may cause UB.
2179 
2180 protected:
2181   /// A set of all live instructions _known_ to cause UB.
2182   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2183 
2184 private:
2185   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2186   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2187 
2188   // Should be called on updates in which if we're processing an instruction
2189   // \p I that depends on a value \p V, one of the following has to happen:
2190   // - If the value is assumed, then stop.
2191   // - If the value is known but undef, then consider it UB.
2192   // - Otherwise, do specific processing with the simplified value.
2193   // We return None in the first 2 cases to signify that an appropriate
2194   // action was taken and the caller should stop.
2195   // Otherwise, we return the simplified value that the caller should
2196   // use for specific processing.
2197   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2198                                          Instruction *I) {
2199     bool UsedAssumedInformation = false;
2200     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2201         IRPosition::value(*V), *this, UsedAssumedInformation);
2202     if (UsedAssumedInformation) {
2203       // Don't depend on assumed values.
2204       return llvm::None;
2205     }
2206     if (!SimplifiedV.hasValue()) {
2207       // If it is known (which we tested above) but it doesn't have a value,
2208       // then we can assume `undef` and hence the instruction is UB.
2209       KnownUBInsts.insert(I);
2210       return llvm::None;
2211     }
2212     Value *Val = SimplifiedV.getValue();
2213     if (isa<UndefValue>(Val)) {
2214       KnownUBInsts.insert(I);
2215       return llvm::None;
2216     }
2217     return Val;
2218   }
2219 };
2220 
2221 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2222   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2223       : AAUndefinedBehaviorImpl(IRP, A) {}
2224 
2225   /// See AbstractAttribute::trackStatistics()
2226   void trackStatistics() const override {
2227     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2228                "Number of instructions known to have UB");
2229     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2230         KnownUBInsts.size();
2231   }
2232 };
2233 
2234 /// ------------------------ Will-Return Attributes ----------------------------
2235 
2236 // Helper function that checks whether a function has any cycle which we don't
2237 // know if it is bounded or not.
2238 // Loops with maximum trip count are considered bounded, any other cycle not.
2239 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2240   ScalarEvolution *SE =
2241       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2242   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2243   // If either SCEV or LoopInfo is not available for the function then we assume
2244   // any cycle to be unbounded cycle.
2245   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2246   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2247   if (!SE || !LI) {
2248     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2249       if (SCCI.hasCycle())
2250         return true;
2251     return false;
2252   }
2253 
2254   // If there's irreducible control, the function may contain non-loop cycles.
2255   if (mayContainIrreducibleControl(F, LI))
2256     return true;
2257 
2258   // Any loop that does not have a max trip count is considered unbounded cycle.
2259   for (auto *L : LI->getLoopsInPreorder()) {
2260     if (!SE->getSmallConstantMaxTripCount(L))
2261       return true;
2262   }
2263   return false;
2264 }
2265 
2266 struct AAWillReturnImpl : public AAWillReturn {
2267   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2268       : AAWillReturn(IRP, A) {}
2269 
2270   /// See AbstractAttribute::initialize(...).
2271   void initialize(Attributor &A) override {
2272     AAWillReturn::initialize(A);
2273 
2274     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2275       indicateOptimisticFixpoint();
2276       return;
2277     }
2278   }
2279 
2280   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2281   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2282     // Check for `mustprogress` in the scope and the associated function which
2283     // might be different if this is a call site.
2284     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2285         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2286       return false;
2287 
2288     const auto &MemAA =
2289         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2290     if (!MemAA.isAssumedReadOnly())
2291       return false;
2292     if (KnownOnly && !MemAA.isKnownReadOnly())
2293       return false;
2294     if (!MemAA.isKnownReadOnly())
2295       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2296 
2297     return true;
2298   }
2299 
2300   /// See AbstractAttribute::updateImpl(...).
2301   ChangeStatus updateImpl(Attributor &A) override {
2302     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2303       return ChangeStatus::UNCHANGED;
2304 
2305     auto CheckForWillReturn = [&](Instruction &I) {
2306       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2307       const auto &WillReturnAA =
2308           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2309       if (WillReturnAA.isKnownWillReturn())
2310         return true;
2311       if (!WillReturnAA.isAssumedWillReturn())
2312         return false;
2313       const auto &NoRecurseAA =
2314           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2315       return NoRecurseAA.isAssumedNoRecurse();
2316     };
2317 
2318     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2319       return indicatePessimisticFixpoint();
2320 
2321     return ChangeStatus::UNCHANGED;
2322   }
2323 
2324   /// See AbstractAttribute::getAsStr()
2325   const std::string getAsStr() const override {
2326     return getAssumed() ? "willreturn" : "may-noreturn";
2327   }
2328 };
2329 
2330 struct AAWillReturnFunction final : AAWillReturnImpl {
2331   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2332       : AAWillReturnImpl(IRP, A) {}
2333 
2334   /// See AbstractAttribute::initialize(...).
2335   void initialize(Attributor &A) override {
2336     AAWillReturnImpl::initialize(A);
2337 
2338     Function *F = getAnchorScope();
2339     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2340       indicatePessimisticFixpoint();
2341   }
2342 
2343   /// See AbstractAttribute::trackStatistics()
2344   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2345 };
2346 
2347 /// WillReturn attribute deduction for a call sites.
2348 struct AAWillReturnCallSite final : AAWillReturnImpl {
2349   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2350       : AAWillReturnImpl(IRP, A) {}
2351 
2352   /// See AbstractAttribute::initialize(...).
2353   void initialize(Attributor &A) override {
2354     AAWillReturnImpl::initialize(A);
2355     Function *F = getAssociatedFunction();
2356     if (!F || !A.isFunctionIPOAmendable(*F))
2357       indicatePessimisticFixpoint();
2358   }
2359 
2360   /// See AbstractAttribute::updateImpl(...).
2361   ChangeStatus updateImpl(Attributor &A) override {
2362     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2363       return ChangeStatus::UNCHANGED;
2364 
2365     // TODO: Once we have call site specific value information we can provide
2366     //       call site specific liveness information and then it makes
2367     //       sense to specialize attributes for call sites arguments instead of
2368     //       redirecting requests to the callee argument.
2369     Function *F = getAssociatedFunction();
2370     const IRPosition &FnPos = IRPosition::function(*F);
2371     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2372     return clampStateAndIndicateChange(getState(), FnAA.getState());
2373   }
2374 
2375   /// See AbstractAttribute::trackStatistics()
2376   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2377 };
2378 
2379 /// -------------------AAReachability Attribute--------------------------
2380 
2381 struct AAReachabilityImpl : AAReachability {
2382   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2383       : AAReachability(IRP, A) {}
2384 
2385   const std::string getAsStr() const override {
2386     // TODO: Return the number of reachable queries.
2387     return "reachable";
2388   }
2389 
2390   /// See AbstractAttribute::updateImpl(...).
2391   ChangeStatus updateImpl(Attributor &A) override {
2392     return ChangeStatus::UNCHANGED;
2393   }
2394 };
2395 
2396 struct AAReachabilityFunction final : public AAReachabilityImpl {
2397   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2398       : AAReachabilityImpl(IRP, A) {}
2399 
2400   /// See AbstractAttribute::trackStatistics()
2401   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2402 };
2403 
2404 /// ------------------------ NoAlias Argument Attribute ------------------------
2405 
2406 struct AANoAliasImpl : AANoAlias {
2407   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2408     assert(getAssociatedType()->isPointerTy() &&
2409            "Noalias is a pointer attribute");
2410   }
2411 
2412   const std::string getAsStr() const override {
2413     return getAssumed() ? "noalias" : "may-alias";
2414   }
2415 };
2416 
2417 /// NoAlias attribute for a floating value.
2418 struct AANoAliasFloating final : AANoAliasImpl {
2419   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2420       : AANoAliasImpl(IRP, A) {}
2421 
2422   /// See AbstractAttribute::initialize(...).
2423   void initialize(Attributor &A) override {
2424     AANoAliasImpl::initialize(A);
2425     Value *Val = &getAssociatedValue();
2426     do {
2427       CastInst *CI = dyn_cast<CastInst>(Val);
2428       if (!CI)
2429         break;
2430       Value *Base = CI->getOperand(0);
2431       if (!Base->hasOneUse())
2432         break;
2433       Val = Base;
2434     } while (true);
2435 
2436     if (!Val->getType()->isPointerTy()) {
2437       indicatePessimisticFixpoint();
2438       return;
2439     }
2440 
2441     if (isa<AllocaInst>(Val))
2442       indicateOptimisticFixpoint();
2443     else if (isa<ConstantPointerNull>(Val) &&
2444              !NullPointerIsDefined(getAnchorScope(),
2445                                    Val->getType()->getPointerAddressSpace()))
2446       indicateOptimisticFixpoint();
2447     else if (Val != &getAssociatedValue()) {
2448       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2449           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2450       if (ValNoAliasAA.isKnownNoAlias())
2451         indicateOptimisticFixpoint();
2452     }
2453   }
2454 
2455   /// See AbstractAttribute::updateImpl(...).
2456   ChangeStatus updateImpl(Attributor &A) override {
2457     // TODO: Implement this.
2458     return indicatePessimisticFixpoint();
2459   }
2460 
2461   /// See AbstractAttribute::trackStatistics()
2462   void trackStatistics() const override {
2463     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2464   }
2465 };
2466 
2467 /// NoAlias attribute for an argument.
2468 struct AANoAliasArgument final
2469     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2470   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2471   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2472 
2473   /// See AbstractAttribute::initialize(...).
2474   void initialize(Attributor &A) override {
2475     Base::initialize(A);
2476     // See callsite argument attribute and callee argument attribute.
2477     if (hasAttr({Attribute::ByVal}))
2478       indicateOptimisticFixpoint();
2479   }
2480 
2481   /// See AbstractAttribute::update(...).
2482   ChangeStatus updateImpl(Attributor &A) override {
2483     // We have to make sure no-alias on the argument does not break
2484     // synchronization when this is a callback argument, see also [1] below.
2485     // If synchronization cannot be affected, we delegate to the base updateImpl
2486     // function, otherwise we give up for now.
2487 
2488     // If the function is no-sync, no-alias cannot break synchronization.
2489     const auto &NoSyncAA =
2490         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2491                              DepClassTy::OPTIONAL);
2492     if (NoSyncAA.isAssumedNoSync())
2493       return Base::updateImpl(A);
2494 
2495     // If the argument is read-only, no-alias cannot break synchronization.
2496     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2497         *this, getIRPosition(), DepClassTy::OPTIONAL);
2498     if (MemBehaviorAA.isAssumedReadOnly())
2499       return Base::updateImpl(A);
2500 
2501     // If the argument is never passed through callbacks, no-alias cannot break
2502     // synchronization.
2503     bool AllCallSitesKnown;
2504     if (A.checkForAllCallSites(
2505             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2506             true, AllCallSitesKnown))
2507       return Base::updateImpl(A);
2508 
2509     // TODO: add no-alias but make sure it doesn't break synchronization by
2510     // introducing fake uses. See:
2511     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2512     //     International Workshop on OpenMP 2018,
2513     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2514 
2515     return indicatePessimisticFixpoint();
2516   }
2517 
2518   /// See AbstractAttribute::trackStatistics()
2519   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2520 };
2521 
2522 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2523   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2524       : AANoAliasImpl(IRP, A) {}
2525 
2526   /// See AbstractAttribute::initialize(...).
2527   void initialize(Attributor &A) override {
2528     // See callsite argument attribute and callee argument attribute.
2529     const auto &CB = cast<CallBase>(getAnchorValue());
2530     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2531       indicateOptimisticFixpoint();
2532     Value &Val = getAssociatedValue();
2533     if (isa<ConstantPointerNull>(Val) &&
2534         !NullPointerIsDefined(getAnchorScope(),
2535                               Val.getType()->getPointerAddressSpace()))
2536       indicateOptimisticFixpoint();
2537   }
2538 
2539   /// Determine if the underlying value may alias with the call site argument
2540   /// \p OtherArgNo of \p ICS (= the underlying call site).
2541   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2542                             const AAMemoryBehavior &MemBehaviorAA,
2543                             const CallBase &CB, unsigned OtherArgNo) {
2544     // We do not need to worry about aliasing with the underlying IRP.
2545     if (this->getCalleeArgNo() == (int)OtherArgNo)
2546       return false;
2547 
2548     // If it is not a pointer or pointer vector we do not alias.
2549     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2550     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2551       return false;
2552 
2553     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2554         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2555 
2556     // If the argument is readnone, there is no read-write aliasing.
2557     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2558       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2559       return false;
2560     }
2561 
2562     // If the argument is readonly and the underlying value is readonly, there
2563     // is no read-write aliasing.
2564     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2565     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2566       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2567       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2568       return false;
2569     }
2570 
2571     // We have to utilize actual alias analysis queries so we need the object.
2572     if (!AAR)
2573       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2574 
2575     // Try to rule it out at the call site.
2576     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2577     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2578                          "callsite arguments: "
2579                       << getAssociatedValue() << " " << *ArgOp << " => "
2580                       << (IsAliasing ? "" : "no-") << "alias \n");
2581 
2582     return IsAliasing;
2583   }
2584 
2585   bool
2586   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2587                                          const AAMemoryBehavior &MemBehaviorAA,
2588                                          const AANoAlias &NoAliasAA) {
2589     // We can deduce "noalias" if the following conditions hold.
2590     // (i)   Associated value is assumed to be noalias in the definition.
2591     // (ii)  Associated value is assumed to be no-capture in all the uses
2592     //       possibly executed before this callsite.
2593     // (iii) There is no other pointer argument which could alias with the
2594     //       value.
2595 
2596     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2597     if (!AssociatedValueIsNoAliasAtDef) {
2598       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2599                         << " is not no-alias at the definition\n");
2600       return false;
2601     }
2602 
2603     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2604 
2605     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2606     const Function *ScopeFn = VIRP.getAnchorScope();
2607     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2608     // Check whether the value is captured in the scope using AANoCapture.
2609     //      Look at CFG and check only uses possibly executed before this
2610     //      callsite.
2611     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2612       Instruction *UserI = cast<Instruction>(U.getUser());
2613 
2614       // If UserI is the curr instruction and there is a single potential use of
2615       // the value in UserI we allow the use.
2616       // TODO: We should inspect the operands and allow those that cannot alias
2617       //       with the value.
2618       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2619         return true;
2620 
2621       if (ScopeFn) {
2622         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2623             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2624 
2625         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2626           return true;
2627 
2628         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2629           if (CB->isArgOperand(&U)) {
2630 
2631             unsigned ArgNo = CB->getArgOperandNo(&U);
2632 
2633             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2634                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2635                 DepClassTy::OPTIONAL);
2636 
2637             if (NoCaptureAA.isAssumedNoCapture())
2638               return true;
2639           }
2640         }
2641       }
2642 
2643       // For cases which can potentially have more users
2644       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2645           isa<SelectInst>(U)) {
2646         Follow = true;
2647         return true;
2648       }
2649 
2650       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2651       return false;
2652     };
2653 
2654     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2655       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2656         LLVM_DEBUG(
2657             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2658                    << " cannot be noalias as it is potentially captured\n");
2659         return false;
2660       }
2661     }
2662     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2663 
2664     // Check there is no other pointer argument which could alias with the
2665     // value passed at this call site.
2666     // TODO: AbstractCallSite
2667     const auto &CB = cast<CallBase>(getAnchorValue());
2668     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2669          OtherArgNo++)
2670       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2671         return false;
2672 
2673     return true;
2674   }
2675 
2676   /// See AbstractAttribute::updateImpl(...).
2677   ChangeStatus updateImpl(Attributor &A) override {
2678     // If the argument is readnone we are done as there are no accesses via the
2679     // argument.
2680     auto &MemBehaviorAA =
2681         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2682     if (MemBehaviorAA.isAssumedReadNone()) {
2683       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2684       return ChangeStatus::UNCHANGED;
2685     }
2686 
2687     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2688     const auto &NoAliasAA =
2689         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2690 
2691     AAResults *AAR = nullptr;
2692     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2693                                                NoAliasAA)) {
2694       LLVM_DEBUG(
2695           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2696       return ChangeStatus::UNCHANGED;
2697     }
2698 
2699     return indicatePessimisticFixpoint();
2700   }
2701 
2702   /// See AbstractAttribute::trackStatistics()
2703   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2704 };
2705 
2706 /// NoAlias attribute for function return value.
2707 struct AANoAliasReturned final : AANoAliasImpl {
2708   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2709       : AANoAliasImpl(IRP, A) {}
2710 
2711   /// See AbstractAttribute::initialize(...).
2712   void initialize(Attributor &A) override {
2713     AANoAliasImpl::initialize(A);
2714     Function *F = getAssociatedFunction();
2715     if (!F || F->isDeclaration())
2716       indicatePessimisticFixpoint();
2717   }
2718 
2719   /// See AbstractAttribute::updateImpl(...).
2720   virtual ChangeStatus updateImpl(Attributor &A) override {
2721 
2722     auto CheckReturnValue = [&](Value &RV) -> bool {
2723       if (Constant *C = dyn_cast<Constant>(&RV))
2724         if (C->isNullValue() || isa<UndefValue>(C))
2725           return true;
2726 
2727       /// For now, we can only deduce noalias if we have call sites.
2728       /// FIXME: add more support.
2729       if (!isa<CallBase>(&RV))
2730         return false;
2731 
2732       const IRPosition &RVPos = IRPosition::value(RV);
2733       const auto &NoAliasAA =
2734           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2735       if (!NoAliasAA.isAssumedNoAlias())
2736         return false;
2737 
2738       const auto &NoCaptureAA =
2739           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2740       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2741     };
2742 
2743     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2744       return indicatePessimisticFixpoint();
2745 
2746     return ChangeStatus::UNCHANGED;
2747   }
2748 
2749   /// See AbstractAttribute::trackStatistics()
2750   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2751 };
2752 
2753 /// NoAlias attribute deduction for a call site return value.
2754 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2755   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2756       : AANoAliasImpl(IRP, A) {}
2757 
2758   /// See AbstractAttribute::initialize(...).
2759   void initialize(Attributor &A) override {
2760     AANoAliasImpl::initialize(A);
2761     Function *F = getAssociatedFunction();
2762     if (!F || F->isDeclaration())
2763       indicatePessimisticFixpoint();
2764   }
2765 
2766   /// See AbstractAttribute::updateImpl(...).
2767   ChangeStatus updateImpl(Attributor &A) override {
2768     // TODO: Once we have call site specific value information we can provide
2769     //       call site specific liveness information and then it makes
2770     //       sense to specialize attributes for call sites arguments instead of
2771     //       redirecting requests to the callee argument.
2772     Function *F = getAssociatedFunction();
2773     const IRPosition &FnPos = IRPosition::returned(*F);
2774     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2775     return clampStateAndIndicateChange(getState(), FnAA.getState());
2776   }
2777 
2778   /// See AbstractAttribute::trackStatistics()
2779   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2780 };
2781 
2782 /// -------------------AAIsDead Function Attribute-----------------------
2783 
2784 struct AAIsDeadValueImpl : public AAIsDead {
2785   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2786 
2787   /// See AAIsDead::isAssumedDead().
2788   bool isAssumedDead() const override { return getAssumed(); }
2789 
2790   /// See AAIsDead::isKnownDead().
2791   bool isKnownDead() const override { return getKnown(); }
2792 
2793   /// See AAIsDead::isAssumedDead(BasicBlock *).
2794   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2795 
2796   /// See AAIsDead::isKnownDead(BasicBlock *).
2797   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2798 
2799   /// See AAIsDead::isAssumedDead(Instruction *I).
2800   bool isAssumedDead(const Instruction *I) const override {
2801     return I == getCtxI() && isAssumedDead();
2802   }
2803 
2804   /// See AAIsDead::isKnownDead(Instruction *I).
2805   bool isKnownDead(const Instruction *I) const override {
2806     return isAssumedDead(I) && getKnown();
2807   }
2808 
2809   /// See AbstractAttribute::getAsStr().
2810   const std::string getAsStr() const override {
2811     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2812   }
2813 
2814   /// Check if all uses are assumed dead.
2815   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2816     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2817     // Explicitly set the dependence class to required because we want a long
2818     // chain of N dependent instructions to be considered live as soon as one is
2819     // without going through N update cycles. This is not required for
2820     // correctness.
2821     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2822   }
2823 
2824   /// Determine if \p I is assumed to be side-effect free.
2825   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2826     if (!I || wouldInstructionBeTriviallyDead(I))
2827       return true;
2828 
2829     auto *CB = dyn_cast<CallBase>(I);
2830     if (!CB || isa<IntrinsicInst>(CB))
2831       return false;
2832 
2833     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2834     const auto &NoUnwindAA =
2835         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2836     if (!NoUnwindAA.isAssumedNoUnwind())
2837       return false;
2838     if (!NoUnwindAA.isKnownNoUnwind())
2839       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2840 
2841     const auto &MemBehaviorAA =
2842         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2843     if (MemBehaviorAA.isAssumedReadOnly()) {
2844       if (!MemBehaviorAA.isKnownReadOnly())
2845         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2846       return true;
2847     }
2848     return false;
2849   }
2850 };
2851 
2852 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2853   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2854       : AAIsDeadValueImpl(IRP, A) {}
2855 
2856   /// See AbstractAttribute::initialize(...).
2857   void initialize(Attributor &A) override {
2858     if (isa<UndefValue>(getAssociatedValue())) {
2859       indicatePessimisticFixpoint();
2860       return;
2861     }
2862 
2863     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2864     if (!isAssumedSideEffectFree(A, I))
2865       indicatePessimisticFixpoint();
2866   }
2867 
2868   /// See AbstractAttribute::updateImpl(...).
2869   ChangeStatus updateImpl(Attributor &A) override {
2870     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2871     if (!isAssumedSideEffectFree(A, I))
2872       return indicatePessimisticFixpoint();
2873 
2874     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2875       return indicatePessimisticFixpoint();
2876     return ChangeStatus::UNCHANGED;
2877   }
2878 
2879   /// See AbstractAttribute::manifest(...).
2880   ChangeStatus manifest(Attributor &A) override {
2881     Value &V = getAssociatedValue();
2882     if (auto *I = dyn_cast<Instruction>(&V)) {
2883       // If we get here we basically know the users are all dead. We check if
2884       // isAssumedSideEffectFree returns true here again because it might not be
2885       // the case and only the users are dead but the instruction (=call) is
2886       // still needed.
2887       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2888         A.deleteAfterManifest(*I);
2889         return ChangeStatus::CHANGED;
2890       }
2891     }
2892     if (V.use_empty())
2893       return ChangeStatus::UNCHANGED;
2894 
2895     bool UsedAssumedInformation = false;
2896     Optional<Constant *> C =
2897         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2898     if (C.hasValue() && C.getValue())
2899       return ChangeStatus::UNCHANGED;
2900 
2901     // Replace the value with undef as it is dead but keep droppable uses around
2902     // as they provide information we don't want to give up on just yet.
2903     UndefValue &UV = *UndefValue::get(V.getType());
2904     bool AnyChange =
2905         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2906     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2907   }
2908 
2909   /// See AbstractAttribute::trackStatistics()
2910   void trackStatistics() const override {
2911     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2912   }
2913 };
2914 
2915 struct AAIsDeadArgument : public AAIsDeadFloating {
2916   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2917       : AAIsDeadFloating(IRP, A) {}
2918 
2919   /// See AbstractAttribute::initialize(...).
2920   void initialize(Attributor &A) override {
2921     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2922       indicatePessimisticFixpoint();
2923   }
2924 
2925   /// See AbstractAttribute::manifest(...).
2926   ChangeStatus manifest(Attributor &A) override {
2927     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2928     Argument &Arg = *getAssociatedArgument();
2929     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2930       if (A.registerFunctionSignatureRewrite(
2931               Arg, /* ReplacementTypes */ {},
2932               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2933               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2934         Arg.dropDroppableUses();
2935         return ChangeStatus::CHANGED;
2936       }
2937     return Changed;
2938   }
2939 
2940   /// See AbstractAttribute::trackStatistics()
2941   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2942 };
2943 
2944 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2945   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2946       : AAIsDeadValueImpl(IRP, A) {}
2947 
2948   /// See AbstractAttribute::initialize(...).
2949   void initialize(Attributor &A) override {
2950     if (isa<UndefValue>(getAssociatedValue()))
2951       indicatePessimisticFixpoint();
2952   }
2953 
2954   /// See AbstractAttribute::updateImpl(...).
2955   ChangeStatus updateImpl(Attributor &A) override {
2956     // TODO: Once we have call site specific value information we can provide
2957     //       call site specific liveness information and then it makes
2958     //       sense to specialize attributes for call sites arguments instead of
2959     //       redirecting requests to the callee argument.
2960     Argument *Arg = getAssociatedArgument();
2961     if (!Arg)
2962       return indicatePessimisticFixpoint();
2963     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2964     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2965     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2966   }
2967 
2968   /// See AbstractAttribute::manifest(...).
2969   ChangeStatus manifest(Attributor &A) override {
2970     CallBase &CB = cast<CallBase>(getAnchorValue());
2971     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2972     assert(!isa<UndefValue>(U.get()) &&
2973            "Expected undef values to be filtered out!");
2974     UndefValue &UV = *UndefValue::get(U->getType());
2975     if (A.changeUseAfterManifest(U, UV))
2976       return ChangeStatus::CHANGED;
2977     return ChangeStatus::UNCHANGED;
2978   }
2979 
2980   /// See AbstractAttribute::trackStatistics()
2981   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2982 };
2983 
2984 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2985   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2986       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2987 
2988   /// See AAIsDead::isAssumedDead().
2989   bool isAssumedDead() const override {
2990     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2991   }
2992 
2993   /// See AbstractAttribute::initialize(...).
2994   void initialize(Attributor &A) override {
2995     if (isa<UndefValue>(getAssociatedValue())) {
2996       indicatePessimisticFixpoint();
2997       return;
2998     }
2999 
3000     // We track this separately as a secondary state.
3001     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3002   }
3003 
3004   /// See AbstractAttribute::updateImpl(...).
3005   ChangeStatus updateImpl(Attributor &A) override {
3006     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3007     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3008       IsAssumedSideEffectFree = false;
3009       Changed = ChangeStatus::CHANGED;
3010     }
3011 
3012     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3013       return indicatePessimisticFixpoint();
3014     return Changed;
3015   }
3016 
3017   /// See AbstractAttribute::trackStatistics()
3018   void trackStatistics() const override {
3019     if (IsAssumedSideEffectFree)
3020       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3021     else
3022       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3023   }
3024 
3025   /// See AbstractAttribute::getAsStr().
3026   const std::string getAsStr() const override {
3027     return isAssumedDead()
3028                ? "assumed-dead"
3029                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3030   }
3031 
3032 private:
3033   bool IsAssumedSideEffectFree;
3034 };
3035 
3036 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3037   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3038       : AAIsDeadValueImpl(IRP, A) {}
3039 
3040   /// See AbstractAttribute::updateImpl(...).
3041   ChangeStatus updateImpl(Attributor &A) override {
3042 
3043     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3044                               {Instruction::Ret});
3045 
3046     auto PredForCallSite = [&](AbstractCallSite ACS) {
3047       if (ACS.isCallbackCall() || !ACS.getInstruction())
3048         return false;
3049       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3050     };
3051 
3052     bool AllCallSitesKnown;
3053     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3054                                 AllCallSitesKnown))
3055       return indicatePessimisticFixpoint();
3056 
3057     return ChangeStatus::UNCHANGED;
3058   }
3059 
3060   /// See AbstractAttribute::manifest(...).
3061   ChangeStatus manifest(Attributor &A) override {
3062     // TODO: Rewrite the signature to return void?
3063     bool AnyChange = false;
3064     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3065     auto RetInstPred = [&](Instruction &I) {
3066       ReturnInst &RI = cast<ReturnInst>(I);
3067       if (!isa<UndefValue>(RI.getReturnValue()))
3068         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3069       return true;
3070     };
3071     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3072     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3073   }
3074 
3075   /// See AbstractAttribute::trackStatistics()
3076   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3077 };
3078 
3079 struct AAIsDeadFunction : public AAIsDead {
3080   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3081 
3082   /// See AbstractAttribute::initialize(...).
3083   void initialize(Attributor &A) override {
3084     const Function *F = getAnchorScope();
3085     if (F && !F->isDeclaration()) {
3086       // We only want to compute liveness once. If the function is not part of
3087       // the SCC, skip it.
3088       if (A.isRunOn(*const_cast<Function *>(F))) {
3089         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3090         assumeLive(A, F->getEntryBlock());
3091       } else {
3092         indicatePessimisticFixpoint();
3093       }
3094     }
3095   }
3096 
3097   /// See AbstractAttribute::getAsStr().
3098   const std::string getAsStr() const override {
3099     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3100            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3101            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3102            std::to_string(KnownDeadEnds.size()) + "]";
3103   }
3104 
3105   /// See AbstractAttribute::manifest(...).
3106   ChangeStatus manifest(Attributor &A) override {
3107     assert(getState().isValidState() &&
3108            "Attempted to manifest an invalid state!");
3109 
3110     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3111     Function &F = *getAnchorScope();
3112 
3113     if (AssumedLiveBlocks.empty()) {
3114       A.deleteAfterManifest(F);
3115       return ChangeStatus::CHANGED;
3116     }
3117 
3118     // Flag to determine if we can change an invoke to a call assuming the
3119     // callee is nounwind. This is not possible if the personality of the
3120     // function allows to catch asynchronous exceptions.
3121     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3122 
3123     KnownDeadEnds.set_union(ToBeExploredFrom);
3124     for (const Instruction *DeadEndI : KnownDeadEnds) {
3125       auto *CB = dyn_cast<CallBase>(DeadEndI);
3126       if (!CB)
3127         continue;
3128       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3129           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3130       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3131       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3132         continue;
3133 
3134       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3135         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3136       else
3137         A.changeToUnreachableAfterManifest(
3138             const_cast<Instruction *>(DeadEndI->getNextNode()));
3139       HasChanged = ChangeStatus::CHANGED;
3140     }
3141 
3142     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3143     for (BasicBlock &BB : F)
3144       if (!AssumedLiveBlocks.count(&BB)) {
3145         A.deleteAfterManifest(BB);
3146         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3147       }
3148 
3149     return HasChanged;
3150   }
3151 
3152   /// See AbstractAttribute::updateImpl(...).
3153   ChangeStatus updateImpl(Attributor &A) override;
3154 
3155   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3156     return !AssumedLiveEdges.count(std::make_pair(From, To));
3157   }
3158 
3159   /// See AbstractAttribute::trackStatistics()
3160   void trackStatistics() const override {}
3161 
3162   /// Returns true if the function is assumed dead.
3163   bool isAssumedDead() const override { return false; }
3164 
3165   /// See AAIsDead::isKnownDead().
3166   bool isKnownDead() const override { return false; }
3167 
3168   /// See AAIsDead::isAssumedDead(BasicBlock *).
3169   bool isAssumedDead(const BasicBlock *BB) const override {
3170     assert(BB->getParent() == getAnchorScope() &&
3171            "BB must be in the same anchor scope function.");
3172 
3173     if (!getAssumed())
3174       return false;
3175     return !AssumedLiveBlocks.count(BB);
3176   }
3177 
3178   /// See AAIsDead::isKnownDead(BasicBlock *).
3179   bool isKnownDead(const BasicBlock *BB) const override {
3180     return getKnown() && isAssumedDead(BB);
3181   }
3182 
3183   /// See AAIsDead::isAssumed(Instruction *I).
3184   bool isAssumedDead(const Instruction *I) const override {
3185     assert(I->getParent()->getParent() == getAnchorScope() &&
3186            "Instruction must be in the same anchor scope function.");
3187 
3188     if (!getAssumed())
3189       return false;
3190 
3191     // If it is not in AssumedLiveBlocks then it for sure dead.
3192     // Otherwise, it can still be after noreturn call in a live block.
3193     if (!AssumedLiveBlocks.count(I->getParent()))
3194       return true;
3195 
3196     // If it is not after a liveness barrier it is live.
3197     const Instruction *PrevI = I->getPrevNode();
3198     while (PrevI) {
3199       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3200         return true;
3201       PrevI = PrevI->getPrevNode();
3202     }
3203     return false;
3204   }
3205 
3206   /// See AAIsDead::isKnownDead(Instruction *I).
3207   bool isKnownDead(const Instruction *I) const override {
3208     return getKnown() && isAssumedDead(I);
3209   }
3210 
3211   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3212   /// that internal function called from \p BB should now be looked at.
3213   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3214     if (!AssumedLiveBlocks.insert(&BB).second)
3215       return false;
3216 
3217     // We assume that all of BB is (probably) live now and if there are calls to
3218     // internal functions we will assume that those are now live as well. This
3219     // is a performance optimization for blocks with calls to a lot of internal
3220     // functions. It can however cause dead functions to be treated as live.
3221     for (const Instruction &I : BB)
3222       if (const auto *CB = dyn_cast<CallBase>(&I))
3223         if (const Function *F = CB->getCalledFunction())
3224           if (F->hasLocalLinkage())
3225             A.markLiveInternalFunction(*F);
3226     return true;
3227   }
3228 
3229   /// Collection of instructions that need to be explored again, e.g., we
3230   /// did assume they do not transfer control to (one of their) successors.
3231   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3232 
3233   /// Collection of instructions that are known to not transfer control.
3234   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3235 
3236   /// Collection of all assumed live edges
3237   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3238 
3239   /// Collection of all assumed live BasicBlocks.
3240   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3241 };
3242 
3243 static bool
3244 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3245                         AbstractAttribute &AA,
3246                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3247   const IRPosition &IPos = IRPosition::callsite_function(CB);
3248 
3249   const auto &NoReturnAA =
3250       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3251   if (NoReturnAA.isAssumedNoReturn())
3252     return !NoReturnAA.isKnownNoReturn();
3253   if (CB.isTerminator())
3254     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3255   else
3256     AliveSuccessors.push_back(CB.getNextNode());
3257   return false;
3258 }
3259 
3260 static bool
3261 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3262                         AbstractAttribute &AA,
3263                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3264   bool UsedAssumedInformation =
3265       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3266 
3267   // First, determine if we can change an invoke to a call assuming the
3268   // callee is nounwind. This is not possible if the personality of the
3269   // function allows to catch asynchronous exceptions.
3270   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3271     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3272   } else {
3273     const IRPosition &IPos = IRPosition::callsite_function(II);
3274     const auto &AANoUnw =
3275         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3276     if (AANoUnw.isAssumedNoUnwind()) {
3277       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3278     } else {
3279       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3280     }
3281   }
3282   return UsedAssumedInformation;
3283 }
3284 
3285 static bool
3286 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3287                         AbstractAttribute &AA,
3288                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3289   bool UsedAssumedInformation = false;
3290   if (BI.getNumSuccessors() == 1) {
3291     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3292   } else {
3293     Optional<Constant *> C =
3294         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3295     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3296       // No value yet, assume both edges are dead.
3297     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3298       const BasicBlock *SuccBB =
3299           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3300       AliveSuccessors.push_back(&SuccBB->front());
3301     } else {
3302       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3303       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3304       UsedAssumedInformation = false;
3305     }
3306   }
3307   return UsedAssumedInformation;
3308 }
3309 
3310 static bool
3311 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3312                         AbstractAttribute &AA,
3313                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3314   bool UsedAssumedInformation = false;
3315   Optional<Constant *> C =
3316       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3317   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3318     // No value yet, assume all edges are dead.
3319   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3320     for (auto &CaseIt : SI.cases()) {
3321       if (CaseIt.getCaseValue() == C.getValue()) {
3322         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3323         return UsedAssumedInformation;
3324       }
3325     }
3326     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3327     return UsedAssumedInformation;
3328   } else {
3329     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3330       AliveSuccessors.push_back(&SuccBB->front());
3331   }
3332   return UsedAssumedInformation;
3333 }
3334 
3335 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3336   ChangeStatus Change = ChangeStatus::UNCHANGED;
3337 
3338   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3339                     << getAnchorScope()->size() << "] BBs and "
3340                     << ToBeExploredFrom.size() << " exploration points and "
3341                     << KnownDeadEnds.size() << " known dead ends\n");
3342 
3343   // Copy and clear the list of instructions we need to explore from. It is
3344   // refilled with instructions the next update has to look at.
3345   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3346                                                ToBeExploredFrom.end());
3347   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3348 
3349   SmallVector<const Instruction *, 8> AliveSuccessors;
3350   while (!Worklist.empty()) {
3351     const Instruction *I = Worklist.pop_back_val();
3352     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3353 
3354     // Fast forward for uninteresting instructions. We could look for UB here
3355     // though.
3356     while (!I->isTerminator() && !isa<CallBase>(I)) {
3357       Change = ChangeStatus::CHANGED;
3358       I = I->getNextNode();
3359     }
3360 
3361     AliveSuccessors.clear();
3362 
3363     bool UsedAssumedInformation = false;
3364     switch (I->getOpcode()) {
3365     // TODO: look for (assumed) UB to backwards propagate "deadness".
3366     default:
3367       assert(I->isTerminator() &&
3368              "Expected non-terminators to be handled already!");
3369       for (const BasicBlock *SuccBB : successors(I->getParent()))
3370         AliveSuccessors.push_back(&SuccBB->front());
3371       break;
3372     case Instruction::Call:
3373       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3374                                                        *this, AliveSuccessors);
3375       break;
3376     case Instruction::Invoke:
3377       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3378                                                        *this, AliveSuccessors);
3379       break;
3380     case Instruction::Br:
3381       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3382                                                        *this, AliveSuccessors);
3383       break;
3384     case Instruction::Switch:
3385       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3386                                                        *this, AliveSuccessors);
3387       break;
3388     }
3389 
3390     if (UsedAssumedInformation) {
3391       NewToBeExploredFrom.insert(I);
3392     } else {
3393       Change = ChangeStatus::CHANGED;
3394       if (AliveSuccessors.empty() ||
3395           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3396         KnownDeadEnds.insert(I);
3397     }
3398 
3399     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3400                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3401                       << UsedAssumedInformation << "\n");
3402 
3403     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3404       if (!I->isTerminator()) {
3405         assert(AliveSuccessors.size() == 1 &&
3406                "Non-terminator expected to have a single successor!");
3407         Worklist.push_back(AliveSuccessor);
3408       } else {
3409         // record the assumed live edge
3410         AssumedLiveEdges.insert(
3411             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3412         if (assumeLive(A, *AliveSuccessor->getParent()))
3413           Worklist.push_back(AliveSuccessor);
3414       }
3415     }
3416   }
3417 
3418   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3419 
3420   // If we know everything is live there is no need to query for liveness.
3421   // Instead, indicating a pessimistic fixpoint will cause the state to be
3422   // "invalid" and all queries to be answered conservatively without lookups.
3423   // To be in this state we have to (1) finished the exploration and (3) not
3424   // discovered any non-trivial dead end and (2) not ruled unreachable code
3425   // dead.
3426   if (ToBeExploredFrom.empty() &&
3427       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3428       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3429         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3430       }))
3431     return indicatePessimisticFixpoint();
3432   return Change;
3433 }
3434 
3435 /// Liveness information for a call sites.
3436 struct AAIsDeadCallSite final : AAIsDeadFunction {
3437   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3438       : AAIsDeadFunction(IRP, A) {}
3439 
3440   /// See AbstractAttribute::initialize(...).
3441   void initialize(Attributor &A) override {
3442     // TODO: Once we have call site specific value information we can provide
3443     //       call site specific liveness information and then it makes
3444     //       sense to specialize attributes for call sites instead of
3445     //       redirecting requests to the callee.
3446     llvm_unreachable("Abstract attributes for liveness are not "
3447                      "supported for call sites yet!");
3448   }
3449 
3450   /// See AbstractAttribute::updateImpl(...).
3451   ChangeStatus updateImpl(Attributor &A) override {
3452     return indicatePessimisticFixpoint();
3453   }
3454 
3455   /// See AbstractAttribute::trackStatistics()
3456   void trackStatistics() const override {}
3457 };
3458 
3459 /// -------------------- Dereferenceable Argument Attribute --------------------
3460 
3461 template <>
3462 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3463                                                      const DerefState &R) {
3464   ChangeStatus CS0 =
3465       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3466   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3467   return CS0 | CS1;
3468 }
3469 
3470 struct AADereferenceableImpl : AADereferenceable {
3471   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3472       : AADereferenceable(IRP, A) {}
3473   using StateType = DerefState;
3474 
3475   /// See AbstractAttribute::initialize(...).
3476   void initialize(Attributor &A) override {
3477     SmallVector<Attribute, 4> Attrs;
3478     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3479              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3480     for (const Attribute &Attr : Attrs)
3481       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3482 
3483     const IRPosition &IRP = this->getIRPosition();
3484     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3485 
3486     bool CanBeNull, CanBeFreed;
3487     takeKnownDerefBytesMaximum(
3488         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3489             A.getDataLayout(), CanBeNull, CanBeFreed));
3490 
3491     bool IsFnInterface = IRP.isFnInterfaceKind();
3492     Function *FnScope = IRP.getAnchorScope();
3493     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3494       indicatePessimisticFixpoint();
3495       return;
3496     }
3497 
3498     if (Instruction *CtxI = getCtxI())
3499       followUsesInMBEC(*this, A, getState(), *CtxI);
3500   }
3501 
3502   /// See AbstractAttribute::getState()
3503   /// {
3504   StateType &getState() override { return *this; }
3505   const StateType &getState() const override { return *this; }
3506   /// }
3507 
3508   /// Helper function for collecting accessed bytes in must-be-executed-context
3509   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3510                               DerefState &State) {
3511     const Value *UseV = U->get();
3512     if (!UseV->getType()->isPointerTy())
3513       return;
3514 
3515     Type *PtrTy = UseV->getType();
3516     const DataLayout &DL = A.getDataLayout();
3517     int64_t Offset;
3518     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3519             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3520       if (Base == &getAssociatedValue() &&
3521           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3522         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3523         State.addAccessedBytes(Offset, Size);
3524       }
3525     }
3526   }
3527 
3528   /// See followUsesInMBEC
3529   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3530                        AADereferenceable::StateType &State) {
3531     bool IsNonNull = false;
3532     bool TrackUse = false;
3533     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3534         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3535     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3536                       << " for instruction " << *I << "\n");
3537 
3538     addAccessedBytesForUse(A, U, I, State);
3539     State.takeKnownDerefBytesMaximum(DerefBytes);
3540     return TrackUse;
3541   }
3542 
3543   /// See AbstractAttribute::manifest(...).
3544   ChangeStatus manifest(Attributor &A) override {
3545     ChangeStatus Change = AADereferenceable::manifest(A);
3546     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3547       removeAttrs({Attribute::DereferenceableOrNull});
3548       return ChangeStatus::CHANGED;
3549     }
3550     return Change;
3551   }
3552 
3553   void getDeducedAttributes(LLVMContext &Ctx,
3554                             SmallVectorImpl<Attribute> &Attrs) const override {
3555     // TODO: Add *_globally support
3556     if (isAssumedNonNull())
3557       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3558           Ctx, getAssumedDereferenceableBytes()));
3559     else
3560       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3561           Ctx, getAssumedDereferenceableBytes()));
3562   }
3563 
3564   /// See AbstractAttribute::getAsStr().
3565   const std::string getAsStr() const override {
3566     if (!getAssumedDereferenceableBytes())
3567       return "unknown-dereferenceable";
3568     return std::string("dereferenceable") +
3569            (isAssumedNonNull() ? "" : "_or_null") +
3570            (isAssumedGlobal() ? "_globally" : "") + "<" +
3571            std::to_string(getKnownDereferenceableBytes()) + "-" +
3572            std::to_string(getAssumedDereferenceableBytes()) + ">";
3573   }
3574 };
3575 
3576 /// Dereferenceable attribute for a floating value.
3577 struct AADereferenceableFloating : AADereferenceableImpl {
3578   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3579       : AADereferenceableImpl(IRP, A) {}
3580 
3581   /// See AbstractAttribute::updateImpl(...).
3582   ChangeStatus updateImpl(Attributor &A) override {
3583     const DataLayout &DL = A.getDataLayout();
3584 
3585     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3586                             bool Stripped) -> bool {
3587       unsigned IdxWidth =
3588           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3589       APInt Offset(IdxWidth, 0);
3590       const Value *Base =
3591           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3592 
3593       const auto &AA = A.getAAFor<AADereferenceable>(
3594           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3595       int64_t DerefBytes = 0;
3596       if (!Stripped && this == &AA) {
3597         // Use IR information if we did not strip anything.
3598         // TODO: track globally.
3599         bool CanBeNull, CanBeFreed;
3600         DerefBytes =
3601             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3602         T.GlobalState.indicatePessimisticFixpoint();
3603       } else {
3604         const DerefState &DS = AA.getState();
3605         DerefBytes = DS.DerefBytesState.getAssumed();
3606         T.GlobalState &= DS.GlobalState;
3607       }
3608 
3609       // For now we do not try to "increase" dereferenceability due to negative
3610       // indices as we first have to come up with code to deal with loops and
3611       // for overflows of the dereferenceable bytes.
3612       int64_t OffsetSExt = Offset.getSExtValue();
3613       if (OffsetSExt < 0)
3614         OffsetSExt = 0;
3615 
3616       T.takeAssumedDerefBytesMinimum(
3617           std::max(int64_t(0), DerefBytes - OffsetSExt));
3618 
3619       if (this == &AA) {
3620         if (!Stripped) {
3621           // If nothing was stripped IR information is all we got.
3622           T.takeKnownDerefBytesMaximum(
3623               std::max(int64_t(0), DerefBytes - OffsetSExt));
3624           T.indicatePessimisticFixpoint();
3625         } else if (OffsetSExt > 0) {
3626           // If something was stripped but there is circular reasoning we look
3627           // for the offset. If it is positive we basically decrease the
3628           // dereferenceable bytes in a circluar loop now, which will simply
3629           // drive them down to the known value in a very slow way which we
3630           // can accelerate.
3631           T.indicatePessimisticFixpoint();
3632         }
3633       }
3634 
3635       return T.isValidState();
3636     };
3637 
3638     DerefState T;
3639     if (!genericValueTraversal<AADereferenceable, DerefState>(
3640             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3641       return indicatePessimisticFixpoint();
3642 
3643     return clampStateAndIndicateChange(getState(), T);
3644   }
3645 
3646   /// See AbstractAttribute::trackStatistics()
3647   void trackStatistics() const override {
3648     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3649   }
3650 };
3651 
3652 /// Dereferenceable attribute for a return value.
3653 struct AADereferenceableReturned final
3654     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3655   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3656       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3657             IRP, A) {}
3658 
3659   /// See AbstractAttribute::trackStatistics()
3660   void trackStatistics() const override {
3661     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3662   }
3663 };
3664 
3665 /// Dereferenceable attribute for an argument
3666 struct AADereferenceableArgument final
3667     : AAArgumentFromCallSiteArguments<AADereferenceable,
3668                                       AADereferenceableImpl> {
3669   using Base =
3670       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3671   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3672       : Base(IRP, A) {}
3673 
3674   /// See AbstractAttribute::trackStatistics()
3675   void trackStatistics() const override {
3676     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3677   }
3678 };
3679 
3680 /// Dereferenceable attribute for a call site argument.
3681 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3682   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3683       : AADereferenceableFloating(IRP, A) {}
3684 
3685   /// See AbstractAttribute::trackStatistics()
3686   void trackStatistics() const override {
3687     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3688   }
3689 };
3690 
3691 /// Dereferenceable attribute deduction for a call site return value.
3692 struct AADereferenceableCallSiteReturned final
3693     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3694   using Base =
3695       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3696   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3697       : Base(IRP, A) {}
3698 
3699   /// See AbstractAttribute::trackStatistics()
3700   void trackStatistics() const override {
3701     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3702   }
3703 };
3704 
3705 // ------------------------ Align Argument Attribute ------------------------
3706 
3707 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3708                                     Value &AssociatedValue, const Use *U,
3709                                     const Instruction *I, bool &TrackUse) {
3710   // We need to follow common pointer manipulation uses to the accesses they
3711   // feed into.
3712   if (isa<CastInst>(I)) {
3713     // Follow all but ptr2int casts.
3714     TrackUse = !isa<PtrToIntInst>(I);
3715     return 0;
3716   }
3717   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3718     if (GEP->hasAllConstantIndices())
3719       TrackUse = true;
3720     return 0;
3721   }
3722 
3723   MaybeAlign MA;
3724   if (const auto *CB = dyn_cast<CallBase>(I)) {
3725     if (CB->isBundleOperand(U) || CB->isCallee(U))
3726       return 0;
3727 
3728     unsigned ArgNo = CB->getArgOperandNo(U);
3729     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3730     // As long as we only use known information there is no need to track
3731     // dependences here.
3732     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3733     MA = MaybeAlign(AlignAA.getKnownAlign());
3734   }
3735 
3736   const DataLayout &DL = A.getDataLayout();
3737   const Value *UseV = U->get();
3738   if (auto *SI = dyn_cast<StoreInst>(I)) {
3739     if (SI->getPointerOperand() == UseV)
3740       MA = SI->getAlign();
3741   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3742     if (LI->getPointerOperand() == UseV)
3743       MA = LI->getAlign();
3744   }
3745 
3746   if (!MA || *MA <= QueryingAA.getKnownAlign())
3747     return 0;
3748 
3749   unsigned Alignment = MA->value();
3750   int64_t Offset;
3751 
3752   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3753     if (Base == &AssociatedValue) {
3754       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3755       // So we can say that the maximum power of two which is a divisor of
3756       // gcd(Offset, Alignment) is an alignment.
3757 
3758       uint32_t gcd =
3759           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3760       Alignment = llvm::PowerOf2Floor(gcd);
3761     }
3762   }
3763 
3764   return Alignment;
3765 }
3766 
3767 struct AAAlignImpl : AAAlign {
3768   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3769 
3770   /// See AbstractAttribute::initialize(...).
3771   void initialize(Attributor &A) override {
3772     SmallVector<Attribute, 4> Attrs;
3773     getAttrs({Attribute::Alignment}, Attrs);
3774     for (const Attribute &Attr : Attrs)
3775       takeKnownMaximum(Attr.getValueAsInt());
3776 
3777     Value &V = getAssociatedValue();
3778     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3779     //       use of the function pointer. This was caused by D73131. We want to
3780     //       avoid this for function pointers especially because we iterate
3781     //       their uses and int2ptr is not handled. It is not a correctness
3782     //       problem though!
3783     if (!V.getType()->getPointerElementType()->isFunctionTy())
3784       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3785 
3786     if (getIRPosition().isFnInterfaceKind() &&
3787         (!getAnchorScope() ||
3788          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3789       indicatePessimisticFixpoint();
3790       return;
3791     }
3792 
3793     if (Instruction *CtxI = getCtxI())
3794       followUsesInMBEC(*this, A, getState(), *CtxI);
3795   }
3796 
3797   /// See AbstractAttribute::manifest(...).
3798   ChangeStatus manifest(Attributor &A) override {
3799     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3800 
3801     // Check for users that allow alignment annotations.
3802     Value &AssociatedValue = getAssociatedValue();
3803     for (const Use &U : AssociatedValue.uses()) {
3804       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3805         if (SI->getPointerOperand() == &AssociatedValue)
3806           if (SI->getAlignment() < getAssumedAlign()) {
3807             STATS_DECLTRACK(AAAlign, Store,
3808                             "Number of times alignment added to a store");
3809             SI->setAlignment(Align(getAssumedAlign()));
3810             LoadStoreChanged = ChangeStatus::CHANGED;
3811           }
3812       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3813         if (LI->getPointerOperand() == &AssociatedValue)
3814           if (LI->getAlignment() < getAssumedAlign()) {
3815             LI->setAlignment(Align(getAssumedAlign()));
3816             STATS_DECLTRACK(AAAlign, Load,
3817                             "Number of times alignment added to a load");
3818             LoadStoreChanged = ChangeStatus::CHANGED;
3819           }
3820       }
3821     }
3822 
3823     ChangeStatus Changed = AAAlign::manifest(A);
3824 
3825     Align InheritAlign =
3826         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3827     if (InheritAlign >= getAssumedAlign())
3828       return LoadStoreChanged;
3829     return Changed | LoadStoreChanged;
3830   }
3831 
3832   // TODO: Provide a helper to determine the implied ABI alignment and check in
3833   //       the existing manifest method and a new one for AAAlignImpl that value
3834   //       to avoid making the alignment explicit if it did not improve.
3835 
3836   /// See AbstractAttribute::getDeducedAttributes
3837   virtual void
3838   getDeducedAttributes(LLVMContext &Ctx,
3839                        SmallVectorImpl<Attribute> &Attrs) const override {
3840     if (getAssumedAlign() > 1)
3841       Attrs.emplace_back(
3842           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3843   }
3844 
3845   /// See followUsesInMBEC
3846   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3847                        AAAlign::StateType &State) {
3848     bool TrackUse = false;
3849 
3850     unsigned int KnownAlign =
3851         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3852     State.takeKnownMaximum(KnownAlign);
3853 
3854     return TrackUse;
3855   }
3856 
3857   /// See AbstractAttribute::getAsStr().
3858   const std::string getAsStr() const override {
3859     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3860                                 "-" + std::to_string(getAssumedAlign()) + ">")
3861                              : "unknown-align";
3862   }
3863 };
3864 
3865 /// Align attribute for a floating value.
3866 struct AAAlignFloating : AAAlignImpl {
3867   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3868 
3869   /// See AbstractAttribute::updateImpl(...).
3870   ChangeStatus updateImpl(Attributor &A) override {
3871     const DataLayout &DL = A.getDataLayout();
3872 
3873     auto VisitValueCB = [&](Value &V, const Instruction *,
3874                             AAAlign::StateType &T, bool Stripped) -> bool {
3875       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3876                                            DepClassTy::REQUIRED);
3877       if (!Stripped && this == &AA) {
3878         int64_t Offset;
3879         unsigned Alignment = 1;
3880         if (const Value *Base =
3881                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3882           Align PA = Base->getPointerAlignment(DL);
3883           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3884           // So we can say that the maximum power of two which is a divisor of
3885           // gcd(Offset, Alignment) is an alignment.
3886 
3887           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3888                                                uint32_t(PA.value()));
3889           Alignment = llvm::PowerOf2Floor(gcd);
3890         } else {
3891           Alignment = V.getPointerAlignment(DL).value();
3892         }
3893         // Use only IR information if we did not strip anything.
3894         T.takeKnownMaximum(Alignment);
3895         T.indicatePessimisticFixpoint();
3896       } else {
3897         // Use abstract attribute information.
3898         const AAAlign::StateType &DS = AA.getState();
3899         T ^= DS;
3900       }
3901       return T.isValidState();
3902     };
3903 
3904     StateType T;
3905     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3906                                                    VisitValueCB, getCtxI()))
3907       return indicatePessimisticFixpoint();
3908 
3909     // TODO: If we know we visited all incoming values, thus no are assumed
3910     // dead, we can take the known information from the state T.
3911     return clampStateAndIndicateChange(getState(), T);
3912   }
3913 
3914   /// See AbstractAttribute::trackStatistics()
3915   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3916 };
3917 
3918 /// Align attribute for function return value.
3919 struct AAAlignReturned final
3920     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3921   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3922   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3923 
3924   /// See AbstractAttribute::initialize(...).
3925   void initialize(Attributor &A) override {
3926     Base::initialize(A);
3927     Function *F = getAssociatedFunction();
3928     if (!F || F->isDeclaration())
3929       indicatePessimisticFixpoint();
3930   }
3931 
3932   /// See AbstractAttribute::trackStatistics()
3933   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3934 };
3935 
3936 /// Align attribute for function argument.
3937 struct AAAlignArgument final
3938     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3939   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3940   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3941 
3942   /// See AbstractAttribute::manifest(...).
3943   ChangeStatus manifest(Attributor &A) override {
3944     // If the associated argument is involved in a must-tail call we give up
3945     // because we would need to keep the argument alignments of caller and
3946     // callee in-sync. Just does not seem worth the trouble right now.
3947     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3948       return ChangeStatus::UNCHANGED;
3949     return Base::manifest(A);
3950   }
3951 
3952   /// See AbstractAttribute::trackStatistics()
3953   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3954 };
3955 
3956 struct AAAlignCallSiteArgument final : AAAlignFloating {
3957   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3958       : AAAlignFloating(IRP, A) {}
3959 
3960   /// See AbstractAttribute::manifest(...).
3961   ChangeStatus manifest(Attributor &A) override {
3962     // If the associated argument is involved in a must-tail call we give up
3963     // because we would need to keep the argument alignments of caller and
3964     // callee in-sync. Just does not seem worth the trouble right now.
3965     if (Argument *Arg = getAssociatedArgument())
3966       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3967         return ChangeStatus::UNCHANGED;
3968     ChangeStatus Changed = AAAlignImpl::manifest(A);
3969     Align InheritAlign =
3970         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3971     if (InheritAlign >= getAssumedAlign())
3972       Changed = ChangeStatus::UNCHANGED;
3973     return Changed;
3974   }
3975 
3976   /// See AbstractAttribute::updateImpl(Attributor &A).
3977   ChangeStatus updateImpl(Attributor &A) override {
3978     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3979     if (Argument *Arg = getAssociatedArgument()) {
3980       // We only take known information from the argument
3981       // so we do not need to track a dependence.
3982       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3983           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3984       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3985     }
3986     return Changed;
3987   }
3988 
3989   /// See AbstractAttribute::trackStatistics()
3990   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3991 };
3992 
3993 /// Align attribute deduction for a call site return value.
3994 struct AAAlignCallSiteReturned final
3995     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3996   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3997   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3998       : Base(IRP, A) {}
3999 
4000   /// See AbstractAttribute::initialize(...).
4001   void initialize(Attributor &A) override {
4002     Base::initialize(A);
4003     Function *F = getAssociatedFunction();
4004     if (!F || F->isDeclaration())
4005       indicatePessimisticFixpoint();
4006   }
4007 
4008   /// See AbstractAttribute::trackStatistics()
4009   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4010 };
4011 
4012 /// ------------------ Function No-Return Attribute ----------------------------
4013 struct AANoReturnImpl : public AANoReturn {
4014   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4015 
4016   /// See AbstractAttribute::initialize(...).
4017   void initialize(Attributor &A) override {
4018     AANoReturn::initialize(A);
4019     Function *F = getAssociatedFunction();
4020     if (!F || F->isDeclaration())
4021       indicatePessimisticFixpoint();
4022   }
4023 
4024   /// See AbstractAttribute::getAsStr().
4025   const std::string getAsStr() const override {
4026     return getAssumed() ? "noreturn" : "may-return";
4027   }
4028 
4029   /// See AbstractAttribute::updateImpl(Attributor &A).
4030   virtual ChangeStatus updateImpl(Attributor &A) override {
4031     auto CheckForNoReturn = [](Instruction &) { return false; };
4032     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4033                                    {(unsigned)Instruction::Ret}))
4034       return indicatePessimisticFixpoint();
4035     return ChangeStatus::UNCHANGED;
4036   }
4037 };
4038 
4039 struct AANoReturnFunction final : AANoReturnImpl {
4040   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4041       : AANoReturnImpl(IRP, A) {}
4042 
4043   /// See AbstractAttribute::trackStatistics()
4044   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4045 };
4046 
4047 /// NoReturn attribute deduction for a call sites.
4048 struct AANoReturnCallSite final : AANoReturnImpl {
4049   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4050       : AANoReturnImpl(IRP, A) {}
4051 
4052   /// See AbstractAttribute::initialize(...).
4053   void initialize(Attributor &A) override {
4054     AANoReturnImpl::initialize(A);
4055     if (Function *F = getAssociatedFunction()) {
4056       const IRPosition &FnPos = IRPosition::function(*F);
4057       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4058       if (!FnAA.isAssumedNoReturn())
4059         indicatePessimisticFixpoint();
4060     }
4061   }
4062 
4063   /// See AbstractAttribute::updateImpl(...).
4064   ChangeStatus updateImpl(Attributor &A) override {
4065     // TODO: Once we have call site specific value information we can provide
4066     //       call site specific liveness information and then it makes
4067     //       sense to specialize attributes for call sites arguments instead of
4068     //       redirecting requests to the callee argument.
4069     Function *F = getAssociatedFunction();
4070     const IRPosition &FnPos = IRPosition::function(*F);
4071     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4072     return clampStateAndIndicateChange(getState(), FnAA.getState());
4073   }
4074 
4075   /// See AbstractAttribute::trackStatistics()
4076   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4077 };
4078 
4079 /// ----------------------- Variable Capturing ---------------------------------
4080 
4081 /// A class to hold the state of for no-capture attributes.
4082 struct AANoCaptureImpl : public AANoCapture {
4083   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4084 
4085   /// See AbstractAttribute::initialize(...).
4086   void initialize(Attributor &A) override {
4087     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4088       indicateOptimisticFixpoint();
4089       return;
4090     }
4091     Function *AnchorScope = getAnchorScope();
4092     if (isFnInterfaceKind() &&
4093         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4094       indicatePessimisticFixpoint();
4095       return;
4096     }
4097 
4098     // You cannot "capture" null in the default address space.
4099     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4100         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4101       indicateOptimisticFixpoint();
4102       return;
4103     }
4104 
4105     const Function *F =
4106         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4107 
4108     // Check what state the associated function can actually capture.
4109     if (F)
4110       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4111     else
4112       indicatePessimisticFixpoint();
4113   }
4114 
4115   /// See AbstractAttribute::updateImpl(...).
4116   ChangeStatus updateImpl(Attributor &A) override;
4117 
4118   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4119   virtual void
4120   getDeducedAttributes(LLVMContext &Ctx,
4121                        SmallVectorImpl<Attribute> &Attrs) const override {
4122     if (!isAssumedNoCaptureMaybeReturned())
4123       return;
4124 
4125     if (isArgumentPosition()) {
4126       if (isAssumedNoCapture())
4127         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4128       else if (ManifestInternal)
4129         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4130     }
4131   }
4132 
4133   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4134   /// depending on the ability of the function associated with \p IRP to capture
4135   /// state in memory and through "returning/throwing", respectively.
4136   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4137                                                    const Function &F,
4138                                                    BitIntegerState &State) {
4139     // TODO: Once we have memory behavior attributes we should use them here.
4140 
4141     // If we know we cannot communicate or write to memory, we do not care about
4142     // ptr2int anymore.
4143     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4144         F.getReturnType()->isVoidTy()) {
4145       State.addKnownBits(NO_CAPTURE);
4146       return;
4147     }
4148 
4149     // A function cannot capture state in memory if it only reads memory, it can
4150     // however return/throw state and the state might be influenced by the
4151     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4152     if (F.onlyReadsMemory())
4153       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4154 
4155     // A function cannot communicate state back if it does not through
4156     // exceptions and doesn not return values.
4157     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4158       State.addKnownBits(NOT_CAPTURED_IN_RET);
4159 
4160     // Check existing "returned" attributes.
4161     int ArgNo = IRP.getCalleeArgNo();
4162     if (F.doesNotThrow() && ArgNo >= 0) {
4163       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4164         if (F.hasParamAttribute(u, Attribute::Returned)) {
4165           if (u == unsigned(ArgNo))
4166             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4167           else if (F.onlyReadsMemory())
4168             State.addKnownBits(NO_CAPTURE);
4169           else
4170             State.addKnownBits(NOT_CAPTURED_IN_RET);
4171           break;
4172         }
4173     }
4174   }
4175 
4176   /// See AbstractState::getAsStr().
4177   const std::string getAsStr() const override {
4178     if (isKnownNoCapture())
4179       return "known not-captured";
4180     if (isAssumedNoCapture())
4181       return "assumed not-captured";
4182     if (isKnownNoCaptureMaybeReturned())
4183       return "known not-captured-maybe-returned";
4184     if (isAssumedNoCaptureMaybeReturned())
4185       return "assumed not-captured-maybe-returned";
4186     return "assumed-captured";
4187   }
4188 };
4189 
4190 /// Attributor-aware capture tracker.
4191 struct AACaptureUseTracker final : public CaptureTracker {
4192 
4193   /// Create a capture tracker that can lookup in-flight abstract attributes
4194   /// through the Attributor \p A.
4195   ///
4196   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4197   /// search is stopped. If a use leads to a return instruction,
4198   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4199   /// If a use leads to a ptr2int which may capture the value,
4200   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4201   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4202   /// set. All values in \p PotentialCopies are later tracked as well. For every
4203   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4204   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4205   /// conservatively set to true.
4206   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4207                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4208                       SmallVectorImpl<const Value *> &PotentialCopies,
4209                       unsigned &RemainingUsesToExplore)
4210       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4211         PotentialCopies(PotentialCopies),
4212         RemainingUsesToExplore(RemainingUsesToExplore) {}
4213 
4214   /// Determine if \p V maybe captured. *Also updates the state!*
4215   bool valueMayBeCaptured(const Value *V) {
4216     if (V->getType()->isPointerTy()) {
4217       PointerMayBeCaptured(V, this);
4218     } else {
4219       State.indicatePessimisticFixpoint();
4220     }
4221     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4222   }
4223 
4224   /// See CaptureTracker::tooManyUses().
4225   void tooManyUses() override {
4226     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4227   }
4228 
4229   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4230     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4231       return true;
4232     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4233         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4234     return DerefAA.getAssumedDereferenceableBytes();
4235   }
4236 
4237   /// See CaptureTracker::captured(...).
4238   bool captured(const Use *U) override {
4239     Instruction *UInst = cast<Instruction>(U->getUser());
4240     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4241                       << "\n");
4242 
4243     // Because we may reuse the tracker multiple times we keep track of the
4244     // number of explored uses ourselves as well.
4245     if (RemainingUsesToExplore-- == 0) {
4246       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4247       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4248                           /* Return */ true);
4249     }
4250 
4251     // Deal with ptr2int by following uses.
4252     if (isa<PtrToIntInst>(UInst)) {
4253       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4254       return valueMayBeCaptured(UInst);
4255     }
4256 
4257     // Explicitly catch return instructions.
4258     if (isa<ReturnInst>(UInst))
4259       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4260                           /* Return */ true);
4261 
4262     // For now we only use special logic for call sites. However, the tracker
4263     // itself knows about a lot of other non-capturing cases already.
4264     auto *CB = dyn_cast<CallBase>(UInst);
4265     if (!CB || !CB->isArgOperand(U))
4266       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4267                           /* Return */ true);
4268 
4269     unsigned ArgNo = CB->getArgOperandNo(U);
4270     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4271     // If we have a abstract no-capture attribute for the argument we can use
4272     // it to justify a non-capture attribute here. This allows recursion!
4273     auto &ArgNoCaptureAA =
4274         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4275     if (ArgNoCaptureAA.isAssumedNoCapture())
4276       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4277                           /* Return */ false);
4278     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4279       addPotentialCopy(*CB);
4280       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4281                           /* Return */ false);
4282     }
4283 
4284     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4285     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4286                         /* Return */ true);
4287   }
4288 
4289   /// Register \p CS as potential copy of the value we are checking.
4290   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4291 
4292   /// See CaptureTracker::shouldExplore(...).
4293   bool shouldExplore(const Use *U) override {
4294     // Check liveness and ignore droppable users.
4295     return !U->getUser()->isDroppable() &&
4296            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4297   }
4298 
4299   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4300   /// \p CapturedInRet, then return the appropriate value for use in the
4301   /// CaptureTracker::captured() interface.
4302   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4303                     bool CapturedInRet) {
4304     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4305                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4306     if (CapturedInMem)
4307       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4308     if (CapturedInInt)
4309       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4310     if (CapturedInRet)
4311       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4312     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4313   }
4314 
4315 private:
4316   /// The attributor providing in-flight abstract attributes.
4317   Attributor &A;
4318 
4319   /// The abstract attribute currently updated.
4320   AANoCapture &NoCaptureAA;
4321 
4322   /// The abstract liveness state.
4323   const AAIsDead &IsDeadAA;
4324 
4325   /// The state currently updated.
4326   AANoCapture::StateType &State;
4327 
4328   /// Set of potential copies of the tracked value.
4329   SmallVectorImpl<const Value *> &PotentialCopies;
4330 
4331   /// Global counter to limit the number of explored uses.
4332   unsigned &RemainingUsesToExplore;
4333 };
4334 
4335 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4336   const IRPosition &IRP = getIRPosition();
4337   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4338                                         : &IRP.getAssociatedValue();
4339   if (!V)
4340     return indicatePessimisticFixpoint();
4341 
4342   const Function *F =
4343       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4344   assert(F && "Expected a function!");
4345   const IRPosition &FnPos = IRPosition::function(*F);
4346   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4347 
4348   AANoCapture::StateType T;
4349 
4350   // Readonly means we cannot capture through memory.
4351   const auto &FnMemAA =
4352       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4353   if (FnMemAA.isAssumedReadOnly()) {
4354     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4355     if (FnMemAA.isKnownReadOnly())
4356       addKnownBits(NOT_CAPTURED_IN_MEM);
4357     else
4358       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4359   }
4360 
4361   // Make sure all returned values are different than the underlying value.
4362   // TODO: we could do this in a more sophisticated way inside
4363   //       AAReturnedValues, e.g., track all values that escape through returns
4364   //       directly somehow.
4365   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4366     bool SeenConstant = false;
4367     for (auto &It : RVAA.returned_values()) {
4368       if (isa<Constant>(It.first)) {
4369         if (SeenConstant)
4370           return false;
4371         SeenConstant = true;
4372       } else if (!isa<Argument>(It.first) ||
4373                  It.first == getAssociatedArgument())
4374         return false;
4375     }
4376     return true;
4377   };
4378 
4379   const auto &NoUnwindAA =
4380       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4381   if (NoUnwindAA.isAssumedNoUnwind()) {
4382     bool IsVoidTy = F->getReturnType()->isVoidTy();
4383     const AAReturnedValues *RVAA =
4384         IsVoidTy ? nullptr
4385                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4386 
4387                                                  DepClassTy::OPTIONAL);
4388     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4389       T.addKnownBits(NOT_CAPTURED_IN_RET);
4390       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4391         return ChangeStatus::UNCHANGED;
4392       if (NoUnwindAA.isKnownNoUnwind() &&
4393           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4394         addKnownBits(NOT_CAPTURED_IN_RET);
4395         if (isKnown(NOT_CAPTURED_IN_MEM))
4396           return indicateOptimisticFixpoint();
4397       }
4398     }
4399   }
4400 
4401   // Use the CaptureTracker interface and logic with the specialized tracker,
4402   // defined in AACaptureUseTracker, that can look at in-flight abstract
4403   // attributes and directly updates the assumed state.
4404   SmallVector<const Value *, 4> PotentialCopies;
4405   unsigned RemainingUsesToExplore =
4406       getDefaultMaxUsesToExploreForCaptureTracking();
4407   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4408                               RemainingUsesToExplore);
4409 
4410   // Check all potential copies of the associated value until we can assume
4411   // none will be captured or we have to assume at least one might be.
4412   unsigned Idx = 0;
4413   PotentialCopies.push_back(V);
4414   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4415     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4416 
4417   AANoCapture::StateType &S = getState();
4418   auto Assumed = S.getAssumed();
4419   S.intersectAssumedBits(T.getAssumed());
4420   if (!isAssumedNoCaptureMaybeReturned())
4421     return indicatePessimisticFixpoint();
4422   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4423                                    : ChangeStatus::CHANGED;
4424 }
4425 
4426 /// NoCapture attribute for function arguments.
4427 struct AANoCaptureArgument final : AANoCaptureImpl {
4428   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4429       : AANoCaptureImpl(IRP, A) {}
4430 
4431   /// See AbstractAttribute::trackStatistics()
4432   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4433 };
4434 
4435 /// NoCapture attribute for call site arguments.
4436 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4437   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4438       : AANoCaptureImpl(IRP, A) {}
4439 
4440   /// See AbstractAttribute::initialize(...).
4441   void initialize(Attributor &A) override {
4442     if (Argument *Arg = getAssociatedArgument())
4443       if (Arg->hasByValAttr())
4444         indicateOptimisticFixpoint();
4445     AANoCaptureImpl::initialize(A);
4446   }
4447 
4448   /// See AbstractAttribute::updateImpl(...).
4449   ChangeStatus updateImpl(Attributor &A) override {
4450     // TODO: Once we have call site specific value information we can provide
4451     //       call site specific liveness information and then it makes
4452     //       sense to specialize attributes for call sites arguments instead of
4453     //       redirecting requests to the callee argument.
4454     Argument *Arg = getAssociatedArgument();
4455     if (!Arg)
4456       return indicatePessimisticFixpoint();
4457     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4458     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4459     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4460   }
4461 
4462   /// See AbstractAttribute::trackStatistics()
4463   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4464 };
4465 
4466 /// NoCapture attribute for floating values.
4467 struct AANoCaptureFloating final : AANoCaptureImpl {
4468   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4469       : AANoCaptureImpl(IRP, A) {}
4470 
4471   /// See AbstractAttribute::trackStatistics()
4472   void trackStatistics() const override {
4473     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4474   }
4475 };
4476 
4477 /// NoCapture attribute for function return value.
4478 struct AANoCaptureReturned final : AANoCaptureImpl {
4479   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4480       : AANoCaptureImpl(IRP, A) {
4481     llvm_unreachable("NoCapture is not applicable to function returns!");
4482   }
4483 
4484   /// See AbstractAttribute::initialize(...).
4485   void initialize(Attributor &A) override {
4486     llvm_unreachable("NoCapture is not applicable to function returns!");
4487   }
4488 
4489   /// See AbstractAttribute::updateImpl(...).
4490   ChangeStatus updateImpl(Attributor &A) override {
4491     llvm_unreachable("NoCapture is not applicable to function returns!");
4492   }
4493 
4494   /// See AbstractAttribute::trackStatistics()
4495   void trackStatistics() const override {}
4496 };
4497 
4498 /// NoCapture attribute deduction for a call site return value.
4499 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4500   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4501       : AANoCaptureImpl(IRP, A) {}
4502 
4503   /// See AbstractAttribute::initialize(...).
4504   void initialize(Attributor &A) override {
4505     const Function *F = getAnchorScope();
4506     // Check what state the associated function can actually capture.
4507     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4508   }
4509 
4510   /// See AbstractAttribute::trackStatistics()
4511   void trackStatistics() const override {
4512     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4513   }
4514 };
4515 
4516 /// ------------------ Value Simplify Attribute ----------------------------
4517 struct AAValueSimplifyImpl : AAValueSimplify {
4518   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4519       : AAValueSimplify(IRP, A) {}
4520 
4521   /// See AbstractAttribute::initialize(...).
4522   void initialize(Attributor &A) override {
4523     if (getAssociatedValue().getType()->isVoidTy())
4524       indicatePessimisticFixpoint();
4525   }
4526 
4527   /// See AbstractAttribute::getAsStr().
4528   const std::string getAsStr() const override {
4529     LLVM_DEBUG({
4530       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
4531       if (SimplifiedAssociatedValue)
4532         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
4533     });
4534     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4535                         : "not-simple";
4536   }
4537 
4538   /// See AbstractAttribute::trackStatistics()
4539   void trackStatistics() const override {}
4540 
4541   /// See AAValueSimplify::getAssumedSimplifiedValue()
4542   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4543     if (!getAssumed())
4544       return const_cast<Value *>(&getAssociatedValue());
4545     return SimplifiedAssociatedValue;
4546   }
4547 
4548   /// Helper function for querying AAValueSimplify and updating candicate.
4549   /// \param IRP The value position we are trying to unify with SimplifiedValue
4550   /// \param AccumulatedSimplifiedValue Current simplification result.
4551   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4552                              const IRPosition &IRP,
4553                              Optional<Value *> &AccumulatedSimplifiedValue) {
4554     // FIXME: Add a typecast support.
4555     bool UsedAssumedInformation = false;
4556     Optional<Value *> QueryingValueSimplified =
4557         A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
4558 
4559     if (!QueryingValueSimplified.hasValue())
4560       return true;
4561 
4562     if (!QueryingValueSimplified.getValue())
4563       return false;
4564 
4565     Value &QueryingValueSimplifiedUnwrapped =
4566         *QueryingValueSimplified.getValue();
4567 
4568     if (AccumulatedSimplifiedValue.hasValue() &&
4569         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4570         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4571       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4572     if (AccumulatedSimplifiedValue.hasValue() &&
4573         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4574       return true;
4575 
4576     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << IRP.getAssociatedValue()
4577                       << " is assumed to be "
4578                       << QueryingValueSimplifiedUnwrapped << "\n");
4579 
4580     AccumulatedSimplifiedValue = QueryingValueSimplified;
4581     return true;
4582   }
4583 
4584   /// Returns a candidate is found or not
4585   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4586     if (!getAssociatedValue().getType()->isIntegerTy())
4587       return false;
4588 
4589     // This will also pass the call base context.
4590     const auto &AA =
4591         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4592 
4593     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4594 
4595     if (!COpt.hasValue()) {
4596       SimplifiedAssociatedValue = llvm::None;
4597       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4598       return true;
4599     }
4600     if (auto *C = COpt.getValue()) {
4601       SimplifiedAssociatedValue = C;
4602       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4603       return true;
4604     }
4605     return false;
4606   }
4607 
4608   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4609     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4610       return true;
4611     if (askSimplifiedValueFor<AAPotentialValues>(A))
4612       return true;
4613     return false;
4614   }
4615 
4616   /// See AbstractAttribute::manifest(...).
4617   ChangeStatus manifest(Attributor &A) override {
4618     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4619 
4620     if (SimplifiedAssociatedValue.hasValue() &&
4621         !SimplifiedAssociatedValue.getValue())
4622       return Changed;
4623 
4624     Value &V = getAssociatedValue();
4625     auto *C = SimplifiedAssociatedValue.hasValue()
4626                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4627                   : UndefValue::get(V.getType());
4628     if (C && C != &V && !V.user_empty()) {
4629       Value *NewV = AA::getWithType(*C, *V.getType());
4630       // We can replace the AssociatedValue with the constant.
4631       if (NewV && NewV != &V) {
4632         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4633                           << " :: " << *this << "\n");
4634         if (A.changeValueAfterManifest(V, *NewV))
4635           Changed = ChangeStatus::CHANGED;
4636       }
4637     }
4638 
4639     return Changed | AAValueSimplify::manifest(A);
4640   }
4641 
4642   /// See AbstractState::indicatePessimisticFixpoint(...).
4643   ChangeStatus indicatePessimisticFixpoint() override {
4644     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4645     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4646     SimplifiedAssociatedValue = &getAssociatedValue();
4647     indicateOptimisticFixpoint();
4648     return ChangeStatus::CHANGED;
4649   }
4650 
4651 protected:
4652   // An assumed simplified value. Initially, it is set to Optional::None, which
4653   // means that the value is not clear under current assumption. If in the
4654   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4655   // returns orignal associated value.
4656   Optional<Value *> SimplifiedAssociatedValue;
4657 };
4658 
4659 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4660   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4661       : AAValueSimplifyImpl(IRP, A) {}
4662 
4663   void initialize(Attributor &A) override {
4664     AAValueSimplifyImpl::initialize(A);
4665     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4666       indicatePessimisticFixpoint();
4667     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4668                  Attribute::StructRet, Attribute::Nest},
4669                 /* IgnoreSubsumingPositions */ true))
4670       indicatePessimisticFixpoint();
4671 
4672     // FIXME: This is a hack to prevent us from propagating function poiner in
4673     // the new pass manager CGSCC pass as it creates call edges the
4674     // CallGraphUpdater cannot handle yet.
4675     Value &V = getAssociatedValue();
4676     if (V.getType()->isPointerTy() &&
4677         V.getType()->getPointerElementType()->isFunctionTy() &&
4678         !A.isModulePass())
4679       indicatePessimisticFixpoint();
4680   }
4681 
4682   /// See AbstractAttribute::updateImpl(...).
4683   ChangeStatus updateImpl(Attributor &A) override {
4684     // Byval is only replacable if it is readonly otherwise we would write into
4685     // the replaced value and not the copy that byval creates implicitly.
4686     Argument *Arg = getAssociatedArgument();
4687     if (Arg->hasByValAttr()) {
4688       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4689       //       there is no race by not copying a constant byval.
4690       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4691                                                        DepClassTy::REQUIRED);
4692       if (!MemAA.isAssumedReadOnly())
4693         return indicatePessimisticFixpoint();
4694     }
4695 
4696     auto Before = SimplifiedAssociatedValue;
4697 
4698     auto PredForCallSite = [&](AbstractCallSite ACS) {
4699       const IRPosition &ACSArgPos =
4700           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4701       // Check if a coresponding argument was found or if it is on not
4702       // associated (which can happen for callback calls).
4703       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4704         return false;
4705 
4706       // We can only propagate thread independent values through callbacks.
4707       // This is different to direct/indirect call sites because for them we
4708       // know the thread executing the caller and callee is the same. For
4709       // callbacks this is not guaranteed, thus a thread dependent value could
4710       // be different for the caller and callee, making it invalid to propagate.
4711       Value &ArgOp = ACSArgPos.getAssociatedValue();
4712       if (ACS.isCallbackCall())
4713         if (auto *C = dyn_cast<Constant>(&ArgOp))
4714           if (C->isThreadDependent())
4715             return false;
4716       return checkAndUpdate(A, *this, ACSArgPos, SimplifiedAssociatedValue);
4717     };
4718 
4719     // Generate a answer specific to a call site context.
4720     bool Success;
4721     bool AllCallSitesKnown;
4722     if (hasCallBaseContext())
4723       Success = PredForCallSite(
4724           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
4725     else
4726       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
4727                                        AllCallSitesKnown);
4728 
4729     if (!Success)
4730       if (!askSimplifiedValueForOtherAAs(A))
4731         return indicatePessimisticFixpoint();
4732 
4733     // If a candicate was found in this update, return CHANGED.
4734     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4735                                                : ChangeStatus ::CHANGED;
4736   }
4737 
4738   /// See AbstractAttribute::trackStatistics()
4739   void trackStatistics() const override {
4740     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4741   }
4742 };
4743 
4744 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4745   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4746       : AAValueSimplifyImpl(IRP, A) {}
4747 
4748   /// See AbstractAttribute::updateImpl(...).
4749   ChangeStatus updateImpl(Attributor &A) override {
4750     auto Before = SimplifiedAssociatedValue;
4751 
4752     auto PredForReturned = [&](Value &V) {
4753       return checkAndUpdate(A, *this,
4754                             IRPosition::value(V, getCallBaseContext()),
4755                             SimplifiedAssociatedValue);
4756     };
4757 
4758     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4759       if (!askSimplifiedValueForOtherAAs(A))
4760         return indicatePessimisticFixpoint();
4761 
4762     // If a candicate was found in this update, return CHANGED.
4763     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4764                                                : ChangeStatus ::CHANGED;
4765   }
4766 
4767   ChangeStatus manifest(Attributor &A) override {
4768     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4769 
4770     if (SimplifiedAssociatedValue.hasValue() &&
4771         !SimplifiedAssociatedValue.getValue())
4772       return Changed | AAValueSimplify::manifest(A);
4773 
4774     auto *C = SimplifiedAssociatedValue.hasValue()
4775                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4776                   : UndefValue::get(getAssociatedType());
4777     if (!C || C == &getAssociatedValue())
4778       return Changed | AAValueSimplify::manifest(A);
4779 
4780     auto PredForReturned =
4781         [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4782           // We can replace the AssociatedValue with the constant.
4783           if (&V == C || isa<UndefValue>(V))
4784             return true;
4785 
4786           for (ReturnInst *RI : RetInsts) {
4787             if (RI->getFunction() != getAnchorScope())
4788               continue;
4789             Value *NewV = AA::getWithType(*C, *RI->getReturnValue()->getType());
4790             if (!NewV)
4791               continue;
4792             LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4793                               << " in " << *RI << " :: " << *this << "\n");
4794             if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
4795               Changed = ChangeStatus::CHANGED;
4796           }
4797           return true;
4798         };
4799     A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4800 
4801     return Changed | AAValueSimplify::manifest(A);
4802   }
4803 
4804   /// See AbstractAttribute::trackStatistics()
4805   void trackStatistics() const override {
4806     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4807   }
4808 };
4809 
4810 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4811   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4812       : AAValueSimplifyImpl(IRP, A) {}
4813 
4814   /// See AbstractAttribute::initialize(...).
4815   void initialize(Attributor &A) override {
4816     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4817     //        Needs investigation.
4818     // AAValueSimplifyImpl::initialize(A);
4819     Value &V = getAnchorValue();
4820 
4821     // TODO: add other stuffs
4822     if (isa<Constant>(V))
4823       indicatePessimisticFixpoint();
4824   }
4825 
4826   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4827   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4828   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4829   /// updated and \p Changed is set appropriately.
4830   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4831                               ChangeStatus &Changed) {
4832     if (!ICmp)
4833       return false;
4834     if (!ICmp->isEquality())
4835       return false;
4836 
4837     // This is a comparison with == or !-. We check for nullptr now.
4838     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4839     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4840     if (!Op0IsNull && !Op1IsNull)
4841       return false;
4842 
4843     LLVMContext &Ctx = ICmp->getContext();
4844     // Check for `nullptr ==/!= nullptr` first:
4845     if (Op0IsNull && Op1IsNull) {
4846       Value *NewVal = ConstantInt::get(
4847           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4848       assert(!SimplifiedAssociatedValue.hasValue() &&
4849              "Did not expect non-fixed value for constant comparison");
4850       SimplifiedAssociatedValue = NewVal;
4851       indicateOptimisticFixpoint();
4852       Changed = ChangeStatus::CHANGED;
4853       return true;
4854     }
4855 
4856     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4857     // non-nullptr operand and if we assume it's non-null we can conclude the
4858     // result of the comparison.
4859     assert((Op0IsNull || Op1IsNull) &&
4860            "Expected nullptr versus non-nullptr comparison at this point");
4861 
4862     // The index is the operand that we assume is not null.
4863     unsigned PtrIdx = Op0IsNull;
4864     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4865         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4866         DepClassTy::REQUIRED);
4867     if (!PtrNonNullAA.isAssumedNonNull())
4868       return false;
4869 
4870     // The new value depends on the predicate, true for != and false for ==.
4871     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4872                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4873 
4874     assert((!SimplifiedAssociatedValue.hasValue() ||
4875             SimplifiedAssociatedValue == NewVal) &&
4876            "Did not expect to change value for zero-comparison");
4877 
4878     auto Before = SimplifiedAssociatedValue;
4879     SimplifiedAssociatedValue = NewVal;
4880 
4881     if (PtrNonNullAA.isKnownNonNull())
4882       indicateOptimisticFixpoint();
4883 
4884     Changed = Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4885                                                   : ChangeStatus ::CHANGED;
4886     return true;
4887   }
4888 
4889   /// See AbstractAttribute::updateImpl(...).
4890   ChangeStatus updateImpl(Attributor &A) override {
4891     auto Before = SimplifiedAssociatedValue;
4892 
4893     ChangeStatus Changed;
4894     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4895                                Changed))
4896       return Changed;
4897 
4898     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4899                             bool Stripped) -> bool {
4900       auto &AA = A.getAAFor<AAValueSimplify>(
4901           *this, IRPosition::value(V, getCallBaseContext()),
4902           DepClassTy::REQUIRED);
4903       if (!Stripped && this == &AA) {
4904         // TODO: Look the instruction and check recursively.
4905 
4906         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4907                           << "\n");
4908         return false;
4909       }
4910       return checkAndUpdate(A, *this,
4911                             IRPosition::value(V, getCallBaseContext()),
4912                             SimplifiedAssociatedValue);
4913     };
4914 
4915     bool Dummy = false;
4916     if (!genericValueTraversal<AAValueSimplify, bool>(
4917             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4918             /* UseValueSimplify */ false))
4919       if (!askSimplifiedValueForOtherAAs(A))
4920         return indicatePessimisticFixpoint();
4921 
4922     // If a candicate was found in this update, return CHANGED.
4923     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4924                                                : ChangeStatus ::CHANGED;
4925   }
4926 
4927   /// See AbstractAttribute::trackStatistics()
4928   void trackStatistics() const override {
4929     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4930   }
4931 };
4932 
4933 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4934   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4935       : AAValueSimplifyImpl(IRP, A) {}
4936 
4937   /// See AbstractAttribute::initialize(...).
4938   void initialize(Attributor &A) override {
4939     SimplifiedAssociatedValue = &getAnchorValue();
4940     indicateOptimisticFixpoint();
4941   }
4942   /// See AbstractAttribute::initialize(...).
4943   ChangeStatus updateImpl(Attributor &A) override {
4944     llvm_unreachable(
4945         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4946   }
4947   /// See AbstractAttribute::trackStatistics()
4948   void trackStatistics() const override {
4949     STATS_DECLTRACK_FN_ATTR(value_simplify)
4950   }
4951 };
4952 
4953 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4954   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4955       : AAValueSimplifyFunction(IRP, A) {}
4956   /// See AbstractAttribute::trackStatistics()
4957   void trackStatistics() const override {
4958     STATS_DECLTRACK_CS_ATTR(value_simplify)
4959   }
4960 };
4961 
4962 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4963   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4964       : AAValueSimplifyReturned(IRP, A) {}
4965 
4966   /// See AbstractAttribute::manifest(...).
4967   ChangeStatus manifest(Attributor &A) override {
4968     return AAValueSimplifyImpl::manifest(A);
4969   }
4970 
4971   void trackStatistics() const override {
4972     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4973   }
4974 };
4975 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4976   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4977       : AAValueSimplifyFloating(IRP, A) {}
4978 
4979   /// See AbstractAttribute::manifest(...).
4980   ChangeStatus manifest(Attributor &A) override {
4981     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4982 
4983     if (SimplifiedAssociatedValue.hasValue() &&
4984         !SimplifiedAssociatedValue.getValue())
4985       return Changed;
4986 
4987     Value &V = getAssociatedValue();
4988     auto *C = SimplifiedAssociatedValue.hasValue()
4989                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4990                   : UndefValue::get(V.getType());
4991     if (C) {
4992       Use &U = cast<CallBase>(&getAnchorValue())
4993                    ->getArgOperandUse(getCallSiteArgNo());
4994       // We can replace the AssociatedValue with the constant.
4995       if (&V != C) {
4996         if (Value *NewV = AA::getWithType(*C, *V.getType()))
4997           if (A.changeUseAfterManifest(U, *NewV))
4998             Changed = ChangeStatus::CHANGED;
4999       }
5000     }
5001 
5002     return Changed | AAValueSimplify::manifest(A);
5003   }
5004 
5005   void trackStatistics() const override {
5006     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5007   }
5008 };
5009 
5010 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5011 struct AAHeapToStackImpl : public AAHeapToStack {
5012   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5013       : AAHeapToStack(IRP, A) {}
5014 
5015   const std::string getAsStr() const override {
5016     return "[H2S] Mallocs Good/Bad: " + std::to_string(MallocCalls.size()) +
5017            "/" + std::to_string(BadMallocCalls.size());
5018   }
5019 
5020   bool isAssumedHeapToStack(CallBase &CB) const override {
5021     return isValidState() && MallocCalls.contains(&CB) &&
5022            !BadMallocCalls.count(&CB);
5023   }
5024 
5025   bool isKnownHeapToStack(CallBase &CB) const override {
5026     return isValidState() && MallocCalls.contains(&CB) &&
5027            !BadMallocCalls.count(&CB);
5028   }
5029 
5030   ChangeStatus manifest(Attributor &A) override {
5031     assert(getState().isValidState() &&
5032            "Attempted to manifest an invalid state!");
5033 
5034     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5035     Function *F = getAnchorScope();
5036     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5037 
5038     for (Instruction *MallocCall : MallocCalls) {
5039       // This malloc cannot be replaced.
5040       if (BadMallocCalls.count(MallocCall))
5041         continue;
5042 
5043       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5044         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5045         A.deleteAfterManifest(*FreeCall);
5046         HasChanged = ChangeStatus::CHANGED;
5047       }
5048 
5049       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5050                         << "\n");
5051 
5052       auto Remark = [&](OptimizationRemark OR) {
5053         LibFunc IsAllocShared;
5054         if (auto *CB = dyn_cast<CallBase>(MallocCall)) {
5055           TLI->getLibFunc(*CB, IsAllocShared);
5056           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5057             return OR << "Moving globalized variable to the stack.";
5058         }
5059         return OR << "Moving memory allocation from the heap to the stack.";
5060       };
5061       A.emitRemark<OptimizationRemark>(MallocCall, "HeapToStack", Remark);
5062 
5063       Align Alignment;
5064       Value *Size;
5065       if (isCallocLikeFn(MallocCall, TLI)) {
5066         auto *Num = MallocCall->getOperand(0);
5067         auto *SizeT = MallocCall->getOperand(1);
5068         IRBuilder<> B(MallocCall);
5069         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5070       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5071         Size = MallocCall->getOperand(1);
5072         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5073                                    ->getValue()
5074                                    .getZExtValue())
5075                         .valueOrOne();
5076       } else {
5077         Size = MallocCall->getOperand(0);
5078       }
5079 
5080       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5081       Instruction *AI =
5082           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5083                          "", MallocCall->getNextNode());
5084 
5085       if (AI->getType() != MallocCall->getType())
5086         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5087                              AI->getNextNode());
5088 
5089       A.changeValueAfterManifest(*MallocCall, *AI);
5090 
5091       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5092         auto *NBB = II->getNormalDest();
5093         BranchInst::Create(NBB, MallocCall->getParent());
5094         A.deleteAfterManifest(*MallocCall);
5095       } else {
5096         A.deleteAfterManifest(*MallocCall);
5097       }
5098 
5099       // Zero out the allocated memory if it was a calloc.
5100       if (isCallocLikeFn(MallocCall, TLI)) {
5101         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5102                                    AI->getNextNode());
5103         Value *Ops[] = {
5104             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5105             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5106 
5107         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5108         Module *M = F->getParent();
5109         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5110         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5111       }
5112       HasChanged = ChangeStatus::CHANGED;
5113     }
5114 
5115     return HasChanged;
5116   }
5117 
5118   /// Collection of all malloc calls in a function.
5119   SmallSetVector<Instruction *, 4> MallocCalls;
5120 
5121   /// Collection of malloc calls that cannot be converted.
5122   DenseSet<const Instruction *> BadMallocCalls;
5123 
5124   /// A map for each malloc call to the set of associated free calls.
5125   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5126 
5127   ChangeStatus updateImpl(Attributor &A) override;
5128 };
5129 
5130 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5131   const Function *F = getAnchorScope();
5132   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5133 
5134   MustBeExecutedContextExplorer &Explorer =
5135       A.getInfoCache().getMustBeExecutedContextExplorer();
5136 
5137   bool StackIsAccessibleByOtherThreads =
5138       A.getInfoCache().stackIsAccessibleByOtherThreads();
5139 
5140   auto FreeCheck = [&](Instruction &I) {
5141     // If the stack is not accessible by other threads, the "must-free" logic
5142     // doesn't apply as the pointer could be shared and needs to be places in
5143     // "shareable" memory.
5144     if (!StackIsAccessibleByOtherThreads) {
5145       auto &NoSyncAA =
5146           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
5147       if (!NoSyncAA.isAssumedNoSync()) {
5148         LLVM_DEBUG(
5149             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
5150                       "other threads and function is not nosync:\n");
5151         return false;
5152       }
5153     }
5154     const auto &Frees = FreesForMalloc.lookup(&I);
5155     if (Frees.size() != 1) {
5156       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
5157                         << Frees.size() << "\n");
5158       return false;
5159     }
5160     Instruction *UniqueFree = *Frees.begin();
5161     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5162   };
5163 
5164   auto UsesCheck = [&](Instruction &I) {
5165     bool ValidUsesOnly = true;
5166     bool MustUse = true;
5167     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5168       Instruction *UserI = cast<Instruction>(U.getUser());
5169       if (isa<LoadInst>(UserI))
5170         return true;
5171       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5172         if (SI->getValueOperand() == U.get()) {
5173           LLVM_DEBUG(dbgs()
5174                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5175           ValidUsesOnly = false;
5176         } else {
5177           // A store into the malloc'ed memory is fine.
5178         }
5179         return true;
5180       }
5181       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5182         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5183           return true;
5184         // Record malloc.
5185         if (isFreeCall(UserI, TLI)) {
5186           if (MustUse) {
5187             FreesForMalloc[&I].insert(UserI);
5188           } else {
5189             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5190                               << *UserI << "\n");
5191             ValidUsesOnly = false;
5192           }
5193           return true;
5194         }
5195 
5196         unsigned ArgNo = CB->getArgOperandNo(&U);
5197 
5198         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5199             *this, IRPosition::callsite_argument(*CB, ArgNo),
5200             DepClassTy::OPTIONAL);
5201 
5202         // If a callsite argument use is nofree, we are fine.
5203         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5204             *this, IRPosition::callsite_argument(*CB, ArgNo),
5205             DepClassTy::OPTIONAL);
5206 
5207         if (!NoCaptureAA.isAssumedNoCapture() ||
5208             !ArgNoFreeAA.isAssumedNoFree()) {
5209 
5210           // Emit a missed remark if this is missed OpenMP globalization.
5211           auto Remark = [&](OptimizationRemarkMissed ORM) {
5212             return ORM << "Could not move globalized variable to the stack. "
5213                        << "Variable is potentially "
5214                        << ((!NoCaptureAA.isAssumedNoCapture()) ? "captured."
5215                                                                : "freed.");
5216           };
5217 
5218           LibFunc IsAllocShared;
5219           if (auto *AllocShared = dyn_cast<CallBase>(&I)) {
5220             TLI->getLibFunc(*AllocShared, IsAllocShared);
5221             if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5222               A.emitRemark<OptimizationRemarkMissed>(
5223                   AllocShared, "HeapToStackFailed", Remark);
5224           }
5225 
5226           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5227           ValidUsesOnly = false;
5228         }
5229         return true;
5230       }
5231 
5232       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5233           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5234         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5235         Follow = true;
5236         return true;
5237       }
5238       // Unknown user for which we can not track uses further (in a way that
5239       // makes sense).
5240       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5241       ValidUsesOnly = false;
5242       return true;
5243     };
5244     A.checkForAllUses(Pred, *this, I);
5245     return ValidUsesOnly;
5246   };
5247 
5248   auto MallocCallocCheck = [&](Instruction &I) {
5249     if (BadMallocCalls.count(&I))
5250       return true;
5251 
5252     bool IsMalloc = isMallocLikeFn(&I, TLI);
5253     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5254     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5255     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5256       BadMallocCalls.insert(&I);
5257       return true;
5258     }
5259 
5260     if (IsMalloc) {
5261       if (MaxHeapToStackSize == -1) {
5262         if (UsesCheck(I) || FreeCheck(I)) {
5263           MallocCalls.insert(&I);
5264           return true;
5265         }
5266       }
5267       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5268         if (Size->getValue().ule(MaxHeapToStackSize))
5269           if (UsesCheck(I) || FreeCheck(I)) {
5270             MallocCalls.insert(&I);
5271             return true;
5272           }
5273     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5274       if (MaxHeapToStackSize == -1) {
5275         if (UsesCheck(I) || FreeCheck(I)) {
5276           MallocCalls.insert(&I);
5277           return true;
5278         }
5279       }
5280       // Only if the alignment and sizes are constant.
5281       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5282         if (Size->getValue().ule(MaxHeapToStackSize))
5283           if (UsesCheck(I) || FreeCheck(I)) {
5284             MallocCalls.insert(&I);
5285             return true;
5286           }
5287     } else if (IsCalloc) {
5288       if (MaxHeapToStackSize == -1) {
5289         if (UsesCheck(I) || FreeCheck(I)) {
5290           MallocCalls.insert(&I);
5291           return true;
5292         }
5293       }
5294       bool Overflow = false;
5295       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5296         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5297           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5298                   .ule(MaxHeapToStackSize))
5299             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5300               MallocCalls.insert(&I);
5301               return true;
5302             }
5303     }
5304 
5305     BadMallocCalls.insert(&I);
5306     return true;
5307   };
5308 
5309   size_t NumBadMallocs = BadMallocCalls.size();
5310 
5311   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5312 
5313   if (NumBadMallocs != BadMallocCalls.size())
5314     return ChangeStatus::CHANGED;
5315 
5316   return ChangeStatus::UNCHANGED;
5317 }
5318 
5319 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5320   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5321       : AAHeapToStackImpl(IRP, A) {}
5322 
5323   /// See AbstractAttribute::trackStatistics().
5324   void trackStatistics() const override {
5325     STATS_DECL(
5326         MallocCalls, Function,
5327         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5328     for (auto *C : MallocCalls)
5329       if (!BadMallocCalls.count(C))
5330         ++BUILD_STAT_NAME(MallocCalls, Function);
5331   }
5332 };
5333 
5334 /// ----------------------- Privatizable Pointers ------------------------------
5335 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5336   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5337       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5338 
5339   ChangeStatus indicatePessimisticFixpoint() override {
5340     AAPrivatizablePtr::indicatePessimisticFixpoint();
5341     PrivatizableType = nullptr;
5342     return ChangeStatus::CHANGED;
5343   }
5344 
5345   /// Identify the type we can chose for a private copy of the underlying
5346   /// argument. None means it is not clear yet, nullptr means there is none.
5347   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5348 
5349   /// Return a privatizable type that encloses both T0 and T1.
5350   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5351   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5352     if (!T0.hasValue())
5353       return T1;
5354     if (!T1.hasValue())
5355       return T0;
5356     if (T0 == T1)
5357       return T0;
5358     return nullptr;
5359   }
5360 
5361   Optional<Type *> getPrivatizableType() const override {
5362     return PrivatizableType;
5363   }
5364 
5365   const std::string getAsStr() const override {
5366     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5367   }
5368 
5369 protected:
5370   Optional<Type *> PrivatizableType;
5371 };
5372 
5373 // TODO: Do this for call site arguments (probably also other values) as well.
5374 
5375 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5376   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5377       : AAPrivatizablePtrImpl(IRP, A) {}
5378 
5379   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5380   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5381     // If this is a byval argument and we know all the call sites (so we can
5382     // rewrite them), there is no need to check them explicitly.
5383     bool AllCallSitesKnown;
5384     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5385         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5386                                true, AllCallSitesKnown))
5387       return getAssociatedValue().getType()->getPointerElementType();
5388 
5389     Optional<Type *> Ty;
5390     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5391 
5392     // Make sure the associated call site argument has the same type at all call
5393     // sites and it is an allocation we know is safe to privatize, for now that
5394     // means we only allow alloca instructions.
5395     // TODO: We can additionally analyze the accesses in the callee to  create
5396     //       the type from that information instead. That is a little more
5397     //       involved and will be done in a follow up patch.
5398     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5399       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5400       // Check if a coresponding argument was found or if it is one not
5401       // associated (which can happen for callback calls).
5402       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5403         return false;
5404 
5405       // Check that all call sites agree on a type.
5406       auto &PrivCSArgAA =
5407           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5408       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5409 
5410       LLVM_DEBUG({
5411         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5412         if (CSTy.hasValue() && CSTy.getValue())
5413           CSTy.getValue()->print(dbgs());
5414         else if (CSTy.hasValue())
5415           dbgs() << "<nullptr>";
5416         else
5417           dbgs() << "<none>";
5418       });
5419 
5420       Ty = combineTypes(Ty, CSTy);
5421 
5422       LLVM_DEBUG({
5423         dbgs() << " : New Type: ";
5424         if (Ty.hasValue() && Ty.getValue())
5425           Ty.getValue()->print(dbgs());
5426         else if (Ty.hasValue())
5427           dbgs() << "<nullptr>";
5428         else
5429           dbgs() << "<none>";
5430         dbgs() << "\n";
5431       });
5432 
5433       return !Ty.hasValue() || Ty.getValue();
5434     };
5435 
5436     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5437       return nullptr;
5438     return Ty;
5439   }
5440 
5441   /// See AbstractAttribute::updateImpl(...).
5442   ChangeStatus updateImpl(Attributor &A) override {
5443     PrivatizableType = identifyPrivatizableType(A);
5444     if (!PrivatizableType.hasValue())
5445       return ChangeStatus::UNCHANGED;
5446     if (!PrivatizableType.getValue())
5447       return indicatePessimisticFixpoint();
5448 
5449     // The dependence is optional so we don't give up once we give up on the
5450     // alignment.
5451     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5452                         DepClassTy::OPTIONAL);
5453 
5454     // Avoid arguments with padding for now.
5455     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5456         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5457                                                 A.getInfoCache().getDL())) {
5458       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5459       return indicatePessimisticFixpoint();
5460     }
5461 
5462     // Verify callee and caller agree on how the promoted argument would be
5463     // passed.
5464     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5465     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5466     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5467     Function &Fn = *getIRPosition().getAnchorScope();
5468     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5469     ArgsToPromote.insert(getAssociatedArgument());
5470     const auto *TTI =
5471         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5472     if (!TTI ||
5473         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5474             Fn, *TTI, ArgsToPromote, Dummy) ||
5475         ArgsToPromote.empty()) {
5476       LLVM_DEBUG(
5477           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5478                  << Fn.getName() << "\n");
5479       return indicatePessimisticFixpoint();
5480     }
5481 
5482     // Collect the types that will replace the privatizable type in the function
5483     // signature.
5484     SmallVector<Type *, 16> ReplacementTypes;
5485     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5486 
5487     // Register a rewrite of the argument.
5488     Argument *Arg = getAssociatedArgument();
5489     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5490       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5491       return indicatePessimisticFixpoint();
5492     }
5493 
5494     unsigned ArgNo = Arg->getArgNo();
5495 
5496     // Helper to check if for the given call site the associated argument is
5497     // passed to a callback where the privatization would be different.
5498     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5499       SmallVector<const Use *, 4> CallbackUses;
5500       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5501       for (const Use *U : CallbackUses) {
5502         AbstractCallSite CBACS(U);
5503         assert(CBACS && CBACS.isCallbackCall());
5504         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5505           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5506 
5507           LLVM_DEBUG({
5508             dbgs()
5509                 << "[AAPrivatizablePtr] Argument " << *Arg
5510                 << "check if can be privatized in the context of its parent ("
5511                 << Arg->getParent()->getName()
5512                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5513                    "callback ("
5514                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5515                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5516                 << CBACS.getCallArgOperand(CBArg) << " vs "
5517                 << CB.getArgOperand(ArgNo) << "\n"
5518                 << "[AAPrivatizablePtr] " << CBArg << " : "
5519                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5520           });
5521 
5522           if (CBArgNo != int(ArgNo))
5523             continue;
5524           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5525               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5526           if (CBArgPrivAA.isValidState()) {
5527             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5528             if (!CBArgPrivTy.hasValue())
5529               continue;
5530             if (CBArgPrivTy.getValue() == PrivatizableType)
5531               continue;
5532           }
5533 
5534           LLVM_DEBUG({
5535             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5536                    << " cannot be privatized in the context of its parent ("
5537                    << Arg->getParent()->getName()
5538                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5539                       "callback ("
5540                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5541                    << ").\n[AAPrivatizablePtr] for which the argument "
5542                       "privatization is not compatible.\n";
5543           });
5544           return false;
5545         }
5546       }
5547       return true;
5548     };
5549 
5550     // Helper to check if for the given call site the associated argument is
5551     // passed to a direct call where the privatization would be different.
5552     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5553       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5554       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5555       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5556              "Expected a direct call operand for callback call operand");
5557 
5558       LLVM_DEBUG({
5559         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5560                << " check if be privatized in the context of its parent ("
5561                << Arg->getParent()->getName()
5562                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5563                   "direct call of ("
5564                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5565                << ").\n";
5566       });
5567 
5568       Function *DCCallee = DC->getCalledFunction();
5569       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5570         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5571             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5572             DepClassTy::REQUIRED);
5573         if (DCArgPrivAA.isValidState()) {
5574           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5575           if (!DCArgPrivTy.hasValue())
5576             return true;
5577           if (DCArgPrivTy.getValue() == PrivatizableType)
5578             return true;
5579         }
5580       }
5581 
5582       LLVM_DEBUG({
5583         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5584                << " cannot be privatized in the context of its parent ("
5585                << Arg->getParent()->getName()
5586                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5587                   "direct call of ("
5588                << ACS.getInstruction()->getCalledFunction()->getName()
5589                << ").\n[AAPrivatizablePtr] for which the argument "
5590                   "privatization is not compatible.\n";
5591       });
5592       return false;
5593     };
5594 
5595     // Helper to check if the associated argument is used at the given abstract
5596     // call site in a way that is incompatible with the privatization assumed
5597     // here.
5598     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5599       if (ACS.isDirectCall())
5600         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5601       if (ACS.isCallbackCall())
5602         return IsCompatiblePrivArgOfDirectCS(ACS);
5603       return false;
5604     };
5605 
5606     bool AllCallSitesKnown;
5607     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5608                                 AllCallSitesKnown))
5609       return indicatePessimisticFixpoint();
5610 
5611     return ChangeStatus::UNCHANGED;
5612   }
5613 
5614   /// Given a type to private \p PrivType, collect the constituates (which are
5615   /// used) in \p ReplacementTypes.
5616   static void
5617   identifyReplacementTypes(Type *PrivType,
5618                            SmallVectorImpl<Type *> &ReplacementTypes) {
5619     // TODO: For now we expand the privatization type to the fullest which can
5620     //       lead to dead arguments that need to be removed later.
5621     assert(PrivType && "Expected privatizable type!");
5622 
5623     // Traverse the type, extract constituate types on the outermost level.
5624     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5625       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5626         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5627     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5628       ReplacementTypes.append(PrivArrayType->getNumElements(),
5629                               PrivArrayType->getElementType());
5630     } else {
5631       ReplacementTypes.push_back(PrivType);
5632     }
5633   }
5634 
5635   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5636   /// The values needed are taken from the arguments of \p F starting at
5637   /// position \p ArgNo.
5638   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5639                                    unsigned ArgNo, Instruction &IP) {
5640     assert(PrivType && "Expected privatizable type!");
5641 
5642     IRBuilder<NoFolder> IRB(&IP);
5643     const DataLayout &DL = F.getParent()->getDataLayout();
5644 
5645     // Traverse the type, build GEPs and stores.
5646     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5647       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5648       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5649         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5650         Value *Ptr =
5651             constructPointer(PointeeTy, PrivType, &Base,
5652                              PrivStructLayout->getElementOffset(u), IRB, DL);
5653         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5654       }
5655     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5656       Type *PointeeTy = PrivArrayType->getElementType();
5657       Type *PointeePtrTy = PointeeTy->getPointerTo();
5658       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5659       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5660         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5661                                       u * PointeeTySize, IRB, DL);
5662         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5663       }
5664     } else {
5665       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5666     }
5667   }
5668 
5669   /// Extract values from \p Base according to the type \p PrivType at the
5670   /// call position \p ACS. The values are appended to \p ReplacementValues.
5671   void createReplacementValues(Align Alignment, Type *PrivType,
5672                                AbstractCallSite ACS, Value *Base,
5673                                SmallVectorImpl<Value *> &ReplacementValues) {
5674     assert(Base && "Expected base value!");
5675     assert(PrivType && "Expected privatizable type!");
5676     Instruction *IP = ACS.getInstruction();
5677 
5678     IRBuilder<NoFolder> IRB(IP);
5679     const DataLayout &DL = IP->getModule()->getDataLayout();
5680 
5681     if (Base->getType()->getPointerElementType() != PrivType)
5682       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5683                                                  "", ACS.getInstruction());
5684 
5685     // Traverse the type, build GEPs and loads.
5686     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5687       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5688       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5689         Type *PointeeTy = PrivStructType->getElementType(u);
5690         Value *Ptr =
5691             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5692                              PrivStructLayout->getElementOffset(u), IRB, DL);
5693         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5694         L->setAlignment(Alignment);
5695         ReplacementValues.push_back(L);
5696       }
5697     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5698       Type *PointeeTy = PrivArrayType->getElementType();
5699       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5700       Type *PointeePtrTy = PointeeTy->getPointerTo();
5701       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5702         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5703                                       u * PointeeTySize, IRB, DL);
5704         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5705         L->setAlignment(Alignment);
5706         ReplacementValues.push_back(L);
5707       }
5708     } else {
5709       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5710       L->setAlignment(Alignment);
5711       ReplacementValues.push_back(L);
5712     }
5713   }
5714 
5715   /// See AbstractAttribute::manifest(...)
5716   ChangeStatus manifest(Attributor &A) override {
5717     if (!PrivatizableType.hasValue())
5718       return ChangeStatus::UNCHANGED;
5719     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5720 
5721     // Collect all tail calls in the function as we cannot allow new allocas to
5722     // escape into tail recursion.
5723     // TODO: Be smarter about new allocas escaping into tail calls.
5724     SmallVector<CallInst *, 16> TailCalls;
5725     if (!A.checkForAllInstructions(
5726             [&](Instruction &I) {
5727               CallInst &CI = cast<CallInst>(I);
5728               if (CI.isTailCall())
5729                 TailCalls.push_back(&CI);
5730               return true;
5731             },
5732             *this, {Instruction::Call}))
5733       return ChangeStatus::UNCHANGED;
5734 
5735     Argument *Arg = getAssociatedArgument();
5736     // Query AAAlign attribute for alignment of associated argument to
5737     // determine the best alignment of loads.
5738     const auto &AlignAA =
5739         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5740 
5741     // Callback to repair the associated function. A new alloca is placed at the
5742     // beginning and initialized with the values passed through arguments. The
5743     // new alloca replaces the use of the old pointer argument.
5744     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5745         [=](const Attributor::ArgumentReplacementInfo &ARI,
5746             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5747           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5748           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5749           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5750                                            Arg->getName() + ".priv", IP);
5751           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5752                                ArgIt->getArgNo(), *IP);
5753 
5754           if (AI->getType() != Arg->getType())
5755             AI =
5756                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5757           Arg->replaceAllUsesWith(AI);
5758 
5759           for (CallInst *CI : TailCalls)
5760             CI->setTailCall(false);
5761         };
5762 
5763     // Callback to repair a call site of the associated function. The elements
5764     // of the privatizable type are loaded prior to the call and passed to the
5765     // new function version.
5766     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5767         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5768                       AbstractCallSite ACS,
5769                       SmallVectorImpl<Value *> &NewArgOperands) {
5770           // When no alignment is specified for the load instruction,
5771           // natural alignment is assumed.
5772           createReplacementValues(
5773               assumeAligned(AlignAA.getAssumedAlign()),
5774               PrivatizableType.getValue(), ACS,
5775               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5776               NewArgOperands);
5777         };
5778 
5779     // Collect the types that will replace the privatizable type in the function
5780     // signature.
5781     SmallVector<Type *, 16> ReplacementTypes;
5782     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5783 
5784     // Register a rewrite of the argument.
5785     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5786                                            std::move(FnRepairCB),
5787                                            std::move(ACSRepairCB)))
5788       return ChangeStatus::CHANGED;
5789     return ChangeStatus::UNCHANGED;
5790   }
5791 
5792   /// See AbstractAttribute::trackStatistics()
5793   void trackStatistics() const override {
5794     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5795   }
5796 };
5797 
5798 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5799   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5800       : AAPrivatizablePtrImpl(IRP, A) {}
5801 
5802   /// See AbstractAttribute::initialize(...).
5803   virtual void initialize(Attributor &A) override {
5804     // TODO: We can privatize more than arguments.
5805     indicatePessimisticFixpoint();
5806   }
5807 
5808   ChangeStatus updateImpl(Attributor &A) override {
5809     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5810                      "updateImpl will not be called");
5811   }
5812 
5813   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5814   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5815     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5816     if (!Obj) {
5817       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5818       return nullptr;
5819     }
5820 
5821     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5822       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5823         if (CI->isOne())
5824           return Obj->getType()->getPointerElementType();
5825     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5826       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5827           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5828       if (PrivArgAA.isAssumedPrivatizablePtr())
5829         return Obj->getType()->getPointerElementType();
5830     }
5831 
5832     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5833                          "alloca nor privatizable argument: "
5834                       << *Obj << "!\n");
5835     return nullptr;
5836   }
5837 
5838   /// See AbstractAttribute::trackStatistics()
5839   void trackStatistics() const override {
5840     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5841   }
5842 };
5843 
5844 struct AAPrivatizablePtrCallSiteArgument final
5845     : public AAPrivatizablePtrFloating {
5846   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5847       : AAPrivatizablePtrFloating(IRP, A) {}
5848 
5849   /// See AbstractAttribute::initialize(...).
5850   void initialize(Attributor &A) override {
5851     if (getIRPosition().hasAttr(Attribute::ByVal))
5852       indicateOptimisticFixpoint();
5853   }
5854 
5855   /// See AbstractAttribute::updateImpl(...).
5856   ChangeStatus updateImpl(Attributor &A) override {
5857     PrivatizableType = identifyPrivatizableType(A);
5858     if (!PrivatizableType.hasValue())
5859       return ChangeStatus::UNCHANGED;
5860     if (!PrivatizableType.getValue())
5861       return indicatePessimisticFixpoint();
5862 
5863     const IRPosition &IRP = getIRPosition();
5864     auto &NoCaptureAA =
5865         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5866     if (!NoCaptureAA.isAssumedNoCapture()) {
5867       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5868       return indicatePessimisticFixpoint();
5869     }
5870 
5871     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5872     if (!NoAliasAA.isAssumedNoAlias()) {
5873       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5874       return indicatePessimisticFixpoint();
5875     }
5876 
5877     const auto &MemBehaviorAA =
5878         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5879     if (!MemBehaviorAA.isAssumedReadOnly()) {
5880       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5881       return indicatePessimisticFixpoint();
5882     }
5883 
5884     return ChangeStatus::UNCHANGED;
5885   }
5886 
5887   /// See AbstractAttribute::trackStatistics()
5888   void trackStatistics() const override {
5889     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5890   }
5891 };
5892 
5893 struct AAPrivatizablePtrCallSiteReturned final
5894     : public AAPrivatizablePtrFloating {
5895   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5896       : AAPrivatizablePtrFloating(IRP, A) {}
5897 
5898   /// See AbstractAttribute::initialize(...).
5899   void initialize(Attributor &A) override {
5900     // TODO: We can privatize more than arguments.
5901     indicatePessimisticFixpoint();
5902   }
5903 
5904   /// See AbstractAttribute::trackStatistics()
5905   void trackStatistics() const override {
5906     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5907   }
5908 };
5909 
5910 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5911   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5912       : AAPrivatizablePtrFloating(IRP, A) {}
5913 
5914   /// See AbstractAttribute::initialize(...).
5915   void initialize(Attributor &A) override {
5916     // TODO: We can privatize more than arguments.
5917     indicatePessimisticFixpoint();
5918   }
5919 
5920   /// See AbstractAttribute::trackStatistics()
5921   void trackStatistics() const override {
5922     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5923   }
5924 };
5925 
5926 /// -------------------- Memory Behavior Attributes ----------------------------
5927 /// Includes read-none, read-only, and write-only.
5928 /// ----------------------------------------------------------------------------
5929 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5930   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5931       : AAMemoryBehavior(IRP, A) {}
5932 
5933   /// See AbstractAttribute::initialize(...).
5934   void initialize(Attributor &A) override {
5935     intersectAssumedBits(BEST_STATE);
5936     getKnownStateFromValue(getIRPosition(), getState());
5937     AAMemoryBehavior::initialize(A);
5938   }
5939 
5940   /// Return the memory behavior information encoded in the IR for \p IRP.
5941   static void getKnownStateFromValue(const IRPosition &IRP,
5942                                      BitIntegerState &State,
5943                                      bool IgnoreSubsumingPositions = false) {
5944     SmallVector<Attribute, 2> Attrs;
5945     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5946     for (const Attribute &Attr : Attrs) {
5947       switch (Attr.getKindAsEnum()) {
5948       case Attribute::ReadNone:
5949         State.addKnownBits(NO_ACCESSES);
5950         break;
5951       case Attribute::ReadOnly:
5952         State.addKnownBits(NO_WRITES);
5953         break;
5954       case Attribute::WriteOnly:
5955         State.addKnownBits(NO_READS);
5956         break;
5957       default:
5958         llvm_unreachable("Unexpected attribute!");
5959       }
5960     }
5961 
5962     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5963       if (!I->mayReadFromMemory())
5964         State.addKnownBits(NO_READS);
5965       if (!I->mayWriteToMemory())
5966         State.addKnownBits(NO_WRITES);
5967     }
5968   }
5969 
5970   /// See AbstractAttribute::getDeducedAttributes(...).
5971   void getDeducedAttributes(LLVMContext &Ctx,
5972                             SmallVectorImpl<Attribute> &Attrs) const override {
5973     assert(Attrs.size() == 0);
5974     if (isAssumedReadNone())
5975       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5976     else if (isAssumedReadOnly())
5977       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5978     else if (isAssumedWriteOnly())
5979       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5980     assert(Attrs.size() <= 1);
5981   }
5982 
5983   /// See AbstractAttribute::manifest(...).
5984   ChangeStatus manifest(Attributor &A) override {
5985     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5986       return ChangeStatus::UNCHANGED;
5987 
5988     const IRPosition &IRP = getIRPosition();
5989 
5990     // Check if we would improve the existing attributes first.
5991     SmallVector<Attribute, 4> DeducedAttrs;
5992     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5993     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5994           return IRP.hasAttr(Attr.getKindAsEnum(),
5995                              /* IgnoreSubsumingPositions */ true);
5996         }))
5997       return ChangeStatus::UNCHANGED;
5998 
5999     // Clear existing attributes.
6000     IRP.removeAttrs(AttrKinds);
6001 
6002     // Use the generic manifest method.
6003     return IRAttribute::manifest(A);
6004   }
6005 
6006   /// See AbstractState::getAsStr().
6007   const std::string getAsStr() const override {
6008     if (isAssumedReadNone())
6009       return "readnone";
6010     if (isAssumedReadOnly())
6011       return "readonly";
6012     if (isAssumedWriteOnly())
6013       return "writeonly";
6014     return "may-read/write";
6015   }
6016 
6017   /// The set of IR attributes AAMemoryBehavior deals with.
6018   static const Attribute::AttrKind AttrKinds[3];
6019 };
6020 
6021 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6022     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6023 
6024 /// Memory behavior attribute for a floating value.
6025 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6026   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6027       : AAMemoryBehaviorImpl(IRP, A) {}
6028 
6029   /// See AbstractAttribute::initialize(...).
6030   void initialize(Attributor &A) override {
6031     AAMemoryBehaviorImpl::initialize(A);
6032     addUsesOf(A, getAssociatedValue());
6033   }
6034 
6035   /// See AbstractAttribute::updateImpl(...).
6036   ChangeStatus updateImpl(Attributor &A) override;
6037 
6038   /// See AbstractAttribute::trackStatistics()
6039   void trackStatistics() const override {
6040     if (isAssumedReadNone())
6041       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6042     else if (isAssumedReadOnly())
6043       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6044     else if (isAssumedWriteOnly())
6045       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6046   }
6047 
6048 private:
6049   /// Return true if users of \p UserI might access the underlying
6050   /// variable/location described by \p U and should therefore be analyzed.
6051   bool followUsersOfUseIn(Attributor &A, const Use *U,
6052                           const Instruction *UserI);
6053 
6054   /// Update the state according to the effect of use \p U in \p UserI.
6055   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6056 
6057 protected:
6058   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6059   void addUsesOf(Attributor &A, const Value &V);
6060 
6061   /// Container for (transitive) uses of the associated argument.
6062   SmallVector<const Use *, 8> Uses;
6063 
6064   /// Set to remember the uses we already traversed.
6065   SmallPtrSet<const Use *, 8> Visited;
6066 };
6067 
6068 /// Memory behavior attribute for function argument.
6069 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6070   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6071       : AAMemoryBehaviorFloating(IRP, A) {}
6072 
6073   /// See AbstractAttribute::initialize(...).
6074   void initialize(Attributor &A) override {
6075     intersectAssumedBits(BEST_STATE);
6076     const IRPosition &IRP = getIRPosition();
6077     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6078     // can query it when we use has/getAttr. That would allow us to reuse the
6079     // initialize of the base class here.
6080     bool HasByVal =
6081         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6082     getKnownStateFromValue(IRP, getState(),
6083                            /* IgnoreSubsumingPositions */ HasByVal);
6084 
6085     // Initialize the use vector with all direct uses of the associated value.
6086     Argument *Arg = getAssociatedArgument();
6087     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6088       indicatePessimisticFixpoint();
6089     } else {
6090       addUsesOf(A, *Arg);
6091     }
6092   }
6093 
6094   ChangeStatus manifest(Attributor &A) override {
6095     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6096     if (!getAssociatedValue().getType()->isPointerTy())
6097       return ChangeStatus::UNCHANGED;
6098 
6099     // TODO: From readattrs.ll: "inalloca parameters are always
6100     //                           considered written"
6101     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6102       removeKnownBits(NO_WRITES);
6103       removeAssumedBits(NO_WRITES);
6104     }
6105     return AAMemoryBehaviorFloating::manifest(A);
6106   }
6107 
6108   /// See AbstractAttribute::trackStatistics()
6109   void trackStatistics() const override {
6110     if (isAssumedReadNone())
6111       STATS_DECLTRACK_ARG_ATTR(readnone)
6112     else if (isAssumedReadOnly())
6113       STATS_DECLTRACK_ARG_ATTR(readonly)
6114     else if (isAssumedWriteOnly())
6115       STATS_DECLTRACK_ARG_ATTR(writeonly)
6116   }
6117 };
6118 
6119 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6120   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6121       : AAMemoryBehaviorArgument(IRP, A) {}
6122 
6123   /// See AbstractAttribute::initialize(...).
6124   void initialize(Attributor &A) override {
6125     // If we don't have an associated attribute this is either a variadic call
6126     // or an indirect call, either way, nothing to do here.
6127     Argument *Arg = getAssociatedArgument();
6128     if (!Arg) {
6129       indicatePessimisticFixpoint();
6130       return;
6131     }
6132     if (Arg->hasByValAttr()) {
6133       addKnownBits(NO_WRITES);
6134       removeKnownBits(NO_READS);
6135       removeAssumedBits(NO_READS);
6136     }
6137     AAMemoryBehaviorArgument::initialize(A);
6138     if (getAssociatedFunction()->isDeclaration())
6139       indicatePessimisticFixpoint();
6140   }
6141 
6142   /// See AbstractAttribute::updateImpl(...).
6143   ChangeStatus updateImpl(Attributor &A) override {
6144     // TODO: Once we have call site specific value information we can provide
6145     //       call site specific liveness liveness information and then it makes
6146     //       sense to specialize attributes for call sites arguments instead of
6147     //       redirecting requests to the callee argument.
6148     Argument *Arg = getAssociatedArgument();
6149     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6150     auto &ArgAA =
6151         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6152     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6153   }
6154 
6155   /// See AbstractAttribute::trackStatistics()
6156   void trackStatistics() const override {
6157     if (isAssumedReadNone())
6158       STATS_DECLTRACK_CSARG_ATTR(readnone)
6159     else if (isAssumedReadOnly())
6160       STATS_DECLTRACK_CSARG_ATTR(readonly)
6161     else if (isAssumedWriteOnly())
6162       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6163   }
6164 };
6165 
6166 /// Memory behavior attribute for a call site return position.
6167 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6168   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6169       : AAMemoryBehaviorFloating(IRP, A) {}
6170 
6171   /// See AbstractAttribute::initialize(...).
6172   void initialize(Attributor &A) override {
6173     AAMemoryBehaviorImpl::initialize(A);
6174     Function *F = getAssociatedFunction();
6175     if (!F || F->isDeclaration())
6176       indicatePessimisticFixpoint();
6177   }
6178 
6179   /// See AbstractAttribute::manifest(...).
6180   ChangeStatus manifest(Attributor &A) override {
6181     // We do not annotate returned values.
6182     return ChangeStatus::UNCHANGED;
6183   }
6184 
6185   /// See AbstractAttribute::trackStatistics()
6186   void trackStatistics() const override {}
6187 };
6188 
6189 /// An AA to represent the memory behavior function attributes.
6190 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6191   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6192       : AAMemoryBehaviorImpl(IRP, A) {}
6193 
6194   /// See AbstractAttribute::updateImpl(Attributor &A).
6195   virtual ChangeStatus updateImpl(Attributor &A) override;
6196 
6197   /// See AbstractAttribute::manifest(...).
6198   ChangeStatus manifest(Attributor &A) override {
6199     Function &F = cast<Function>(getAnchorValue());
6200     if (isAssumedReadNone()) {
6201       F.removeFnAttr(Attribute::ArgMemOnly);
6202       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6203       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6204     }
6205     return AAMemoryBehaviorImpl::manifest(A);
6206   }
6207 
6208   /// See AbstractAttribute::trackStatistics()
6209   void trackStatistics() const override {
6210     if (isAssumedReadNone())
6211       STATS_DECLTRACK_FN_ATTR(readnone)
6212     else if (isAssumedReadOnly())
6213       STATS_DECLTRACK_FN_ATTR(readonly)
6214     else if (isAssumedWriteOnly())
6215       STATS_DECLTRACK_FN_ATTR(writeonly)
6216   }
6217 };
6218 
6219 /// AAMemoryBehavior attribute for call sites.
6220 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6221   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6222       : AAMemoryBehaviorImpl(IRP, A) {}
6223 
6224   /// See AbstractAttribute::initialize(...).
6225   void initialize(Attributor &A) override {
6226     AAMemoryBehaviorImpl::initialize(A);
6227     Function *F = getAssociatedFunction();
6228     if (!F || F->isDeclaration())
6229       indicatePessimisticFixpoint();
6230   }
6231 
6232   /// See AbstractAttribute::updateImpl(...).
6233   ChangeStatus updateImpl(Attributor &A) override {
6234     // TODO: Once we have call site specific value information we can provide
6235     //       call site specific liveness liveness information and then it makes
6236     //       sense to specialize attributes for call sites arguments instead of
6237     //       redirecting requests to the callee argument.
6238     Function *F = getAssociatedFunction();
6239     const IRPosition &FnPos = IRPosition::function(*F);
6240     auto &FnAA =
6241         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6242     return clampStateAndIndicateChange(getState(), FnAA.getState());
6243   }
6244 
6245   /// See AbstractAttribute::trackStatistics()
6246   void trackStatistics() const override {
6247     if (isAssumedReadNone())
6248       STATS_DECLTRACK_CS_ATTR(readnone)
6249     else if (isAssumedReadOnly())
6250       STATS_DECLTRACK_CS_ATTR(readonly)
6251     else if (isAssumedWriteOnly())
6252       STATS_DECLTRACK_CS_ATTR(writeonly)
6253   }
6254 };
6255 
6256 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6257 
6258   // The current assumed state used to determine a change.
6259   auto AssumedState = getAssumed();
6260 
6261   auto CheckRWInst = [&](Instruction &I) {
6262     // If the instruction has an own memory behavior state, use it to restrict
6263     // the local state. No further analysis is required as the other memory
6264     // state is as optimistic as it gets.
6265     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6266       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6267           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6268       intersectAssumedBits(MemBehaviorAA.getAssumed());
6269       return !isAtFixpoint();
6270     }
6271 
6272     // Remove access kind modifiers if necessary.
6273     if (I.mayReadFromMemory())
6274       removeAssumedBits(NO_READS);
6275     if (I.mayWriteToMemory())
6276       removeAssumedBits(NO_WRITES);
6277     return !isAtFixpoint();
6278   };
6279 
6280   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6281     return indicatePessimisticFixpoint();
6282 
6283   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6284                                         : ChangeStatus::UNCHANGED;
6285 }
6286 
6287 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6288 
6289   const IRPosition &IRP = getIRPosition();
6290   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6291   AAMemoryBehavior::StateType &S = getState();
6292 
6293   // First, check the function scope. We take the known information and we avoid
6294   // work if the assumed information implies the current assumed information for
6295   // this attribute. This is a valid for all but byval arguments.
6296   Argument *Arg = IRP.getAssociatedArgument();
6297   AAMemoryBehavior::base_t FnMemAssumedState =
6298       AAMemoryBehavior::StateType::getWorstState();
6299   if (!Arg || !Arg->hasByValAttr()) {
6300     const auto &FnMemAA =
6301         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6302     FnMemAssumedState = FnMemAA.getAssumed();
6303     S.addKnownBits(FnMemAA.getKnown());
6304     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6305       return ChangeStatus::UNCHANGED;
6306   }
6307 
6308   // Make sure the value is not captured (except through "return"), if
6309   // it is, any information derived would be irrelevant anyway as we cannot
6310   // check the potential aliases introduced by the capture. However, no need
6311   // to fall back to anythign less optimistic than the function state.
6312   const auto &ArgNoCaptureAA =
6313       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6314   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6315     S.intersectAssumedBits(FnMemAssumedState);
6316     return ChangeStatus::CHANGED;
6317   }
6318 
6319   // The current assumed state used to determine a change.
6320   auto AssumedState = S.getAssumed();
6321 
6322   // Liveness information to exclude dead users.
6323   // TODO: Take the FnPos once we have call site specific liveness information.
6324   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6325       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6326       DepClassTy::NONE);
6327 
6328   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6329   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6330     const Use *U = Uses[i];
6331     Instruction *UserI = cast<Instruction>(U->getUser());
6332     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6333                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6334                       << "]\n");
6335     if (A.isAssumedDead(*U, this, &LivenessAA))
6336       continue;
6337 
6338     // Droppable users, e.g., llvm::assume does not actually perform any action.
6339     if (UserI->isDroppable())
6340       continue;
6341 
6342     // Check if the users of UserI should also be visited.
6343     if (followUsersOfUseIn(A, U, UserI))
6344       addUsesOf(A, *UserI);
6345 
6346     // If UserI might touch memory we analyze the use in detail.
6347     if (UserI->mayReadOrWriteMemory())
6348       analyzeUseIn(A, U, UserI);
6349   }
6350 
6351   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6352                                         : ChangeStatus::UNCHANGED;
6353 }
6354 
6355 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6356   SmallVector<const Use *, 8> WL;
6357   for (const Use &U : V.uses())
6358     WL.push_back(&U);
6359 
6360   while (!WL.empty()) {
6361     const Use *U = WL.pop_back_val();
6362     if (!Visited.insert(U).second)
6363       continue;
6364 
6365     const Instruction *UserI = cast<Instruction>(U->getUser());
6366     if (UserI->mayReadOrWriteMemory()) {
6367       Uses.push_back(U);
6368       continue;
6369     }
6370     if (!followUsersOfUseIn(A, U, UserI))
6371       continue;
6372     for (const Use &UU : UserI->uses())
6373       WL.push_back(&UU);
6374   }
6375 }
6376 
6377 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6378                                                   const Instruction *UserI) {
6379   // The loaded value is unrelated to the pointer argument, no need to
6380   // follow the users of the load.
6381   if (isa<LoadInst>(UserI))
6382     return false;
6383 
6384   // By default we follow all uses assuming UserI might leak information on U,
6385   // we have special handling for call sites operands though.
6386   const auto *CB = dyn_cast<CallBase>(UserI);
6387   if (!CB || !CB->isArgOperand(U))
6388     return true;
6389 
6390   // If the use is a call argument known not to be captured, the users of
6391   // the call do not need to be visited because they have to be unrelated to
6392   // the input. Note that this check is not trivial even though we disallow
6393   // general capturing of the underlying argument. The reason is that the
6394   // call might the argument "through return", which we allow and for which we
6395   // need to check call users.
6396   if (U->get()->getType()->isPointerTy()) {
6397     unsigned ArgNo = CB->getArgOperandNo(U);
6398     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6399         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6400     return !ArgNoCaptureAA.isAssumedNoCapture();
6401   }
6402 
6403   return true;
6404 }
6405 
6406 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6407                                             const Instruction *UserI) {
6408   assert(UserI->mayReadOrWriteMemory());
6409 
6410   switch (UserI->getOpcode()) {
6411   default:
6412     // TODO: Handle all atomics and other side-effect operations we know of.
6413     break;
6414   case Instruction::Load:
6415     // Loads cause the NO_READS property to disappear.
6416     removeAssumedBits(NO_READS);
6417     return;
6418 
6419   case Instruction::Store:
6420     // Stores cause the NO_WRITES property to disappear if the use is the
6421     // pointer operand. Note that we do assume that capturing was taken care of
6422     // somewhere else.
6423     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6424       removeAssumedBits(NO_WRITES);
6425     return;
6426 
6427   case Instruction::Call:
6428   case Instruction::CallBr:
6429   case Instruction::Invoke: {
6430     // For call sites we look at the argument memory behavior attribute (this
6431     // could be recursive!) in order to restrict our own state.
6432     const auto *CB = cast<CallBase>(UserI);
6433 
6434     // Give up on operand bundles.
6435     if (CB->isBundleOperand(U)) {
6436       indicatePessimisticFixpoint();
6437       return;
6438     }
6439 
6440     // Calling a function does read the function pointer, maybe write it if the
6441     // function is self-modifying.
6442     if (CB->isCallee(U)) {
6443       removeAssumedBits(NO_READS);
6444       break;
6445     }
6446 
6447     // Adjust the possible access behavior based on the information on the
6448     // argument.
6449     IRPosition Pos;
6450     if (U->get()->getType()->isPointerTy())
6451       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6452     else
6453       Pos = IRPosition::callsite_function(*CB);
6454     const auto &MemBehaviorAA =
6455         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6456     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6457     // and at least "known".
6458     intersectAssumedBits(MemBehaviorAA.getAssumed());
6459     return;
6460   }
6461   };
6462 
6463   // Generally, look at the "may-properties" and adjust the assumed state if we
6464   // did not trigger special handling before.
6465   if (UserI->mayReadFromMemory())
6466     removeAssumedBits(NO_READS);
6467   if (UserI->mayWriteToMemory())
6468     removeAssumedBits(NO_WRITES);
6469 }
6470 
6471 } // namespace
6472 
6473 /// -------------------- Memory Locations Attributes ---------------------------
6474 /// Includes read-none, argmemonly, inaccessiblememonly,
6475 /// inaccessiblememorargmemonly
6476 /// ----------------------------------------------------------------------------
6477 
6478 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6479     AAMemoryLocation::MemoryLocationsKind MLK) {
6480   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6481     return "all memory";
6482   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6483     return "no memory";
6484   std::string S = "memory:";
6485   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6486     S += "stack,";
6487   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6488     S += "constant,";
6489   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6490     S += "internal global,";
6491   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6492     S += "external global,";
6493   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6494     S += "argument,";
6495   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6496     S += "inaccessible,";
6497   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6498     S += "malloced,";
6499   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6500     S += "unknown,";
6501   S.pop_back();
6502   return S;
6503 }
6504 
6505 namespace {
6506 struct AAMemoryLocationImpl : public AAMemoryLocation {
6507 
6508   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6509       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6510     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6511       AccessKind2Accesses[u] = nullptr;
6512   }
6513 
6514   ~AAMemoryLocationImpl() {
6515     // The AccessSets are allocated via a BumpPtrAllocator, we call
6516     // the destructor manually.
6517     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6518       if (AccessKind2Accesses[u])
6519         AccessKind2Accesses[u]->~AccessSet();
6520   }
6521 
6522   /// See AbstractAttribute::initialize(...).
6523   void initialize(Attributor &A) override {
6524     intersectAssumedBits(BEST_STATE);
6525     getKnownStateFromValue(A, getIRPosition(), getState());
6526     AAMemoryLocation::initialize(A);
6527   }
6528 
6529   /// Return the memory behavior information encoded in the IR for \p IRP.
6530   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6531                                      BitIntegerState &State,
6532                                      bool IgnoreSubsumingPositions = false) {
6533     // For internal functions we ignore `argmemonly` and
6534     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6535     // constant propagation. It is unclear if this is the best way but it is
6536     // unlikely this will cause real performance problems. If we are deriving
6537     // attributes for the anchor function we even remove the attribute in
6538     // addition to ignoring it.
6539     bool UseArgMemOnly = true;
6540     Function *AnchorFn = IRP.getAnchorScope();
6541     if (AnchorFn && A.isRunOn(*AnchorFn))
6542       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6543 
6544     SmallVector<Attribute, 2> Attrs;
6545     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6546     for (const Attribute &Attr : Attrs) {
6547       switch (Attr.getKindAsEnum()) {
6548       case Attribute::ReadNone:
6549         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6550         break;
6551       case Attribute::InaccessibleMemOnly:
6552         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6553         break;
6554       case Attribute::ArgMemOnly:
6555         if (UseArgMemOnly)
6556           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6557         else
6558           IRP.removeAttrs({Attribute::ArgMemOnly});
6559         break;
6560       case Attribute::InaccessibleMemOrArgMemOnly:
6561         if (UseArgMemOnly)
6562           State.addKnownBits(inverseLocation(
6563               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6564         else
6565           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6566         break;
6567       default:
6568         llvm_unreachable("Unexpected attribute!");
6569       }
6570     }
6571   }
6572 
6573   /// See AbstractAttribute::getDeducedAttributes(...).
6574   void getDeducedAttributes(LLVMContext &Ctx,
6575                             SmallVectorImpl<Attribute> &Attrs) const override {
6576     assert(Attrs.size() == 0);
6577     if (isAssumedReadNone()) {
6578       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6579     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6580       if (isAssumedInaccessibleMemOnly())
6581         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6582       else if (isAssumedArgMemOnly())
6583         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6584       else if (isAssumedInaccessibleOrArgMemOnly())
6585         Attrs.push_back(
6586             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6587     }
6588     assert(Attrs.size() <= 1);
6589   }
6590 
6591   /// See AbstractAttribute::manifest(...).
6592   ChangeStatus manifest(Attributor &A) override {
6593     const IRPosition &IRP = getIRPosition();
6594 
6595     // Check if we would improve the existing attributes first.
6596     SmallVector<Attribute, 4> DeducedAttrs;
6597     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6598     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6599           return IRP.hasAttr(Attr.getKindAsEnum(),
6600                              /* IgnoreSubsumingPositions */ true);
6601         }))
6602       return ChangeStatus::UNCHANGED;
6603 
6604     // Clear existing attributes.
6605     IRP.removeAttrs(AttrKinds);
6606     if (isAssumedReadNone())
6607       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6608 
6609     // Use the generic manifest method.
6610     return IRAttribute::manifest(A);
6611   }
6612 
6613   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6614   bool checkForAllAccessesToMemoryKind(
6615       function_ref<bool(const Instruction *, const Value *, AccessKind,
6616                         MemoryLocationsKind)>
6617           Pred,
6618       MemoryLocationsKind RequestedMLK) const override {
6619     if (!isValidState())
6620       return false;
6621 
6622     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6623     if (AssumedMLK == NO_LOCATIONS)
6624       return true;
6625 
6626     unsigned Idx = 0;
6627     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6628          CurMLK *= 2, ++Idx) {
6629       if (CurMLK & RequestedMLK)
6630         continue;
6631 
6632       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6633         for (const AccessInfo &AI : *Accesses)
6634           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6635             return false;
6636     }
6637 
6638     return true;
6639   }
6640 
6641   ChangeStatus indicatePessimisticFixpoint() override {
6642     // If we give up and indicate a pessimistic fixpoint this instruction will
6643     // become an access for all potential access kinds:
6644     // TODO: Add pointers for argmemonly and globals to improve the results of
6645     //       checkForAllAccessesToMemoryKind.
6646     bool Changed = false;
6647     MemoryLocationsKind KnownMLK = getKnown();
6648     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6649     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6650       if (!(CurMLK & KnownMLK))
6651         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6652                                   getAccessKindFromInst(I));
6653     return AAMemoryLocation::indicatePessimisticFixpoint();
6654   }
6655 
6656 protected:
6657   /// Helper struct to tie together an instruction that has a read or write
6658   /// effect with the pointer it accesses (if any).
6659   struct AccessInfo {
6660 
6661     /// The instruction that caused the access.
6662     const Instruction *I;
6663 
6664     /// The base pointer that is accessed, or null if unknown.
6665     const Value *Ptr;
6666 
6667     /// The kind of access (read/write/read+write).
6668     AccessKind Kind;
6669 
6670     bool operator==(const AccessInfo &RHS) const {
6671       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6672     }
6673     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6674       if (LHS.I != RHS.I)
6675         return LHS.I < RHS.I;
6676       if (LHS.Ptr != RHS.Ptr)
6677         return LHS.Ptr < RHS.Ptr;
6678       if (LHS.Kind != RHS.Kind)
6679         return LHS.Kind < RHS.Kind;
6680       return false;
6681     }
6682   };
6683 
6684   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6685   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6686   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6687   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6688 
6689   /// Categorize the pointer arguments of CB that might access memory in
6690   /// AccessedLoc and update the state and access map accordingly.
6691   void
6692   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6693                                      AAMemoryLocation::StateType &AccessedLocs,
6694                                      bool &Changed);
6695 
6696   /// Return the kind(s) of location that may be accessed by \p V.
6697   AAMemoryLocation::MemoryLocationsKind
6698   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6699 
6700   /// Return the access kind as determined by \p I.
6701   AccessKind getAccessKindFromInst(const Instruction *I) {
6702     AccessKind AK = READ_WRITE;
6703     if (I) {
6704       AK = I->mayReadFromMemory() ? READ : NONE;
6705       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6706     }
6707     return AK;
6708   }
6709 
6710   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6711   /// an access of kind \p AK to a \p MLK memory location with the access
6712   /// pointer \p Ptr.
6713   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6714                                  MemoryLocationsKind MLK, const Instruction *I,
6715                                  const Value *Ptr, bool &Changed,
6716                                  AccessKind AK = READ_WRITE) {
6717 
6718     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6719     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6720     if (!Accesses)
6721       Accesses = new (Allocator) AccessSet();
6722     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6723     State.removeAssumedBits(MLK);
6724   }
6725 
6726   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6727   /// arguments, and update the state and access map accordingly.
6728   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6729                           AAMemoryLocation::StateType &State, bool &Changed);
6730 
6731   /// Used to allocate access sets.
6732   BumpPtrAllocator &Allocator;
6733 
6734   /// The set of IR attributes AAMemoryLocation deals with.
6735   static const Attribute::AttrKind AttrKinds[4];
6736 };
6737 
6738 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6739     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6740     Attribute::InaccessibleMemOrArgMemOnly};
6741 
6742 void AAMemoryLocationImpl::categorizePtrValue(
6743     Attributor &A, const Instruction &I, const Value &Ptr,
6744     AAMemoryLocation::StateType &State, bool &Changed) {
6745   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6746                     << Ptr << " ["
6747                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6748 
6749   auto StripGEPCB = [](Value *V) -> Value * {
6750     auto *GEP = dyn_cast<GEPOperator>(V);
6751     while (GEP) {
6752       V = GEP->getPointerOperand();
6753       GEP = dyn_cast<GEPOperator>(V);
6754     }
6755     return V;
6756   };
6757 
6758   auto VisitValueCB = [&](Value &V, const Instruction *,
6759                           AAMemoryLocation::StateType &T,
6760                           bool Stripped) -> bool {
6761     // TODO: recognize the TBAA used for constant accesses.
6762     MemoryLocationsKind MLK = NO_LOCATIONS;
6763     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6764     if (isa<UndefValue>(V))
6765       return true;
6766     if (auto *Arg = dyn_cast<Argument>(&V)) {
6767       if (Arg->hasByValAttr())
6768         MLK = NO_LOCAL_MEM;
6769       else
6770         MLK = NO_ARGUMENT_MEM;
6771     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6772       // Reading constant memory is not treated as a read "effect" by the
6773       // function attr pass so we won't neither. Constants defined by TBAA are
6774       // similar. (We know we do not write it because it is constant.)
6775       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6776         if (GVar->isConstant())
6777           return true;
6778 
6779       if (GV->hasLocalLinkage())
6780         MLK = NO_GLOBAL_INTERNAL_MEM;
6781       else
6782         MLK = NO_GLOBAL_EXTERNAL_MEM;
6783     } else if (isa<ConstantPointerNull>(V) &&
6784                !NullPointerIsDefined(getAssociatedFunction(),
6785                                      V.getType()->getPointerAddressSpace())) {
6786       return true;
6787     } else if (isa<AllocaInst>(V)) {
6788       MLK = NO_LOCAL_MEM;
6789     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6790       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6791           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6792       if (NoAliasAA.isAssumedNoAlias())
6793         MLK = NO_MALLOCED_MEM;
6794       else
6795         MLK = NO_UNKOWN_MEM;
6796     } else {
6797       MLK = NO_UNKOWN_MEM;
6798     }
6799 
6800     assert(MLK != NO_LOCATIONS && "No location specified!");
6801     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6802                               getAccessKindFromInst(&I));
6803     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6804                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6805                       << "\n");
6806     return true;
6807   };
6808 
6809   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6810           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6811           /* UseValueSimplify */ true,
6812           /* MaxValues */ 32, StripGEPCB)) {
6813     LLVM_DEBUG(
6814         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6815     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6816                               getAccessKindFromInst(&I));
6817   } else {
6818     LLVM_DEBUG(
6819         dbgs()
6820         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6821         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6822   }
6823 }
6824 
6825 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6826     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6827     bool &Changed) {
6828   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6829 
6830     // Skip non-pointer arguments.
6831     const Value *ArgOp = CB.getArgOperand(ArgNo);
6832     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6833       continue;
6834 
6835     // Skip readnone arguments.
6836     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6837     const auto &ArgOpMemLocationAA =
6838         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6839 
6840     if (ArgOpMemLocationAA.isAssumedReadNone())
6841       continue;
6842 
6843     // Categorize potentially accessed pointer arguments as if there was an
6844     // access instruction with them as pointer.
6845     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6846   }
6847 }
6848 
6849 AAMemoryLocation::MemoryLocationsKind
6850 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6851                                                   bool &Changed) {
6852   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6853                     << I << "\n");
6854 
6855   AAMemoryLocation::StateType AccessedLocs;
6856   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6857 
6858   if (auto *CB = dyn_cast<CallBase>(&I)) {
6859 
6860     // First check if we assume any memory is access is visible.
6861     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6862         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6863     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6864                       << " [" << CBMemLocationAA << "]\n");
6865 
6866     if (CBMemLocationAA.isAssumedReadNone())
6867       return NO_LOCATIONS;
6868 
6869     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6870       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6871                                 Changed, getAccessKindFromInst(&I));
6872       return AccessedLocs.getAssumed();
6873     }
6874 
6875     uint32_t CBAssumedNotAccessedLocs =
6876         CBMemLocationAA.getAssumedNotAccessedLocation();
6877 
6878     // Set the argmemonly and global bit as we handle them separately below.
6879     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6880         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6881 
6882     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6883       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6884         continue;
6885       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6886                                 getAccessKindFromInst(&I));
6887     }
6888 
6889     // Now handle global memory if it might be accessed. This is slightly tricky
6890     // as NO_GLOBAL_MEM has multiple bits set.
6891     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6892     if (HasGlobalAccesses) {
6893       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6894                             AccessKind Kind, MemoryLocationsKind MLK) {
6895         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6896                                   getAccessKindFromInst(&I));
6897         return true;
6898       };
6899       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6900               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6901         return AccessedLocs.getWorstState();
6902     }
6903 
6904     LLVM_DEBUG(
6905         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6906                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6907 
6908     // Now handle argument memory if it might be accessed.
6909     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6910     if (HasArgAccesses)
6911       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6912 
6913     LLVM_DEBUG(
6914         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6915                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6916 
6917     return AccessedLocs.getAssumed();
6918   }
6919 
6920   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6921     LLVM_DEBUG(
6922         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6923                << I << " [" << *Ptr << "]\n");
6924     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6925     return AccessedLocs.getAssumed();
6926   }
6927 
6928   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6929                     << I << "\n");
6930   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6931                             getAccessKindFromInst(&I));
6932   return AccessedLocs.getAssumed();
6933 }
6934 
6935 /// An AA to represent the memory behavior function attributes.
6936 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6937   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6938       : AAMemoryLocationImpl(IRP, A) {}
6939 
6940   /// See AbstractAttribute::updateImpl(Attributor &A).
6941   virtual ChangeStatus updateImpl(Attributor &A) override {
6942 
6943     const auto &MemBehaviorAA =
6944         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6945     if (MemBehaviorAA.isAssumedReadNone()) {
6946       if (MemBehaviorAA.isKnownReadNone())
6947         return indicateOptimisticFixpoint();
6948       assert(isAssumedReadNone() &&
6949              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6950       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6951       return ChangeStatus::UNCHANGED;
6952     }
6953 
6954     // The current assumed state used to determine a change.
6955     auto AssumedState = getAssumed();
6956     bool Changed = false;
6957 
6958     auto CheckRWInst = [&](Instruction &I) {
6959       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6960       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6961                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6962       removeAssumedBits(inverseLocation(MLK, false, false));
6963       // Stop once only the valid bit set in the *not assumed location*, thus
6964       // once we don't actually exclude any memory locations in the state.
6965       return getAssumedNotAccessedLocation() != VALID_STATE;
6966     };
6967 
6968     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6969       return indicatePessimisticFixpoint();
6970 
6971     Changed |= AssumedState != getAssumed();
6972     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6973   }
6974 
6975   /// See AbstractAttribute::trackStatistics()
6976   void trackStatistics() const override {
6977     if (isAssumedReadNone())
6978       STATS_DECLTRACK_FN_ATTR(readnone)
6979     else if (isAssumedArgMemOnly())
6980       STATS_DECLTRACK_FN_ATTR(argmemonly)
6981     else if (isAssumedInaccessibleMemOnly())
6982       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6983     else if (isAssumedInaccessibleOrArgMemOnly())
6984       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6985   }
6986 };
6987 
6988 /// AAMemoryLocation attribute for call sites.
6989 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6990   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6991       : AAMemoryLocationImpl(IRP, A) {}
6992 
6993   /// See AbstractAttribute::initialize(...).
6994   void initialize(Attributor &A) override {
6995     AAMemoryLocationImpl::initialize(A);
6996     Function *F = getAssociatedFunction();
6997     if (!F || F->isDeclaration())
6998       indicatePessimisticFixpoint();
6999   }
7000 
7001   /// See AbstractAttribute::updateImpl(...).
7002   ChangeStatus updateImpl(Attributor &A) override {
7003     // TODO: Once we have call site specific value information we can provide
7004     //       call site specific liveness liveness information and then it makes
7005     //       sense to specialize attributes for call sites arguments instead of
7006     //       redirecting requests to the callee argument.
7007     Function *F = getAssociatedFunction();
7008     const IRPosition &FnPos = IRPosition::function(*F);
7009     auto &FnAA =
7010         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7011     bool Changed = false;
7012     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7013                           AccessKind Kind, MemoryLocationsKind MLK) {
7014       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7015                                 getAccessKindFromInst(I));
7016       return true;
7017     };
7018     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7019       return indicatePessimisticFixpoint();
7020     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7021   }
7022 
7023   /// See AbstractAttribute::trackStatistics()
7024   void trackStatistics() const override {
7025     if (isAssumedReadNone())
7026       STATS_DECLTRACK_CS_ATTR(readnone)
7027   }
7028 };
7029 
7030 /// ------------------ Value Constant Range Attribute -------------------------
7031 
7032 struct AAValueConstantRangeImpl : AAValueConstantRange {
7033   using StateType = IntegerRangeState;
7034   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7035       : AAValueConstantRange(IRP, A) {}
7036 
7037   /// See AbstractAttribute::getAsStr().
7038   const std::string getAsStr() const override {
7039     std::string Str;
7040     llvm::raw_string_ostream OS(Str);
7041     OS << "range(" << getBitWidth() << ")<";
7042     getKnown().print(OS);
7043     OS << " / ";
7044     getAssumed().print(OS);
7045     OS << ">";
7046     return OS.str();
7047   }
7048 
7049   /// Helper function to get a SCEV expr for the associated value at program
7050   /// point \p I.
7051   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7052     if (!getAnchorScope())
7053       return nullptr;
7054 
7055     ScalarEvolution *SE =
7056         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7057             *getAnchorScope());
7058 
7059     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7060         *getAnchorScope());
7061 
7062     if (!SE || !LI)
7063       return nullptr;
7064 
7065     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7066     if (!I)
7067       return S;
7068 
7069     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7070   }
7071 
7072   /// Helper function to get a range from SCEV for the associated value at
7073   /// program point \p I.
7074   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7075                                          const Instruction *I = nullptr) const {
7076     if (!getAnchorScope())
7077       return getWorstState(getBitWidth());
7078 
7079     ScalarEvolution *SE =
7080         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7081             *getAnchorScope());
7082 
7083     const SCEV *S = getSCEV(A, I);
7084     if (!SE || !S)
7085       return getWorstState(getBitWidth());
7086 
7087     return SE->getUnsignedRange(S);
7088   }
7089 
7090   /// Helper function to get a range from LVI for the associated value at
7091   /// program point \p I.
7092   ConstantRange
7093   getConstantRangeFromLVI(Attributor &A,
7094                           const Instruction *CtxI = nullptr) const {
7095     if (!getAnchorScope())
7096       return getWorstState(getBitWidth());
7097 
7098     LazyValueInfo *LVI =
7099         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7100             *getAnchorScope());
7101 
7102     if (!LVI || !CtxI)
7103       return getWorstState(getBitWidth());
7104     return LVI->getConstantRange(&getAssociatedValue(),
7105                                  const_cast<Instruction *>(CtxI));
7106   }
7107 
7108   /// See AAValueConstantRange::getKnownConstantRange(..).
7109   ConstantRange
7110   getKnownConstantRange(Attributor &A,
7111                         const Instruction *CtxI = nullptr) const override {
7112     if (!CtxI || CtxI == getCtxI())
7113       return getKnown();
7114 
7115     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7116     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7117     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7118   }
7119 
7120   /// See AAValueConstantRange::getAssumedConstantRange(..).
7121   ConstantRange
7122   getAssumedConstantRange(Attributor &A,
7123                           const Instruction *CtxI = nullptr) const override {
7124     // TODO: Make SCEV use Attributor assumption.
7125     //       We may be able to bound a variable range via assumptions in
7126     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7127     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7128 
7129     if (!CtxI || CtxI == getCtxI())
7130       return getAssumed();
7131 
7132     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7133     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7134     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7135   }
7136 
7137   /// See AbstractAttribute::initialize(..).
7138   void initialize(Attributor &A) override {
7139     // Intersect a range given by SCEV.
7140     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7141 
7142     // Intersect a range given by LVI.
7143     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7144   }
7145 
7146   /// Helper function to create MDNode for range metadata.
7147   static MDNode *
7148   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7149                             const ConstantRange &AssumedConstantRange) {
7150     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7151                                   Ty, AssumedConstantRange.getLower())),
7152                               ConstantAsMetadata::get(ConstantInt::get(
7153                                   Ty, AssumedConstantRange.getUpper()))};
7154     return MDNode::get(Ctx, LowAndHigh);
7155   }
7156 
7157   /// Return true if \p Assumed is included in \p KnownRanges.
7158   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7159 
7160     if (Assumed.isFullSet())
7161       return false;
7162 
7163     if (!KnownRanges)
7164       return true;
7165 
7166     // If multiple ranges are annotated in IR, we give up to annotate assumed
7167     // range for now.
7168 
7169     // TODO:  If there exists a known range which containts assumed range, we
7170     // can say assumed range is better.
7171     if (KnownRanges->getNumOperands() > 2)
7172       return false;
7173 
7174     ConstantInt *Lower =
7175         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7176     ConstantInt *Upper =
7177         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7178 
7179     ConstantRange Known(Lower->getValue(), Upper->getValue());
7180     return Known.contains(Assumed) && Known != Assumed;
7181   }
7182 
7183   /// Helper function to set range metadata.
7184   static bool
7185   setRangeMetadataIfisBetterRange(Instruction *I,
7186                                   const ConstantRange &AssumedConstantRange) {
7187     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7188     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7189       if (!AssumedConstantRange.isEmptySet()) {
7190         I->setMetadata(LLVMContext::MD_range,
7191                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7192                                                  AssumedConstantRange));
7193         return true;
7194       }
7195     }
7196     return false;
7197   }
7198 
7199   /// See AbstractAttribute::manifest()
7200   ChangeStatus manifest(Attributor &A) override {
7201     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7202     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7203     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7204 
7205     auto &V = getAssociatedValue();
7206     if (!AssumedConstantRange.isEmptySet() &&
7207         !AssumedConstantRange.isSingleElement()) {
7208       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7209         assert(I == getCtxI() && "Should not annotate an instruction which is "
7210                                  "not the context instruction");
7211         if (isa<CallInst>(I) || isa<LoadInst>(I))
7212           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7213             Changed = ChangeStatus::CHANGED;
7214       }
7215     }
7216 
7217     return Changed;
7218   }
7219 };
7220 
7221 struct AAValueConstantRangeArgument final
7222     : AAArgumentFromCallSiteArguments<
7223           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7224           true /* BridgeCallBaseContext */> {
7225   using Base = AAArgumentFromCallSiteArguments<
7226       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7227       true /* BridgeCallBaseContext */>;
7228   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7229       : Base(IRP, A) {}
7230 
7231   /// See AbstractAttribute::initialize(..).
7232   void initialize(Attributor &A) override {
7233     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7234       indicatePessimisticFixpoint();
7235     } else {
7236       Base::initialize(A);
7237     }
7238   }
7239 
7240   /// See AbstractAttribute::trackStatistics()
7241   void trackStatistics() const override {
7242     STATS_DECLTRACK_ARG_ATTR(value_range)
7243   }
7244 };
7245 
7246 struct AAValueConstantRangeReturned
7247     : AAReturnedFromReturnedValues<AAValueConstantRange,
7248                                    AAValueConstantRangeImpl,
7249                                    AAValueConstantRangeImpl::StateType,
7250                                    /* PropogateCallBaseContext */ true> {
7251   using Base =
7252       AAReturnedFromReturnedValues<AAValueConstantRange,
7253                                    AAValueConstantRangeImpl,
7254                                    AAValueConstantRangeImpl::StateType,
7255                                    /* PropogateCallBaseContext */ true>;
7256   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7257       : Base(IRP, A) {}
7258 
7259   /// See AbstractAttribute::initialize(...).
7260   void initialize(Attributor &A) override {}
7261 
7262   /// See AbstractAttribute::trackStatistics()
7263   void trackStatistics() const override {
7264     STATS_DECLTRACK_FNRET_ATTR(value_range)
7265   }
7266 };
7267 
7268 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7269   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7270       : AAValueConstantRangeImpl(IRP, A) {}
7271 
7272   /// See AbstractAttribute::initialize(...).
7273   void initialize(Attributor &A) override {
7274     AAValueConstantRangeImpl::initialize(A);
7275     Value &V = getAssociatedValue();
7276 
7277     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7278       unionAssumed(ConstantRange(C->getValue()));
7279       indicateOptimisticFixpoint();
7280       return;
7281     }
7282 
7283     if (isa<UndefValue>(&V)) {
7284       // Collapse the undef state to 0.
7285       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7286       indicateOptimisticFixpoint();
7287       return;
7288     }
7289 
7290     if (isa<CallBase>(&V))
7291       return;
7292 
7293     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7294       return;
7295     // If it is a load instruction with range metadata, use it.
7296     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7297       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7298         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7299         return;
7300       }
7301 
7302     // We can work with PHI and select instruction as we traverse their operands
7303     // during update.
7304     if (isa<SelectInst>(V) || isa<PHINode>(V))
7305       return;
7306 
7307     // Otherwise we give up.
7308     indicatePessimisticFixpoint();
7309 
7310     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7311                       << getAssociatedValue() << "\n");
7312   }
7313 
7314   bool calculateBinaryOperator(
7315       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7316       const Instruction *CtxI,
7317       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7318     Value *LHS = BinOp->getOperand(0);
7319     Value *RHS = BinOp->getOperand(1);
7320     // TODO: Allow non integers as well.
7321     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7322       return false;
7323 
7324     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7325         *this, IRPosition::value(*LHS, getCallBaseContext()),
7326         DepClassTy::REQUIRED);
7327     QuerriedAAs.push_back(&LHSAA);
7328     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7329 
7330     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7331         *this, IRPosition::value(*RHS, getCallBaseContext()),
7332         DepClassTy::REQUIRED);
7333     QuerriedAAs.push_back(&RHSAA);
7334     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7335 
7336     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7337 
7338     T.unionAssumed(AssumedRange);
7339 
7340     // TODO: Track a known state too.
7341 
7342     return T.isValidState();
7343   }
7344 
7345   bool calculateCastInst(
7346       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7347       const Instruction *CtxI,
7348       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7349     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7350     // TODO: Allow non integers as well.
7351     Value &OpV = *CastI->getOperand(0);
7352     if (!OpV.getType()->isIntegerTy())
7353       return false;
7354 
7355     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7356         *this, IRPosition::value(OpV, getCallBaseContext()),
7357         DepClassTy::REQUIRED);
7358     QuerriedAAs.push_back(&OpAA);
7359     T.unionAssumed(
7360         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7361     return T.isValidState();
7362   }
7363 
7364   bool
7365   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7366                    const Instruction *CtxI,
7367                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7368     Value *LHS = CmpI->getOperand(0);
7369     Value *RHS = CmpI->getOperand(1);
7370     // TODO: Allow non integers as well.
7371     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7372       return false;
7373 
7374     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7375         *this, IRPosition::value(*LHS, getCallBaseContext()),
7376         DepClassTy::REQUIRED);
7377     QuerriedAAs.push_back(&LHSAA);
7378     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7379         *this, IRPosition::value(*RHS, getCallBaseContext()),
7380         DepClassTy::REQUIRED);
7381     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7382     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7383 
7384     // If one of them is empty set, we can't decide.
7385     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7386       return true;
7387 
7388     bool MustTrue = false, MustFalse = false;
7389 
7390     auto AllowedRegion =
7391         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7392 
7393     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7394       MustFalse = true;
7395 
7396     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7397       MustTrue = true;
7398 
7399     assert((!MustTrue || !MustFalse) &&
7400            "Either MustTrue or MustFalse should be false!");
7401 
7402     if (MustTrue)
7403       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7404     else if (MustFalse)
7405       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7406     else
7407       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7408 
7409     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7410                       << " " << RHSAA << "\n");
7411 
7412     // TODO: Track a known state too.
7413     return T.isValidState();
7414   }
7415 
7416   /// See AbstractAttribute::updateImpl(...).
7417   ChangeStatus updateImpl(Attributor &A) override {
7418     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7419                             IntegerRangeState &T, bool Stripped) -> bool {
7420       Instruction *I = dyn_cast<Instruction>(&V);
7421       if (!I || isa<CallBase>(I)) {
7422 
7423         // If the value is not instruction, we query AA to Attributor.
7424         const auto &AA = A.getAAFor<AAValueConstantRange>(
7425             *this, IRPosition::value(V, getCallBaseContext()),
7426             DepClassTy::REQUIRED);
7427 
7428         // Clamp operator is not used to utilize a program point CtxI.
7429         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7430 
7431         return T.isValidState();
7432       }
7433 
7434       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7435       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7436         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7437           return false;
7438       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7439         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7440           return false;
7441       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7442         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7443           return false;
7444       } else {
7445         // Give up with other instructions.
7446         // TODO: Add other instructions
7447 
7448         T.indicatePessimisticFixpoint();
7449         return false;
7450       }
7451 
7452       // Catch circular reasoning in a pessimistic way for now.
7453       // TODO: Check how the range evolves and if we stripped anything, see also
7454       //       AADereferenceable or AAAlign for similar situations.
7455       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7456         if (QueriedAA != this)
7457           continue;
7458         // If we are in a stady state we do not need to worry.
7459         if (T.getAssumed() == getState().getAssumed())
7460           continue;
7461         T.indicatePessimisticFixpoint();
7462       }
7463 
7464       return T.isValidState();
7465     };
7466 
7467     IntegerRangeState T(getBitWidth());
7468 
7469     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7470             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7471             /* UseValueSimplify */ false))
7472       return indicatePessimisticFixpoint();
7473 
7474     return clampStateAndIndicateChange(getState(), T);
7475   }
7476 
7477   /// See AbstractAttribute::trackStatistics()
7478   void trackStatistics() const override {
7479     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7480   }
7481 };
7482 
7483 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7484   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7485       : AAValueConstantRangeImpl(IRP, A) {}
7486 
7487   /// See AbstractAttribute::initialize(...).
7488   ChangeStatus updateImpl(Attributor &A) override {
7489     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7490                      "not be called");
7491   }
7492 
7493   /// See AbstractAttribute::trackStatistics()
7494   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7495 };
7496 
7497 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7498   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7499       : AAValueConstantRangeFunction(IRP, A) {}
7500 
7501   /// See AbstractAttribute::trackStatistics()
7502   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7503 };
7504 
7505 struct AAValueConstantRangeCallSiteReturned
7506     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7507                                      AAValueConstantRangeImpl,
7508                                      AAValueConstantRangeImpl::StateType,
7509                                      /* IntroduceCallBaseContext */ true> {
7510   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7511       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7512                                        AAValueConstantRangeImpl,
7513                                        AAValueConstantRangeImpl::StateType,
7514                                        /* IntroduceCallBaseContext */ true>(IRP,
7515                                                                             A) {
7516   }
7517 
7518   /// See AbstractAttribute::initialize(...).
7519   void initialize(Attributor &A) override {
7520     // If it is a load instruction with range metadata, use the metadata.
7521     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7522       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7523         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7524 
7525     AAValueConstantRangeImpl::initialize(A);
7526   }
7527 
7528   /// See AbstractAttribute::trackStatistics()
7529   void trackStatistics() const override {
7530     STATS_DECLTRACK_CSRET_ATTR(value_range)
7531   }
7532 };
7533 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7534   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7535       : AAValueConstantRangeFloating(IRP, A) {}
7536 
7537   /// See AbstractAttribute::manifest()
7538   ChangeStatus manifest(Attributor &A) override {
7539     return ChangeStatus::UNCHANGED;
7540   }
7541 
7542   /// See AbstractAttribute::trackStatistics()
7543   void trackStatistics() const override {
7544     STATS_DECLTRACK_CSARG_ATTR(value_range)
7545   }
7546 };
7547 
7548 /// ------------------ Potential Values Attribute -------------------------
7549 
7550 struct AAPotentialValuesImpl : AAPotentialValues {
7551   using StateType = PotentialConstantIntValuesState;
7552 
7553   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7554       : AAPotentialValues(IRP, A) {}
7555 
7556   /// See AbstractAttribute::getAsStr().
7557   const std::string getAsStr() const override {
7558     std::string Str;
7559     llvm::raw_string_ostream OS(Str);
7560     OS << getState();
7561     return OS.str();
7562   }
7563 
7564   /// See AbstractAttribute::updateImpl(...).
7565   ChangeStatus updateImpl(Attributor &A) override {
7566     return indicatePessimisticFixpoint();
7567   }
7568 };
7569 
7570 struct AAPotentialValuesArgument final
7571     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7572                                       PotentialConstantIntValuesState> {
7573   using Base =
7574       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7575                                       PotentialConstantIntValuesState>;
7576   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7577       : Base(IRP, A) {}
7578 
7579   /// See AbstractAttribute::initialize(..).
7580   void initialize(Attributor &A) override {
7581     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7582       indicatePessimisticFixpoint();
7583     } else {
7584       Base::initialize(A);
7585     }
7586   }
7587 
7588   /// See AbstractAttribute::trackStatistics()
7589   void trackStatistics() const override {
7590     STATS_DECLTRACK_ARG_ATTR(potential_values)
7591   }
7592 };
7593 
7594 struct AAPotentialValuesReturned
7595     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7596   using Base =
7597       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7598   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7599       : Base(IRP, A) {}
7600 
7601   /// See AbstractAttribute::trackStatistics()
7602   void trackStatistics() const override {
7603     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7604   }
7605 };
7606 
7607 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7608   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7609       : AAPotentialValuesImpl(IRP, A) {}
7610 
7611   /// See AbstractAttribute::initialize(..).
7612   void initialize(Attributor &A) override {
7613     Value &V = getAssociatedValue();
7614 
7615     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7616       unionAssumed(C->getValue());
7617       indicateOptimisticFixpoint();
7618       return;
7619     }
7620 
7621     if (isa<UndefValue>(&V)) {
7622       unionAssumedWithUndef();
7623       indicateOptimisticFixpoint();
7624       return;
7625     }
7626 
7627     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7628       return;
7629 
7630     if (isa<SelectInst>(V) || isa<PHINode>(V))
7631       return;
7632 
7633     indicatePessimisticFixpoint();
7634 
7635     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7636                       << getAssociatedValue() << "\n");
7637   }
7638 
7639   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7640                                 const APInt &RHS) {
7641     ICmpInst::Predicate Pred = ICI->getPredicate();
7642     switch (Pred) {
7643     case ICmpInst::ICMP_UGT:
7644       return LHS.ugt(RHS);
7645     case ICmpInst::ICMP_SGT:
7646       return LHS.sgt(RHS);
7647     case ICmpInst::ICMP_EQ:
7648       return LHS.eq(RHS);
7649     case ICmpInst::ICMP_UGE:
7650       return LHS.uge(RHS);
7651     case ICmpInst::ICMP_SGE:
7652       return LHS.sge(RHS);
7653     case ICmpInst::ICMP_ULT:
7654       return LHS.ult(RHS);
7655     case ICmpInst::ICMP_SLT:
7656       return LHS.slt(RHS);
7657     case ICmpInst::ICMP_NE:
7658       return LHS.ne(RHS);
7659     case ICmpInst::ICMP_ULE:
7660       return LHS.ule(RHS);
7661     case ICmpInst::ICMP_SLE:
7662       return LHS.sle(RHS);
7663     default:
7664       llvm_unreachable("Invalid ICmp predicate!");
7665     }
7666   }
7667 
7668   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7669                                  uint32_t ResultBitWidth) {
7670     Instruction::CastOps CastOp = CI->getOpcode();
7671     switch (CastOp) {
7672     default:
7673       llvm_unreachable("unsupported or not integer cast");
7674     case Instruction::Trunc:
7675       return Src.trunc(ResultBitWidth);
7676     case Instruction::SExt:
7677       return Src.sext(ResultBitWidth);
7678     case Instruction::ZExt:
7679       return Src.zext(ResultBitWidth);
7680     case Instruction::BitCast:
7681       return Src;
7682     }
7683   }
7684 
7685   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7686                                        const APInt &LHS, const APInt &RHS,
7687                                        bool &SkipOperation, bool &Unsupported) {
7688     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7689     // Unsupported is set to true when the binary operator is not supported.
7690     // SkipOperation is set to true when UB occur with the given operand pair
7691     // (LHS, RHS).
7692     // TODO: we should look at nsw and nuw keywords to handle operations
7693     //       that create poison or undef value.
7694     switch (BinOpcode) {
7695     default:
7696       Unsupported = true;
7697       return LHS;
7698     case Instruction::Add:
7699       return LHS + RHS;
7700     case Instruction::Sub:
7701       return LHS - RHS;
7702     case Instruction::Mul:
7703       return LHS * RHS;
7704     case Instruction::UDiv:
7705       if (RHS.isNullValue()) {
7706         SkipOperation = true;
7707         return LHS;
7708       }
7709       return LHS.udiv(RHS);
7710     case Instruction::SDiv:
7711       if (RHS.isNullValue()) {
7712         SkipOperation = true;
7713         return LHS;
7714       }
7715       return LHS.sdiv(RHS);
7716     case Instruction::URem:
7717       if (RHS.isNullValue()) {
7718         SkipOperation = true;
7719         return LHS;
7720       }
7721       return LHS.urem(RHS);
7722     case Instruction::SRem:
7723       if (RHS.isNullValue()) {
7724         SkipOperation = true;
7725         return LHS;
7726       }
7727       return LHS.srem(RHS);
7728     case Instruction::Shl:
7729       return LHS.shl(RHS);
7730     case Instruction::LShr:
7731       return LHS.lshr(RHS);
7732     case Instruction::AShr:
7733       return LHS.ashr(RHS);
7734     case Instruction::And:
7735       return LHS & RHS;
7736     case Instruction::Or:
7737       return LHS | RHS;
7738     case Instruction::Xor:
7739       return LHS ^ RHS;
7740     }
7741   }
7742 
7743   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7744                                            const APInt &LHS, const APInt &RHS) {
7745     bool SkipOperation = false;
7746     bool Unsupported = false;
7747     APInt Result =
7748         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7749     if (Unsupported)
7750       return false;
7751     // If SkipOperation is true, we can ignore this operand pair (L, R).
7752     if (!SkipOperation)
7753       unionAssumed(Result);
7754     return isValidState();
7755   }
7756 
7757   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7758     auto AssumedBefore = getAssumed();
7759     Value *LHS = ICI->getOperand(0);
7760     Value *RHS = ICI->getOperand(1);
7761     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7762       return indicatePessimisticFixpoint();
7763 
7764     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7765                                                 DepClassTy::REQUIRED);
7766     if (!LHSAA.isValidState())
7767       return indicatePessimisticFixpoint();
7768 
7769     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7770                                                 DepClassTy::REQUIRED);
7771     if (!RHSAA.isValidState())
7772       return indicatePessimisticFixpoint();
7773 
7774     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7775     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7776 
7777     // TODO: make use of undef flag to limit potential values aggressively.
7778     bool MaybeTrue = false, MaybeFalse = false;
7779     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7780     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7781       // The result of any comparison between undefs can be soundly replaced
7782       // with undef.
7783       unionAssumedWithUndef();
7784     } else if (LHSAA.undefIsContained()) {
7785       for (const APInt &R : RHSAAPVS) {
7786         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7787         MaybeTrue |= CmpResult;
7788         MaybeFalse |= !CmpResult;
7789         if (MaybeTrue & MaybeFalse)
7790           return indicatePessimisticFixpoint();
7791       }
7792     } else if (RHSAA.undefIsContained()) {
7793       for (const APInt &L : LHSAAPVS) {
7794         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7795         MaybeTrue |= CmpResult;
7796         MaybeFalse |= !CmpResult;
7797         if (MaybeTrue & MaybeFalse)
7798           return indicatePessimisticFixpoint();
7799       }
7800     } else {
7801       for (const APInt &L : LHSAAPVS) {
7802         for (const APInt &R : RHSAAPVS) {
7803           bool CmpResult = calculateICmpInst(ICI, L, R);
7804           MaybeTrue |= CmpResult;
7805           MaybeFalse |= !CmpResult;
7806           if (MaybeTrue & MaybeFalse)
7807             return indicatePessimisticFixpoint();
7808         }
7809       }
7810     }
7811     if (MaybeTrue)
7812       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7813     if (MaybeFalse)
7814       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7815     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7816                                          : ChangeStatus::CHANGED;
7817   }
7818 
7819   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7820     auto AssumedBefore = getAssumed();
7821     Value *LHS = SI->getTrueValue();
7822     Value *RHS = SI->getFalseValue();
7823     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7824       return indicatePessimisticFixpoint();
7825 
7826     // TODO: Use assumed simplified condition value
7827     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7828                                                 DepClassTy::REQUIRED);
7829     if (!LHSAA.isValidState())
7830       return indicatePessimisticFixpoint();
7831 
7832     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7833                                                 DepClassTy::REQUIRED);
7834     if (!RHSAA.isValidState())
7835       return indicatePessimisticFixpoint();
7836 
7837     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7838       // select i1 *, undef , undef => undef
7839       unionAssumedWithUndef();
7840     else {
7841       unionAssumed(LHSAA);
7842       unionAssumed(RHSAA);
7843     }
7844     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7845                                          : ChangeStatus::CHANGED;
7846   }
7847 
7848   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7849     auto AssumedBefore = getAssumed();
7850     if (!CI->isIntegerCast())
7851       return indicatePessimisticFixpoint();
7852     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7853     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7854     Value *Src = CI->getOperand(0);
7855     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7856                                                 DepClassTy::REQUIRED);
7857     if (!SrcAA.isValidState())
7858       return indicatePessimisticFixpoint();
7859     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7860     if (SrcAA.undefIsContained())
7861       unionAssumedWithUndef();
7862     else {
7863       for (const APInt &S : SrcAAPVS) {
7864         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7865         unionAssumed(T);
7866       }
7867     }
7868     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7869                                          : ChangeStatus::CHANGED;
7870   }
7871 
7872   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7873     auto AssumedBefore = getAssumed();
7874     Value *LHS = BinOp->getOperand(0);
7875     Value *RHS = BinOp->getOperand(1);
7876     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7877       return indicatePessimisticFixpoint();
7878 
7879     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7880                                                 DepClassTy::REQUIRED);
7881     if (!LHSAA.isValidState())
7882       return indicatePessimisticFixpoint();
7883 
7884     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7885                                                 DepClassTy::REQUIRED);
7886     if (!RHSAA.isValidState())
7887       return indicatePessimisticFixpoint();
7888 
7889     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7890     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7891     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7892 
7893     // TODO: make use of undef flag to limit potential values aggressively.
7894     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7895       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7896         return indicatePessimisticFixpoint();
7897     } else if (LHSAA.undefIsContained()) {
7898       for (const APInt &R : RHSAAPVS) {
7899         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7900           return indicatePessimisticFixpoint();
7901       }
7902     } else if (RHSAA.undefIsContained()) {
7903       for (const APInt &L : LHSAAPVS) {
7904         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7905           return indicatePessimisticFixpoint();
7906       }
7907     } else {
7908       for (const APInt &L : LHSAAPVS) {
7909         for (const APInt &R : RHSAAPVS) {
7910           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7911             return indicatePessimisticFixpoint();
7912         }
7913       }
7914     }
7915     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7916                                          : ChangeStatus::CHANGED;
7917   }
7918 
7919   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7920     auto AssumedBefore = getAssumed();
7921     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7922       Value *IncomingValue = PHI->getIncomingValue(u);
7923       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7924           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7925       if (!PotentialValuesAA.isValidState())
7926         return indicatePessimisticFixpoint();
7927       if (PotentialValuesAA.undefIsContained())
7928         unionAssumedWithUndef();
7929       else
7930         unionAssumed(PotentialValuesAA.getAssumed());
7931     }
7932     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7933                                          : ChangeStatus::CHANGED;
7934   }
7935 
7936   /// See AbstractAttribute::updateImpl(...).
7937   ChangeStatus updateImpl(Attributor &A) override {
7938     Value &V = getAssociatedValue();
7939     Instruction *I = dyn_cast<Instruction>(&V);
7940 
7941     if (auto *ICI = dyn_cast<ICmpInst>(I))
7942       return updateWithICmpInst(A, ICI);
7943 
7944     if (auto *SI = dyn_cast<SelectInst>(I))
7945       return updateWithSelectInst(A, SI);
7946 
7947     if (auto *CI = dyn_cast<CastInst>(I))
7948       return updateWithCastInst(A, CI);
7949 
7950     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7951       return updateWithBinaryOperator(A, BinOp);
7952 
7953     if (auto *PHI = dyn_cast<PHINode>(I))
7954       return updateWithPHINode(A, PHI);
7955 
7956     return indicatePessimisticFixpoint();
7957   }
7958 
7959   /// See AbstractAttribute::trackStatistics()
7960   void trackStatistics() const override {
7961     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7962   }
7963 };
7964 
7965 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7966   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7967       : AAPotentialValuesImpl(IRP, A) {}
7968 
7969   /// See AbstractAttribute::initialize(...).
7970   ChangeStatus updateImpl(Attributor &A) override {
7971     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7972                      "not be called");
7973   }
7974 
7975   /// See AbstractAttribute::trackStatistics()
7976   void trackStatistics() const override {
7977     STATS_DECLTRACK_FN_ATTR(potential_values)
7978   }
7979 };
7980 
7981 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7982   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7983       : AAPotentialValuesFunction(IRP, A) {}
7984 
7985   /// See AbstractAttribute::trackStatistics()
7986   void trackStatistics() const override {
7987     STATS_DECLTRACK_CS_ATTR(potential_values)
7988   }
7989 };
7990 
7991 struct AAPotentialValuesCallSiteReturned
7992     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7993   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7994       : AACallSiteReturnedFromReturned<AAPotentialValues,
7995                                        AAPotentialValuesImpl>(IRP, A) {}
7996 
7997   /// See AbstractAttribute::trackStatistics()
7998   void trackStatistics() const override {
7999     STATS_DECLTRACK_CSRET_ATTR(potential_values)
8000   }
8001 };
8002 
8003 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
8004   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
8005       : AAPotentialValuesFloating(IRP, A) {}
8006 
8007   /// See AbstractAttribute::initialize(..).
8008   void initialize(Attributor &A) override {
8009     Value &V = getAssociatedValue();
8010 
8011     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8012       unionAssumed(C->getValue());
8013       indicateOptimisticFixpoint();
8014       return;
8015     }
8016 
8017     if (isa<UndefValue>(&V)) {
8018       unionAssumedWithUndef();
8019       indicateOptimisticFixpoint();
8020       return;
8021     }
8022   }
8023 
8024   /// See AbstractAttribute::updateImpl(...).
8025   ChangeStatus updateImpl(Attributor &A) override {
8026     Value &V = getAssociatedValue();
8027     auto AssumedBefore = getAssumed();
8028     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
8029                                              DepClassTy::REQUIRED);
8030     const auto &S = AA.getAssumed();
8031     unionAssumed(S);
8032     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8033                                          : ChangeStatus::CHANGED;
8034   }
8035 
8036   /// See AbstractAttribute::trackStatistics()
8037   void trackStatistics() const override {
8038     STATS_DECLTRACK_CSARG_ATTR(potential_values)
8039   }
8040 };
8041 
8042 /// ------------------------ NoUndef Attribute ---------------------------------
8043 struct AANoUndefImpl : AANoUndef {
8044   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
8045 
8046   /// See AbstractAttribute::initialize(...).
8047   void initialize(Attributor &A) override {
8048     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
8049       indicateOptimisticFixpoint();
8050       return;
8051     }
8052     Value &V = getAssociatedValue();
8053     if (isa<UndefValue>(V))
8054       indicatePessimisticFixpoint();
8055     else if (isa<FreezeInst>(V))
8056       indicateOptimisticFixpoint();
8057     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8058              isGuaranteedNotToBeUndefOrPoison(&V))
8059       indicateOptimisticFixpoint();
8060     else
8061       AANoUndef::initialize(A);
8062   }
8063 
8064   /// See followUsesInMBEC
8065   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
8066                        AANoUndef::StateType &State) {
8067     const Value *UseV = U->get();
8068     const DominatorTree *DT = nullptr;
8069     AssumptionCache *AC = nullptr;
8070     InformationCache &InfoCache = A.getInfoCache();
8071     if (Function *F = getAnchorScope()) {
8072       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
8073       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
8074     }
8075     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
8076     bool TrackUse = false;
8077     // Track use for instructions which must produce undef or poison bits when
8078     // at least one operand contains such bits.
8079     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
8080       TrackUse = true;
8081     return TrackUse;
8082   }
8083 
8084   /// See AbstractAttribute::getAsStr().
8085   const std::string getAsStr() const override {
8086     return getAssumed() ? "noundef" : "may-undef-or-poison";
8087   }
8088 
8089   ChangeStatus manifest(Attributor &A) override {
8090     // We don't manifest noundef attribute for dead positions because the
8091     // associated values with dead positions would be replaced with undef
8092     // values.
8093     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
8094       return ChangeStatus::UNCHANGED;
8095     // A position whose simplified value does not have any value is
8096     // considered to be dead. We don't manifest noundef in such positions for
8097     // the same reason above.
8098     bool UsedAssumedInformation = false;
8099     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
8100              .hasValue())
8101       return ChangeStatus::UNCHANGED;
8102     return AANoUndef::manifest(A);
8103   }
8104 };
8105 
8106 struct AANoUndefFloating : public AANoUndefImpl {
8107   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
8108       : AANoUndefImpl(IRP, A) {}
8109 
8110   /// See AbstractAttribute::initialize(...).
8111   void initialize(Attributor &A) override {
8112     AANoUndefImpl::initialize(A);
8113     if (!getState().isAtFixpoint())
8114       if (Instruction *CtxI = getCtxI())
8115         followUsesInMBEC(*this, A, getState(), *CtxI);
8116   }
8117 
8118   /// See AbstractAttribute::updateImpl(...).
8119   ChangeStatus updateImpl(Attributor &A) override {
8120     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8121                             AANoUndef::StateType &T, bool Stripped) -> bool {
8122       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
8123                                              DepClassTy::REQUIRED);
8124       if (!Stripped && this == &AA) {
8125         T.indicatePessimisticFixpoint();
8126       } else {
8127         const AANoUndef::StateType &S =
8128             static_cast<const AANoUndef::StateType &>(AA.getState());
8129         T ^= S;
8130       }
8131       return T.isValidState();
8132     };
8133 
8134     StateType T;
8135     if (!genericValueTraversal<AANoUndef, StateType>(
8136             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
8137       return indicatePessimisticFixpoint();
8138 
8139     return clampStateAndIndicateChange(getState(), T);
8140   }
8141 
8142   /// See AbstractAttribute::trackStatistics()
8143   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8144 };
8145 
8146 struct AANoUndefReturned final
8147     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
8148   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
8149       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
8150 
8151   /// See AbstractAttribute::trackStatistics()
8152   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8153 };
8154 
8155 struct AANoUndefArgument final
8156     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8157   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8158       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8159 
8160   /// See AbstractAttribute::trackStatistics()
8161   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8162 };
8163 
8164 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8165   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8166       : AANoUndefFloating(IRP, A) {}
8167 
8168   /// See AbstractAttribute::trackStatistics()
8169   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8170 };
8171 
8172 struct AANoUndefCallSiteReturned final
8173     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8174   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8175       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8176 
8177   /// See AbstractAttribute::trackStatistics()
8178   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8179 };
8180 
8181 struct AACallEdgesFunction : public AACallEdges {
8182   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
8183       : AACallEdges(IRP, A) {}
8184 
8185   /// See AbstractAttribute::updateImpl(...).
8186   ChangeStatus updateImpl(Attributor &A) override {
8187     ChangeStatus Change = ChangeStatus::UNCHANGED;
8188     bool OldHasUnknownCallee = HasUnknownCallee;
8189 
8190     auto AddCalledFunction = [&](Function *Fn) {
8191       if (CalledFunctions.insert(Fn)) {
8192         Change = ChangeStatus::CHANGED;
8193         LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
8194                           << "\n");
8195       }
8196     };
8197 
8198     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
8199                           bool Stripped) -> bool {
8200       if (Function *Fn = dyn_cast<Function>(&V)) {
8201         AddCalledFunction(Fn);
8202       } else {
8203         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
8204         HasUnknown = true;
8205       }
8206 
8207       // Explore all values.
8208       return true;
8209     };
8210 
8211     // Process any value that we might call.
8212     auto ProcessCalledOperand = [&](Value *V, Instruction *Ctx) {
8213       if (!genericValueTraversal<AACallEdges, bool>(A, IRPosition::value(*V),
8214                                                     *this, HasUnknownCallee,
8215                                                     VisitValue, nullptr, false))
8216         // If we haven't gone through all values, assume that there are unknown
8217         // callees.
8218         HasUnknownCallee = true;
8219     };
8220 
8221     auto ProcessCallInst = [&](Instruction &Inst) {
8222       CallBase &CB = static_cast<CallBase &>(Inst);
8223 
8224       // Process callee metadata if available.
8225       if (auto *MD = Inst.getMetadata(LLVMContext::MD_callees)) {
8226         for (auto &Op : MD->operands()) {
8227           Function *Callee = mdconst::extract_or_null<Function>(Op);
8228           if (Callee)
8229             AddCalledFunction(Callee);
8230         }
8231         // Callees metadata grantees that the called function is one of its
8232         // operands, So we are done.
8233         return true;
8234       }
8235 
8236       // The most simple case.
8237       ProcessCalledOperand(CB.getCalledOperand(), &Inst);
8238 
8239       // Process callback functions.
8240       SmallVector<const Use *, 4u> CallbackUses;
8241       AbstractCallSite::getCallbackUses(CB, CallbackUses);
8242       for (const Use *U : CallbackUses)
8243         ProcessCalledOperand(U->get(), &Inst);
8244 
8245       return true;
8246     };
8247 
8248     // Visit all callable instructions.
8249     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this))
8250       // If we haven't looked at all call like instructions, assume that there
8251       // are unknown callees.
8252       HasUnknownCallee = true;
8253     // Track changes.
8254     if (OldHasUnknownCallee != HasUnknownCallee)
8255       Change = ChangeStatus::CHANGED;
8256 
8257     return Change;
8258   }
8259 
8260   virtual const SetVector<Function *> &getOptimisticEdges() const override {
8261     return CalledFunctions;
8262   };
8263 
8264   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
8265 
8266   const std::string getAsStr() const override {
8267     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
8268            std::to_string(CalledFunctions.size()) + "]";
8269   }
8270 
8271   void trackStatistics() const override {}
8272 
8273   /// Optimistic set of functions that might be called by this function.
8274   SetVector<Function *> CalledFunctions;
8275 
8276   /// Does this function have a call to a function that we don't know about.
8277   bool HasUnknownCallee = false;
8278 };
8279 
8280 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
8281   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
8282       : AAFunctionReachability(IRP, A) {}
8283 
8284   bool canReach(Attributor &A, Function *Fn) const override {
8285     // Assume that we can reach any function if we can reach a call with
8286     // unknown callee.
8287     if (CanReachUnknownCallee)
8288       return true;
8289 
8290     if (ReachableQueries.count(Fn))
8291       return true;
8292 
8293     if (UnreachableQueries.count(Fn))
8294       return false;
8295 
8296     const AACallEdges &AAEdges =
8297         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
8298 
8299     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
8300     bool Result = checkIfReachable(A, Edges, Fn);
8301 
8302     // Attributor returns attributes as const, so this function has to be
8303     // const for users of this attribute to use it without having to do
8304     // a const_cast.
8305     // This is a hack for us to be able to cache queries.
8306     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
8307 
8308     if (Result)
8309       NonConstThis->ReachableQueries.insert(Fn);
8310     else
8311       NonConstThis->UnreachableQueries.insert(Fn);
8312 
8313     return Result;
8314   }
8315 
8316   /// See AbstractAttribute::updateImpl(...).
8317   ChangeStatus updateImpl(Attributor &A) override {
8318     if (CanReachUnknownCallee)
8319       return ChangeStatus::UNCHANGED;
8320 
8321     const AACallEdges &AAEdges =
8322         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
8323     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
8324     ChangeStatus Change = ChangeStatus::UNCHANGED;
8325 
8326     if (AAEdges.hasUnknownCallee()) {
8327       bool OldCanReachUnknown = CanReachUnknownCallee;
8328       CanReachUnknownCallee = true;
8329       return OldCanReachUnknown ? ChangeStatus::UNCHANGED
8330                                 : ChangeStatus::CHANGED;
8331     }
8332 
8333     // Check if any of the unreachable functions become reachable.
8334     for (auto Current = UnreachableQueries.begin();
8335          Current != UnreachableQueries.end();) {
8336       if (!checkIfReachable(A, Edges, *Current)) {
8337         Current++;
8338         continue;
8339       }
8340       ReachableQueries.insert(*Current);
8341       UnreachableQueries.erase(*Current++);
8342       Change = ChangeStatus::CHANGED;
8343     }
8344 
8345     return Change;
8346   }
8347 
8348   const std::string getAsStr() const override {
8349     size_t QueryCount = ReachableQueries.size() + UnreachableQueries.size();
8350 
8351     return "FunctionReachability [" + std::to_string(ReachableQueries.size()) +
8352            "," + std::to_string(QueryCount) + "]";
8353   }
8354 
8355   void trackStatistics() const override {}
8356 
8357 private:
8358   bool canReachUnknownCallee() const override { return CanReachUnknownCallee; }
8359 
8360   bool checkIfReachable(Attributor &A, const SetVector<Function *> &Edges,
8361                         Function *Fn) const {
8362     if (Edges.count(Fn))
8363       return true;
8364 
8365     for (Function *Edge : Edges) {
8366       // We don't need a dependency if the result is reachable.
8367       const AAFunctionReachability &EdgeReachability =
8368           A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Edge),
8369                                              DepClassTy::NONE);
8370 
8371       if (EdgeReachability.canReach(A, Fn))
8372         return true;
8373     }
8374     for (Function *Fn : Edges)
8375       A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Fn),
8376                                          DepClassTy::REQUIRED);
8377 
8378     return false;
8379   }
8380 
8381   /// Set of functions that we know for sure is reachable.
8382   SmallPtrSet<Function *, 8> ReachableQueries;
8383 
8384   /// Set of functions that are unreachable, but might become reachable.
8385   SmallPtrSet<Function *, 8> UnreachableQueries;
8386 
8387   /// If we can reach a function with a call to a unknown function we assume
8388   /// that we can reach any function.
8389   bool CanReachUnknownCallee = false;
8390 };
8391 
8392 } // namespace
8393 
8394 AACallGraphNode *AACallEdgeIterator::operator*() const {
8395   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
8396       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
8397 }
8398 
8399 void AttributorCallGraph::print() {
8400   llvm::WriteGraph(outs(), this);
8401 }
8402 
8403 const char AAReturnedValues::ID = 0;
8404 const char AANoUnwind::ID = 0;
8405 const char AANoSync::ID = 0;
8406 const char AANoFree::ID = 0;
8407 const char AANonNull::ID = 0;
8408 const char AANoRecurse::ID = 0;
8409 const char AAWillReturn::ID = 0;
8410 const char AAUndefinedBehavior::ID = 0;
8411 const char AANoAlias::ID = 0;
8412 const char AAReachability::ID = 0;
8413 const char AANoReturn::ID = 0;
8414 const char AAIsDead::ID = 0;
8415 const char AADereferenceable::ID = 0;
8416 const char AAAlign::ID = 0;
8417 const char AANoCapture::ID = 0;
8418 const char AAValueSimplify::ID = 0;
8419 const char AAHeapToStack::ID = 0;
8420 const char AAPrivatizablePtr::ID = 0;
8421 const char AAMemoryBehavior::ID = 0;
8422 const char AAMemoryLocation::ID = 0;
8423 const char AAValueConstantRange::ID = 0;
8424 const char AAPotentialValues::ID = 0;
8425 const char AANoUndef::ID = 0;
8426 const char AACallEdges::ID = 0;
8427 const char AAFunctionReachability::ID = 0;
8428 
8429 // Macro magic to create the static generator function for attributes that
8430 // follow the naming scheme.
8431 
8432 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8433   case IRPosition::PK:                                                         \
8434     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8435 
8436 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8437   case IRPosition::PK:                                                         \
8438     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8439     ++NumAAs;                                                                  \
8440     break;
8441 
8442 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8443   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8444     CLASS *AA = nullptr;                                                       \
8445     switch (IRP.getPositionKind()) {                                           \
8446       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8447       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8448       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8449       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8450       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8451       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8452       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8453       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8454     }                                                                          \
8455     return *AA;                                                                \
8456   }
8457 
8458 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8459   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8460     CLASS *AA = nullptr;                                                       \
8461     switch (IRP.getPositionKind()) {                                           \
8462       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8463       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8464       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8465       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8466       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8467       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8468       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8469       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8470     }                                                                          \
8471     return *AA;                                                                \
8472   }
8473 
8474 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8475   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8476     CLASS *AA = nullptr;                                                       \
8477     switch (IRP.getPositionKind()) {                                           \
8478       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8479       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8480       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8481       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8482       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8483       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8484       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8485       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8486     }                                                                          \
8487     return *AA;                                                                \
8488   }
8489 
8490 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8491   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8492     CLASS *AA = nullptr;                                                       \
8493     switch (IRP.getPositionKind()) {                                           \
8494       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8495       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8496       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8497       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8498       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8499       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8500       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8501       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8502     }                                                                          \
8503     return *AA;                                                                \
8504   }
8505 
8506 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8507   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8508     CLASS *AA = nullptr;                                                       \
8509     switch (IRP.getPositionKind()) {                                           \
8510       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8511       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8512       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8513       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8514       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8515       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8516       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8517       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8518     }                                                                          \
8519     return *AA;                                                                \
8520   }
8521 
8522 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8523 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8524 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8525 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8526 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8527 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8528 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8529 
8530 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8531 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8532 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8533 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8534 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8535 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8536 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8537 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8538 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8539 
8540 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8541 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8542 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8543 
8544 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8545 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8546 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8547 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
8548 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
8549 
8550 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8551 
8552 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8553 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8554 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8555 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8556 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8557 #undef SWITCH_PK_CREATE
8558 #undef SWITCH_PK_INV
8559