1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
71 // If there is a single "increment" side one can use the macro
72 // STATS_DECLTRACK with a custom message. If there are multiple increment
73 // sides, STATS_DECL and STATS_TRACK can also be used separately.
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
106 // Specialization of the operator<< for abstract attributes subclasses. This
107 // disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
143 static Optional<ConstantInt *>
144 getAssumedConstantInt(Attributor &A, const Value &V,
145                       const AbstractAttribute &AA,
146                       bool &UsedAssumedInformation) {
147   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
148   if (C.hasValue())
149     return dyn_cast_or_null<ConstantInt>(C.getValue());
150   return llvm::None;
151 }
152 
153 /// Get pointer operand of memory accessing instruction. If \p I is
154 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
155 /// is set to false and the instruction is volatile, return nullptr.
156 static const Value *getPointerOperand(const Instruction *I,
157                                       bool AllowVolatile) {
158   if (auto *LI = dyn_cast<LoadInst>(I)) {
159     if (!AllowVolatile && LI->isVolatile())
160       return nullptr;
161     return LI->getPointerOperand();
162   }
163 
164   if (auto *SI = dyn_cast<StoreInst>(I)) {
165     if (!AllowVolatile && SI->isVolatile())
166       return nullptr;
167     return SI->getPointerOperand();
168   }
169 
170   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
171     if (!AllowVolatile && CXI->isVolatile())
172       return nullptr;
173     return CXI->getPointerOperand();
174   }
175 
176   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
177     if (!AllowVolatile && RMWI->isVolatile())
178       return nullptr;
179     return RMWI->getPointerOperand();
180   }
181 
182   return nullptr;
183 }
184 
185 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
186 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
187 /// getelement pointer instructions that traverse the natural type of \p Ptr if
188 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
189 /// through a cast to i8*.
190 ///
191 /// TODO: This could probably live somewhere more prominantly if it doesn't
192 ///       already exist.
193 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
194                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
195   assert(Offset >= 0 && "Negative offset not supported yet!");
196   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
197                     << "-bytes as " << *ResTy << "\n");
198 
199   // The initial type we are trying to traverse to get nice GEPs.
200   Type *Ty = Ptr->getType();
201 
202   SmallVector<Value *, 4> Indices;
203   std::string GEPName = Ptr->getName().str();
204   while (Offset) {
205     uint64_t Idx, Rem;
206 
207     if (auto *STy = dyn_cast<StructType>(Ty)) {
208       const StructLayout *SL = DL.getStructLayout(STy);
209       if (int64_t(SL->getSizeInBytes()) < Offset)
210         break;
211       Idx = SL->getElementContainingOffset(Offset);
212       assert(Idx < STy->getNumElements() && "Offset calculation error!");
213       Rem = Offset - SL->getElementOffset(Idx);
214       Ty = STy->getElementType(Idx);
215     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
216       Ty = PTy->getElementType();
217       if (!Ty->isSized())
218         break;
219       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
220       assert(ElementSize && "Expected type with size!");
221       Idx = Offset / ElementSize;
222       Rem = Offset % ElementSize;
223     } else {
224       // Non-aggregate type, we cast and make byte-wise progress now.
225       break;
226     }
227 
228     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
229                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
230 
231     GEPName += "." + std::to_string(Idx);
232     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
233     Offset = Rem;
234   }
235 
236   // Create a GEP if we collected indices above.
237   if (Indices.size())
238     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
239 
240   // If an offset is left we use byte-wise adjustment.
241   if (Offset) {
242     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
243     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
244                         GEPName + ".b" + Twine(Offset));
245   }
246 
247   // Ensure the result has the requested type.
248   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
249 
250   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
251   return Ptr;
252 }
253 
254 /// Recursively visit all values that might become \p IRP at some point. This
255 /// will be done by looking through cast instructions, selects, phis, and calls
256 /// with the "returned" attribute. Once we cannot look through the value any
257 /// further, the callback \p VisitValueCB is invoked and passed the current
258 /// value, the \p State, and a flag to indicate if we stripped anything.
259 /// Stripped means that we unpacked the value associated with \p IRP at least
260 /// once. Note that the value used for the callback may still be the value
261 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
262 /// we will never visit more values than specified by \p MaxValues.
263 template <typename AAType, typename StateTy>
264 static bool genericValueTraversal(
265     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
266     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
267         VisitValueCB,
268     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
269     function_ref<Value *(Value *)> StripCB = nullptr) {
270 
271   const AAIsDead *LivenessAA = nullptr;
272   if (IRP.getAnchorScope())
273     LivenessAA = &A.getAAFor<AAIsDead>(
274         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
275         DepClassTy::NONE);
276   bool AnyDead = false;
277 
278   using Item = std::pair<Value *, const Instruction *>;
279   SmallSet<Item, 16> Visited;
280   SmallVector<Item, 16> Worklist;
281   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
282 
283   int Iteration = 0;
284   do {
285     Item I = Worklist.pop_back_val();
286     Value *V = I.first;
287     CtxI = I.second;
288     if (StripCB)
289       V = StripCB(V);
290 
291     // Check if we should process the current value. To prevent endless
292     // recursion keep a record of the values we followed!
293     if (!Visited.insert(I).second)
294       continue;
295 
296     // Make sure we limit the compile time for complex expressions.
297     if (Iteration++ >= MaxValues)
298       return false;
299 
300     // Explicitly look through calls with a "returned" attribute if we do
301     // not have a pointer as stripPointerCasts only works on them.
302     Value *NewV = nullptr;
303     if (V->getType()->isPointerTy()) {
304       NewV = V->stripPointerCasts();
305     } else {
306       auto *CB = dyn_cast<CallBase>(V);
307       if (CB && CB->getCalledFunction()) {
308         for (Argument &Arg : CB->getCalledFunction()->args())
309           if (Arg.hasReturnedAttr()) {
310             NewV = CB->getArgOperand(Arg.getArgNo());
311             break;
312           }
313       }
314     }
315     if (NewV && NewV != V) {
316       Worklist.push_back({NewV, CtxI});
317       continue;
318     }
319 
320     // Look through select instructions, visit both potential values.
321     if (auto *SI = dyn_cast<SelectInst>(V)) {
322       Worklist.push_back({SI->getTrueValue(), CtxI});
323       Worklist.push_back({SI->getFalseValue(), CtxI});
324       continue;
325     }
326 
327     // Look through phi nodes, visit all live operands.
328     if (auto *PHI = dyn_cast<PHINode>(V)) {
329       assert(LivenessAA &&
330              "Expected liveness in the presence of instructions!");
331       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
332         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
333         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
334                             LivenessAA,
335                             /* CheckBBLivenessOnly */ true)) {
336           AnyDead = true;
337           continue;
338         }
339         Worklist.push_back(
340             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
341       }
342       continue;
343     }
344 
345     if (UseValueSimplify && !isa<Constant>(V)) {
346       bool UsedAssumedInformation = false;
347       Optional<Constant *> C =
348           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
349       if (!C.hasValue())
350         continue;
351       if (Value *NewV = C.getValue()) {
352         Worklist.push_back({NewV, CtxI});
353         continue;
354       }
355     }
356 
357     // Once a leaf is reached we inform the user through the callback.
358     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
359       return false;
360   } while (!Worklist.empty());
361 
362   // If we actually used liveness information so we have to record a dependence.
363   if (AnyDead)
364     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
365 
366   // All values have been visited.
367   return true;
368 }
369 
370 const Value *stripAndAccumulateMinimalOffsets(
371     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
372     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
373     bool UseAssumed = false) {
374 
375   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
376     const IRPosition &Pos = IRPosition::value(V);
377     // Only track dependence if we are going to use the assumed info.
378     const AAValueConstantRange &ValueConstantRangeAA =
379         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
380                                          UseAssumed ? DepClassTy::OPTIONAL
381                                                     : DepClassTy::NONE);
382     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
383                                      : ValueConstantRangeAA.getKnown();
384     // We can only use the lower part of the range because the upper part can
385     // be higher than what the value can really be.
386     ROffset = Range.getSignedMin();
387     return true;
388   };
389 
390   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
391                                                 AttributorAnalysis);
392 }
393 
394 static const Value *getMinimalBaseOfAccsesPointerOperand(
395     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
396     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
397   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
398   if (!Ptr)
399     return nullptr;
400   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
401   const Value *Base = stripAndAccumulateMinimalOffsets(
402       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
403 
404   BytesOffset = OffsetAPInt.getSExtValue();
405   return Base;
406 }
407 
408 static const Value *
409 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
410                                      const DataLayout &DL,
411                                      bool AllowNonInbounds = false) {
412   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
413   if (!Ptr)
414     return nullptr;
415 
416   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
417                                           AllowNonInbounds);
418 }
419 
420 /// Helper function to clamp a state \p S of type \p StateType with the
421 /// information in \p R and indicate/return if \p S did change (as-in update is
422 /// required to be run again).
423 template <typename StateType>
424 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
425   auto Assumed = S.getAssumed();
426   S ^= R;
427   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
428                                    : ChangeStatus::CHANGED;
429 }
430 
431 /// Clamp the information known for all returned values of a function
432 /// (identified by \p QueryingAA) into \p S.
433 template <typename AAType, typename StateType = typename AAType::StateType>
434 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
435                                      StateType &S) {
436   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
437                     << QueryingAA << " into " << S << "\n");
438 
439   assert((QueryingAA.getIRPosition().getPositionKind() ==
440               IRPosition::IRP_RETURNED ||
441           QueryingAA.getIRPosition().getPositionKind() ==
442               IRPosition::IRP_CALL_SITE_RETURNED) &&
443          "Can only clamp returned value states for a function returned or call "
444          "site returned position!");
445 
446   // Use an optional state as there might not be any return values and we want
447   // to join (IntegerState::operator&) the state of all there are.
448   Optional<StateType> T;
449 
450   // Callback for each possibly returned value.
451   auto CheckReturnValue = [&](Value &RV) -> bool {
452     const IRPosition &RVPos = IRPosition::value(RV);
453     const AAType &AA =
454         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
455     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
456                       << " @ " << RVPos << "\n");
457     const StateType &AAS = AA.getState();
458     if (T.hasValue())
459       *T &= AAS;
460     else
461       T = AAS;
462     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
463                       << "\n");
464     return T->isValidState();
465   };
466 
467   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
468     S.indicatePessimisticFixpoint();
469   else if (T.hasValue())
470     S ^= *T;
471 }
472 
473 /// Helper class for generic deduction: return value -> returned position.
474 template <typename AAType, typename BaseType,
475           typename StateType = typename BaseType::StateType>
476 struct AAReturnedFromReturnedValues : public BaseType {
477   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
478       : BaseType(IRP, A) {}
479 
480   /// See AbstractAttribute::updateImpl(...).
481   ChangeStatus updateImpl(Attributor &A) override {
482     StateType S(StateType::getBestState(this->getState()));
483     clampReturnedValueStates<AAType, StateType>(A, *this, S);
484     // TODO: If we know we visited all returned values, thus no are assumed
485     // dead, we can take the known information from the state T.
486     return clampStateAndIndicateChange<StateType>(this->getState(), S);
487   }
488 };
489 
490 /// Clamp the information known at all call sites for a given argument
491 /// (identified by \p QueryingAA) into \p S.
492 template <typename AAType, typename StateType = typename AAType::StateType>
493 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
494                                         StateType &S) {
495   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
496                     << QueryingAA << " into " << S << "\n");
497 
498   assert(QueryingAA.getIRPosition().getPositionKind() ==
499              IRPosition::IRP_ARGUMENT &&
500          "Can only clamp call site argument states for an argument position!");
501 
502   // Use an optional state as there might not be any return values and we want
503   // to join (IntegerState::operator&) the state of all there are.
504   Optional<StateType> T;
505 
506   // The argument number which is also the call site argument number.
507   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
508 
509   auto CallSiteCheck = [&](AbstractCallSite ACS) {
510     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
511     // Check if a coresponding argument was found or if it is on not associated
512     // (which can happen for callback calls).
513     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
514       return false;
515 
516     const AAType &AA =
517         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
518     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
519                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
520     const StateType &AAS = AA.getState();
521     if (T.hasValue())
522       *T &= AAS;
523     else
524       T = AAS;
525     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
526                       << "\n");
527     return T->isValidState();
528   };
529 
530   bool AllCallSitesKnown;
531   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
532                               AllCallSitesKnown))
533     S.indicatePessimisticFixpoint();
534   else if (T.hasValue())
535     S ^= *T;
536 }
537 
538 /// Helper class for generic deduction: call site argument -> argument position.
539 template <typename AAType, typename BaseType,
540           typename StateType = typename AAType::StateType>
541 struct AAArgumentFromCallSiteArguments : public BaseType {
542   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
543       : BaseType(IRP, A) {}
544 
545   /// See AbstractAttribute::updateImpl(...).
546   ChangeStatus updateImpl(Attributor &A) override {
547     StateType S(StateType::getBestState(this->getState()));
548     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
549     // TODO: If we know we visited all incoming values, thus no are assumed
550     // dead, we can take the known information from the state T.
551     return clampStateAndIndicateChange<StateType>(this->getState(), S);
552   }
553 };
554 
555 /// Helper class for generic replication: function returned -> cs returned.
556 template <typename AAType, typename BaseType,
557           typename StateType = typename BaseType::StateType>
558 struct AACallSiteReturnedFromReturned : public BaseType {
559   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
560       : BaseType(IRP, A) {}
561 
562   /// See AbstractAttribute::updateImpl(...).
563   ChangeStatus updateImpl(Attributor &A) override {
564     assert(this->getIRPosition().getPositionKind() ==
565                IRPosition::IRP_CALL_SITE_RETURNED &&
566            "Can only wrap function returned positions for call site returned "
567            "positions!");
568     auto &S = this->getState();
569 
570     const Function *AssociatedFunction =
571         this->getIRPosition().getAssociatedFunction();
572     if (!AssociatedFunction)
573       return S.indicatePessimisticFixpoint();
574 
575     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
576     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
577     return clampStateAndIndicateChange(S, AA.getState());
578   }
579 };
580 
581 /// Helper function to accumulate uses.
582 template <class AAType, typename StateType = typename AAType::StateType>
583 static void followUsesInContext(AAType &AA, Attributor &A,
584                                 MustBeExecutedContextExplorer &Explorer,
585                                 const Instruction *CtxI,
586                                 SetVector<const Use *> &Uses,
587                                 StateType &State) {
588   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
589   for (unsigned u = 0; u < Uses.size(); ++u) {
590     const Use *U = Uses[u];
591     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
592       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
593       if (Found && AA.followUseInMBEC(A, U, UserI, State))
594         for (const Use &Us : UserI->uses())
595           Uses.insert(&Us);
596     }
597   }
598 }
599 
600 /// Use the must-be-executed-context around \p I to add information into \p S.
601 /// The AAType class is required to have `followUseInMBEC` method with the
602 /// following signature and behaviour:
603 ///
604 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
605 /// U - Underlying use.
606 /// I - The user of the \p U.
607 /// Returns true if the value should be tracked transitively.
608 ///
609 template <class AAType, typename StateType = typename AAType::StateType>
610 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
611                              Instruction &CtxI) {
612 
613   // Container for (transitive) uses of the associated value.
614   SetVector<const Use *> Uses;
615   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
616     Uses.insert(&U);
617 
618   MustBeExecutedContextExplorer &Explorer =
619       A.getInfoCache().getMustBeExecutedContextExplorer();
620 
621   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
622 
623   if (S.isAtFixpoint())
624     return;
625 
626   SmallVector<const BranchInst *, 4> BrInsts;
627   auto Pred = [&](const Instruction *I) {
628     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
629       if (Br->isConditional())
630         BrInsts.push_back(Br);
631     return true;
632   };
633 
634   // Here, accumulate conditional branch instructions in the context. We
635   // explore the child paths and collect the known states. The disjunction of
636   // those states can be merged to its own state. Let ParentState_i be a state
637   // to indicate the known information for an i-th branch instruction in the
638   // context. ChildStates are created for its successors respectively.
639   //
640   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
641   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
642   //      ...
643   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
644   //
645   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
646   //
647   // FIXME: Currently, recursive branches are not handled. For example, we
648   // can't deduce that ptr must be dereferenced in below function.
649   //
650   // void f(int a, int c, int *ptr) {
651   //    if(a)
652   //      if (b) {
653   //        *ptr = 0;
654   //      } else {
655   //        *ptr = 1;
656   //      }
657   //    else {
658   //      if (b) {
659   //        *ptr = 0;
660   //      } else {
661   //        *ptr = 1;
662   //      }
663   //    }
664   // }
665 
666   Explorer.checkForAllContext(&CtxI, Pred);
667   for (const BranchInst *Br : BrInsts) {
668     StateType ParentState;
669 
670     // The known state of the parent state is a conjunction of children's
671     // known states so it is initialized with a best state.
672     ParentState.indicateOptimisticFixpoint();
673 
674     for (const BasicBlock *BB : Br->successors()) {
675       StateType ChildState;
676 
677       size_t BeforeSize = Uses.size();
678       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
679 
680       // Erase uses which only appear in the child.
681       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
682         It = Uses.erase(It);
683 
684       ParentState &= ChildState;
685     }
686 
687     // Use only known state.
688     S += ParentState;
689   }
690 }
691 
692 /// -----------------------NoUnwind Function Attribute--------------------------
693 
694 struct AANoUnwindImpl : AANoUnwind {
695   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
696 
697   const std::string getAsStr() const override {
698     return getAssumed() ? "nounwind" : "may-unwind";
699   }
700 
701   /// See AbstractAttribute::updateImpl(...).
702   ChangeStatus updateImpl(Attributor &A) override {
703     auto Opcodes = {
704         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
705         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
706         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
707 
708     auto CheckForNoUnwind = [&](Instruction &I) {
709       if (!I.mayThrow())
710         return true;
711 
712       if (const auto *CB = dyn_cast<CallBase>(&I)) {
713         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
714             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
715         return NoUnwindAA.isAssumedNoUnwind();
716       }
717       return false;
718     };
719 
720     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
721       return indicatePessimisticFixpoint();
722 
723     return ChangeStatus::UNCHANGED;
724   }
725 };
726 
727 struct AANoUnwindFunction final : public AANoUnwindImpl {
728   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
729       : AANoUnwindImpl(IRP, A) {}
730 
731   /// See AbstractAttribute::trackStatistics()
732   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
733 };
734 
735 /// NoUnwind attribute deduction for a call sites.
736 struct AANoUnwindCallSite final : AANoUnwindImpl {
737   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
738       : AANoUnwindImpl(IRP, A) {}
739 
740   /// See AbstractAttribute::initialize(...).
741   void initialize(Attributor &A) override {
742     AANoUnwindImpl::initialize(A);
743     Function *F = getAssociatedFunction();
744     if (!F || F->isDeclaration())
745       indicatePessimisticFixpoint();
746   }
747 
748   /// See AbstractAttribute::updateImpl(...).
749   ChangeStatus updateImpl(Attributor &A) override {
750     // TODO: Once we have call site specific value information we can provide
751     //       call site specific liveness information and then it makes
752     //       sense to specialize attributes for call sites arguments instead of
753     //       redirecting requests to the callee argument.
754     Function *F = getAssociatedFunction();
755     const IRPosition &FnPos = IRPosition::function(*F);
756     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
757     return clampStateAndIndicateChange(getState(), FnAA.getState());
758   }
759 
760   /// See AbstractAttribute::trackStatistics()
761   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
762 };
763 
764 /// --------------------- Function Return Values -------------------------------
765 
766 /// "Attribute" that collects all potential returned values and the return
767 /// instructions that they arise from.
768 ///
769 /// If there is a unique returned value R, the manifest method will:
770 ///   - mark R with the "returned" attribute, if R is an argument.
771 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
772 
773   /// Mapping of values potentially returned by the associated function to the
774   /// return instructions that might return them.
775   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
776 
777   /// Mapping to remember the number of returned values for a call site such
778   /// that we can avoid updates if nothing changed.
779   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
780 
781   /// Set of unresolved calls returned by the associated function.
782   SmallSetVector<CallBase *, 4> UnresolvedCalls;
783 
784   /// State flags
785   ///
786   ///{
787   bool IsFixed = false;
788   bool IsValidState = true;
789   ///}
790 
791 public:
792   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
793       : AAReturnedValues(IRP, A) {}
794 
795   /// See AbstractAttribute::initialize(...).
796   void initialize(Attributor &A) override {
797     // Reset the state.
798     IsFixed = false;
799     IsValidState = true;
800     ReturnedValues.clear();
801 
802     Function *F = getAssociatedFunction();
803     if (!F || F->isDeclaration()) {
804       indicatePessimisticFixpoint();
805       return;
806     }
807     assert(!F->getReturnType()->isVoidTy() &&
808            "Did not expect a void return type!");
809 
810     // The map from instruction opcodes to those instructions in the function.
811     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
812 
813     // Look through all arguments, if one is marked as returned we are done.
814     for (Argument &Arg : F->args()) {
815       if (Arg.hasReturnedAttr()) {
816         auto &ReturnInstSet = ReturnedValues[&Arg];
817         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
818           for (Instruction *RI : *Insts)
819             ReturnInstSet.insert(cast<ReturnInst>(RI));
820 
821         indicateOptimisticFixpoint();
822         return;
823       }
824     }
825 
826     if (!A.isFunctionIPOAmendable(*F))
827       indicatePessimisticFixpoint();
828   }
829 
830   /// See AbstractAttribute::manifest(...).
831   ChangeStatus manifest(Attributor &A) override;
832 
833   /// See AbstractAttribute::getState(...).
834   AbstractState &getState() override { return *this; }
835 
836   /// See AbstractAttribute::getState(...).
837   const AbstractState &getState() const override { return *this; }
838 
839   /// See AbstractAttribute::updateImpl(Attributor &A).
840   ChangeStatus updateImpl(Attributor &A) override;
841 
842   llvm::iterator_range<iterator> returned_values() override {
843     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
844   }
845 
846   llvm::iterator_range<const_iterator> returned_values() const override {
847     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
848   }
849 
850   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
851     return UnresolvedCalls;
852   }
853 
854   /// Return the number of potential return values, -1 if unknown.
855   size_t getNumReturnValues() const override {
856     return isValidState() ? ReturnedValues.size() : -1;
857   }
858 
859   /// Return an assumed unique return value if a single candidate is found. If
860   /// there cannot be one, return a nullptr. If it is not clear yet, return the
861   /// Optional::NoneType.
862   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
863 
864   /// See AbstractState::checkForAllReturnedValues(...).
865   bool checkForAllReturnedValuesAndReturnInsts(
866       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
867       const override;
868 
869   /// Pretty print the attribute similar to the IR representation.
870   const std::string getAsStr() const override;
871 
872   /// See AbstractState::isAtFixpoint().
873   bool isAtFixpoint() const override { return IsFixed; }
874 
875   /// See AbstractState::isValidState().
876   bool isValidState() const override { return IsValidState; }
877 
878   /// See AbstractState::indicateOptimisticFixpoint(...).
879   ChangeStatus indicateOptimisticFixpoint() override {
880     IsFixed = true;
881     return ChangeStatus::UNCHANGED;
882   }
883 
884   ChangeStatus indicatePessimisticFixpoint() override {
885     IsFixed = true;
886     IsValidState = false;
887     return ChangeStatus::CHANGED;
888   }
889 };
890 
891 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
892   ChangeStatus Changed = ChangeStatus::UNCHANGED;
893 
894   // Bookkeeping.
895   assert(isValidState());
896   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
897                   "Number of function with known return values");
898 
899   // Check if we have an assumed unique return value that we could manifest.
900   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
901 
902   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
903     return Changed;
904 
905   // Bookkeeping.
906   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
907                   "Number of function with unique return");
908 
909   // Callback to replace the uses of CB with the constant C.
910   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
911     if (CB.use_empty())
912       return ChangeStatus::UNCHANGED;
913     if (A.changeValueAfterManifest(CB, C))
914       return ChangeStatus::CHANGED;
915     return ChangeStatus::UNCHANGED;
916   };
917 
918   // If the assumed unique return value is an argument, annotate it.
919   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
920     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
921             getAssociatedFunction()->getReturnType())) {
922       getIRPosition() = IRPosition::argument(*UniqueRVArg);
923       Changed = IRAttribute::manifest(A);
924     }
925   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
926     // We can replace the returned value with the unique returned constant.
927     Value &AnchorValue = getAnchorValue();
928     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
929       for (const Use &U : F->uses())
930         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
931           if (CB->isCallee(&U)) {
932             Constant *RVCCast =
933                 CB->getType() == RVC->getType()
934                     ? RVC
935                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
936             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
937           }
938     } else {
939       assert(isa<CallBase>(AnchorValue) &&
940              "Expcected a function or call base anchor!");
941       Constant *RVCCast =
942           AnchorValue.getType() == RVC->getType()
943               ? RVC
944               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
945       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
946     }
947     if (Changed == ChangeStatus::CHANGED)
948       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
949                       "Number of function returns replaced by constant return");
950   }
951 
952   return Changed;
953 }
954 
955 const std::string AAReturnedValuesImpl::getAsStr() const {
956   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
957          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
958          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
959 }
960 
961 Optional<Value *>
962 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
963   // If checkForAllReturnedValues provides a unique value, ignoring potential
964   // undef values that can also be present, it is assumed to be the actual
965   // return value and forwarded to the caller of this method. If there are
966   // multiple, a nullptr is returned indicating there cannot be a unique
967   // returned value.
968   Optional<Value *> UniqueRV;
969 
970   auto Pred = [&](Value &RV) -> bool {
971     // If we found a second returned value and neither the current nor the saved
972     // one is an undef, there is no unique returned value. Undefs are special
973     // since we can pretend they have any value.
974     if (UniqueRV.hasValue() && UniqueRV != &RV &&
975         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
976       UniqueRV = nullptr;
977       return false;
978     }
979 
980     // Do not overwrite a value with an undef.
981     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
982       UniqueRV = &RV;
983 
984     return true;
985   };
986 
987   if (!A.checkForAllReturnedValues(Pred, *this))
988     UniqueRV = nullptr;
989 
990   return UniqueRV;
991 }
992 
993 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
994     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
995     const {
996   if (!isValidState())
997     return false;
998 
999   // Check all returned values but ignore call sites as long as we have not
1000   // encountered an overdefined one during an update.
1001   for (auto &It : ReturnedValues) {
1002     Value *RV = It.first;
1003 
1004     CallBase *CB = dyn_cast<CallBase>(RV);
1005     if (CB && !UnresolvedCalls.count(CB))
1006       continue;
1007 
1008     if (!Pred(*RV, It.second))
1009       return false;
1010   }
1011 
1012   return true;
1013 }
1014 
1015 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1016   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1017   bool Changed = false;
1018 
1019   // State used in the value traversals starting in returned values.
1020   struct RVState {
1021     // The map in which we collect return values -> return instrs.
1022     decltype(ReturnedValues) &RetValsMap;
1023     // The flag to indicate a change.
1024     bool &Changed;
1025     // The return instrs we come from.
1026     SmallSetVector<ReturnInst *, 4> RetInsts;
1027   };
1028 
1029   // Callback for a leaf value returned by the associated function.
1030   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1031                          bool) -> bool {
1032     auto Size = RVS.RetValsMap[&Val].size();
1033     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1034     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1035     RVS.Changed |= Inserted;
1036     LLVM_DEBUG({
1037       if (Inserted)
1038         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1039                << " => " << RVS.RetInsts.size() << "\n";
1040     });
1041     return true;
1042   };
1043 
1044   // Helper method to invoke the generic value traversal.
1045   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1046                                 const Instruction *CtxI) {
1047     IRPosition RetValPos = IRPosition::value(RV);
1048     return genericValueTraversal<AAReturnedValues, RVState>(
1049         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1050         /* UseValueSimplify */ false);
1051   };
1052 
1053   // Callback for all "return intructions" live in the associated function.
1054   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1055     ReturnInst &Ret = cast<ReturnInst>(I);
1056     RVState RVS({ReturnedValues, Changed, {}});
1057     RVS.RetInsts.insert(&Ret);
1058     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1059   };
1060 
1061   // Start by discovering returned values from all live returned instructions in
1062   // the associated function.
1063   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1064     return indicatePessimisticFixpoint();
1065 
1066   // Once returned values "directly" present in the code are handled we try to
1067   // resolve returned calls. To avoid modifications to the ReturnedValues map
1068   // while we iterate over it we kept record of potential new entries in a copy
1069   // map, NewRVsMap.
1070   decltype(ReturnedValues) NewRVsMap;
1071 
1072   auto HandleReturnValue = [&](Value *RV,
1073                                SmallSetVector<ReturnInst *, 4> &RIs) {
1074     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1075                       << RIs.size() << " RIs\n");
1076     CallBase *CB = dyn_cast<CallBase>(RV);
1077     if (!CB || UnresolvedCalls.count(CB))
1078       return;
1079 
1080     if (!CB->getCalledFunction()) {
1081       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1082                         << "\n");
1083       UnresolvedCalls.insert(CB);
1084       return;
1085     }
1086 
1087     // TODO: use the function scope once we have call site AAReturnedValues.
1088     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1089         *this, IRPosition::function(*CB->getCalledFunction()),
1090         DepClassTy::REQUIRED);
1091     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1092                       << RetValAA << "\n");
1093 
1094     // Skip dead ends, thus if we do not know anything about the returned
1095     // call we mark it as unresolved and it will stay that way.
1096     if (!RetValAA.getState().isValidState()) {
1097       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1098                         << "\n");
1099       UnresolvedCalls.insert(CB);
1100       return;
1101     }
1102 
1103     // Do not try to learn partial information. If the callee has unresolved
1104     // return values we will treat the call as unresolved/opaque.
1105     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1106     if (!RetValAAUnresolvedCalls.empty()) {
1107       UnresolvedCalls.insert(CB);
1108       return;
1109     }
1110 
1111     // Now check if we can track transitively returned values. If possible, thus
1112     // if all return value can be represented in the current scope, do so.
1113     bool Unresolved = false;
1114     for (auto &RetValAAIt : RetValAA.returned_values()) {
1115       Value *RetVal = RetValAAIt.first;
1116       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1117           isa<Constant>(RetVal))
1118         continue;
1119       // Anything that did not fit in the above categories cannot be resolved,
1120       // mark the call as unresolved.
1121       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1122                            "cannot be translated: "
1123                         << *RetVal << "\n");
1124       UnresolvedCalls.insert(CB);
1125       Unresolved = true;
1126       break;
1127     }
1128 
1129     if (Unresolved)
1130       return;
1131 
1132     // Now track transitively returned values.
1133     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1134     if (NumRetAA == RetValAA.getNumReturnValues()) {
1135       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1136                            "changed since it was seen last\n");
1137       return;
1138     }
1139     NumRetAA = RetValAA.getNumReturnValues();
1140 
1141     for (auto &RetValAAIt : RetValAA.returned_values()) {
1142       Value *RetVal = RetValAAIt.first;
1143       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1144         // Arguments are mapped to call site operands and we begin the traversal
1145         // again.
1146         bool Unused = false;
1147         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1148         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1149         continue;
1150       }
1151       if (isa<CallBase>(RetVal)) {
1152         // Call sites are resolved by the callee attribute over time, no need to
1153         // do anything for us.
1154         continue;
1155       }
1156       if (isa<Constant>(RetVal)) {
1157         // Constants are valid everywhere, we can simply take them.
1158         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1159         continue;
1160       }
1161     }
1162   };
1163 
1164   for (auto &It : ReturnedValues)
1165     HandleReturnValue(It.first, It.second);
1166 
1167   // Because processing the new information can again lead to new return values
1168   // we have to be careful and iterate until this iteration is complete. The
1169   // idea is that we are in a stable state at the end of an update. All return
1170   // values have been handled and properly categorized. We might not update
1171   // again if we have not requested a non-fix attribute so we cannot "wait" for
1172   // the next update to analyze a new return value.
1173   while (!NewRVsMap.empty()) {
1174     auto It = std::move(NewRVsMap.back());
1175     NewRVsMap.pop_back();
1176 
1177     assert(!It.second.empty() && "Entry does not add anything.");
1178     auto &ReturnInsts = ReturnedValues[It.first];
1179     for (ReturnInst *RI : It.second)
1180       if (ReturnInsts.insert(RI)) {
1181         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1182                           << *It.first << " => " << *RI << "\n");
1183         HandleReturnValue(It.first, ReturnInsts);
1184         Changed = true;
1185       }
1186   }
1187 
1188   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1189   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1190 }
1191 
1192 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1193   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1194       : AAReturnedValuesImpl(IRP, A) {}
1195 
1196   /// See AbstractAttribute::trackStatistics()
1197   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1198 };
1199 
1200 /// Returned values information for a call sites.
1201 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1202   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1203       : AAReturnedValuesImpl(IRP, A) {}
1204 
1205   /// See AbstractAttribute::initialize(...).
1206   void initialize(Attributor &A) override {
1207     // TODO: Once we have call site specific value information we can provide
1208     //       call site specific liveness information and then it makes
1209     //       sense to specialize attributes for call sites instead of
1210     //       redirecting requests to the callee.
1211     llvm_unreachable("Abstract attributes for returned values are not "
1212                      "supported for call sites yet!");
1213   }
1214 
1215   /// See AbstractAttribute::updateImpl(...).
1216   ChangeStatus updateImpl(Attributor &A) override {
1217     return indicatePessimisticFixpoint();
1218   }
1219 
1220   /// See AbstractAttribute::trackStatistics()
1221   void trackStatistics() const override {}
1222 };
1223 
1224 /// ------------------------ NoSync Function Attribute -------------------------
1225 
1226 struct AANoSyncImpl : AANoSync {
1227   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1228 
1229   const std::string getAsStr() const override {
1230     return getAssumed() ? "nosync" : "may-sync";
1231   }
1232 
1233   /// See AbstractAttribute::updateImpl(...).
1234   ChangeStatus updateImpl(Attributor &A) override;
1235 
1236   /// Helper function used to determine whether an instruction is non-relaxed
1237   /// atomic. In other words, if an atomic instruction does not have unordered
1238   /// or monotonic ordering
1239   static bool isNonRelaxedAtomic(Instruction *I);
1240 
1241   /// Helper function used to determine whether an instruction is volatile.
1242   static bool isVolatile(Instruction *I);
1243 
1244   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1245   /// memset).
1246   static bool isNoSyncIntrinsic(Instruction *I);
1247 };
1248 
1249 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1250   if (!I->isAtomic())
1251     return false;
1252 
1253   AtomicOrdering Ordering;
1254   switch (I->getOpcode()) {
1255   case Instruction::AtomicRMW:
1256     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1257     break;
1258   case Instruction::Store:
1259     Ordering = cast<StoreInst>(I)->getOrdering();
1260     break;
1261   case Instruction::Load:
1262     Ordering = cast<LoadInst>(I)->getOrdering();
1263     break;
1264   case Instruction::Fence: {
1265     auto *FI = cast<FenceInst>(I);
1266     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1267       return false;
1268     Ordering = FI->getOrdering();
1269     break;
1270   }
1271   case Instruction::AtomicCmpXchg: {
1272     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1273     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1274     // Only if both are relaxed, than it can be treated as relaxed.
1275     // Otherwise it is non-relaxed.
1276     if (Success != AtomicOrdering::Unordered &&
1277         Success != AtomicOrdering::Monotonic)
1278       return true;
1279     if (Failure != AtomicOrdering::Unordered &&
1280         Failure != AtomicOrdering::Monotonic)
1281       return true;
1282     return false;
1283   }
1284   default:
1285     llvm_unreachable(
1286         "New atomic operations need to be known in the attributor.");
1287   }
1288 
1289   // Relaxed.
1290   if (Ordering == AtomicOrdering::Unordered ||
1291       Ordering == AtomicOrdering::Monotonic)
1292     return false;
1293   return true;
1294 }
1295 
1296 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1297 /// FIXME: We should ipmrove the handling of intrinsics.
1298 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1299   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1300     switch (II->getIntrinsicID()) {
1301     /// Element wise atomic memory intrinsics are can only be unordered,
1302     /// therefore nosync.
1303     case Intrinsic::memset_element_unordered_atomic:
1304     case Intrinsic::memmove_element_unordered_atomic:
1305     case Intrinsic::memcpy_element_unordered_atomic:
1306       return true;
1307     case Intrinsic::memset:
1308     case Intrinsic::memmove:
1309     case Intrinsic::memcpy:
1310       if (!cast<MemIntrinsic>(II)->isVolatile())
1311         return true;
1312       return false;
1313     default:
1314       return false;
1315     }
1316   }
1317   return false;
1318 }
1319 
1320 bool AANoSyncImpl::isVolatile(Instruction *I) {
1321   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1322 
1323   switch (I->getOpcode()) {
1324   case Instruction::AtomicRMW:
1325     return cast<AtomicRMWInst>(I)->isVolatile();
1326   case Instruction::Store:
1327     return cast<StoreInst>(I)->isVolatile();
1328   case Instruction::Load:
1329     return cast<LoadInst>(I)->isVolatile();
1330   case Instruction::AtomicCmpXchg:
1331     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1332   default:
1333     return false;
1334   }
1335 }
1336 
1337 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1338 
1339   auto CheckRWInstForNoSync = [&](Instruction &I) {
1340     /// We are looking for volatile instructions or Non-Relaxed atomics.
1341     /// FIXME: We should improve the handling of intrinsics.
1342 
1343     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1344       return true;
1345 
1346     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1347       if (CB->hasFnAttr(Attribute::NoSync))
1348         return true;
1349 
1350       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1351           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1352       return NoSyncAA.isAssumedNoSync();
1353     }
1354 
1355     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1356       return true;
1357 
1358     return false;
1359   };
1360 
1361   auto CheckForNoSync = [&](Instruction &I) {
1362     // At this point we handled all read/write effects and they are all
1363     // nosync, so they can be skipped.
1364     if (I.mayReadOrWriteMemory())
1365       return true;
1366 
1367     // non-convergent and readnone imply nosync.
1368     return !cast<CallBase>(I).isConvergent();
1369   };
1370 
1371   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1372       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1373     return indicatePessimisticFixpoint();
1374 
1375   return ChangeStatus::UNCHANGED;
1376 }
1377 
1378 struct AANoSyncFunction final : public AANoSyncImpl {
1379   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1380       : AANoSyncImpl(IRP, A) {}
1381 
1382   /// See AbstractAttribute::trackStatistics()
1383   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1384 };
1385 
1386 /// NoSync attribute deduction for a call sites.
1387 struct AANoSyncCallSite final : AANoSyncImpl {
1388   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1389       : AANoSyncImpl(IRP, A) {}
1390 
1391   /// See AbstractAttribute::initialize(...).
1392   void initialize(Attributor &A) override {
1393     AANoSyncImpl::initialize(A);
1394     Function *F = getAssociatedFunction();
1395     if (!F || F->isDeclaration())
1396       indicatePessimisticFixpoint();
1397   }
1398 
1399   /// See AbstractAttribute::updateImpl(...).
1400   ChangeStatus updateImpl(Attributor &A) override {
1401     // TODO: Once we have call site specific value information we can provide
1402     //       call site specific liveness information and then it makes
1403     //       sense to specialize attributes for call sites arguments instead of
1404     //       redirecting requests to the callee argument.
1405     Function *F = getAssociatedFunction();
1406     const IRPosition &FnPos = IRPosition::function(*F);
1407     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1408     return clampStateAndIndicateChange(getState(), FnAA.getState());
1409   }
1410 
1411   /// See AbstractAttribute::trackStatistics()
1412   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1413 };
1414 
1415 /// ------------------------ No-Free Attributes ----------------------------
1416 
1417 struct AANoFreeImpl : public AANoFree {
1418   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1419 
1420   /// See AbstractAttribute::updateImpl(...).
1421   ChangeStatus updateImpl(Attributor &A) override {
1422     auto CheckForNoFree = [&](Instruction &I) {
1423       const auto &CB = cast<CallBase>(I);
1424       if (CB.hasFnAttr(Attribute::NoFree))
1425         return true;
1426 
1427       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1428           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1429       return NoFreeAA.isAssumedNoFree();
1430     };
1431 
1432     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1433       return indicatePessimisticFixpoint();
1434     return ChangeStatus::UNCHANGED;
1435   }
1436 
1437   /// See AbstractAttribute::getAsStr().
1438   const std::string getAsStr() const override {
1439     return getAssumed() ? "nofree" : "may-free";
1440   }
1441 };
1442 
1443 struct AANoFreeFunction final : public AANoFreeImpl {
1444   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1445       : AANoFreeImpl(IRP, A) {}
1446 
1447   /// See AbstractAttribute::trackStatistics()
1448   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1449 };
1450 
1451 /// NoFree attribute deduction for a call sites.
1452 struct AANoFreeCallSite final : AANoFreeImpl {
1453   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1454       : AANoFreeImpl(IRP, A) {}
1455 
1456   /// See AbstractAttribute::initialize(...).
1457   void initialize(Attributor &A) override {
1458     AANoFreeImpl::initialize(A);
1459     Function *F = getAssociatedFunction();
1460     if (!F || F->isDeclaration())
1461       indicatePessimisticFixpoint();
1462   }
1463 
1464   /// See AbstractAttribute::updateImpl(...).
1465   ChangeStatus updateImpl(Attributor &A) override {
1466     // TODO: Once we have call site specific value information we can provide
1467     //       call site specific liveness information and then it makes
1468     //       sense to specialize attributes for call sites arguments instead of
1469     //       redirecting requests to the callee argument.
1470     Function *F = getAssociatedFunction();
1471     const IRPosition &FnPos = IRPosition::function(*F);
1472     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1473     return clampStateAndIndicateChange(getState(), FnAA.getState());
1474   }
1475 
1476   /// See AbstractAttribute::trackStatistics()
1477   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1478 };
1479 
1480 /// NoFree attribute for floating values.
1481 struct AANoFreeFloating : AANoFreeImpl {
1482   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1483       : AANoFreeImpl(IRP, A) {}
1484 
1485   /// See AbstractAttribute::trackStatistics()
1486   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1487 
1488   /// See Abstract Attribute::updateImpl(...).
1489   ChangeStatus updateImpl(Attributor &A) override {
1490     const IRPosition &IRP = getIRPosition();
1491 
1492     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1493         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1494     if (NoFreeAA.isAssumedNoFree())
1495       return ChangeStatus::UNCHANGED;
1496 
1497     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1498     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1499       Instruction *UserI = cast<Instruction>(U.getUser());
1500       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1501         if (CB->isBundleOperand(&U))
1502           return false;
1503         if (!CB->isArgOperand(&U))
1504           return true;
1505         unsigned ArgNo = CB->getArgOperandNo(&U);
1506 
1507         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1508             *this, IRPosition::callsite_argument(*CB, ArgNo),
1509             DepClassTy::REQUIRED);
1510         return NoFreeArg.isAssumedNoFree();
1511       }
1512 
1513       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1514           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1515         Follow = true;
1516         return true;
1517       }
1518       if (isa<ReturnInst>(UserI))
1519         return true;
1520 
1521       // Unknown user.
1522       return false;
1523     };
1524     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1525       return indicatePessimisticFixpoint();
1526 
1527     return ChangeStatus::UNCHANGED;
1528   }
1529 };
1530 
1531 /// NoFree attribute for a call site argument.
1532 struct AANoFreeArgument final : AANoFreeFloating {
1533   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1534       : AANoFreeFloating(IRP, A) {}
1535 
1536   /// See AbstractAttribute::trackStatistics()
1537   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1538 };
1539 
1540 /// NoFree attribute for call site arguments.
1541 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1542   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1543       : AANoFreeFloating(IRP, A) {}
1544 
1545   /// See AbstractAttribute::updateImpl(...).
1546   ChangeStatus updateImpl(Attributor &A) override {
1547     // TODO: Once we have call site specific value information we can provide
1548     //       call site specific liveness information and then it makes
1549     //       sense to specialize attributes for call sites arguments instead of
1550     //       redirecting requests to the callee argument.
1551     Argument *Arg = getAssociatedArgument();
1552     if (!Arg)
1553       return indicatePessimisticFixpoint();
1554     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1555     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1556     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1557   }
1558 
1559   /// See AbstractAttribute::trackStatistics()
1560   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1561 };
1562 
1563 /// NoFree attribute for function return value.
1564 struct AANoFreeReturned final : AANoFreeFloating {
1565   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1566       : AANoFreeFloating(IRP, A) {
1567     llvm_unreachable("NoFree is not applicable to function returns!");
1568   }
1569 
1570   /// See AbstractAttribute::initialize(...).
1571   void initialize(Attributor &A) override {
1572     llvm_unreachable("NoFree is not applicable to function returns!");
1573   }
1574 
1575   /// See AbstractAttribute::updateImpl(...).
1576   ChangeStatus updateImpl(Attributor &A) override {
1577     llvm_unreachable("NoFree is not applicable to function returns!");
1578   }
1579 
1580   /// See AbstractAttribute::trackStatistics()
1581   void trackStatistics() const override {}
1582 };
1583 
1584 /// NoFree attribute deduction for a call site return value.
1585 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1586   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1587       : AANoFreeFloating(IRP, A) {}
1588 
1589   ChangeStatus manifest(Attributor &A) override {
1590     return ChangeStatus::UNCHANGED;
1591   }
1592   /// See AbstractAttribute::trackStatistics()
1593   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1594 };
1595 
1596 /// ------------------------ NonNull Argument Attribute ------------------------
1597 static int64_t getKnownNonNullAndDerefBytesForUse(
1598     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1599     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1600   TrackUse = false;
1601 
1602   const Value *UseV = U->get();
1603   if (!UseV->getType()->isPointerTy())
1604     return 0;
1605 
1606   // We need to follow common pointer manipulation uses to the accesses they
1607   // feed into. We can try to be smart to avoid looking through things we do not
1608   // like for now, e.g., non-inbounds GEPs.
1609   if (isa<CastInst>(I)) {
1610     TrackUse = true;
1611     return 0;
1612   }
1613 
1614   if (isa<GetElementPtrInst>(I)) {
1615     TrackUse = true;
1616     return 0;
1617   }
1618 
1619   Type *PtrTy = UseV->getType();
1620   const Function *F = I->getFunction();
1621   bool NullPointerIsDefined =
1622       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1623   const DataLayout &DL = A.getInfoCache().getDL();
1624   if (const auto *CB = dyn_cast<CallBase>(I)) {
1625     if (CB->isBundleOperand(U)) {
1626       if (RetainedKnowledge RK = getKnowledgeFromUse(
1627               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1628         IsNonNull |=
1629             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1630         return RK.ArgValue;
1631       }
1632       return 0;
1633     }
1634 
1635     if (CB->isCallee(U)) {
1636       IsNonNull |= !NullPointerIsDefined;
1637       return 0;
1638     }
1639 
1640     unsigned ArgNo = CB->getArgOperandNo(U);
1641     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1642     // As long as we only use known information there is no need to track
1643     // dependences here.
1644     auto &DerefAA =
1645         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1646     IsNonNull |= DerefAA.isKnownNonNull();
1647     return DerefAA.getKnownDereferenceableBytes();
1648   }
1649 
1650   int64_t Offset;
1651   const Value *Base =
1652       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1653   if (Base) {
1654     if (Base == &AssociatedValue &&
1655         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1656       int64_t DerefBytes =
1657           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1658 
1659       IsNonNull |= !NullPointerIsDefined;
1660       return std::max(int64_t(0), DerefBytes);
1661     }
1662   }
1663 
1664   /// Corner case when an offset is 0.
1665   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1666                                               /*AllowNonInbounds*/ true);
1667   if (Base) {
1668     if (Offset == 0 && Base == &AssociatedValue &&
1669         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1670       int64_t DerefBytes =
1671           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1672       IsNonNull |= !NullPointerIsDefined;
1673       return std::max(int64_t(0), DerefBytes);
1674     }
1675   }
1676 
1677   return 0;
1678 }
1679 
1680 struct AANonNullImpl : AANonNull {
1681   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1682       : AANonNull(IRP, A),
1683         NullIsDefined(NullPointerIsDefined(
1684             getAnchorScope(),
1685             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1686 
1687   /// See AbstractAttribute::initialize(...).
1688   void initialize(Attributor &A) override {
1689     Value &V = getAssociatedValue();
1690     if (!NullIsDefined &&
1691         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1692                 /* IgnoreSubsumingPositions */ false, &A)) {
1693       indicateOptimisticFixpoint();
1694       return;
1695     }
1696 
1697     if (isa<ConstantPointerNull>(V)) {
1698       indicatePessimisticFixpoint();
1699       return;
1700     }
1701 
1702     AANonNull::initialize(A);
1703 
1704     bool CanBeNull = true;
1705     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) {
1706       if (!CanBeNull) {
1707         indicateOptimisticFixpoint();
1708         return;
1709       }
1710     }
1711 
1712     if (isa<GlobalValue>(&getAssociatedValue())) {
1713       indicatePessimisticFixpoint();
1714       return;
1715     }
1716 
1717     if (Instruction *CtxI = getCtxI())
1718       followUsesInMBEC(*this, A, getState(), *CtxI);
1719   }
1720 
1721   /// See followUsesInMBEC
1722   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1723                        AANonNull::StateType &State) {
1724     bool IsNonNull = false;
1725     bool TrackUse = false;
1726     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1727                                        IsNonNull, TrackUse);
1728     State.setKnown(IsNonNull);
1729     return TrackUse;
1730   }
1731 
1732   /// See AbstractAttribute::getAsStr().
1733   const std::string getAsStr() const override {
1734     return getAssumed() ? "nonnull" : "may-null";
1735   }
1736 
1737   /// Flag to determine if the underlying value can be null and still allow
1738   /// valid accesses.
1739   const bool NullIsDefined;
1740 };
1741 
1742 /// NonNull attribute for a floating value.
1743 struct AANonNullFloating : public AANonNullImpl {
1744   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1745       : AANonNullImpl(IRP, A) {}
1746 
1747   /// See AbstractAttribute::updateImpl(...).
1748   ChangeStatus updateImpl(Attributor &A) override {
1749     const DataLayout &DL = A.getDataLayout();
1750 
1751     DominatorTree *DT = nullptr;
1752     AssumptionCache *AC = nullptr;
1753     InformationCache &InfoCache = A.getInfoCache();
1754     if (const Function *Fn = getAnchorScope()) {
1755       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1756       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1757     }
1758 
1759     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1760                             AANonNull::StateType &T, bool Stripped) -> bool {
1761       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1762                                              DepClassTy::REQUIRED);
1763       if (!Stripped && this == &AA) {
1764         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1765           T.indicatePessimisticFixpoint();
1766       } else {
1767         // Use abstract attribute information.
1768         const AANonNull::StateType &NS = AA.getState();
1769         T ^= NS;
1770       }
1771       return T.isValidState();
1772     };
1773 
1774     StateType T;
1775     if (!genericValueTraversal<AANonNull, StateType>(
1776             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1777       return indicatePessimisticFixpoint();
1778 
1779     return clampStateAndIndicateChange(getState(), T);
1780   }
1781 
1782   /// See AbstractAttribute::trackStatistics()
1783   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1784 };
1785 
1786 /// NonNull attribute for function return value.
1787 struct AANonNullReturned final
1788     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1789   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1790       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1791 
1792   /// See AbstractAttribute::getAsStr().
1793   const std::string getAsStr() const override {
1794     return getAssumed() ? "nonnull" : "may-null";
1795   }
1796 
1797   /// See AbstractAttribute::trackStatistics()
1798   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1799 };
1800 
1801 /// NonNull attribute for function argument.
1802 struct AANonNullArgument final
1803     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1804   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1805       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1806 
1807   /// See AbstractAttribute::trackStatistics()
1808   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1809 };
1810 
1811 struct AANonNullCallSiteArgument final : AANonNullFloating {
1812   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1813       : AANonNullFloating(IRP, A) {}
1814 
1815   /// See AbstractAttribute::trackStatistics()
1816   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1817 };
1818 
1819 /// NonNull attribute for a call site return position.
1820 struct AANonNullCallSiteReturned final
1821     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1822   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1823       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1824 
1825   /// See AbstractAttribute::trackStatistics()
1826   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1827 };
1828 
1829 /// ------------------------ No-Recurse Attributes ----------------------------
1830 
1831 struct AANoRecurseImpl : public AANoRecurse {
1832   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1833 
1834   /// See AbstractAttribute::getAsStr()
1835   const std::string getAsStr() const override {
1836     return getAssumed() ? "norecurse" : "may-recurse";
1837   }
1838 };
1839 
1840 struct AANoRecurseFunction final : AANoRecurseImpl {
1841   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1842       : AANoRecurseImpl(IRP, A) {}
1843 
1844   /// See AbstractAttribute::initialize(...).
1845   void initialize(Attributor &A) override {
1846     AANoRecurseImpl::initialize(A);
1847     if (const Function *F = getAnchorScope())
1848       if (A.getInfoCache().getSccSize(*F) != 1)
1849         indicatePessimisticFixpoint();
1850   }
1851 
1852   /// See AbstractAttribute::updateImpl(...).
1853   ChangeStatus updateImpl(Attributor &A) override {
1854 
1855     // If all live call sites are known to be no-recurse, we are as well.
1856     auto CallSitePred = [&](AbstractCallSite ACS) {
1857       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1858           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1859           DepClassTy::NONE);
1860       return NoRecurseAA.isKnownNoRecurse();
1861     };
1862     bool AllCallSitesKnown;
1863     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1864       // If we know all call sites and all are known no-recurse, we are done.
1865       // If all known call sites, which might not be all that exist, are known
1866       // to be no-recurse, we are not done but we can continue to assume
1867       // no-recurse. If one of the call sites we have not visited will become
1868       // live, another update is triggered.
1869       if (AllCallSitesKnown)
1870         indicateOptimisticFixpoint();
1871       return ChangeStatus::UNCHANGED;
1872     }
1873 
1874     // If the above check does not hold anymore we look at the calls.
1875     auto CheckForNoRecurse = [&](Instruction &I) {
1876       const auto &CB = cast<CallBase>(I);
1877       if (CB.hasFnAttr(Attribute::NoRecurse))
1878         return true;
1879 
1880       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1881           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1882       if (!NoRecurseAA.isAssumedNoRecurse())
1883         return false;
1884 
1885       // Recursion to the same function
1886       if (CB.getCalledFunction() == getAnchorScope())
1887         return false;
1888 
1889       return true;
1890     };
1891 
1892     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1893       return indicatePessimisticFixpoint();
1894     return ChangeStatus::UNCHANGED;
1895   }
1896 
1897   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1898 };
1899 
1900 /// NoRecurse attribute deduction for a call sites.
1901 struct AANoRecurseCallSite final : AANoRecurseImpl {
1902   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1903       : AANoRecurseImpl(IRP, A) {}
1904 
1905   /// See AbstractAttribute::initialize(...).
1906   void initialize(Attributor &A) override {
1907     AANoRecurseImpl::initialize(A);
1908     Function *F = getAssociatedFunction();
1909     if (!F || F->isDeclaration())
1910       indicatePessimisticFixpoint();
1911   }
1912 
1913   /// See AbstractAttribute::updateImpl(...).
1914   ChangeStatus updateImpl(Attributor &A) override {
1915     // TODO: Once we have call site specific value information we can provide
1916     //       call site specific liveness information and then it makes
1917     //       sense to specialize attributes for call sites arguments instead of
1918     //       redirecting requests to the callee argument.
1919     Function *F = getAssociatedFunction();
1920     const IRPosition &FnPos = IRPosition::function(*F);
1921     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1922     return clampStateAndIndicateChange(getState(), FnAA.getState());
1923   }
1924 
1925   /// See AbstractAttribute::trackStatistics()
1926   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1927 };
1928 
1929 /// -------------------- Undefined-Behavior Attributes ------------------------
1930 
1931 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1932   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1933       : AAUndefinedBehavior(IRP, A) {}
1934 
1935   /// See AbstractAttribute::updateImpl(...).
1936   // through a pointer (i.e. also branches etc.)
1937   ChangeStatus updateImpl(Attributor &A) override {
1938     const size_t UBPrevSize = KnownUBInsts.size();
1939     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1940 
1941     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1942       // Skip instructions that are already saved.
1943       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1944         return true;
1945 
1946       // If we reach here, we know we have an instruction
1947       // that accesses memory through a pointer operand,
1948       // for which getPointerOperand() should give it to us.
1949       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1950       assert(PtrOp &&
1951              "Expected pointer operand of memory accessing instruction");
1952 
1953       // Either we stopped and the appropriate action was taken,
1954       // or we got back a simplified value to continue.
1955       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1956       if (!SimplifiedPtrOp.hasValue())
1957         return true;
1958       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1959 
1960       // A memory access through a pointer is considered UB
1961       // only if the pointer has constant null value.
1962       // TODO: Expand it to not only check constant values.
1963       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1964         AssumedNoUBInsts.insert(&I);
1965         return true;
1966       }
1967       const Type *PtrTy = PtrOpVal->getType();
1968 
1969       // Because we only consider instructions inside functions,
1970       // assume that a parent function exists.
1971       const Function *F = I.getFunction();
1972 
1973       // A memory access using constant null pointer is only considered UB
1974       // if null pointer is _not_ defined for the target platform.
1975       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1976         AssumedNoUBInsts.insert(&I);
1977       else
1978         KnownUBInsts.insert(&I);
1979       return true;
1980     };
1981 
1982     auto InspectBrInstForUB = [&](Instruction &I) {
1983       // A conditional branch instruction is considered UB if it has `undef`
1984       // condition.
1985 
1986       // Skip instructions that are already saved.
1987       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1988         return true;
1989 
1990       // We know we have a branch instruction.
1991       auto BrInst = cast<BranchInst>(&I);
1992 
1993       // Unconditional branches are never considered UB.
1994       if (BrInst->isUnconditional())
1995         return true;
1996 
1997       // Either we stopped and the appropriate action was taken,
1998       // or we got back a simplified value to continue.
1999       Optional<Value *> SimplifiedCond =
2000           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2001       if (!SimplifiedCond.hasValue())
2002         return true;
2003       AssumedNoUBInsts.insert(&I);
2004       return true;
2005     };
2006 
2007     auto InspectCallSiteForUB = [&](Instruction &I) {
2008       // Check whether a callsite always cause UB or not
2009 
2010       // Skip instructions that are already saved.
2011       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2012         return true;
2013 
2014       // Check nonnull and noundef argument attribute violation for each
2015       // callsite.
2016       CallBase &CB = cast<CallBase>(I);
2017       Function *Callee = CB.getCalledFunction();
2018       if (!Callee)
2019         return true;
2020       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2021         // If current argument is known to be simplified to null pointer and the
2022         // corresponding argument position is known to have nonnull attribute,
2023         // the argument is poison. Furthermore, if the argument is poison and
2024         // the position is known to have noundef attriubte, this callsite is
2025         // considered UB.
2026         if (idx >= Callee->arg_size())
2027           break;
2028         Value *ArgVal = CB.getArgOperand(idx);
2029         if (!ArgVal)
2030           continue;
2031         // Here, we handle three cases.
2032         //   (1) Not having a value means it is dead. (we can replace the value
2033         //       with undef)
2034         //   (2) Simplified to undef. The argument violate noundef attriubte.
2035         //   (3) Simplified to null pointer where known to be nonnull.
2036         //       The argument is a poison value and violate noundef attribute.
2037         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2038         auto &NoUndefAA =
2039             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2040         if (!NoUndefAA.isKnownNoUndef())
2041           continue;
2042         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2043             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2044         if (!ValueSimplifyAA.isKnown())
2045           continue;
2046         Optional<Value *> SimplifiedVal =
2047             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2048         if (!SimplifiedVal.hasValue() ||
2049             isa<UndefValue>(*SimplifiedVal.getValue())) {
2050           KnownUBInsts.insert(&I);
2051           continue;
2052         }
2053         if (!ArgVal->getType()->isPointerTy() ||
2054             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2055           continue;
2056         auto &NonNullAA =
2057             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2058         if (NonNullAA.isKnownNonNull())
2059           KnownUBInsts.insert(&I);
2060       }
2061       return true;
2062     };
2063 
2064     auto InspectReturnInstForUB =
2065         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2066           // Check if a return instruction always cause UB or not
2067           // Note: It is guaranteed that the returned position of the anchor
2068           //       scope has noundef attribute when this is called.
2069           //       We also ensure the return position is not "assumed dead"
2070           //       because the returned value was then potentially simplified to
2071           //       `undef` in AAReturnedValues without removing the `noundef`
2072           //       attribute yet.
2073 
2074           // When the returned position has noundef attriubte, UB occur in the
2075           // following cases.
2076           //   (1) Returned value is known to be undef.
2077           //   (2) The value is known to be a null pointer and the returned
2078           //       position has nonnull attribute (because the returned value is
2079           //       poison).
2080           bool FoundUB = false;
2081           if (isa<UndefValue>(V)) {
2082             FoundUB = true;
2083           } else {
2084             if (isa<ConstantPointerNull>(V)) {
2085               auto &NonNullAA = A.getAAFor<AANonNull>(
2086                   *this, IRPosition::returned(*getAnchorScope()),
2087                   DepClassTy::NONE);
2088               if (NonNullAA.isKnownNonNull())
2089                 FoundUB = true;
2090             }
2091           }
2092 
2093           if (FoundUB)
2094             for (ReturnInst *RI : RetInsts)
2095               KnownUBInsts.insert(RI);
2096           return true;
2097         };
2098 
2099     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2100                               {Instruction::Load, Instruction::Store,
2101                                Instruction::AtomicCmpXchg,
2102                                Instruction::AtomicRMW},
2103                               /* CheckBBLivenessOnly */ true);
2104     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2105                               /* CheckBBLivenessOnly */ true);
2106     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2107 
2108     // If the returned position of the anchor scope has noundef attriubte, check
2109     // all returned instructions.
2110     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2111       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2112       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2113         auto &RetPosNoUndefAA =
2114             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2115         if (RetPosNoUndefAA.isKnownNoUndef())
2116           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2117                                                     *this);
2118       }
2119     }
2120 
2121     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2122         UBPrevSize != KnownUBInsts.size())
2123       return ChangeStatus::CHANGED;
2124     return ChangeStatus::UNCHANGED;
2125   }
2126 
2127   bool isKnownToCauseUB(Instruction *I) const override {
2128     return KnownUBInsts.count(I);
2129   }
2130 
2131   bool isAssumedToCauseUB(Instruction *I) const override {
2132     // In simple words, if an instruction is not in the assumed to _not_
2133     // cause UB, then it is assumed UB (that includes those
2134     // in the KnownUBInsts set). The rest is boilerplate
2135     // is to ensure that it is one of the instructions we test
2136     // for UB.
2137 
2138     switch (I->getOpcode()) {
2139     case Instruction::Load:
2140     case Instruction::Store:
2141     case Instruction::AtomicCmpXchg:
2142     case Instruction::AtomicRMW:
2143       return !AssumedNoUBInsts.count(I);
2144     case Instruction::Br: {
2145       auto BrInst = cast<BranchInst>(I);
2146       if (BrInst->isUnconditional())
2147         return false;
2148       return !AssumedNoUBInsts.count(I);
2149     } break;
2150     default:
2151       return false;
2152     }
2153     return false;
2154   }
2155 
2156   ChangeStatus manifest(Attributor &A) override {
2157     if (KnownUBInsts.empty())
2158       return ChangeStatus::UNCHANGED;
2159     for (Instruction *I : KnownUBInsts)
2160       A.changeToUnreachableAfterManifest(I);
2161     return ChangeStatus::CHANGED;
2162   }
2163 
2164   /// See AbstractAttribute::getAsStr()
2165   const std::string getAsStr() const override {
2166     return getAssumed() ? "undefined-behavior" : "no-ub";
2167   }
2168 
2169   /// Note: The correctness of this analysis depends on the fact that the
2170   /// following 2 sets will stop changing after some point.
2171   /// "Change" here means that their size changes.
2172   /// The size of each set is monotonically increasing
2173   /// (we only add items to them) and it is upper bounded by the number of
2174   /// instructions in the processed function (we can never save more
2175   /// elements in either set than this number). Hence, at some point,
2176   /// they will stop increasing.
2177   /// Consequently, at some point, both sets will have stopped
2178   /// changing, effectively making the analysis reach a fixpoint.
2179 
2180   /// Note: These 2 sets are disjoint and an instruction can be considered
2181   /// one of 3 things:
2182   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2183   ///    the KnownUBInsts set.
2184   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2185   ///    has a reason to assume it).
2186   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2187   ///    could not find a reason to assume or prove that it can cause UB,
2188   ///    hence it assumes it doesn't. We have a set for these instructions
2189   ///    so that we don't reprocess them in every update.
2190   ///    Note however that instructions in this set may cause UB.
2191 
2192 protected:
2193   /// A set of all live instructions _known_ to cause UB.
2194   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2195 
2196 private:
2197   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2198   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2199 
2200   // Should be called on updates in which if we're processing an instruction
2201   // \p I that depends on a value \p V, one of the following has to happen:
2202   // - If the value is assumed, then stop.
2203   // - If the value is known but undef, then consider it UB.
2204   // - Otherwise, do specific processing with the simplified value.
2205   // We return None in the first 2 cases to signify that an appropriate
2206   // action was taken and the caller should stop.
2207   // Otherwise, we return the simplified value that the caller should
2208   // use for specific processing.
2209   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2210                                          Instruction *I) {
2211     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2212         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2213     Optional<Value *> SimplifiedV =
2214         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2215     if (!ValueSimplifyAA.isKnown()) {
2216       // Don't depend on assumed values.
2217       return llvm::None;
2218     }
2219     if (!SimplifiedV.hasValue()) {
2220       // If it is known (which we tested above) but it doesn't have a value,
2221       // then we can assume `undef` and hence the instruction is UB.
2222       KnownUBInsts.insert(I);
2223       return llvm::None;
2224     }
2225     Value *Val = SimplifiedV.getValue();
2226     if (isa<UndefValue>(Val)) {
2227       KnownUBInsts.insert(I);
2228       return llvm::None;
2229     }
2230     return Val;
2231   }
2232 };
2233 
2234 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2235   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2236       : AAUndefinedBehaviorImpl(IRP, A) {}
2237 
2238   /// See AbstractAttribute::trackStatistics()
2239   void trackStatistics() const override {
2240     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2241                "Number of instructions known to have UB");
2242     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2243         KnownUBInsts.size();
2244   }
2245 };
2246 
2247 /// ------------------------ Will-Return Attributes ----------------------------
2248 
2249 // Helper function that checks whether a function has any cycle which we don't
2250 // know if it is bounded or not.
2251 // Loops with maximum trip count are considered bounded, any other cycle not.
2252 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2253   ScalarEvolution *SE =
2254       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2255   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2256   // If either SCEV or LoopInfo is not available for the function then we assume
2257   // any cycle to be unbounded cycle.
2258   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2259   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2260   if (!SE || !LI) {
2261     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2262       if (SCCI.hasCycle())
2263         return true;
2264     return false;
2265   }
2266 
2267   // If there's irreducible control, the function may contain non-loop cycles.
2268   if (mayContainIrreducibleControl(F, LI))
2269     return true;
2270 
2271   // Any loop that does not have a max trip count is considered unbounded cycle.
2272   for (auto *L : LI->getLoopsInPreorder()) {
2273     if (!SE->getSmallConstantMaxTripCount(L))
2274       return true;
2275   }
2276   return false;
2277 }
2278 
2279 struct AAWillReturnImpl : public AAWillReturn {
2280   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2281       : AAWillReturn(IRP, A) {}
2282 
2283   /// See AbstractAttribute::initialize(...).
2284   void initialize(Attributor &A) override {
2285     AAWillReturn::initialize(A);
2286 
2287     Function *F = getAnchorScope();
2288     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2289       indicatePessimisticFixpoint();
2290   }
2291 
2292   /// See AbstractAttribute::updateImpl(...).
2293   ChangeStatus updateImpl(Attributor &A) override {
2294     auto CheckForWillReturn = [&](Instruction &I) {
2295       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2296       const auto &WillReturnAA =
2297           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2298       if (WillReturnAA.isKnownWillReturn())
2299         return true;
2300       if (!WillReturnAA.isAssumedWillReturn())
2301         return false;
2302       const auto &NoRecurseAA =
2303           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2304       return NoRecurseAA.isAssumedNoRecurse();
2305     };
2306 
2307     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2308       return indicatePessimisticFixpoint();
2309 
2310     return ChangeStatus::UNCHANGED;
2311   }
2312 
2313   /// See AbstractAttribute::getAsStr()
2314   const std::string getAsStr() const override {
2315     return getAssumed() ? "willreturn" : "may-noreturn";
2316   }
2317 };
2318 
2319 struct AAWillReturnFunction final : AAWillReturnImpl {
2320   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2321       : AAWillReturnImpl(IRP, A) {}
2322 
2323   /// See AbstractAttribute::trackStatistics()
2324   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2325 };
2326 
2327 /// WillReturn attribute deduction for a call sites.
2328 struct AAWillReturnCallSite final : AAWillReturnImpl {
2329   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2330       : AAWillReturnImpl(IRP, A) {}
2331 
2332   /// See AbstractAttribute::initialize(...).
2333   void initialize(Attributor &A) override {
2334     AAWillReturn::initialize(A);
2335     Function *F = getAssociatedFunction();
2336     if (!F || !A.isFunctionIPOAmendable(*F))
2337       indicatePessimisticFixpoint();
2338   }
2339 
2340   /// See AbstractAttribute::updateImpl(...).
2341   ChangeStatus updateImpl(Attributor &A) override {
2342     // TODO: Once we have call site specific value information we can provide
2343     //       call site specific liveness information and then it makes
2344     //       sense to specialize attributes for call sites arguments instead of
2345     //       redirecting requests to the callee argument.
2346     Function *F = getAssociatedFunction();
2347     const IRPosition &FnPos = IRPosition::function(*F);
2348     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2349     return clampStateAndIndicateChange(getState(), FnAA.getState());
2350   }
2351 
2352   /// See AbstractAttribute::trackStatistics()
2353   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2354 };
2355 
2356 /// -------------------AAReachability Attribute--------------------------
2357 
2358 struct AAReachabilityImpl : AAReachability {
2359   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2360       : AAReachability(IRP, A) {}
2361 
2362   const std::string getAsStr() const override {
2363     // TODO: Return the number of reachable queries.
2364     return "reachable";
2365   }
2366 
2367   /// See AbstractAttribute::initialize(...).
2368   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2369 
2370   /// See AbstractAttribute::updateImpl(...).
2371   ChangeStatus updateImpl(Attributor &A) override {
2372     return indicatePessimisticFixpoint();
2373   }
2374 };
2375 
2376 struct AAReachabilityFunction final : public AAReachabilityImpl {
2377   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2378       : AAReachabilityImpl(IRP, A) {}
2379 
2380   /// See AbstractAttribute::trackStatistics()
2381   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2382 };
2383 
2384 /// ------------------------ NoAlias Argument Attribute ------------------------
2385 
2386 struct AANoAliasImpl : AANoAlias {
2387   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2388     assert(getAssociatedType()->isPointerTy() &&
2389            "Noalias is a pointer attribute");
2390   }
2391 
2392   const std::string getAsStr() const override {
2393     return getAssumed() ? "noalias" : "may-alias";
2394   }
2395 };
2396 
2397 /// NoAlias attribute for a floating value.
2398 struct AANoAliasFloating final : AANoAliasImpl {
2399   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2400       : AANoAliasImpl(IRP, A) {}
2401 
2402   /// See AbstractAttribute::initialize(...).
2403   void initialize(Attributor &A) override {
2404     AANoAliasImpl::initialize(A);
2405     Value *Val = &getAssociatedValue();
2406     do {
2407       CastInst *CI = dyn_cast<CastInst>(Val);
2408       if (!CI)
2409         break;
2410       Value *Base = CI->getOperand(0);
2411       if (!Base->hasOneUse())
2412         break;
2413       Val = Base;
2414     } while (true);
2415 
2416     if (!Val->getType()->isPointerTy()) {
2417       indicatePessimisticFixpoint();
2418       return;
2419     }
2420 
2421     if (isa<AllocaInst>(Val))
2422       indicateOptimisticFixpoint();
2423     else if (isa<ConstantPointerNull>(Val) &&
2424              !NullPointerIsDefined(getAnchorScope(),
2425                                    Val->getType()->getPointerAddressSpace()))
2426       indicateOptimisticFixpoint();
2427     else if (Val != &getAssociatedValue()) {
2428       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2429           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2430       if (ValNoAliasAA.isKnownNoAlias())
2431         indicateOptimisticFixpoint();
2432     }
2433   }
2434 
2435   /// See AbstractAttribute::updateImpl(...).
2436   ChangeStatus updateImpl(Attributor &A) override {
2437     // TODO: Implement this.
2438     return indicatePessimisticFixpoint();
2439   }
2440 
2441   /// See AbstractAttribute::trackStatistics()
2442   void trackStatistics() const override {
2443     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2444   }
2445 };
2446 
2447 /// NoAlias attribute for an argument.
2448 struct AANoAliasArgument final
2449     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2450   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2451   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2452 
2453   /// See AbstractAttribute::initialize(...).
2454   void initialize(Attributor &A) override {
2455     Base::initialize(A);
2456     // See callsite argument attribute and callee argument attribute.
2457     if (hasAttr({Attribute::ByVal}))
2458       indicateOptimisticFixpoint();
2459   }
2460 
2461   /// See AbstractAttribute::update(...).
2462   ChangeStatus updateImpl(Attributor &A) override {
2463     // We have to make sure no-alias on the argument does not break
2464     // synchronization when this is a callback argument, see also [1] below.
2465     // If synchronization cannot be affected, we delegate to the base updateImpl
2466     // function, otherwise we give up for now.
2467 
2468     // If the function is no-sync, no-alias cannot break synchronization.
2469     const auto &NoSyncAA =
2470         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2471                              DepClassTy::OPTIONAL);
2472     if (NoSyncAA.isAssumedNoSync())
2473       return Base::updateImpl(A);
2474 
2475     // If the argument is read-only, no-alias cannot break synchronization.
2476     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2477         *this, getIRPosition(), DepClassTy::OPTIONAL);
2478     if (MemBehaviorAA.isAssumedReadOnly())
2479       return Base::updateImpl(A);
2480 
2481     // If the argument is never passed through callbacks, no-alias cannot break
2482     // synchronization.
2483     bool AllCallSitesKnown;
2484     if (A.checkForAllCallSites(
2485             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2486             true, AllCallSitesKnown))
2487       return Base::updateImpl(A);
2488 
2489     // TODO: add no-alias but make sure it doesn't break synchronization by
2490     // introducing fake uses. See:
2491     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2492     //     International Workshop on OpenMP 2018,
2493     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2494 
2495     return indicatePessimisticFixpoint();
2496   }
2497 
2498   /// See AbstractAttribute::trackStatistics()
2499   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2500 };
2501 
2502 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2503   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2504       : AANoAliasImpl(IRP, A) {}
2505 
2506   /// See AbstractAttribute::initialize(...).
2507   void initialize(Attributor &A) override {
2508     // See callsite argument attribute and callee argument attribute.
2509     const auto &CB = cast<CallBase>(getAnchorValue());
2510     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2511       indicateOptimisticFixpoint();
2512     Value &Val = getAssociatedValue();
2513     if (isa<ConstantPointerNull>(Val) &&
2514         !NullPointerIsDefined(getAnchorScope(),
2515                               Val.getType()->getPointerAddressSpace()))
2516       indicateOptimisticFixpoint();
2517   }
2518 
2519   /// Determine if the underlying value may alias with the call site argument
2520   /// \p OtherArgNo of \p ICS (= the underlying call site).
2521   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2522                             const AAMemoryBehavior &MemBehaviorAA,
2523                             const CallBase &CB, unsigned OtherArgNo) {
2524     // We do not need to worry about aliasing with the underlying IRP.
2525     if (this->getCalleeArgNo() == (int)OtherArgNo)
2526       return false;
2527 
2528     // If it is not a pointer or pointer vector we do not alias.
2529     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2530     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2531       return false;
2532 
2533     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2534         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2535 
2536     // If the argument is readnone, there is no read-write aliasing.
2537     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2538       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2539       return false;
2540     }
2541 
2542     // If the argument is readonly and the underlying value is readonly, there
2543     // is no read-write aliasing.
2544     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2545     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2546       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2547       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2548       return false;
2549     }
2550 
2551     // We have to utilize actual alias analysis queries so we need the object.
2552     if (!AAR)
2553       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2554 
2555     // Try to rule it out at the call site.
2556     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2557     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2558                          "callsite arguments: "
2559                       << getAssociatedValue() << " " << *ArgOp << " => "
2560                       << (IsAliasing ? "" : "no-") << "alias \n");
2561 
2562     return IsAliasing;
2563   }
2564 
2565   bool
2566   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2567                                          const AAMemoryBehavior &MemBehaviorAA,
2568                                          const AANoAlias &NoAliasAA) {
2569     // We can deduce "noalias" if the following conditions hold.
2570     // (i)   Associated value is assumed to be noalias in the definition.
2571     // (ii)  Associated value is assumed to be no-capture in all the uses
2572     //       possibly executed before this callsite.
2573     // (iii) There is no other pointer argument which could alias with the
2574     //       value.
2575 
2576     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2577     if (!AssociatedValueIsNoAliasAtDef) {
2578       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2579                         << " is not no-alias at the definition\n");
2580       return false;
2581     }
2582 
2583     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2584 
2585     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2586     const Function *ScopeFn = VIRP.getAnchorScope();
2587     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2588     // Check whether the value is captured in the scope using AANoCapture.
2589     //      Look at CFG and check only uses possibly executed before this
2590     //      callsite.
2591     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2592       Instruction *UserI = cast<Instruction>(U.getUser());
2593 
2594       // If UserI is the curr instruction and there is a single potential use of
2595       // the value in UserI we allow the use.
2596       // TODO: We should inspect the operands and allow those that cannot alias
2597       //       with the value.
2598       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2599         return true;
2600 
2601       if (ScopeFn) {
2602         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2603             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2604 
2605         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2606           return true;
2607 
2608         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2609           if (CB->isArgOperand(&U)) {
2610 
2611             unsigned ArgNo = CB->getArgOperandNo(&U);
2612 
2613             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2614                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2615                 DepClassTy::OPTIONAL);
2616 
2617             if (NoCaptureAA.isAssumedNoCapture())
2618               return true;
2619           }
2620         }
2621       }
2622 
2623       // For cases which can potentially have more users
2624       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2625           isa<SelectInst>(U)) {
2626         Follow = true;
2627         return true;
2628       }
2629 
2630       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2631       return false;
2632     };
2633 
2634     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2635       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2636         LLVM_DEBUG(
2637             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2638                    << " cannot be noalias as it is potentially captured\n");
2639         return false;
2640       }
2641     }
2642     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2643 
2644     // Check there is no other pointer argument which could alias with the
2645     // value passed at this call site.
2646     // TODO: AbstractCallSite
2647     const auto &CB = cast<CallBase>(getAnchorValue());
2648     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2649          OtherArgNo++)
2650       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2651         return false;
2652 
2653     return true;
2654   }
2655 
2656   /// See AbstractAttribute::updateImpl(...).
2657   ChangeStatus updateImpl(Attributor &A) override {
2658     // If the argument is readnone we are done as there are no accesses via the
2659     // argument.
2660     auto &MemBehaviorAA =
2661         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2662     if (MemBehaviorAA.isAssumedReadNone()) {
2663       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2664       return ChangeStatus::UNCHANGED;
2665     }
2666 
2667     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2668     const auto &NoAliasAA =
2669         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2670 
2671     AAResults *AAR = nullptr;
2672     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2673                                                NoAliasAA)) {
2674       LLVM_DEBUG(
2675           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2676       return ChangeStatus::UNCHANGED;
2677     }
2678 
2679     return indicatePessimisticFixpoint();
2680   }
2681 
2682   /// See AbstractAttribute::trackStatistics()
2683   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2684 };
2685 
2686 /// NoAlias attribute for function return value.
2687 struct AANoAliasReturned final : AANoAliasImpl {
2688   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2689       : AANoAliasImpl(IRP, A) {}
2690 
2691   /// See AbstractAttribute::initialize(...).
2692   void initialize(Attributor &A) override {
2693     AANoAliasImpl::initialize(A);
2694     Function *F = getAssociatedFunction();
2695     if (!F || F->isDeclaration())
2696       indicatePessimisticFixpoint();
2697   }
2698 
2699   /// See AbstractAttribute::updateImpl(...).
2700   virtual ChangeStatus updateImpl(Attributor &A) override {
2701 
2702     auto CheckReturnValue = [&](Value &RV) -> bool {
2703       if (Constant *C = dyn_cast<Constant>(&RV))
2704         if (C->isNullValue() || isa<UndefValue>(C))
2705           return true;
2706 
2707       /// For now, we can only deduce noalias if we have call sites.
2708       /// FIXME: add more support.
2709       if (!isa<CallBase>(&RV))
2710         return false;
2711 
2712       const IRPosition &RVPos = IRPosition::value(RV);
2713       const auto &NoAliasAA =
2714           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2715       if (!NoAliasAA.isAssumedNoAlias())
2716         return false;
2717 
2718       const auto &NoCaptureAA =
2719           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2720       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2721     };
2722 
2723     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2724       return indicatePessimisticFixpoint();
2725 
2726     return ChangeStatus::UNCHANGED;
2727   }
2728 
2729   /// See AbstractAttribute::trackStatistics()
2730   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2731 };
2732 
2733 /// NoAlias attribute deduction for a call site return value.
2734 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2735   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2736       : AANoAliasImpl(IRP, A) {}
2737 
2738   /// See AbstractAttribute::initialize(...).
2739   void initialize(Attributor &A) override {
2740     AANoAliasImpl::initialize(A);
2741     Function *F = getAssociatedFunction();
2742     if (!F || F->isDeclaration())
2743       indicatePessimisticFixpoint();
2744   }
2745 
2746   /// See AbstractAttribute::updateImpl(...).
2747   ChangeStatus updateImpl(Attributor &A) override {
2748     // TODO: Once we have call site specific value information we can provide
2749     //       call site specific liveness information and then it makes
2750     //       sense to specialize attributes for call sites arguments instead of
2751     //       redirecting requests to the callee argument.
2752     Function *F = getAssociatedFunction();
2753     const IRPosition &FnPos = IRPosition::returned(*F);
2754     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2755     return clampStateAndIndicateChange(getState(), FnAA.getState());
2756   }
2757 
2758   /// See AbstractAttribute::trackStatistics()
2759   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2760 };
2761 
2762 /// -------------------AAIsDead Function Attribute-----------------------
2763 
2764 struct AAIsDeadValueImpl : public AAIsDead {
2765   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2766 
2767   /// See AAIsDead::isAssumedDead().
2768   bool isAssumedDead() const override { return getAssumed(); }
2769 
2770   /// See AAIsDead::isKnownDead().
2771   bool isKnownDead() const override { return getKnown(); }
2772 
2773   /// See AAIsDead::isAssumedDead(BasicBlock *).
2774   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2775 
2776   /// See AAIsDead::isKnownDead(BasicBlock *).
2777   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2778 
2779   /// See AAIsDead::isAssumedDead(Instruction *I).
2780   bool isAssumedDead(const Instruction *I) const override {
2781     return I == getCtxI() && isAssumedDead();
2782   }
2783 
2784   /// See AAIsDead::isKnownDead(Instruction *I).
2785   bool isKnownDead(const Instruction *I) const override {
2786     return isAssumedDead(I) && getKnown();
2787   }
2788 
2789   /// See AbstractAttribute::getAsStr().
2790   const std::string getAsStr() const override {
2791     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2792   }
2793 
2794   /// Check if all uses are assumed dead.
2795   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2796     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2797     // Explicitly set the dependence class to required because we want a long
2798     // chain of N dependent instructions to be considered live as soon as one is
2799     // without going through N update cycles. This is not required for
2800     // correctness.
2801     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2802   }
2803 
2804   /// Determine if \p I is assumed to be side-effect free.
2805   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2806     if (!I || wouldInstructionBeTriviallyDead(I))
2807       return true;
2808 
2809     auto *CB = dyn_cast<CallBase>(I);
2810     if (!CB || isa<IntrinsicInst>(CB))
2811       return false;
2812 
2813     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2814     const auto &NoUnwindAA =
2815         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2816     if (!NoUnwindAA.isAssumedNoUnwind())
2817       return false;
2818     if (!NoUnwindAA.isKnownNoUnwind())
2819       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2820 
2821     const auto &MemBehaviorAA =
2822         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2823     if (MemBehaviorAA.isAssumedReadOnly()) {
2824       if (!MemBehaviorAA.isKnownReadOnly())
2825         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2826       return true;
2827     }
2828     return false;
2829   }
2830 };
2831 
2832 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2833   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2834       : AAIsDeadValueImpl(IRP, A) {}
2835 
2836   /// See AbstractAttribute::initialize(...).
2837   void initialize(Attributor &A) override {
2838     if (isa<UndefValue>(getAssociatedValue())) {
2839       indicatePessimisticFixpoint();
2840       return;
2841     }
2842 
2843     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2844     if (!isAssumedSideEffectFree(A, I))
2845       indicatePessimisticFixpoint();
2846   }
2847 
2848   /// See AbstractAttribute::updateImpl(...).
2849   ChangeStatus updateImpl(Attributor &A) override {
2850     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2851     if (!isAssumedSideEffectFree(A, I))
2852       return indicatePessimisticFixpoint();
2853 
2854     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2855       return indicatePessimisticFixpoint();
2856     return ChangeStatus::UNCHANGED;
2857   }
2858 
2859   /// See AbstractAttribute::manifest(...).
2860   ChangeStatus manifest(Attributor &A) override {
2861     Value &V = getAssociatedValue();
2862     if (auto *I = dyn_cast<Instruction>(&V)) {
2863       // If we get here we basically know the users are all dead. We check if
2864       // isAssumedSideEffectFree returns true here again because it might not be
2865       // the case and only the users are dead but the instruction (=call) is
2866       // still needed.
2867       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2868         A.deleteAfterManifest(*I);
2869         return ChangeStatus::CHANGED;
2870       }
2871     }
2872     if (V.use_empty())
2873       return ChangeStatus::UNCHANGED;
2874 
2875     bool UsedAssumedInformation = false;
2876     Optional<Constant *> C =
2877         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2878     if (C.hasValue() && C.getValue())
2879       return ChangeStatus::UNCHANGED;
2880 
2881     // Replace the value with undef as it is dead but keep droppable uses around
2882     // as they provide information we don't want to give up on just yet.
2883     UndefValue &UV = *UndefValue::get(V.getType());
2884     bool AnyChange =
2885         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2886     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2887   }
2888 
2889   /// See AbstractAttribute::trackStatistics()
2890   void trackStatistics() const override {
2891     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2892   }
2893 };
2894 
2895 struct AAIsDeadArgument : public AAIsDeadFloating {
2896   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2897       : AAIsDeadFloating(IRP, A) {}
2898 
2899   /// See AbstractAttribute::initialize(...).
2900   void initialize(Attributor &A) override {
2901     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2902       indicatePessimisticFixpoint();
2903   }
2904 
2905   /// See AbstractAttribute::manifest(...).
2906   ChangeStatus manifest(Attributor &A) override {
2907     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2908     Argument &Arg = *getAssociatedArgument();
2909     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2910       if (A.registerFunctionSignatureRewrite(
2911               Arg, /* ReplacementTypes */ {},
2912               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2913               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2914         Arg.dropDroppableUses();
2915         return ChangeStatus::CHANGED;
2916       }
2917     return Changed;
2918   }
2919 
2920   /// See AbstractAttribute::trackStatistics()
2921   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2922 };
2923 
2924 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2925   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2926       : AAIsDeadValueImpl(IRP, A) {}
2927 
2928   /// See AbstractAttribute::initialize(...).
2929   void initialize(Attributor &A) override {
2930     if (isa<UndefValue>(getAssociatedValue()))
2931       indicatePessimisticFixpoint();
2932   }
2933 
2934   /// See AbstractAttribute::updateImpl(...).
2935   ChangeStatus updateImpl(Attributor &A) override {
2936     // TODO: Once we have call site specific value information we can provide
2937     //       call site specific liveness information and then it makes
2938     //       sense to specialize attributes for call sites arguments instead of
2939     //       redirecting requests to the callee argument.
2940     Argument *Arg = getAssociatedArgument();
2941     if (!Arg)
2942       return indicatePessimisticFixpoint();
2943     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2944     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2945     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2946   }
2947 
2948   /// See AbstractAttribute::manifest(...).
2949   ChangeStatus manifest(Attributor &A) override {
2950     CallBase &CB = cast<CallBase>(getAnchorValue());
2951     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2952     assert(!isa<UndefValue>(U.get()) &&
2953            "Expected undef values to be filtered out!");
2954     UndefValue &UV = *UndefValue::get(U->getType());
2955     if (A.changeUseAfterManifest(U, UV))
2956       return ChangeStatus::CHANGED;
2957     return ChangeStatus::UNCHANGED;
2958   }
2959 
2960   /// See AbstractAttribute::trackStatistics()
2961   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2962 };
2963 
2964 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2965   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2966       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2967 
2968   /// See AAIsDead::isAssumedDead().
2969   bool isAssumedDead() const override {
2970     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2971   }
2972 
2973   /// See AbstractAttribute::initialize(...).
2974   void initialize(Attributor &A) override {
2975     if (isa<UndefValue>(getAssociatedValue())) {
2976       indicatePessimisticFixpoint();
2977       return;
2978     }
2979 
2980     // We track this separately as a secondary state.
2981     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2982   }
2983 
2984   /// See AbstractAttribute::updateImpl(...).
2985   ChangeStatus updateImpl(Attributor &A) override {
2986     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2987     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2988       IsAssumedSideEffectFree = false;
2989       Changed = ChangeStatus::CHANGED;
2990     }
2991 
2992     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2993       return indicatePessimisticFixpoint();
2994     return Changed;
2995   }
2996 
2997   /// See AbstractAttribute::trackStatistics()
2998   void trackStatistics() const override {
2999     if (IsAssumedSideEffectFree)
3000       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3001     else
3002       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3003   }
3004 
3005   /// See AbstractAttribute::getAsStr().
3006   const std::string getAsStr() const override {
3007     return isAssumedDead()
3008                ? "assumed-dead"
3009                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3010   }
3011 
3012 private:
3013   bool IsAssumedSideEffectFree;
3014 };
3015 
3016 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3017   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3018       : AAIsDeadValueImpl(IRP, A) {}
3019 
3020   /// See AbstractAttribute::updateImpl(...).
3021   ChangeStatus updateImpl(Attributor &A) override {
3022 
3023     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3024                               {Instruction::Ret});
3025 
3026     auto PredForCallSite = [&](AbstractCallSite ACS) {
3027       if (ACS.isCallbackCall() || !ACS.getInstruction())
3028         return false;
3029       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3030     };
3031 
3032     bool AllCallSitesKnown;
3033     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3034                                 AllCallSitesKnown))
3035       return indicatePessimisticFixpoint();
3036 
3037     return ChangeStatus::UNCHANGED;
3038   }
3039 
3040   /// See AbstractAttribute::manifest(...).
3041   ChangeStatus manifest(Attributor &A) override {
3042     // TODO: Rewrite the signature to return void?
3043     bool AnyChange = false;
3044     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3045     auto RetInstPred = [&](Instruction &I) {
3046       ReturnInst &RI = cast<ReturnInst>(I);
3047       if (!isa<UndefValue>(RI.getReturnValue()))
3048         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3049       return true;
3050     };
3051     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3052     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3053   }
3054 
3055   /// See AbstractAttribute::trackStatistics()
3056   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3057 };
3058 
3059 struct AAIsDeadFunction : public AAIsDead {
3060   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3061 
3062   /// See AbstractAttribute::initialize(...).
3063   void initialize(Attributor &A) override {
3064     const Function *F = getAnchorScope();
3065     if (F && !F->isDeclaration()) {
3066       // We only want to compute liveness once. If the function is not part of
3067       // the SCC, skip it.
3068       if (A.isRunOn(*const_cast<Function *>(F))) {
3069         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3070         assumeLive(A, F->getEntryBlock());
3071       } else {
3072         indicatePessimisticFixpoint();
3073       }
3074     }
3075   }
3076 
3077   /// See AbstractAttribute::getAsStr().
3078   const std::string getAsStr() const override {
3079     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3080            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3081            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3082            std::to_string(KnownDeadEnds.size()) + "]";
3083   }
3084 
3085   /// See AbstractAttribute::manifest(...).
3086   ChangeStatus manifest(Attributor &A) override {
3087     assert(getState().isValidState() &&
3088            "Attempted to manifest an invalid state!");
3089 
3090     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3091     Function &F = *getAnchorScope();
3092 
3093     if (AssumedLiveBlocks.empty()) {
3094       A.deleteAfterManifest(F);
3095       return ChangeStatus::CHANGED;
3096     }
3097 
3098     // Flag to determine if we can change an invoke to a call assuming the
3099     // callee is nounwind. This is not possible if the personality of the
3100     // function allows to catch asynchronous exceptions.
3101     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3102 
3103     KnownDeadEnds.set_union(ToBeExploredFrom);
3104     for (const Instruction *DeadEndI : KnownDeadEnds) {
3105       auto *CB = dyn_cast<CallBase>(DeadEndI);
3106       if (!CB)
3107         continue;
3108       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3109           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3110       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3111       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3112         continue;
3113 
3114       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3115         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3116       else
3117         A.changeToUnreachableAfterManifest(
3118             const_cast<Instruction *>(DeadEndI->getNextNode()));
3119       HasChanged = ChangeStatus::CHANGED;
3120     }
3121 
3122     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3123     for (BasicBlock &BB : F)
3124       if (!AssumedLiveBlocks.count(&BB)) {
3125         A.deleteAfterManifest(BB);
3126         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3127       }
3128 
3129     return HasChanged;
3130   }
3131 
3132   /// See AbstractAttribute::updateImpl(...).
3133   ChangeStatus updateImpl(Attributor &A) override;
3134 
3135   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3136     return !AssumedLiveEdges.count(std::make_pair(From, To));
3137   }
3138 
3139   /// See AbstractAttribute::trackStatistics()
3140   void trackStatistics() const override {}
3141 
3142   /// Returns true if the function is assumed dead.
3143   bool isAssumedDead() const override { return false; }
3144 
3145   /// See AAIsDead::isKnownDead().
3146   bool isKnownDead() const override { return false; }
3147 
3148   /// See AAIsDead::isAssumedDead(BasicBlock *).
3149   bool isAssumedDead(const BasicBlock *BB) const override {
3150     assert(BB->getParent() == getAnchorScope() &&
3151            "BB must be in the same anchor scope function.");
3152 
3153     if (!getAssumed())
3154       return false;
3155     return !AssumedLiveBlocks.count(BB);
3156   }
3157 
3158   /// See AAIsDead::isKnownDead(BasicBlock *).
3159   bool isKnownDead(const BasicBlock *BB) const override {
3160     return getKnown() && isAssumedDead(BB);
3161   }
3162 
3163   /// See AAIsDead::isAssumed(Instruction *I).
3164   bool isAssumedDead(const Instruction *I) const override {
3165     assert(I->getParent()->getParent() == getAnchorScope() &&
3166            "Instruction must be in the same anchor scope function.");
3167 
3168     if (!getAssumed())
3169       return false;
3170 
3171     // If it is not in AssumedLiveBlocks then it for sure dead.
3172     // Otherwise, it can still be after noreturn call in a live block.
3173     if (!AssumedLiveBlocks.count(I->getParent()))
3174       return true;
3175 
3176     // If it is not after a liveness barrier it is live.
3177     const Instruction *PrevI = I->getPrevNode();
3178     while (PrevI) {
3179       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3180         return true;
3181       PrevI = PrevI->getPrevNode();
3182     }
3183     return false;
3184   }
3185 
3186   /// See AAIsDead::isKnownDead(Instruction *I).
3187   bool isKnownDead(const Instruction *I) const override {
3188     return getKnown() && isAssumedDead(I);
3189   }
3190 
3191   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3192   /// that internal function called from \p BB should now be looked at.
3193   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3194     if (!AssumedLiveBlocks.insert(&BB).second)
3195       return false;
3196 
3197     // We assume that all of BB is (probably) live now and if there are calls to
3198     // internal functions we will assume that those are now live as well. This
3199     // is a performance optimization for blocks with calls to a lot of internal
3200     // functions. It can however cause dead functions to be treated as live.
3201     for (const Instruction &I : BB)
3202       if (const auto *CB = dyn_cast<CallBase>(&I))
3203         if (const Function *F = CB->getCalledFunction())
3204           if (F->hasLocalLinkage())
3205             A.markLiveInternalFunction(*F);
3206     return true;
3207   }
3208 
3209   /// Collection of instructions that need to be explored again, e.g., we
3210   /// did assume they do not transfer control to (one of their) successors.
3211   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3212 
3213   /// Collection of instructions that are known to not transfer control.
3214   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3215 
3216   /// Collection of all assumed live edges
3217   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3218 
3219   /// Collection of all assumed live BasicBlocks.
3220   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3221 };
3222 
3223 static bool
3224 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3225                         AbstractAttribute &AA,
3226                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3227   const IRPosition &IPos = IRPosition::callsite_function(CB);
3228 
3229   const auto &NoReturnAA =
3230       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3231   if (NoReturnAA.isAssumedNoReturn())
3232     return !NoReturnAA.isKnownNoReturn();
3233   if (CB.isTerminator())
3234     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3235   else
3236     AliveSuccessors.push_back(CB.getNextNode());
3237   return false;
3238 }
3239 
3240 static bool
3241 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3242                         AbstractAttribute &AA,
3243                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3244   bool UsedAssumedInformation =
3245       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3246 
3247   // First, determine if we can change an invoke to a call assuming the
3248   // callee is nounwind. This is not possible if the personality of the
3249   // function allows to catch asynchronous exceptions.
3250   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3251     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3252   } else {
3253     const IRPosition &IPos = IRPosition::callsite_function(II);
3254     const auto &AANoUnw =
3255         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3256     if (AANoUnw.isAssumedNoUnwind()) {
3257       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3258     } else {
3259       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3260     }
3261   }
3262   return UsedAssumedInformation;
3263 }
3264 
3265 static bool
3266 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3267                         AbstractAttribute &AA,
3268                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3269   bool UsedAssumedInformation = false;
3270   if (BI.getNumSuccessors() == 1) {
3271     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3272   } else {
3273     Optional<ConstantInt *> CI = getAssumedConstantInt(
3274         A, *BI.getCondition(), AA, UsedAssumedInformation);
3275     if (!CI.hasValue()) {
3276       // No value yet, assume both edges are dead.
3277     } else if (CI.getValue()) {
3278       const BasicBlock *SuccBB =
3279           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3280       AliveSuccessors.push_back(&SuccBB->front());
3281     } else {
3282       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3283       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3284       UsedAssumedInformation = false;
3285     }
3286   }
3287   return UsedAssumedInformation;
3288 }
3289 
3290 static bool
3291 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3292                         AbstractAttribute &AA,
3293                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3294   bool UsedAssumedInformation = false;
3295   Optional<ConstantInt *> CI =
3296       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3297   if (!CI.hasValue()) {
3298     // No value yet, assume all edges are dead.
3299   } else if (CI.getValue()) {
3300     for (auto &CaseIt : SI.cases()) {
3301       if (CaseIt.getCaseValue() == CI.getValue()) {
3302         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3303         return UsedAssumedInformation;
3304       }
3305     }
3306     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3307     return UsedAssumedInformation;
3308   } else {
3309     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3310       AliveSuccessors.push_back(&SuccBB->front());
3311   }
3312   return UsedAssumedInformation;
3313 }
3314 
3315 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3316   ChangeStatus Change = ChangeStatus::UNCHANGED;
3317 
3318   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3319                     << getAnchorScope()->size() << "] BBs and "
3320                     << ToBeExploredFrom.size() << " exploration points and "
3321                     << KnownDeadEnds.size() << " known dead ends\n");
3322 
3323   // Copy and clear the list of instructions we need to explore from. It is
3324   // refilled with instructions the next update has to look at.
3325   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3326                                                ToBeExploredFrom.end());
3327   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3328 
3329   SmallVector<const Instruction *, 8> AliveSuccessors;
3330   while (!Worklist.empty()) {
3331     const Instruction *I = Worklist.pop_back_val();
3332     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3333 
3334     // Fast forward for uninteresting instructions. We could look for UB here
3335     // though.
3336     while (!I->isTerminator() && !isa<CallBase>(I)) {
3337       Change = ChangeStatus::CHANGED;
3338       I = I->getNextNode();
3339     }
3340 
3341     AliveSuccessors.clear();
3342 
3343     bool UsedAssumedInformation = false;
3344     switch (I->getOpcode()) {
3345     // TODO: look for (assumed) UB to backwards propagate "deadness".
3346     default:
3347       assert(I->isTerminator() &&
3348              "Expected non-terminators to be handled already!");
3349       for (const BasicBlock *SuccBB : successors(I->getParent()))
3350         AliveSuccessors.push_back(&SuccBB->front());
3351       break;
3352     case Instruction::Call:
3353       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3354                                                        *this, AliveSuccessors);
3355       break;
3356     case Instruction::Invoke:
3357       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3358                                                        *this, AliveSuccessors);
3359       break;
3360     case Instruction::Br:
3361       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3362                                                        *this, AliveSuccessors);
3363       break;
3364     case Instruction::Switch:
3365       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3366                                                        *this, AliveSuccessors);
3367       break;
3368     }
3369 
3370     if (UsedAssumedInformation) {
3371       NewToBeExploredFrom.insert(I);
3372     } else {
3373       Change = ChangeStatus::CHANGED;
3374       if (AliveSuccessors.empty() ||
3375           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3376         KnownDeadEnds.insert(I);
3377     }
3378 
3379     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3380                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3381                       << UsedAssumedInformation << "\n");
3382 
3383     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3384       if (!I->isTerminator()) {
3385         assert(AliveSuccessors.size() == 1 &&
3386                "Non-terminator expected to have a single successor!");
3387         Worklist.push_back(AliveSuccessor);
3388       } else {
3389         // record the assumed live edge
3390         AssumedLiveEdges.insert(
3391             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3392         if (assumeLive(A, *AliveSuccessor->getParent()))
3393           Worklist.push_back(AliveSuccessor);
3394       }
3395     }
3396   }
3397 
3398   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3399 
3400   // If we know everything is live there is no need to query for liveness.
3401   // Instead, indicating a pessimistic fixpoint will cause the state to be
3402   // "invalid" and all queries to be answered conservatively without lookups.
3403   // To be in this state we have to (1) finished the exploration and (3) not
3404   // discovered any non-trivial dead end and (2) not ruled unreachable code
3405   // dead.
3406   if (ToBeExploredFrom.empty() &&
3407       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3408       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3409         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3410       }))
3411     return indicatePessimisticFixpoint();
3412   return Change;
3413 }
3414 
3415 /// Liveness information for a call sites.
3416 struct AAIsDeadCallSite final : AAIsDeadFunction {
3417   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3418       : AAIsDeadFunction(IRP, A) {}
3419 
3420   /// See AbstractAttribute::initialize(...).
3421   void initialize(Attributor &A) override {
3422     // TODO: Once we have call site specific value information we can provide
3423     //       call site specific liveness information and then it makes
3424     //       sense to specialize attributes for call sites instead of
3425     //       redirecting requests to the callee.
3426     llvm_unreachable("Abstract attributes for liveness are not "
3427                      "supported for call sites yet!");
3428   }
3429 
3430   /// See AbstractAttribute::updateImpl(...).
3431   ChangeStatus updateImpl(Attributor &A) override {
3432     return indicatePessimisticFixpoint();
3433   }
3434 
3435   /// See AbstractAttribute::trackStatistics()
3436   void trackStatistics() const override {}
3437 };
3438 
3439 /// -------------------- Dereferenceable Argument Attribute --------------------
3440 
3441 template <>
3442 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3443                                                      const DerefState &R) {
3444   ChangeStatus CS0 =
3445       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3446   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3447   return CS0 | CS1;
3448 }
3449 
3450 struct AADereferenceableImpl : AADereferenceable {
3451   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3452       : AADereferenceable(IRP, A) {}
3453   using StateType = DerefState;
3454 
3455   /// See AbstractAttribute::initialize(...).
3456   void initialize(Attributor &A) override {
3457     SmallVector<Attribute, 4> Attrs;
3458     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3459              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3460     for (const Attribute &Attr : Attrs)
3461       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3462 
3463     const IRPosition &IRP = this->getIRPosition();
3464     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3465 
3466     bool CanBeNull;
3467     takeKnownDerefBytesMaximum(
3468         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3469             A.getDataLayout(), CanBeNull));
3470 
3471     bool IsFnInterface = IRP.isFnInterfaceKind();
3472     Function *FnScope = IRP.getAnchorScope();
3473     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3474       indicatePessimisticFixpoint();
3475       return;
3476     }
3477 
3478     if (Instruction *CtxI = getCtxI())
3479       followUsesInMBEC(*this, A, getState(), *CtxI);
3480   }
3481 
3482   /// See AbstractAttribute::getState()
3483   /// {
3484   StateType &getState() override { return *this; }
3485   const StateType &getState() const override { return *this; }
3486   /// }
3487 
3488   /// Helper function for collecting accessed bytes in must-be-executed-context
3489   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3490                               DerefState &State) {
3491     const Value *UseV = U->get();
3492     if (!UseV->getType()->isPointerTy())
3493       return;
3494 
3495     Type *PtrTy = UseV->getType();
3496     const DataLayout &DL = A.getDataLayout();
3497     int64_t Offset;
3498     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3499             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3500       if (Base == &getAssociatedValue() &&
3501           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3502         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3503         State.addAccessedBytes(Offset, Size);
3504       }
3505     }
3506   }
3507 
3508   /// See followUsesInMBEC
3509   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3510                        AADereferenceable::StateType &State) {
3511     bool IsNonNull = false;
3512     bool TrackUse = false;
3513     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3514         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3515     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3516                       << " for instruction " << *I << "\n");
3517 
3518     addAccessedBytesForUse(A, U, I, State);
3519     State.takeKnownDerefBytesMaximum(DerefBytes);
3520     return TrackUse;
3521   }
3522 
3523   /// See AbstractAttribute::manifest(...).
3524   ChangeStatus manifest(Attributor &A) override {
3525     ChangeStatus Change = AADereferenceable::manifest(A);
3526     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3527       removeAttrs({Attribute::DereferenceableOrNull});
3528       return ChangeStatus::CHANGED;
3529     }
3530     return Change;
3531   }
3532 
3533   void getDeducedAttributes(LLVMContext &Ctx,
3534                             SmallVectorImpl<Attribute> &Attrs) const override {
3535     // TODO: Add *_globally support
3536     if (isAssumedNonNull())
3537       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3538           Ctx, getAssumedDereferenceableBytes()));
3539     else
3540       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3541           Ctx, getAssumedDereferenceableBytes()));
3542   }
3543 
3544   /// See AbstractAttribute::getAsStr().
3545   const std::string getAsStr() const override {
3546     if (!getAssumedDereferenceableBytes())
3547       return "unknown-dereferenceable";
3548     return std::string("dereferenceable") +
3549            (isAssumedNonNull() ? "" : "_or_null") +
3550            (isAssumedGlobal() ? "_globally" : "") + "<" +
3551            std::to_string(getKnownDereferenceableBytes()) + "-" +
3552            std::to_string(getAssumedDereferenceableBytes()) + ">";
3553   }
3554 };
3555 
3556 /// Dereferenceable attribute for a floating value.
3557 struct AADereferenceableFloating : AADereferenceableImpl {
3558   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3559       : AADereferenceableImpl(IRP, A) {}
3560 
3561   /// See AbstractAttribute::updateImpl(...).
3562   ChangeStatus updateImpl(Attributor &A) override {
3563     const DataLayout &DL = A.getDataLayout();
3564 
3565     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3566                             bool Stripped) -> bool {
3567       unsigned IdxWidth =
3568           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3569       APInt Offset(IdxWidth, 0);
3570       const Value *Base =
3571           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3572 
3573       const auto &AA = A.getAAFor<AADereferenceable>(
3574           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3575       int64_t DerefBytes = 0;
3576       if (!Stripped && this == &AA) {
3577         // Use IR information if we did not strip anything.
3578         // TODO: track globally.
3579         bool CanBeNull;
3580         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3581         T.GlobalState.indicatePessimisticFixpoint();
3582       } else {
3583         const DerefState &DS = AA.getState();
3584         DerefBytes = DS.DerefBytesState.getAssumed();
3585         T.GlobalState &= DS.GlobalState;
3586       }
3587 
3588       // For now we do not try to "increase" dereferenceability due to negative
3589       // indices as we first have to come up with code to deal with loops and
3590       // for overflows of the dereferenceable bytes.
3591       int64_t OffsetSExt = Offset.getSExtValue();
3592       if (OffsetSExt < 0)
3593         OffsetSExt = 0;
3594 
3595       T.takeAssumedDerefBytesMinimum(
3596           std::max(int64_t(0), DerefBytes - OffsetSExt));
3597 
3598       if (this == &AA) {
3599         if (!Stripped) {
3600           // If nothing was stripped IR information is all we got.
3601           T.takeKnownDerefBytesMaximum(
3602               std::max(int64_t(0), DerefBytes - OffsetSExt));
3603           T.indicatePessimisticFixpoint();
3604         } else if (OffsetSExt > 0) {
3605           // If something was stripped but there is circular reasoning we look
3606           // for the offset. If it is positive we basically decrease the
3607           // dereferenceable bytes in a circluar loop now, which will simply
3608           // drive them down to the known value in a very slow way which we
3609           // can accelerate.
3610           T.indicatePessimisticFixpoint();
3611         }
3612       }
3613 
3614       return T.isValidState();
3615     };
3616 
3617     DerefState T;
3618     if (!genericValueTraversal<AADereferenceable, DerefState>(
3619             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3620       return indicatePessimisticFixpoint();
3621 
3622     return clampStateAndIndicateChange(getState(), T);
3623   }
3624 
3625   /// See AbstractAttribute::trackStatistics()
3626   void trackStatistics() const override {
3627     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3628   }
3629 };
3630 
3631 /// Dereferenceable attribute for a return value.
3632 struct AADereferenceableReturned final
3633     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3634   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3635       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3636             IRP, A) {}
3637 
3638   /// See AbstractAttribute::trackStatistics()
3639   void trackStatistics() const override {
3640     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3641   }
3642 };
3643 
3644 /// Dereferenceable attribute for an argument
3645 struct AADereferenceableArgument final
3646     : AAArgumentFromCallSiteArguments<AADereferenceable,
3647                                       AADereferenceableImpl> {
3648   using Base =
3649       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3650   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3651       : Base(IRP, A) {}
3652 
3653   /// See AbstractAttribute::trackStatistics()
3654   void trackStatistics() const override {
3655     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3656   }
3657 };
3658 
3659 /// Dereferenceable attribute for a call site argument.
3660 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3661   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3662       : AADereferenceableFloating(IRP, A) {}
3663 
3664   /// See AbstractAttribute::trackStatistics()
3665   void trackStatistics() const override {
3666     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3667   }
3668 };
3669 
3670 /// Dereferenceable attribute deduction for a call site return value.
3671 struct AADereferenceableCallSiteReturned final
3672     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3673   using Base =
3674       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3675   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3676       : Base(IRP, A) {}
3677 
3678   /// See AbstractAttribute::trackStatistics()
3679   void trackStatistics() const override {
3680     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3681   }
3682 };
3683 
3684 // ------------------------ Align Argument Attribute ------------------------
3685 
3686 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3687                                     Value &AssociatedValue, const Use *U,
3688                                     const Instruction *I, bool &TrackUse) {
3689   // We need to follow common pointer manipulation uses to the accesses they
3690   // feed into.
3691   if (isa<CastInst>(I)) {
3692     // Follow all but ptr2int casts.
3693     TrackUse = !isa<PtrToIntInst>(I);
3694     return 0;
3695   }
3696   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3697     if (GEP->hasAllConstantIndices())
3698       TrackUse = true;
3699     return 0;
3700   }
3701 
3702   MaybeAlign MA;
3703   if (const auto *CB = dyn_cast<CallBase>(I)) {
3704     if (CB->isBundleOperand(U) || CB->isCallee(U))
3705       return 0;
3706 
3707     unsigned ArgNo = CB->getArgOperandNo(U);
3708     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3709     // As long as we only use known information there is no need to track
3710     // dependences here.
3711     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3712     MA = MaybeAlign(AlignAA.getKnownAlign());
3713   }
3714 
3715   const DataLayout &DL = A.getDataLayout();
3716   const Value *UseV = U->get();
3717   if (auto *SI = dyn_cast<StoreInst>(I)) {
3718     if (SI->getPointerOperand() == UseV)
3719       MA = SI->getAlign();
3720   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3721     if (LI->getPointerOperand() == UseV)
3722       MA = LI->getAlign();
3723   }
3724 
3725   if (!MA || *MA <= QueryingAA.getKnownAlign())
3726     return 0;
3727 
3728   unsigned Alignment = MA->value();
3729   int64_t Offset;
3730 
3731   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3732     if (Base == &AssociatedValue) {
3733       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3734       // So we can say that the maximum power of two which is a divisor of
3735       // gcd(Offset, Alignment) is an alignment.
3736 
3737       uint32_t gcd =
3738           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3739       Alignment = llvm::PowerOf2Floor(gcd);
3740     }
3741   }
3742 
3743   return Alignment;
3744 }
3745 
3746 struct AAAlignImpl : AAAlign {
3747   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3748 
3749   /// See AbstractAttribute::initialize(...).
3750   void initialize(Attributor &A) override {
3751     SmallVector<Attribute, 4> Attrs;
3752     getAttrs({Attribute::Alignment}, Attrs);
3753     for (const Attribute &Attr : Attrs)
3754       takeKnownMaximum(Attr.getValueAsInt());
3755 
3756     Value &V = getAssociatedValue();
3757     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3758     //       use of the function pointer. This was caused by D73131. We want to
3759     //       avoid this for function pointers especially because we iterate
3760     //       their uses and int2ptr is not handled. It is not a correctness
3761     //       problem though!
3762     if (!V.getType()->getPointerElementType()->isFunctionTy())
3763       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3764 
3765     if (getIRPosition().isFnInterfaceKind() &&
3766         (!getAnchorScope() ||
3767          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3768       indicatePessimisticFixpoint();
3769       return;
3770     }
3771 
3772     if (Instruction *CtxI = getCtxI())
3773       followUsesInMBEC(*this, A, getState(), *CtxI);
3774   }
3775 
3776   /// See AbstractAttribute::manifest(...).
3777   ChangeStatus manifest(Attributor &A) override {
3778     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3779 
3780     // Check for users that allow alignment annotations.
3781     Value &AssociatedValue = getAssociatedValue();
3782     for (const Use &U : AssociatedValue.uses()) {
3783       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3784         if (SI->getPointerOperand() == &AssociatedValue)
3785           if (SI->getAlignment() < getAssumedAlign()) {
3786             STATS_DECLTRACK(AAAlign, Store,
3787                             "Number of times alignment added to a store");
3788             SI->setAlignment(Align(getAssumedAlign()));
3789             LoadStoreChanged = ChangeStatus::CHANGED;
3790           }
3791       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3792         if (LI->getPointerOperand() == &AssociatedValue)
3793           if (LI->getAlignment() < getAssumedAlign()) {
3794             LI->setAlignment(Align(getAssumedAlign()));
3795             STATS_DECLTRACK(AAAlign, Load,
3796                             "Number of times alignment added to a load");
3797             LoadStoreChanged = ChangeStatus::CHANGED;
3798           }
3799       }
3800     }
3801 
3802     ChangeStatus Changed = AAAlign::manifest(A);
3803 
3804     Align InheritAlign =
3805         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3806     if (InheritAlign >= getAssumedAlign())
3807       return LoadStoreChanged;
3808     return Changed | LoadStoreChanged;
3809   }
3810 
3811   // TODO: Provide a helper to determine the implied ABI alignment and check in
3812   //       the existing manifest method and a new one for AAAlignImpl that value
3813   //       to avoid making the alignment explicit if it did not improve.
3814 
3815   /// See AbstractAttribute::getDeducedAttributes
3816   virtual void
3817   getDeducedAttributes(LLVMContext &Ctx,
3818                        SmallVectorImpl<Attribute> &Attrs) const override {
3819     if (getAssumedAlign() > 1)
3820       Attrs.emplace_back(
3821           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3822   }
3823 
3824   /// See followUsesInMBEC
3825   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3826                        AAAlign::StateType &State) {
3827     bool TrackUse = false;
3828 
3829     unsigned int KnownAlign =
3830         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3831     State.takeKnownMaximum(KnownAlign);
3832 
3833     return TrackUse;
3834   }
3835 
3836   /// See AbstractAttribute::getAsStr().
3837   const std::string getAsStr() const override {
3838     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3839                                 "-" + std::to_string(getAssumedAlign()) + ">")
3840                              : "unknown-align";
3841   }
3842 };
3843 
3844 /// Align attribute for a floating value.
3845 struct AAAlignFloating : AAAlignImpl {
3846   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3847 
3848   /// See AbstractAttribute::updateImpl(...).
3849   ChangeStatus updateImpl(Attributor &A) override {
3850     const DataLayout &DL = A.getDataLayout();
3851 
3852     auto VisitValueCB = [&](Value &V, const Instruction *,
3853                             AAAlign::StateType &T, bool Stripped) -> bool {
3854       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3855                                            DepClassTy::REQUIRED);
3856       if (!Stripped && this == &AA) {
3857         int64_t Offset;
3858         unsigned Alignment = 1;
3859         if (const Value *Base =
3860                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3861           Align PA = Base->getPointerAlignment(DL);
3862           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3863           // So we can say that the maximum power of two which is a divisor of
3864           // gcd(Offset, Alignment) is an alignment.
3865 
3866           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3867                                                uint32_t(PA.value()));
3868           Alignment = llvm::PowerOf2Floor(gcd);
3869         } else {
3870           Alignment = V.getPointerAlignment(DL).value();
3871         }
3872         // Use only IR information if we did not strip anything.
3873         T.takeKnownMaximum(Alignment);
3874         T.indicatePessimisticFixpoint();
3875       } else {
3876         // Use abstract attribute information.
3877         const AAAlign::StateType &DS = AA.getState();
3878         T ^= DS;
3879       }
3880       return T.isValidState();
3881     };
3882 
3883     StateType T;
3884     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3885                                                    VisitValueCB, getCtxI()))
3886       return indicatePessimisticFixpoint();
3887 
3888     // TODO: If we know we visited all incoming values, thus no are assumed
3889     // dead, we can take the known information from the state T.
3890     return clampStateAndIndicateChange(getState(), T);
3891   }
3892 
3893   /// See AbstractAttribute::trackStatistics()
3894   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3895 };
3896 
3897 /// Align attribute for function return value.
3898 struct AAAlignReturned final
3899     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3900   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3901   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3902 
3903   /// See AbstractAttribute::initialize(...).
3904   void initialize(Attributor &A) override {
3905     Base::initialize(A);
3906     Function *F = getAssociatedFunction();
3907     if (!F || F->isDeclaration())
3908       indicatePessimisticFixpoint();
3909   }
3910 
3911   /// See AbstractAttribute::trackStatistics()
3912   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3913 };
3914 
3915 /// Align attribute for function argument.
3916 struct AAAlignArgument final
3917     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3918   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3919   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3920 
3921   /// See AbstractAttribute::manifest(...).
3922   ChangeStatus manifest(Attributor &A) override {
3923     // If the associated argument is involved in a must-tail call we give up
3924     // because we would need to keep the argument alignments of caller and
3925     // callee in-sync. Just does not seem worth the trouble right now.
3926     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3927       return ChangeStatus::UNCHANGED;
3928     return Base::manifest(A);
3929   }
3930 
3931   /// See AbstractAttribute::trackStatistics()
3932   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3933 };
3934 
3935 struct AAAlignCallSiteArgument final : AAAlignFloating {
3936   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3937       : AAAlignFloating(IRP, A) {}
3938 
3939   /// See AbstractAttribute::manifest(...).
3940   ChangeStatus manifest(Attributor &A) override {
3941     // If the associated argument is involved in a must-tail call we give up
3942     // because we would need to keep the argument alignments of caller and
3943     // callee in-sync. Just does not seem worth the trouble right now.
3944     if (Argument *Arg = getAssociatedArgument())
3945       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3946         return ChangeStatus::UNCHANGED;
3947     ChangeStatus Changed = AAAlignImpl::manifest(A);
3948     Align InheritAlign =
3949         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3950     if (InheritAlign >= getAssumedAlign())
3951       Changed = ChangeStatus::UNCHANGED;
3952     return Changed;
3953   }
3954 
3955   /// See AbstractAttribute::updateImpl(Attributor &A).
3956   ChangeStatus updateImpl(Attributor &A) override {
3957     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3958     if (Argument *Arg = getAssociatedArgument()) {
3959       // We only take known information from the argument
3960       // so we do not need to track a dependence.
3961       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3962           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3963       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3964     }
3965     return Changed;
3966   }
3967 
3968   /// See AbstractAttribute::trackStatistics()
3969   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3970 };
3971 
3972 /// Align attribute deduction for a call site return value.
3973 struct AAAlignCallSiteReturned final
3974     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3975   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3976   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3977       : Base(IRP, A) {}
3978 
3979   /// See AbstractAttribute::initialize(...).
3980   void initialize(Attributor &A) override {
3981     Base::initialize(A);
3982     Function *F = getAssociatedFunction();
3983     if (!F || F->isDeclaration())
3984       indicatePessimisticFixpoint();
3985   }
3986 
3987   /// See AbstractAttribute::trackStatistics()
3988   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3989 };
3990 
3991 /// ------------------ Function No-Return Attribute ----------------------------
3992 struct AANoReturnImpl : public AANoReturn {
3993   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3994 
3995   /// See AbstractAttribute::initialize(...).
3996   void initialize(Attributor &A) override {
3997     AANoReturn::initialize(A);
3998     Function *F = getAssociatedFunction();
3999     if (!F || F->isDeclaration())
4000       indicatePessimisticFixpoint();
4001   }
4002 
4003   /// See AbstractAttribute::getAsStr().
4004   const std::string getAsStr() const override {
4005     return getAssumed() ? "noreturn" : "may-return";
4006   }
4007 
4008   /// See AbstractAttribute::updateImpl(Attributor &A).
4009   virtual ChangeStatus updateImpl(Attributor &A) override {
4010     auto CheckForNoReturn = [](Instruction &) { return false; };
4011     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4012                                    {(unsigned)Instruction::Ret}))
4013       return indicatePessimisticFixpoint();
4014     return ChangeStatus::UNCHANGED;
4015   }
4016 };
4017 
4018 struct AANoReturnFunction final : AANoReturnImpl {
4019   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4020       : AANoReturnImpl(IRP, A) {}
4021 
4022   /// See AbstractAttribute::trackStatistics()
4023   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4024 };
4025 
4026 /// NoReturn attribute deduction for a call sites.
4027 struct AANoReturnCallSite final : AANoReturnImpl {
4028   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4029       : AANoReturnImpl(IRP, A) {}
4030 
4031   /// See AbstractAttribute::initialize(...).
4032   void initialize(Attributor &A) override {
4033     AANoReturnImpl::initialize(A);
4034     if (Function *F = getAssociatedFunction()) {
4035       const IRPosition &FnPos = IRPosition::function(*F);
4036       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4037       if (!FnAA.isAssumedNoReturn())
4038         indicatePessimisticFixpoint();
4039     }
4040   }
4041 
4042   /// See AbstractAttribute::updateImpl(...).
4043   ChangeStatus updateImpl(Attributor &A) override {
4044     // TODO: Once we have call site specific value information we can provide
4045     //       call site specific liveness information and then it makes
4046     //       sense to specialize attributes for call sites arguments instead of
4047     //       redirecting requests to the callee argument.
4048     Function *F = getAssociatedFunction();
4049     const IRPosition &FnPos = IRPosition::function(*F);
4050     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4051     return clampStateAndIndicateChange(getState(), FnAA.getState());
4052   }
4053 
4054   /// See AbstractAttribute::trackStatistics()
4055   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4056 };
4057 
4058 /// ----------------------- Variable Capturing ---------------------------------
4059 
4060 /// A class to hold the state of for no-capture attributes.
4061 struct AANoCaptureImpl : public AANoCapture {
4062   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4063 
4064   /// See AbstractAttribute::initialize(...).
4065   void initialize(Attributor &A) override {
4066     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4067       indicateOptimisticFixpoint();
4068       return;
4069     }
4070     Function *AnchorScope = getAnchorScope();
4071     if (isFnInterfaceKind() &&
4072         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4073       indicatePessimisticFixpoint();
4074       return;
4075     }
4076 
4077     // You cannot "capture" null in the default address space.
4078     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4079         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4080       indicateOptimisticFixpoint();
4081       return;
4082     }
4083 
4084     const Function *F =
4085         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4086 
4087     // Check what state the associated function can actually capture.
4088     if (F)
4089       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4090     else
4091       indicatePessimisticFixpoint();
4092   }
4093 
4094   /// See AbstractAttribute::updateImpl(...).
4095   ChangeStatus updateImpl(Attributor &A) override;
4096 
4097   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4098   virtual void
4099   getDeducedAttributes(LLVMContext &Ctx,
4100                        SmallVectorImpl<Attribute> &Attrs) const override {
4101     if (!isAssumedNoCaptureMaybeReturned())
4102       return;
4103 
4104     if (isArgumentPosition()) {
4105       if (isAssumedNoCapture())
4106         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4107       else if (ManifestInternal)
4108         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4109     }
4110   }
4111 
4112   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4113   /// depending on the ability of the function associated with \p IRP to capture
4114   /// state in memory and through "returning/throwing", respectively.
4115   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4116                                                    const Function &F,
4117                                                    BitIntegerState &State) {
4118     // TODO: Once we have memory behavior attributes we should use them here.
4119 
4120     // If we know we cannot communicate or write to memory, we do not care about
4121     // ptr2int anymore.
4122     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4123         F.getReturnType()->isVoidTy()) {
4124       State.addKnownBits(NO_CAPTURE);
4125       return;
4126     }
4127 
4128     // A function cannot capture state in memory if it only reads memory, it can
4129     // however return/throw state and the state might be influenced by the
4130     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4131     if (F.onlyReadsMemory())
4132       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4133 
4134     // A function cannot communicate state back if it does not through
4135     // exceptions and doesn not return values.
4136     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4137       State.addKnownBits(NOT_CAPTURED_IN_RET);
4138 
4139     // Check existing "returned" attributes.
4140     int ArgNo = IRP.getCalleeArgNo();
4141     if (F.doesNotThrow() && ArgNo >= 0) {
4142       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4143         if (F.hasParamAttribute(u, Attribute::Returned)) {
4144           if (u == unsigned(ArgNo))
4145             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4146           else if (F.onlyReadsMemory())
4147             State.addKnownBits(NO_CAPTURE);
4148           else
4149             State.addKnownBits(NOT_CAPTURED_IN_RET);
4150           break;
4151         }
4152     }
4153   }
4154 
4155   /// See AbstractState::getAsStr().
4156   const std::string getAsStr() const override {
4157     if (isKnownNoCapture())
4158       return "known not-captured";
4159     if (isAssumedNoCapture())
4160       return "assumed not-captured";
4161     if (isKnownNoCaptureMaybeReturned())
4162       return "known not-captured-maybe-returned";
4163     if (isAssumedNoCaptureMaybeReturned())
4164       return "assumed not-captured-maybe-returned";
4165     return "assumed-captured";
4166   }
4167 };
4168 
4169 /// Attributor-aware capture tracker.
4170 struct AACaptureUseTracker final : public CaptureTracker {
4171 
4172   /// Create a capture tracker that can lookup in-flight abstract attributes
4173   /// through the Attributor \p A.
4174   ///
4175   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4176   /// search is stopped. If a use leads to a return instruction,
4177   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4178   /// If a use leads to a ptr2int which may capture the value,
4179   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4180   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4181   /// set. All values in \p PotentialCopies are later tracked as well. For every
4182   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4183   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4184   /// conservatively set to true.
4185   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4186                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4187                       SmallVectorImpl<const Value *> &PotentialCopies,
4188                       unsigned &RemainingUsesToExplore)
4189       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4190         PotentialCopies(PotentialCopies),
4191         RemainingUsesToExplore(RemainingUsesToExplore) {}
4192 
4193   /// Determine if \p V maybe captured. *Also updates the state!*
4194   bool valueMayBeCaptured(const Value *V) {
4195     if (V->getType()->isPointerTy()) {
4196       PointerMayBeCaptured(V, this);
4197     } else {
4198       State.indicatePessimisticFixpoint();
4199     }
4200     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4201   }
4202 
4203   /// See CaptureTracker::tooManyUses().
4204   void tooManyUses() override {
4205     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4206   }
4207 
4208   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4209     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4210       return true;
4211     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4212         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4213     return DerefAA.getAssumedDereferenceableBytes();
4214   }
4215 
4216   /// See CaptureTracker::captured(...).
4217   bool captured(const Use *U) override {
4218     Instruction *UInst = cast<Instruction>(U->getUser());
4219     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4220                       << "\n");
4221 
4222     // Because we may reuse the tracker multiple times we keep track of the
4223     // number of explored uses ourselves as well.
4224     if (RemainingUsesToExplore-- == 0) {
4225       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4226       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4227                           /* Return */ true);
4228     }
4229 
4230     // Deal with ptr2int by following uses.
4231     if (isa<PtrToIntInst>(UInst)) {
4232       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4233       return valueMayBeCaptured(UInst);
4234     }
4235 
4236     // Explicitly catch return instructions.
4237     if (isa<ReturnInst>(UInst))
4238       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4239                           /* Return */ true);
4240 
4241     // For now we only use special logic for call sites. However, the tracker
4242     // itself knows about a lot of other non-capturing cases already.
4243     auto *CB = dyn_cast<CallBase>(UInst);
4244     if (!CB || !CB->isArgOperand(U))
4245       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4246                           /* Return */ true);
4247 
4248     unsigned ArgNo = CB->getArgOperandNo(U);
4249     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4250     // If we have a abstract no-capture attribute for the argument we can use
4251     // it to justify a non-capture attribute here. This allows recursion!
4252     auto &ArgNoCaptureAA =
4253         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4254     if (ArgNoCaptureAA.isAssumedNoCapture())
4255       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4256                           /* Return */ false);
4257     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4258       addPotentialCopy(*CB);
4259       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4260                           /* Return */ false);
4261     }
4262 
4263     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4264     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4265                         /* Return */ true);
4266   }
4267 
4268   /// Register \p CS as potential copy of the value we are checking.
4269   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4270 
4271   /// See CaptureTracker::shouldExplore(...).
4272   bool shouldExplore(const Use *U) override {
4273     // Check liveness and ignore droppable users.
4274     return !U->getUser()->isDroppable() &&
4275            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4276   }
4277 
4278   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4279   /// \p CapturedInRet, then return the appropriate value for use in the
4280   /// CaptureTracker::captured() interface.
4281   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4282                     bool CapturedInRet) {
4283     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4284                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4285     if (CapturedInMem)
4286       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4287     if (CapturedInInt)
4288       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4289     if (CapturedInRet)
4290       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4291     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4292   }
4293 
4294 private:
4295   /// The attributor providing in-flight abstract attributes.
4296   Attributor &A;
4297 
4298   /// The abstract attribute currently updated.
4299   AANoCapture &NoCaptureAA;
4300 
4301   /// The abstract liveness state.
4302   const AAIsDead &IsDeadAA;
4303 
4304   /// The state currently updated.
4305   AANoCapture::StateType &State;
4306 
4307   /// Set of potential copies of the tracked value.
4308   SmallVectorImpl<const Value *> &PotentialCopies;
4309 
4310   /// Global counter to limit the number of explored uses.
4311   unsigned &RemainingUsesToExplore;
4312 };
4313 
4314 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4315   const IRPosition &IRP = getIRPosition();
4316   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4317                                         : &IRP.getAssociatedValue();
4318   if (!V)
4319     return indicatePessimisticFixpoint();
4320 
4321   const Function *F =
4322       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4323   assert(F && "Expected a function!");
4324   const IRPosition &FnPos = IRPosition::function(*F);
4325   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4326 
4327   AANoCapture::StateType T;
4328 
4329   // Readonly means we cannot capture through memory.
4330   const auto &FnMemAA =
4331       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4332   if (FnMemAA.isAssumedReadOnly()) {
4333     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4334     if (FnMemAA.isKnownReadOnly())
4335       addKnownBits(NOT_CAPTURED_IN_MEM);
4336     else
4337       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4338   }
4339 
4340   // Make sure all returned values are different than the underlying value.
4341   // TODO: we could do this in a more sophisticated way inside
4342   //       AAReturnedValues, e.g., track all values that escape through returns
4343   //       directly somehow.
4344   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4345     bool SeenConstant = false;
4346     for (auto &It : RVAA.returned_values()) {
4347       if (isa<Constant>(It.first)) {
4348         if (SeenConstant)
4349           return false;
4350         SeenConstant = true;
4351       } else if (!isa<Argument>(It.first) ||
4352                  It.first == getAssociatedArgument())
4353         return false;
4354     }
4355     return true;
4356   };
4357 
4358   const auto &NoUnwindAA =
4359       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4360   if (NoUnwindAA.isAssumedNoUnwind()) {
4361     bool IsVoidTy = F->getReturnType()->isVoidTy();
4362     const AAReturnedValues *RVAA =
4363         IsVoidTy ? nullptr
4364                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4365 
4366                                                  DepClassTy::OPTIONAL);
4367     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4368       T.addKnownBits(NOT_CAPTURED_IN_RET);
4369       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4370         return ChangeStatus::UNCHANGED;
4371       if (NoUnwindAA.isKnownNoUnwind() &&
4372           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4373         addKnownBits(NOT_CAPTURED_IN_RET);
4374         if (isKnown(NOT_CAPTURED_IN_MEM))
4375           return indicateOptimisticFixpoint();
4376       }
4377     }
4378   }
4379 
4380   // Use the CaptureTracker interface and logic with the specialized tracker,
4381   // defined in AACaptureUseTracker, that can look at in-flight abstract
4382   // attributes and directly updates the assumed state.
4383   SmallVector<const Value *, 4> PotentialCopies;
4384   unsigned RemainingUsesToExplore =
4385       getDefaultMaxUsesToExploreForCaptureTracking();
4386   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4387                               RemainingUsesToExplore);
4388 
4389   // Check all potential copies of the associated value until we can assume
4390   // none will be captured or we have to assume at least one might be.
4391   unsigned Idx = 0;
4392   PotentialCopies.push_back(V);
4393   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4394     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4395 
4396   AANoCapture::StateType &S = getState();
4397   auto Assumed = S.getAssumed();
4398   S.intersectAssumedBits(T.getAssumed());
4399   if (!isAssumedNoCaptureMaybeReturned())
4400     return indicatePessimisticFixpoint();
4401   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4402                                    : ChangeStatus::CHANGED;
4403 }
4404 
4405 /// NoCapture attribute for function arguments.
4406 struct AANoCaptureArgument final : AANoCaptureImpl {
4407   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4408       : AANoCaptureImpl(IRP, A) {}
4409 
4410   /// See AbstractAttribute::trackStatistics()
4411   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4412 };
4413 
4414 /// NoCapture attribute for call site arguments.
4415 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4416   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4417       : AANoCaptureImpl(IRP, A) {}
4418 
4419   /// See AbstractAttribute::initialize(...).
4420   void initialize(Attributor &A) override {
4421     if (Argument *Arg = getAssociatedArgument())
4422       if (Arg->hasByValAttr())
4423         indicateOptimisticFixpoint();
4424     AANoCaptureImpl::initialize(A);
4425   }
4426 
4427   /// See AbstractAttribute::updateImpl(...).
4428   ChangeStatus updateImpl(Attributor &A) override {
4429     // TODO: Once we have call site specific value information we can provide
4430     //       call site specific liveness information and then it makes
4431     //       sense to specialize attributes for call sites arguments instead of
4432     //       redirecting requests to the callee argument.
4433     Argument *Arg = getAssociatedArgument();
4434     if (!Arg)
4435       return indicatePessimisticFixpoint();
4436     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4437     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4438     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4439   }
4440 
4441   /// See AbstractAttribute::trackStatistics()
4442   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4443 };
4444 
4445 /// NoCapture attribute for floating values.
4446 struct AANoCaptureFloating final : AANoCaptureImpl {
4447   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4448       : AANoCaptureImpl(IRP, A) {}
4449 
4450   /// See AbstractAttribute::trackStatistics()
4451   void trackStatistics() const override {
4452     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4453   }
4454 };
4455 
4456 /// NoCapture attribute for function return value.
4457 struct AANoCaptureReturned final : AANoCaptureImpl {
4458   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4459       : AANoCaptureImpl(IRP, A) {
4460     llvm_unreachable("NoCapture is not applicable to function returns!");
4461   }
4462 
4463   /// See AbstractAttribute::initialize(...).
4464   void initialize(Attributor &A) override {
4465     llvm_unreachable("NoCapture is not applicable to function returns!");
4466   }
4467 
4468   /// See AbstractAttribute::updateImpl(...).
4469   ChangeStatus updateImpl(Attributor &A) override {
4470     llvm_unreachable("NoCapture is not applicable to function returns!");
4471   }
4472 
4473   /// See AbstractAttribute::trackStatistics()
4474   void trackStatistics() const override {}
4475 };
4476 
4477 /// NoCapture attribute deduction for a call site return value.
4478 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4479   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4480       : AANoCaptureImpl(IRP, A) {}
4481 
4482   /// See AbstractAttribute::initialize(...).
4483   void initialize(Attributor &A) override {
4484     const Function *F = getAnchorScope();
4485     // Check what state the associated function can actually capture.
4486     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4487   }
4488 
4489   /// See AbstractAttribute::trackStatistics()
4490   void trackStatistics() const override {
4491     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4492   }
4493 };
4494 
4495 /// ------------------ Value Simplify Attribute ----------------------------
4496 struct AAValueSimplifyImpl : AAValueSimplify {
4497   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4498       : AAValueSimplify(IRP, A) {}
4499 
4500   /// See AbstractAttribute::initialize(...).
4501   void initialize(Attributor &A) override {
4502     if (getAssociatedValue().getType()->isVoidTy())
4503       indicatePessimisticFixpoint();
4504   }
4505 
4506   /// See AbstractAttribute::getAsStr().
4507   const std::string getAsStr() const override {
4508     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4509                         : "not-simple";
4510   }
4511 
4512   /// See AbstractAttribute::trackStatistics()
4513   void trackStatistics() const override {}
4514 
4515   /// See AAValueSimplify::getAssumedSimplifiedValue()
4516   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4517     if (!getAssumed())
4518       return const_cast<Value *>(&getAssociatedValue());
4519     return SimplifiedAssociatedValue;
4520   }
4521 
4522   /// Helper function for querying AAValueSimplify and updating candicate.
4523   /// \param QueryingValue Value trying to unify with SimplifiedValue
4524   /// \param AccumulatedSimplifiedValue Current simplification result.
4525   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4526                              Value &QueryingValue,
4527                              Optional<Value *> &AccumulatedSimplifiedValue) {
4528     // FIXME: Add a typecast support.
4529 
4530     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4531         QueryingAA, IRPosition::value(QueryingValue), DepClassTy::REQUIRED);
4532 
4533     Optional<Value *> QueryingValueSimplified =
4534         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4535 
4536     if (!QueryingValueSimplified.hasValue())
4537       return true;
4538 
4539     if (!QueryingValueSimplified.getValue())
4540       return false;
4541 
4542     Value &QueryingValueSimplifiedUnwrapped =
4543         *QueryingValueSimplified.getValue();
4544 
4545     if (AccumulatedSimplifiedValue.hasValue() &&
4546         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4547         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4548       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4549     if (AccumulatedSimplifiedValue.hasValue() &&
4550         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4551       return true;
4552 
4553     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4554                       << " is assumed to be "
4555                       << QueryingValueSimplifiedUnwrapped << "\n");
4556 
4557     AccumulatedSimplifiedValue = QueryingValueSimplified;
4558     return true;
4559   }
4560 
4561   /// Returns a candidate is found or not
4562   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4563     if (!getAssociatedValue().getType()->isIntegerTy())
4564       return false;
4565 
4566     const auto &AA =
4567         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4568 
4569     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4570 
4571     if (!COpt.hasValue()) {
4572       SimplifiedAssociatedValue = llvm::None;
4573       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4574       return true;
4575     }
4576     if (auto *C = COpt.getValue()) {
4577       SimplifiedAssociatedValue = C;
4578       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4579       return true;
4580     }
4581     return false;
4582   }
4583 
4584   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4585     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4586       return true;
4587     if (askSimplifiedValueFor<AAPotentialValues>(A))
4588       return true;
4589     return false;
4590   }
4591 
4592   /// See AbstractAttribute::manifest(...).
4593   ChangeStatus manifest(Attributor &A) override {
4594     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4595 
4596     if (SimplifiedAssociatedValue.hasValue() &&
4597         !SimplifiedAssociatedValue.getValue())
4598       return Changed;
4599 
4600     Value &V = getAssociatedValue();
4601     auto *C = SimplifiedAssociatedValue.hasValue()
4602                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4603                   : UndefValue::get(V.getType());
4604     if (C) {
4605       // We can replace the AssociatedValue with the constant.
4606       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4607         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4608                           << " :: " << *this << "\n");
4609         if (A.changeValueAfterManifest(V, *C))
4610           Changed = ChangeStatus::CHANGED;
4611       }
4612     }
4613 
4614     return Changed | AAValueSimplify::manifest(A);
4615   }
4616 
4617   /// See AbstractState::indicatePessimisticFixpoint(...).
4618   ChangeStatus indicatePessimisticFixpoint() override {
4619     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4620     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4621     SimplifiedAssociatedValue = &getAssociatedValue();
4622     indicateOptimisticFixpoint();
4623     return ChangeStatus::CHANGED;
4624   }
4625 
4626 protected:
4627   // An assumed simplified value. Initially, it is set to Optional::None, which
4628   // means that the value is not clear under current assumption. If in the
4629   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4630   // returns orignal associated value.
4631   Optional<Value *> SimplifiedAssociatedValue;
4632 };
4633 
4634 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4635   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4636       : AAValueSimplifyImpl(IRP, A) {}
4637 
4638   void initialize(Attributor &A) override {
4639     AAValueSimplifyImpl::initialize(A);
4640     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4641       indicatePessimisticFixpoint();
4642     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4643                  Attribute::StructRet, Attribute::Nest},
4644                 /* IgnoreSubsumingPositions */ true))
4645       indicatePessimisticFixpoint();
4646 
4647     // FIXME: This is a hack to prevent us from propagating function poiner in
4648     // the new pass manager CGSCC pass as it creates call edges the
4649     // CallGraphUpdater cannot handle yet.
4650     Value &V = getAssociatedValue();
4651     if (V.getType()->isPointerTy() &&
4652         V.getType()->getPointerElementType()->isFunctionTy() &&
4653         !A.isModulePass())
4654       indicatePessimisticFixpoint();
4655   }
4656 
4657   /// See AbstractAttribute::updateImpl(...).
4658   ChangeStatus updateImpl(Attributor &A) override {
4659     // Byval is only replacable if it is readonly otherwise we would write into
4660     // the replaced value and not the copy that byval creates implicitly.
4661     Argument *Arg = getAssociatedArgument();
4662     if (Arg->hasByValAttr()) {
4663       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4664       //       there is no race by not copying a constant byval.
4665       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4666                                                        DepClassTy::REQUIRED);
4667       if (!MemAA.isAssumedReadOnly())
4668         return indicatePessimisticFixpoint();
4669     }
4670 
4671     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4672 
4673     auto PredForCallSite = [&](AbstractCallSite ACS) {
4674       const IRPosition &ACSArgPos =
4675           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4676       // Check if a coresponding argument was found or if it is on not
4677       // associated (which can happen for callback calls).
4678       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4679         return false;
4680 
4681       // We can only propagate thread independent values through callbacks.
4682       // This is different to direct/indirect call sites because for them we
4683       // know the thread executing the caller and callee is the same. For
4684       // callbacks this is not guaranteed, thus a thread dependent value could
4685       // be different for the caller and callee, making it invalid to propagate.
4686       Value &ArgOp = ACSArgPos.getAssociatedValue();
4687       if (ACS.isCallbackCall())
4688         if (auto *C = dyn_cast<Constant>(&ArgOp))
4689           if (C->isThreadDependent())
4690             return false;
4691       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4692     };
4693 
4694     bool AllCallSitesKnown;
4695     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4696                                 AllCallSitesKnown))
4697       if (!askSimplifiedValueForOtherAAs(A))
4698         return indicatePessimisticFixpoint();
4699 
4700     // If a candicate was found in this update, return CHANGED.
4701     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4702                ? ChangeStatus::UNCHANGED
4703                : ChangeStatus ::CHANGED;
4704   }
4705 
4706   /// See AbstractAttribute::trackStatistics()
4707   void trackStatistics() const override {
4708     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4709   }
4710 };
4711 
4712 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4713   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4714       : AAValueSimplifyImpl(IRP, A) {}
4715 
4716   /// See AbstractAttribute::updateImpl(...).
4717   ChangeStatus updateImpl(Attributor &A) override {
4718     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4719 
4720     auto PredForReturned = [&](Value &V) {
4721       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4722     };
4723 
4724     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4725       if (!askSimplifiedValueForOtherAAs(A))
4726         return indicatePessimisticFixpoint();
4727 
4728     // If a candicate was found in this update, return CHANGED.
4729     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4730                ? ChangeStatus::UNCHANGED
4731                : ChangeStatus ::CHANGED;
4732   }
4733 
4734   ChangeStatus manifest(Attributor &A) override {
4735     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4736 
4737     if (SimplifiedAssociatedValue.hasValue() &&
4738         !SimplifiedAssociatedValue.getValue())
4739       return Changed;
4740 
4741     Value &V = getAssociatedValue();
4742     auto *C = SimplifiedAssociatedValue.hasValue()
4743                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4744                   : UndefValue::get(V.getType());
4745     if (C) {
4746       auto PredForReturned =
4747           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4748             // We can replace the AssociatedValue with the constant.
4749             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4750               return true;
4751 
4752             for (ReturnInst *RI : RetInsts) {
4753               if (RI->getFunction() != getAnchorScope())
4754                 continue;
4755               auto *RC = C;
4756               if (RC->getType() != RI->getReturnValue()->getType())
4757                 RC = ConstantExpr::getBitCast(RC,
4758                                               RI->getReturnValue()->getType());
4759               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4760                                 << " in " << *RI << " :: " << *this << "\n");
4761               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4762                 Changed = ChangeStatus::CHANGED;
4763             }
4764             return true;
4765           };
4766       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4767     }
4768 
4769     return Changed | AAValueSimplify::manifest(A);
4770   }
4771 
4772   /// See AbstractAttribute::trackStatistics()
4773   void trackStatistics() const override {
4774     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4775   }
4776 };
4777 
4778 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4779   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4780       : AAValueSimplifyImpl(IRP, A) {}
4781 
4782   /// See AbstractAttribute::initialize(...).
4783   void initialize(Attributor &A) override {
4784     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4785     //        Needs investigation.
4786     // AAValueSimplifyImpl::initialize(A);
4787     Value &V = getAnchorValue();
4788 
4789     // TODO: add other stuffs
4790     if (isa<Constant>(V))
4791       indicatePessimisticFixpoint();
4792   }
4793 
4794   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4795   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4796   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4797   /// updated and \p Changed is set appropriately.
4798   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4799                               ChangeStatus &Changed) {
4800     if (!ICmp)
4801       return false;
4802     if (!ICmp->isEquality())
4803       return false;
4804 
4805     // This is a comparison with == or !-. We check for nullptr now.
4806     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4807     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4808     if (!Op0IsNull && !Op1IsNull)
4809       return false;
4810 
4811     LLVMContext &Ctx = ICmp->getContext();
4812     // Check for `nullptr ==/!= nullptr` first:
4813     if (Op0IsNull && Op1IsNull) {
4814       Value *NewVal = ConstantInt::get(
4815           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4816       assert(!SimplifiedAssociatedValue.hasValue() &&
4817              "Did not expect non-fixed value for constant comparison");
4818       SimplifiedAssociatedValue = NewVal;
4819       indicateOptimisticFixpoint();
4820       Changed = ChangeStatus::CHANGED;
4821       return true;
4822     }
4823 
4824     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4825     // non-nullptr operand and if we assume it's non-null we can conclude the
4826     // result of the comparison.
4827     assert((Op0IsNull || Op1IsNull) &&
4828            "Expected nullptr versus non-nullptr comparison at this point");
4829 
4830     // The index is the operand that we assume is not null.
4831     unsigned PtrIdx = Op0IsNull;
4832     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4833         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4834         DepClassTy::REQUIRED);
4835     if (!PtrNonNullAA.isAssumedNonNull())
4836       return false;
4837 
4838     // The new value depends on the predicate, true for != and false for ==.
4839     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4840                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4841 
4842     assert((!SimplifiedAssociatedValue.hasValue() ||
4843             SimplifiedAssociatedValue == NewVal) &&
4844            "Did not expect to change value for zero-comparison");
4845 
4846     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4847     SimplifiedAssociatedValue = NewVal;
4848 
4849     if (PtrNonNullAA.isKnownNonNull())
4850       indicateOptimisticFixpoint();
4851 
4852     Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus ::CHANGED;
4853     return true;
4854   }
4855 
4856   /// See AbstractAttribute::updateImpl(...).
4857   ChangeStatus updateImpl(Attributor &A) override {
4858     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4859 
4860     ChangeStatus Changed;
4861     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4862                                Changed))
4863       return Changed;
4864 
4865     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4866                             bool Stripped) -> bool {
4867       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V),
4868                                              DepClassTy::REQUIRED);
4869       if (!Stripped && this == &AA) {
4870         // TODO: Look the instruction and check recursively.
4871 
4872         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4873                           << "\n");
4874         return false;
4875       }
4876       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4877     };
4878 
4879     bool Dummy = false;
4880     if (!genericValueTraversal<AAValueSimplify, bool>(
4881             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4882             /* UseValueSimplify */ false))
4883       if (!askSimplifiedValueForOtherAAs(A))
4884         return indicatePessimisticFixpoint();
4885 
4886     // If a candicate was found in this update, return CHANGED.
4887 
4888     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4889                ? ChangeStatus::UNCHANGED
4890                : ChangeStatus ::CHANGED;
4891   }
4892 
4893   /// See AbstractAttribute::trackStatistics()
4894   void trackStatistics() const override {
4895     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4896   }
4897 };
4898 
4899 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4900   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4901       : AAValueSimplifyImpl(IRP, A) {}
4902 
4903   /// See AbstractAttribute::initialize(...).
4904   void initialize(Attributor &A) override {
4905     SimplifiedAssociatedValue = &getAnchorValue();
4906     indicateOptimisticFixpoint();
4907   }
4908   /// See AbstractAttribute::initialize(...).
4909   ChangeStatus updateImpl(Attributor &A) override {
4910     llvm_unreachable(
4911         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4912   }
4913   /// See AbstractAttribute::trackStatistics()
4914   void trackStatistics() const override {
4915     STATS_DECLTRACK_FN_ATTR(value_simplify)
4916   }
4917 };
4918 
4919 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4920   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4921       : AAValueSimplifyFunction(IRP, A) {}
4922   /// See AbstractAttribute::trackStatistics()
4923   void trackStatistics() const override {
4924     STATS_DECLTRACK_CS_ATTR(value_simplify)
4925   }
4926 };
4927 
4928 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4929   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4930       : AAValueSimplifyReturned(IRP, A) {}
4931 
4932   /// See AbstractAttribute::manifest(...).
4933   ChangeStatus manifest(Attributor &A) override {
4934     return AAValueSimplifyImpl::manifest(A);
4935   }
4936 
4937   void trackStatistics() const override {
4938     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4939   }
4940 };
4941 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4942   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4943       : AAValueSimplifyFloating(IRP, A) {}
4944 
4945   /// See AbstractAttribute::manifest(...).
4946   ChangeStatus manifest(Attributor &A) override {
4947     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4948 
4949     if (SimplifiedAssociatedValue.hasValue() &&
4950         !SimplifiedAssociatedValue.getValue())
4951       return Changed;
4952 
4953     Value &V = getAssociatedValue();
4954     auto *C = SimplifiedAssociatedValue.hasValue()
4955                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4956                   : UndefValue::get(V.getType());
4957     if (C) {
4958       Use &U = cast<CallBase>(&getAnchorValue())
4959                    ->getArgOperandUse(getCallSiteArgNo());
4960       // We can replace the AssociatedValue with the constant.
4961       if (&V != C && V.getType() == C->getType()) {
4962         if (A.changeUseAfterManifest(U, *C))
4963           Changed = ChangeStatus::CHANGED;
4964       }
4965     }
4966 
4967     return Changed | AAValueSimplify::manifest(A);
4968   }
4969 
4970   void trackStatistics() const override {
4971     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4972   }
4973 };
4974 
4975 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4976 struct AAHeapToStackImpl : public AAHeapToStack {
4977   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4978       : AAHeapToStack(IRP, A) {}
4979 
4980   const std::string getAsStr() const override {
4981     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4982   }
4983 
4984   ChangeStatus manifest(Attributor &A) override {
4985     assert(getState().isValidState() &&
4986            "Attempted to manifest an invalid state!");
4987 
4988     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4989     Function *F = getAnchorScope();
4990     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4991 
4992     for (Instruction *MallocCall : MallocCalls) {
4993       // This malloc cannot be replaced.
4994       if (BadMallocCalls.count(MallocCall))
4995         continue;
4996 
4997       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4998         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4999         A.deleteAfterManifest(*FreeCall);
5000         HasChanged = ChangeStatus::CHANGED;
5001       }
5002 
5003       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5004                         << "\n");
5005 
5006       Align Alignment;
5007       Value *Size;
5008       if (isCallocLikeFn(MallocCall, TLI)) {
5009         auto *Num = MallocCall->getOperand(0);
5010         auto *SizeT = MallocCall->getOperand(1);
5011         IRBuilder<> B(MallocCall);
5012         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5013       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5014         Size = MallocCall->getOperand(1);
5015         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5016                                    ->getValue()
5017                                    .getZExtValue())
5018                         .valueOrOne();
5019       } else {
5020         Size = MallocCall->getOperand(0);
5021       }
5022 
5023       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5024       Instruction *AI =
5025           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5026                          "", MallocCall->getNextNode());
5027 
5028       if (AI->getType() != MallocCall->getType())
5029         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5030                              AI->getNextNode());
5031 
5032       A.changeValueAfterManifest(*MallocCall, *AI);
5033 
5034       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5035         auto *NBB = II->getNormalDest();
5036         BranchInst::Create(NBB, MallocCall->getParent());
5037         A.deleteAfterManifest(*MallocCall);
5038       } else {
5039         A.deleteAfterManifest(*MallocCall);
5040       }
5041 
5042       // Zero out the allocated memory if it was a calloc.
5043       if (isCallocLikeFn(MallocCall, TLI)) {
5044         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5045                                    AI->getNextNode());
5046         Value *Ops[] = {
5047             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5048             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5049 
5050         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5051         Module *M = F->getParent();
5052         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5053         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5054       }
5055       HasChanged = ChangeStatus::CHANGED;
5056     }
5057 
5058     return HasChanged;
5059   }
5060 
5061   /// Collection of all malloc calls in a function.
5062   SmallSetVector<Instruction *, 4> MallocCalls;
5063 
5064   /// Collection of malloc calls that cannot be converted.
5065   DenseSet<const Instruction *> BadMallocCalls;
5066 
5067   /// A map for each malloc call to the set of associated free calls.
5068   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5069 
5070   ChangeStatus updateImpl(Attributor &A) override;
5071 };
5072 
5073 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5074   const Function *F = getAnchorScope();
5075   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5076 
5077   MustBeExecutedContextExplorer &Explorer =
5078       A.getInfoCache().getMustBeExecutedContextExplorer();
5079 
5080   auto FreeCheck = [&](Instruction &I) {
5081     const auto &Frees = FreesForMalloc.lookup(&I);
5082     if (Frees.size() != 1)
5083       return false;
5084     Instruction *UniqueFree = *Frees.begin();
5085     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5086   };
5087 
5088   auto UsesCheck = [&](Instruction &I) {
5089     bool ValidUsesOnly = true;
5090     bool MustUse = true;
5091     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5092       Instruction *UserI = cast<Instruction>(U.getUser());
5093       if (isa<LoadInst>(UserI))
5094         return true;
5095       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5096         if (SI->getValueOperand() == U.get()) {
5097           LLVM_DEBUG(dbgs()
5098                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5099           ValidUsesOnly = false;
5100         } else {
5101           // A store into the malloc'ed memory is fine.
5102         }
5103         return true;
5104       }
5105       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5106         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5107           return true;
5108         // Record malloc.
5109         if (isFreeCall(UserI, TLI)) {
5110           if (MustUse) {
5111             FreesForMalloc[&I].insert(UserI);
5112           } else {
5113             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5114                               << *UserI << "\n");
5115             ValidUsesOnly = false;
5116           }
5117           return true;
5118         }
5119 
5120         unsigned ArgNo = CB->getArgOperandNo(&U);
5121 
5122         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5123             *this, IRPosition::callsite_argument(*CB, ArgNo),
5124             DepClassTy::REQUIRED);
5125 
5126         // If a callsite argument use is nofree, we are fine.
5127         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5128             *this, IRPosition::callsite_argument(*CB, ArgNo),
5129             DepClassTy::REQUIRED);
5130 
5131         if (!NoCaptureAA.isAssumedNoCapture() ||
5132             !ArgNoFreeAA.isAssumedNoFree()) {
5133           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5134           ValidUsesOnly = false;
5135         }
5136         return true;
5137       }
5138 
5139       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5140           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5141         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5142         Follow = true;
5143         return true;
5144       }
5145       // Unknown user for which we can not track uses further (in a way that
5146       // makes sense).
5147       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5148       ValidUsesOnly = false;
5149       return true;
5150     };
5151     A.checkForAllUses(Pred, *this, I);
5152     return ValidUsesOnly;
5153   };
5154 
5155   auto MallocCallocCheck = [&](Instruction &I) {
5156     if (BadMallocCalls.count(&I))
5157       return true;
5158 
5159     bool IsMalloc = isMallocLikeFn(&I, TLI);
5160     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5161     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5162     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5163       BadMallocCalls.insert(&I);
5164       return true;
5165     }
5166 
5167     if (IsMalloc) {
5168       if (MaxHeapToStackSize == -1) {
5169         if (UsesCheck(I) || FreeCheck(I)) {
5170           MallocCalls.insert(&I);
5171           return true;
5172         }
5173       }
5174       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5175         if (Size->getValue().ule(MaxHeapToStackSize))
5176           if (UsesCheck(I) || FreeCheck(I)) {
5177             MallocCalls.insert(&I);
5178             return true;
5179           }
5180     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5181       if (MaxHeapToStackSize == -1) {
5182         if (UsesCheck(I) || FreeCheck(I)) {
5183           MallocCalls.insert(&I);
5184           return true;
5185         }
5186       }
5187       // Only if the alignment and sizes are constant.
5188       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5189         if (Size->getValue().ule(MaxHeapToStackSize))
5190           if (UsesCheck(I) || FreeCheck(I)) {
5191             MallocCalls.insert(&I);
5192             return true;
5193           }
5194     } else if (IsCalloc) {
5195       if (MaxHeapToStackSize == -1) {
5196         if (UsesCheck(I) || FreeCheck(I)) {
5197           MallocCalls.insert(&I);
5198           return true;
5199         }
5200       }
5201       bool Overflow = false;
5202       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5203         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5204           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5205                   .ule(MaxHeapToStackSize))
5206             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5207               MallocCalls.insert(&I);
5208               return true;
5209             }
5210     }
5211 
5212     BadMallocCalls.insert(&I);
5213     return true;
5214   };
5215 
5216   size_t NumBadMallocs = BadMallocCalls.size();
5217 
5218   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5219 
5220   if (NumBadMallocs != BadMallocCalls.size())
5221     return ChangeStatus::CHANGED;
5222 
5223   return ChangeStatus::UNCHANGED;
5224 }
5225 
5226 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5227   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5228       : AAHeapToStackImpl(IRP, A) {}
5229 
5230   /// See AbstractAttribute::trackStatistics().
5231   void trackStatistics() const override {
5232     STATS_DECL(
5233         MallocCalls, Function,
5234         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5235     for (auto *C : MallocCalls)
5236       if (!BadMallocCalls.count(C))
5237         ++BUILD_STAT_NAME(MallocCalls, Function);
5238   }
5239 };
5240 
5241 /// ----------------------- Privatizable Pointers ------------------------------
5242 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5243   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5244       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5245 
5246   ChangeStatus indicatePessimisticFixpoint() override {
5247     AAPrivatizablePtr::indicatePessimisticFixpoint();
5248     PrivatizableType = nullptr;
5249     return ChangeStatus::CHANGED;
5250   }
5251 
5252   /// Identify the type we can chose for a private copy of the underlying
5253   /// argument. None means it is not clear yet, nullptr means there is none.
5254   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5255 
5256   /// Return a privatizable type that encloses both T0 and T1.
5257   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5258   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5259     if (!T0.hasValue())
5260       return T1;
5261     if (!T1.hasValue())
5262       return T0;
5263     if (T0 == T1)
5264       return T0;
5265     return nullptr;
5266   }
5267 
5268   Optional<Type *> getPrivatizableType() const override {
5269     return PrivatizableType;
5270   }
5271 
5272   const std::string getAsStr() const override {
5273     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5274   }
5275 
5276 protected:
5277   Optional<Type *> PrivatizableType;
5278 };
5279 
5280 // TODO: Do this for call site arguments (probably also other values) as well.
5281 
5282 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5283   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5284       : AAPrivatizablePtrImpl(IRP, A) {}
5285 
5286   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5287   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5288     // If this is a byval argument and we know all the call sites (so we can
5289     // rewrite them), there is no need to check them explicitly.
5290     bool AllCallSitesKnown;
5291     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5292         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5293                                true, AllCallSitesKnown))
5294       return getAssociatedValue().getType()->getPointerElementType();
5295 
5296     Optional<Type *> Ty;
5297     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5298 
5299     // Make sure the associated call site argument has the same type at all call
5300     // sites and it is an allocation we know is safe to privatize, for now that
5301     // means we only allow alloca instructions.
5302     // TODO: We can additionally analyze the accesses in the callee to  create
5303     //       the type from that information instead. That is a little more
5304     //       involved and will be done in a follow up patch.
5305     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5306       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5307       // Check if a coresponding argument was found or if it is one not
5308       // associated (which can happen for callback calls).
5309       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5310         return false;
5311 
5312       // Check that all call sites agree on a type.
5313       auto &PrivCSArgAA =
5314           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5315       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5316 
5317       LLVM_DEBUG({
5318         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5319         if (CSTy.hasValue() && CSTy.getValue())
5320           CSTy.getValue()->print(dbgs());
5321         else if (CSTy.hasValue())
5322           dbgs() << "<nullptr>";
5323         else
5324           dbgs() << "<none>";
5325       });
5326 
5327       Ty = combineTypes(Ty, CSTy);
5328 
5329       LLVM_DEBUG({
5330         dbgs() << " : New Type: ";
5331         if (Ty.hasValue() && Ty.getValue())
5332           Ty.getValue()->print(dbgs());
5333         else if (Ty.hasValue())
5334           dbgs() << "<nullptr>";
5335         else
5336           dbgs() << "<none>";
5337         dbgs() << "\n";
5338       });
5339 
5340       return !Ty.hasValue() || Ty.getValue();
5341     };
5342 
5343     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5344       return nullptr;
5345     return Ty;
5346   }
5347 
5348   /// See AbstractAttribute::updateImpl(...).
5349   ChangeStatus updateImpl(Attributor &A) override {
5350     PrivatizableType = identifyPrivatizableType(A);
5351     if (!PrivatizableType.hasValue())
5352       return ChangeStatus::UNCHANGED;
5353     if (!PrivatizableType.getValue())
5354       return indicatePessimisticFixpoint();
5355 
5356     // The dependence is optional so we don't give up once we give up on the
5357     // alignment.
5358     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5359                         DepClassTy::OPTIONAL);
5360 
5361     // Avoid arguments with padding for now.
5362     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5363         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5364                                                 A.getInfoCache().getDL())) {
5365       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5366       return indicatePessimisticFixpoint();
5367     }
5368 
5369     // Verify callee and caller agree on how the promoted argument would be
5370     // passed.
5371     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5372     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5373     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5374     Function &Fn = *getIRPosition().getAnchorScope();
5375     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5376     ArgsToPromote.insert(getAssociatedArgument());
5377     const auto *TTI =
5378         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5379     if (!TTI ||
5380         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5381             Fn, *TTI, ArgsToPromote, Dummy) ||
5382         ArgsToPromote.empty()) {
5383       LLVM_DEBUG(
5384           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5385                  << Fn.getName() << "\n");
5386       return indicatePessimisticFixpoint();
5387     }
5388 
5389     // Collect the types that will replace the privatizable type in the function
5390     // signature.
5391     SmallVector<Type *, 16> ReplacementTypes;
5392     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5393 
5394     // Register a rewrite of the argument.
5395     Argument *Arg = getAssociatedArgument();
5396     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5397       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5398       return indicatePessimisticFixpoint();
5399     }
5400 
5401     unsigned ArgNo = Arg->getArgNo();
5402 
5403     // Helper to check if for the given call site the associated argument is
5404     // passed to a callback where the privatization would be different.
5405     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5406       SmallVector<const Use *, 4> CallbackUses;
5407       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5408       for (const Use *U : CallbackUses) {
5409         AbstractCallSite CBACS(U);
5410         assert(CBACS && CBACS.isCallbackCall());
5411         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5412           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5413 
5414           LLVM_DEBUG({
5415             dbgs()
5416                 << "[AAPrivatizablePtr] Argument " << *Arg
5417                 << "check if can be privatized in the context of its parent ("
5418                 << Arg->getParent()->getName()
5419                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5420                    "callback ("
5421                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5422                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5423                 << CBACS.getCallArgOperand(CBArg) << " vs "
5424                 << CB.getArgOperand(ArgNo) << "\n"
5425                 << "[AAPrivatizablePtr] " << CBArg << " : "
5426                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5427           });
5428 
5429           if (CBArgNo != int(ArgNo))
5430             continue;
5431           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5432               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5433           if (CBArgPrivAA.isValidState()) {
5434             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5435             if (!CBArgPrivTy.hasValue())
5436               continue;
5437             if (CBArgPrivTy.getValue() == PrivatizableType)
5438               continue;
5439           }
5440 
5441           LLVM_DEBUG({
5442             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5443                    << " cannot be privatized in the context of its parent ("
5444                    << Arg->getParent()->getName()
5445                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5446                       "callback ("
5447                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5448                    << ").\n[AAPrivatizablePtr] for which the argument "
5449                       "privatization is not compatible.\n";
5450           });
5451           return false;
5452         }
5453       }
5454       return true;
5455     };
5456 
5457     // Helper to check if for the given call site the associated argument is
5458     // passed to a direct call where the privatization would be different.
5459     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5460       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5461       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5462       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5463              "Expected a direct call operand for callback call operand");
5464 
5465       LLVM_DEBUG({
5466         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5467                << " check if be privatized in the context of its parent ("
5468                << Arg->getParent()->getName()
5469                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5470                   "direct call of ("
5471                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5472                << ").\n";
5473       });
5474 
5475       Function *DCCallee = DC->getCalledFunction();
5476       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5477         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5478             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5479             DepClassTy::REQUIRED);
5480         if (DCArgPrivAA.isValidState()) {
5481           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5482           if (!DCArgPrivTy.hasValue())
5483             return true;
5484           if (DCArgPrivTy.getValue() == PrivatizableType)
5485             return true;
5486         }
5487       }
5488 
5489       LLVM_DEBUG({
5490         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5491                << " cannot be privatized in the context of its parent ("
5492                << Arg->getParent()->getName()
5493                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5494                   "direct call of ("
5495                << ACS.getInstruction()->getCalledFunction()->getName()
5496                << ").\n[AAPrivatizablePtr] for which the argument "
5497                   "privatization is not compatible.\n";
5498       });
5499       return false;
5500     };
5501 
5502     // Helper to check if the associated argument is used at the given abstract
5503     // call site in a way that is incompatible with the privatization assumed
5504     // here.
5505     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5506       if (ACS.isDirectCall())
5507         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5508       if (ACS.isCallbackCall())
5509         return IsCompatiblePrivArgOfDirectCS(ACS);
5510       return false;
5511     };
5512 
5513     bool AllCallSitesKnown;
5514     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5515                                 AllCallSitesKnown))
5516       return indicatePessimisticFixpoint();
5517 
5518     return ChangeStatus::UNCHANGED;
5519   }
5520 
5521   /// Given a type to private \p PrivType, collect the constituates (which are
5522   /// used) in \p ReplacementTypes.
5523   static void
5524   identifyReplacementTypes(Type *PrivType,
5525                            SmallVectorImpl<Type *> &ReplacementTypes) {
5526     // TODO: For now we expand the privatization type to the fullest which can
5527     //       lead to dead arguments that need to be removed later.
5528     assert(PrivType && "Expected privatizable type!");
5529 
5530     // Traverse the type, extract constituate types on the outermost level.
5531     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5532       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5533         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5534     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5535       ReplacementTypes.append(PrivArrayType->getNumElements(),
5536                               PrivArrayType->getElementType());
5537     } else {
5538       ReplacementTypes.push_back(PrivType);
5539     }
5540   }
5541 
5542   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5543   /// The values needed are taken from the arguments of \p F starting at
5544   /// position \p ArgNo.
5545   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5546                                    unsigned ArgNo, Instruction &IP) {
5547     assert(PrivType && "Expected privatizable type!");
5548 
5549     IRBuilder<NoFolder> IRB(&IP);
5550     const DataLayout &DL = F.getParent()->getDataLayout();
5551 
5552     // Traverse the type, build GEPs and stores.
5553     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5554       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5555       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5556         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5557         Value *Ptr = constructPointer(
5558             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5559         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5560       }
5561     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5562       Type *PointeeTy = PrivArrayType->getElementType();
5563       Type *PointeePtrTy = PointeeTy->getPointerTo();
5564       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5565       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5566         Value *Ptr =
5567             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5568         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5569       }
5570     } else {
5571       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5572     }
5573   }
5574 
5575   /// Extract values from \p Base according to the type \p PrivType at the
5576   /// call position \p ACS. The values are appended to \p ReplacementValues.
5577   void createReplacementValues(Align Alignment, Type *PrivType,
5578                                AbstractCallSite ACS, Value *Base,
5579                                SmallVectorImpl<Value *> &ReplacementValues) {
5580     assert(Base && "Expected base value!");
5581     assert(PrivType && "Expected privatizable type!");
5582     Instruction *IP = ACS.getInstruction();
5583 
5584     IRBuilder<NoFolder> IRB(IP);
5585     const DataLayout &DL = IP->getModule()->getDataLayout();
5586 
5587     if (Base->getType()->getPointerElementType() != PrivType)
5588       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5589                                                  "", ACS.getInstruction());
5590 
5591     // Traverse the type, build GEPs and loads.
5592     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5593       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5594       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5595         Type *PointeeTy = PrivStructType->getElementType(u);
5596         Value *Ptr =
5597             constructPointer(PointeeTy->getPointerTo(), Base,
5598                              PrivStructLayout->getElementOffset(u), IRB, DL);
5599         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5600         L->setAlignment(Alignment);
5601         ReplacementValues.push_back(L);
5602       }
5603     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5604       Type *PointeeTy = PrivArrayType->getElementType();
5605       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5606       Type *PointeePtrTy = PointeeTy->getPointerTo();
5607       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5608         Value *Ptr =
5609             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5610         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5611         L->setAlignment(Alignment);
5612         ReplacementValues.push_back(L);
5613       }
5614     } else {
5615       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5616       L->setAlignment(Alignment);
5617       ReplacementValues.push_back(L);
5618     }
5619   }
5620 
5621   /// See AbstractAttribute::manifest(...)
5622   ChangeStatus manifest(Attributor &A) override {
5623     if (!PrivatizableType.hasValue())
5624       return ChangeStatus::UNCHANGED;
5625     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5626 
5627     // Collect all tail calls in the function as we cannot allow new allocas to
5628     // escape into tail recursion.
5629     // TODO: Be smarter about new allocas escaping into tail calls.
5630     SmallVector<CallInst *, 16> TailCalls;
5631     if (!A.checkForAllInstructions(
5632             [&](Instruction &I) {
5633               CallInst &CI = cast<CallInst>(I);
5634               if (CI.isTailCall())
5635                 TailCalls.push_back(&CI);
5636               return true;
5637             },
5638             *this, {Instruction::Call}))
5639       return ChangeStatus::UNCHANGED;
5640 
5641     Argument *Arg = getAssociatedArgument();
5642     // Query AAAlign attribute for alignment of associated argument to
5643     // determine the best alignment of loads.
5644     const auto &AlignAA =
5645         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5646 
5647     // Callback to repair the associated function. A new alloca is placed at the
5648     // beginning and initialized with the values passed through arguments. The
5649     // new alloca replaces the use of the old pointer argument.
5650     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5651         [=](const Attributor::ArgumentReplacementInfo &ARI,
5652             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5653           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5654           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5655           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5656                                            Arg->getName() + ".priv", IP);
5657           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5658                                ArgIt->getArgNo(), *IP);
5659 
5660           if (AI->getType() != Arg->getType())
5661             AI =
5662                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5663           Arg->replaceAllUsesWith(AI);
5664 
5665           for (CallInst *CI : TailCalls)
5666             CI->setTailCall(false);
5667         };
5668 
5669     // Callback to repair a call site of the associated function. The elements
5670     // of the privatizable type are loaded prior to the call and passed to the
5671     // new function version.
5672     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5673         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5674                       AbstractCallSite ACS,
5675                       SmallVectorImpl<Value *> &NewArgOperands) {
5676           // When no alignment is specified for the load instruction,
5677           // natural alignment is assumed.
5678           createReplacementValues(
5679               assumeAligned(AlignAA.getAssumedAlign()),
5680               PrivatizableType.getValue(), ACS,
5681               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5682               NewArgOperands);
5683         };
5684 
5685     // Collect the types that will replace the privatizable type in the function
5686     // signature.
5687     SmallVector<Type *, 16> ReplacementTypes;
5688     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5689 
5690     // Register a rewrite of the argument.
5691     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5692                                            std::move(FnRepairCB),
5693                                            std::move(ACSRepairCB)))
5694       return ChangeStatus::CHANGED;
5695     return ChangeStatus::UNCHANGED;
5696   }
5697 
5698   /// See AbstractAttribute::trackStatistics()
5699   void trackStatistics() const override {
5700     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5701   }
5702 };
5703 
5704 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5705   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5706       : AAPrivatizablePtrImpl(IRP, A) {}
5707 
5708   /// See AbstractAttribute::initialize(...).
5709   virtual void initialize(Attributor &A) override {
5710     // TODO: We can privatize more than arguments.
5711     indicatePessimisticFixpoint();
5712   }
5713 
5714   ChangeStatus updateImpl(Attributor &A) override {
5715     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5716                      "updateImpl will not be called");
5717   }
5718 
5719   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5720   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5721     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5722     if (!Obj) {
5723       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5724       return nullptr;
5725     }
5726 
5727     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5728       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5729         if (CI->isOne())
5730           return Obj->getType()->getPointerElementType();
5731     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5732       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5733           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5734       if (PrivArgAA.isAssumedPrivatizablePtr())
5735         return Obj->getType()->getPointerElementType();
5736     }
5737 
5738     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5739                          "alloca nor privatizable argument: "
5740                       << *Obj << "!\n");
5741     return nullptr;
5742   }
5743 
5744   /// See AbstractAttribute::trackStatistics()
5745   void trackStatistics() const override {
5746     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5747   }
5748 };
5749 
5750 struct AAPrivatizablePtrCallSiteArgument final
5751     : public AAPrivatizablePtrFloating {
5752   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5753       : AAPrivatizablePtrFloating(IRP, A) {}
5754 
5755   /// See AbstractAttribute::initialize(...).
5756   void initialize(Attributor &A) override {
5757     if (getIRPosition().hasAttr(Attribute::ByVal))
5758       indicateOptimisticFixpoint();
5759   }
5760 
5761   /// See AbstractAttribute::updateImpl(...).
5762   ChangeStatus updateImpl(Attributor &A) override {
5763     PrivatizableType = identifyPrivatizableType(A);
5764     if (!PrivatizableType.hasValue())
5765       return ChangeStatus::UNCHANGED;
5766     if (!PrivatizableType.getValue())
5767       return indicatePessimisticFixpoint();
5768 
5769     const IRPosition &IRP = getIRPosition();
5770     auto &NoCaptureAA =
5771         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5772     if (!NoCaptureAA.isAssumedNoCapture()) {
5773       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5774       return indicatePessimisticFixpoint();
5775     }
5776 
5777     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5778     if (!NoAliasAA.isAssumedNoAlias()) {
5779       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5780       return indicatePessimisticFixpoint();
5781     }
5782 
5783     const auto &MemBehaviorAA =
5784         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5785     if (!MemBehaviorAA.isAssumedReadOnly()) {
5786       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5787       return indicatePessimisticFixpoint();
5788     }
5789 
5790     return ChangeStatus::UNCHANGED;
5791   }
5792 
5793   /// See AbstractAttribute::trackStatistics()
5794   void trackStatistics() const override {
5795     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5796   }
5797 };
5798 
5799 struct AAPrivatizablePtrCallSiteReturned final
5800     : public AAPrivatizablePtrFloating {
5801   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5802       : AAPrivatizablePtrFloating(IRP, A) {}
5803 
5804   /// See AbstractAttribute::initialize(...).
5805   void initialize(Attributor &A) override {
5806     // TODO: We can privatize more than arguments.
5807     indicatePessimisticFixpoint();
5808   }
5809 
5810   /// See AbstractAttribute::trackStatistics()
5811   void trackStatistics() const override {
5812     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5813   }
5814 };
5815 
5816 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5817   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5818       : AAPrivatizablePtrFloating(IRP, A) {}
5819 
5820   /// See AbstractAttribute::initialize(...).
5821   void initialize(Attributor &A) override {
5822     // TODO: We can privatize more than arguments.
5823     indicatePessimisticFixpoint();
5824   }
5825 
5826   /// See AbstractAttribute::trackStatistics()
5827   void trackStatistics() const override {
5828     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5829   }
5830 };
5831 
5832 /// -------------------- Memory Behavior Attributes ----------------------------
5833 /// Includes read-none, read-only, and write-only.
5834 /// ----------------------------------------------------------------------------
5835 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5836   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5837       : AAMemoryBehavior(IRP, A) {}
5838 
5839   /// See AbstractAttribute::initialize(...).
5840   void initialize(Attributor &A) override {
5841     intersectAssumedBits(BEST_STATE);
5842     getKnownStateFromValue(getIRPosition(), getState());
5843     AAMemoryBehavior::initialize(A);
5844   }
5845 
5846   /// Return the memory behavior information encoded in the IR for \p IRP.
5847   static void getKnownStateFromValue(const IRPosition &IRP,
5848                                      BitIntegerState &State,
5849                                      bool IgnoreSubsumingPositions = false) {
5850     SmallVector<Attribute, 2> Attrs;
5851     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5852     for (const Attribute &Attr : Attrs) {
5853       switch (Attr.getKindAsEnum()) {
5854       case Attribute::ReadNone:
5855         State.addKnownBits(NO_ACCESSES);
5856         break;
5857       case Attribute::ReadOnly:
5858         State.addKnownBits(NO_WRITES);
5859         break;
5860       case Attribute::WriteOnly:
5861         State.addKnownBits(NO_READS);
5862         break;
5863       default:
5864         llvm_unreachable("Unexpected attribute!");
5865       }
5866     }
5867 
5868     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5869       if (!I->mayReadFromMemory())
5870         State.addKnownBits(NO_READS);
5871       if (!I->mayWriteToMemory())
5872         State.addKnownBits(NO_WRITES);
5873     }
5874   }
5875 
5876   /// See AbstractAttribute::getDeducedAttributes(...).
5877   void getDeducedAttributes(LLVMContext &Ctx,
5878                             SmallVectorImpl<Attribute> &Attrs) const override {
5879     assert(Attrs.size() == 0);
5880     if (isAssumedReadNone())
5881       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5882     else if (isAssumedReadOnly())
5883       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5884     else if (isAssumedWriteOnly())
5885       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5886     assert(Attrs.size() <= 1);
5887   }
5888 
5889   /// See AbstractAttribute::manifest(...).
5890   ChangeStatus manifest(Attributor &A) override {
5891     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5892       return ChangeStatus::UNCHANGED;
5893 
5894     const IRPosition &IRP = getIRPosition();
5895 
5896     // Check if we would improve the existing attributes first.
5897     SmallVector<Attribute, 4> DeducedAttrs;
5898     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5899     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5900           return IRP.hasAttr(Attr.getKindAsEnum(),
5901                              /* IgnoreSubsumingPositions */ true);
5902         }))
5903       return ChangeStatus::UNCHANGED;
5904 
5905     // Clear existing attributes.
5906     IRP.removeAttrs(AttrKinds);
5907 
5908     // Use the generic manifest method.
5909     return IRAttribute::manifest(A);
5910   }
5911 
5912   /// See AbstractState::getAsStr().
5913   const std::string getAsStr() const override {
5914     if (isAssumedReadNone())
5915       return "readnone";
5916     if (isAssumedReadOnly())
5917       return "readonly";
5918     if (isAssumedWriteOnly())
5919       return "writeonly";
5920     return "may-read/write";
5921   }
5922 
5923   /// The set of IR attributes AAMemoryBehavior deals with.
5924   static const Attribute::AttrKind AttrKinds[3];
5925 };
5926 
5927 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5928     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5929 
5930 /// Memory behavior attribute for a floating value.
5931 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5932   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5933       : AAMemoryBehaviorImpl(IRP, A) {}
5934 
5935   /// See AbstractAttribute::initialize(...).
5936   void initialize(Attributor &A) override {
5937     AAMemoryBehaviorImpl::initialize(A);
5938     addUsesOf(A, getAssociatedValue());
5939   }
5940 
5941   /// See AbstractAttribute::updateImpl(...).
5942   ChangeStatus updateImpl(Attributor &A) override;
5943 
5944   /// See AbstractAttribute::trackStatistics()
5945   void trackStatistics() const override {
5946     if (isAssumedReadNone())
5947       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5948     else if (isAssumedReadOnly())
5949       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5950     else if (isAssumedWriteOnly())
5951       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5952   }
5953 
5954 private:
5955   /// Return true if users of \p UserI might access the underlying
5956   /// variable/location described by \p U and should therefore be analyzed.
5957   bool followUsersOfUseIn(Attributor &A, const Use *U,
5958                           const Instruction *UserI);
5959 
5960   /// Update the state according to the effect of use \p U in \p UserI.
5961   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5962 
5963 protected:
5964   /// Add the uses of \p V to the `Uses` set we look at during the update step.
5965   void addUsesOf(Attributor &A, const Value &V);
5966 
5967   /// Container for (transitive) uses of the associated argument.
5968   SmallVector<const Use *, 8> Uses;
5969 
5970   /// Set to remember the uses we already traversed.
5971   SmallPtrSet<const Use *, 8> Visited;
5972 };
5973 
5974 /// Memory behavior attribute for function argument.
5975 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5976   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5977       : AAMemoryBehaviorFloating(IRP, A) {}
5978 
5979   /// See AbstractAttribute::initialize(...).
5980   void initialize(Attributor &A) override {
5981     intersectAssumedBits(BEST_STATE);
5982     const IRPosition &IRP = getIRPosition();
5983     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5984     // can query it when we use has/getAttr. That would allow us to reuse the
5985     // initialize of the base class here.
5986     bool HasByVal =
5987         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5988     getKnownStateFromValue(IRP, getState(),
5989                            /* IgnoreSubsumingPositions */ HasByVal);
5990 
5991     // Initialize the use vector with all direct uses of the associated value.
5992     Argument *Arg = getAssociatedArgument();
5993     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5994       indicatePessimisticFixpoint();
5995     } else {
5996       addUsesOf(A, *Arg);
5997     }
5998   }
5999 
6000   ChangeStatus manifest(Attributor &A) override {
6001     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6002     if (!getAssociatedValue().getType()->isPointerTy())
6003       return ChangeStatus::UNCHANGED;
6004 
6005     // TODO: From readattrs.ll: "inalloca parameters are always
6006     //                           considered written"
6007     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6008       removeKnownBits(NO_WRITES);
6009       removeAssumedBits(NO_WRITES);
6010     }
6011     return AAMemoryBehaviorFloating::manifest(A);
6012   }
6013 
6014   /// See AbstractAttribute::trackStatistics()
6015   void trackStatistics() const override {
6016     if (isAssumedReadNone())
6017       STATS_DECLTRACK_ARG_ATTR(readnone)
6018     else if (isAssumedReadOnly())
6019       STATS_DECLTRACK_ARG_ATTR(readonly)
6020     else if (isAssumedWriteOnly())
6021       STATS_DECLTRACK_ARG_ATTR(writeonly)
6022   }
6023 };
6024 
6025 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6026   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6027       : AAMemoryBehaviorArgument(IRP, A) {}
6028 
6029   /// See AbstractAttribute::initialize(...).
6030   void initialize(Attributor &A) override {
6031     // If we don't have an associated attribute this is either a variadic call
6032     // or an indirect call, either way, nothing to do here.
6033     Argument *Arg = getAssociatedArgument();
6034     if (!Arg) {
6035       indicatePessimisticFixpoint();
6036       return;
6037     }
6038     if (Arg->hasByValAttr()) {
6039       addKnownBits(NO_WRITES);
6040       removeKnownBits(NO_READS);
6041       removeAssumedBits(NO_READS);
6042     }
6043     AAMemoryBehaviorArgument::initialize(A);
6044     if (getAssociatedFunction()->isDeclaration())
6045       indicatePessimisticFixpoint();
6046   }
6047 
6048   /// See AbstractAttribute::updateImpl(...).
6049   ChangeStatus updateImpl(Attributor &A) override {
6050     // TODO: Once we have call site specific value information we can provide
6051     //       call site specific liveness liveness information and then it makes
6052     //       sense to specialize attributes for call sites arguments instead of
6053     //       redirecting requests to the callee argument.
6054     Argument *Arg = getAssociatedArgument();
6055     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6056     auto &ArgAA =
6057         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6058     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6059   }
6060 
6061   /// See AbstractAttribute::trackStatistics()
6062   void trackStatistics() const override {
6063     if (isAssumedReadNone())
6064       STATS_DECLTRACK_CSARG_ATTR(readnone)
6065     else if (isAssumedReadOnly())
6066       STATS_DECLTRACK_CSARG_ATTR(readonly)
6067     else if (isAssumedWriteOnly())
6068       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6069   }
6070 };
6071 
6072 /// Memory behavior attribute for a call site return position.
6073 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6074   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6075       : AAMemoryBehaviorFloating(IRP, A) {}
6076 
6077   /// See AbstractAttribute::initialize(...).
6078   void initialize(Attributor &A) override {
6079     AAMemoryBehaviorImpl::initialize(A);
6080     Function *F = getAssociatedFunction();
6081     if (!F || F->isDeclaration())
6082       indicatePessimisticFixpoint();
6083   }
6084 
6085   /// See AbstractAttribute::manifest(...).
6086   ChangeStatus manifest(Attributor &A) override {
6087     // We do not annotate returned values.
6088     return ChangeStatus::UNCHANGED;
6089   }
6090 
6091   /// See AbstractAttribute::trackStatistics()
6092   void trackStatistics() const override {}
6093 };
6094 
6095 /// An AA to represent the memory behavior function attributes.
6096 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6097   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6098       : AAMemoryBehaviorImpl(IRP, A) {}
6099 
6100   /// See AbstractAttribute::updateImpl(Attributor &A).
6101   virtual ChangeStatus updateImpl(Attributor &A) override;
6102 
6103   /// See AbstractAttribute::manifest(...).
6104   ChangeStatus manifest(Attributor &A) override {
6105     Function &F = cast<Function>(getAnchorValue());
6106     if (isAssumedReadNone()) {
6107       F.removeFnAttr(Attribute::ArgMemOnly);
6108       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6109       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6110     }
6111     return AAMemoryBehaviorImpl::manifest(A);
6112   }
6113 
6114   /// See AbstractAttribute::trackStatistics()
6115   void trackStatistics() const override {
6116     if (isAssumedReadNone())
6117       STATS_DECLTRACK_FN_ATTR(readnone)
6118     else if (isAssumedReadOnly())
6119       STATS_DECLTRACK_FN_ATTR(readonly)
6120     else if (isAssumedWriteOnly())
6121       STATS_DECLTRACK_FN_ATTR(writeonly)
6122   }
6123 };
6124 
6125 /// AAMemoryBehavior attribute for call sites.
6126 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6127   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6128       : AAMemoryBehaviorImpl(IRP, A) {}
6129 
6130   /// See AbstractAttribute::initialize(...).
6131   void initialize(Attributor &A) override {
6132     AAMemoryBehaviorImpl::initialize(A);
6133     Function *F = getAssociatedFunction();
6134     if (!F || F->isDeclaration())
6135       indicatePessimisticFixpoint();
6136   }
6137 
6138   /// See AbstractAttribute::updateImpl(...).
6139   ChangeStatus updateImpl(Attributor &A) override {
6140     // TODO: Once we have call site specific value information we can provide
6141     //       call site specific liveness liveness information and then it makes
6142     //       sense to specialize attributes for call sites arguments instead of
6143     //       redirecting requests to the callee argument.
6144     Function *F = getAssociatedFunction();
6145     const IRPosition &FnPos = IRPosition::function(*F);
6146     auto &FnAA =
6147         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6148     return clampStateAndIndicateChange(getState(), FnAA.getState());
6149   }
6150 
6151   /// See AbstractAttribute::trackStatistics()
6152   void trackStatistics() const override {
6153     if (isAssumedReadNone())
6154       STATS_DECLTRACK_CS_ATTR(readnone)
6155     else if (isAssumedReadOnly())
6156       STATS_DECLTRACK_CS_ATTR(readonly)
6157     else if (isAssumedWriteOnly())
6158       STATS_DECLTRACK_CS_ATTR(writeonly)
6159   }
6160 };
6161 
6162 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6163 
6164   // The current assumed state used to determine a change.
6165   auto AssumedState = getAssumed();
6166 
6167   auto CheckRWInst = [&](Instruction &I) {
6168     // If the instruction has an own memory behavior state, use it to restrict
6169     // the local state. No further analysis is required as the other memory
6170     // state is as optimistic as it gets.
6171     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6172       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6173           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6174       intersectAssumedBits(MemBehaviorAA.getAssumed());
6175       return !isAtFixpoint();
6176     }
6177 
6178     // Remove access kind modifiers if necessary.
6179     if (I.mayReadFromMemory())
6180       removeAssumedBits(NO_READS);
6181     if (I.mayWriteToMemory())
6182       removeAssumedBits(NO_WRITES);
6183     return !isAtFixpoint();
6184   };
6185 
6186   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6187     return indicatePessimisticFixpoint();
6188 
6189   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6190                                         : ChangeStatus::UNCHANGED;
6191 }
6192 
6193 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6194 
6195   const IRPosition &IRP = getIRPosition();
6196   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6197   AAMemoryBehavior::StateType &S = getState();
6198 
6199   // First, check the function scope. We take the known information and we avoid
6200   // work if the assumed information implies the current assumed information for
6201   // this attribute. This is a valid for all but byval arguments.
6202   Argument *Arg = IRP.getAssociatedArgument();
6203   AAMemoryBehavior::base_t FnMemAssumedState =
6204       AAMemoryBehavior::StateType::getWorstState();
6205   if (!Arg || !Arg->hasByValAttr()) {
6206     const auto &FnMemAA =
6207         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6208     FnMemAssumedState = FnMemAA.getAssumed();
6209     S.addKnownBits(FnMemAA.getKnown());
6210     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6211       return ChangeStatus::UNCHANGED;
6212   }
6213 
6214   // Make sure the value is not captured (except through "return"), if
6215   // it is, any information derived would be irrelevant anyway as we cannot
6216   // check the potential aliases introduced by the capture. However, no need
6217   // to fall back to anythign less optimistic than the function state.
6218   const auto &ArgNoCaptureAA =
6219       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6220   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6221     S.intersectAssumedBits(FnMemAssumedState);
6222     return ChangeStatus::CHANGED;
6223   }
6224 
6225   // The current assumed state used to determine a change.
6226   auto AssumedState = S.getAssumed();
6227 
6228   // Liveness information to exclude dead users.
6229   // TODO: Take the FnPos once we have call site specific liveness information.
6230   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6231       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6232       DepClassTy::NONE);
6233 
6234   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6235   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6236     const Use *U = Uses[i];
6237     Instruction *UserI = cast<Instruction>(U->getUser());
6238     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6239                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6240                       << "]\n");
6241     if (A.isAssumedDead(*U, this, &LivenessAA))
6242       continue;
6243 
6244     // Droppable users, e.g., llvm::assume does not actually perform any action.
6245     if (UserI->isDroppable())
6246       continue;
6247 
6248     // Check if the users of UserI should also be visited.
6249     if (followUsersOfUseIn(A, U, UserI))
6250       addUsesOf(A, *UserI);
6251 
6252     // If UserI might touch memory we analyze the use in detail.
6253     if (UserI->mayReadOrWriteMemory())
6254       analyzeUseIn(A, U, UserI);
6255   }
6256 
6257   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6258                                         : ChangeStatus::UNCHANGED;
6259 }
6260 
6261 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6262   SmallVector<const Use *, 8> WL;
6263   for (const Use &U : V.uses())
6264     WL.push_back(&U);
6265 
6266   while (!WL.empty()) {
6267     const Use *U = WL.pop_back_val();
6268     if (!Visited.insert(U).second)
6269       continue;
6270 
6271     const Instruction *UserI = cast<Instruction>(U->getUser());
6272     if (UserI->mayReadOrWriteMemory()) {
6273       Uses.push_back(U);
6274       continue;
6275     }
6276     if (!followUsersOfUseIn(A, U, UserI))
6277       continue;
6278     for (const Use &UU : UserI->uses())
6279       WL.push_back(&UU);
6280   }
6281 }
6282 
6283 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6284                                                   const Instruction *UserI) {
6285   // The loaded value is unrelated to the pointer argument, no need to
6286   // follow the users of the load.
6287   if (isa<LoadInst>(UserI))
6288     return false;
6289 
6290   // By default we follow all uses assuming UserI might leak information on U,
6291   // we have special handling for call sites operands though.
6292   const auto *CB = dyn_cast<CallBase>(UserI);
6293   if (!CB || !CB->isArgOperand(U))
6294     return true;
6295 
6296   // If the use is a call argument known not to be captured, the users of
6297   // the call do not need to be visited because they have to be unrelated to
6298   // the input. Note that this check is not trivial even though we disallow
6299   // general capturing of the underlying argument. The reason is that the
6300   // call might the argument "through return", which we allow and for which we
6301   // need to check call users.
6302   if (U->get()->getType()->isPointerTy()) {
6303     unsigned ArgNo = CB->getArgOperandNo(U);
6304     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6305         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6306     return !ArgNoCaptureAA.isAssumedNoCapture();
6307   }
6308 
6309   return true;
6310 }
6311 
6312 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6313                                             const Instruction *UserI) {
6314   assert(UserI->mayReadOrWriteMemory());
6315 
6316   switch (UserI->getOpcode()) {
6317   default:
6318     // TODO: Handle all atomics and other side-effect operations we know of.
6319     break;
6320   case Instruction::Load:
6321     // Loads cause the NO_READS property to disappear.
6322     removeAssumedBits(NO_READS);
6323     return;
6324 
6325   case Instruction::Store:
6326     // Stores cause the NO_WRITES property to disappear if the use is the
6327     // pointer operand. Note that we do assume that capturing was taken care of
6328     // somewhere else.
6329     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6330       removeAssumedBits(NO_WRITES);
6331     return;
6332 
6333   case Instruction::Call:
6334   case Instruction::CallBr:
6335   case Instruction::Invoke: {
6336     // For call sites we look at the argument memory behavior attribute (this
6337     // could be recursive!) in order to restrict our own state.
6338     const auto *CB = cast<CallBase>(UserI);
6339 
6340     // Give up on operand bundles.
6341     if (CB->isBundleOperand(U)) {
6342       indicatePessimisticFixpoint();
6343       return;
6344     }
6345 
6346     // Calling a function does read the function pointer, maybe write it if the
6347     // function is self-modifying.
6348     if (CB->isCallee(U)) {
6349       removeAssumedBits(NO_READS);
6350       break;
6351     }
6352 
6353     // Adjust the possible access behavior based on the information on the
6354     // argument.
6355     IRPosition Pos;
6356     if (U->get()->getType()->isPointerTy())
6357       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6358     else
6359       Pos = IRPosition::callsite_function(*CB);
6360     const auto &MemBehaviorAA =
6361         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6362     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6363     // and at least "known".
6364     intersectAssumedBits(MemBehaviorAA.getAssumed());
6365     return;
6366   }
6367   };
6368 
6369   // Generally, look at the "may-properties" and adjust the assumed state if we
6370   // did not trigger special handling before.
6371   if (UserI->mayReadFromMemory())
6372     removeAssumedBits(NO_READS);
6373   if (UserI->mayWriteToMemory())
6374     removeAssumedBits(NO_WRITES);
6375 }
6376 
6377 } // namespace
6378 
6379 /// -------------------- Memory Locations Attributes ---------------------------
6380 /// Includes read-none, argmemonly, inaccessiblememonly,
6381 /// inaccessiblememorargmemonly
6382 /// ----------------------------------------------------------------------------
6383 
6384 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6385     AAMemoryLocation::MemoryLocationsKind MLK) {
6386   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6387     return "all memory";
6388   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6389     return "no memory";
6390   std::string S = "memory:";
6391   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6392     S += "stack,";
6393   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6394     S += "constant,";
6395   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6396     S += "internal global,";
6397   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6398     S += "external global,";
6399   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6400     S += "argument,";
6401   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6402     S += "inaccessible,";
6403   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6404     S += "malloced,";
6405   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6406     S += "unknown,";
6407   S.pop_back();
6408   return S;
6409 }
6410 
6411 namespace {
6412 struct AAMemoryLocationImpl : public AAMemoryLocation {
6413 
6414   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6415       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6416     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6417       AccessKind2Accesses[u] = nullptr;
6418   }
6419 
6420   ~AAMemoryLocationImpl() {
6421     // The AccessSets are allocated via a BumpPtrAllocator, we call
6422     // the destructor manually.
6423     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6424       if (AccessKind2Accesses[u])
6425         AccessKind2Accesses[u]->~AccessSet();
6426   }
6427 
6428   /// See AbstractAttribute::initialize(...).
6429   void initialize(Attributor &A) override {
6430     intersectAssumedBits(BEST_STATE);
6431     getKnownStateFromValue(A, getIRPosition(), getState());
6432     AAMemoryLocation::initialize(A);
6433   }
6434 
6435   /// Return the memory behavior information encoded in the IR for \p IRP.
6436   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6437                                      BitIntegerState &State,
6438                                      bool IgnoreSubsumingPositions = false) {
6439     // For internal functions we ignore `argmemonly` and
6440     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6441     // constant propagation. It is unclear if this is the best way but it is
6442     // unlikely this will cause real performance problems. If we are deriving
6443     // attributes for the anchor function we even remove the attribute in
6444     // addition to ignoring it.
6445     bool UseArgMemOnly = true;
6446     Function *AnchorFn = IRP.getAnchorScope();
6447     if (AnchorFn && A.isRunOn(*AnchorFn))
6448       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6449 
6450     SmallVector<Attribute, 2> Attrs;
6451     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6452     for (const Attribute &Attr : Attrs) {
6453       switch (Attr.getKindAsEnum()) {
6454       case Attribute::ReadNone:
6455         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6456         break;
6457       case Attribute::InaccessibleMemOnly:
6458         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6459         break;
6460       case Attribute::ArgMemOnly:
6461         if (UseArgMemOnly)
6462           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6463         else
6464           IRP.removeAttrs({Attribute::ArgMemOnly});
6465         break;
6466       case Attribute::InaccessibleMemOrArgMemOnly:
6467         if (UseArgMemOnly)
6468           State.addKnownBits(inverseLocation(
6469               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6470         else
6471           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6472         break;
6473       default:
6474         llvm_unreachable("Unexpected attribute!");
6475       }
6476     }
6477   }
6478 
6479   /// See AbstractAttribute::getDeducedAttributes(...).
6480   void getDeducedAttributes(LLVMContext &Ctx,
6481                             SmallVectorImpl<Attribute> &Attrs) const override {
6482     assert(Attrs.size() == 0);
6483     if (isAssumedReadNone()) {
6484       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6485     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6486       if (isAssumedInaccessibleMemOnly())
6487         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6488       else if (isAssumedArgMemOnly())
6489         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6490       else if (isAssumedInaccessibleOrArgMemOnly())
6491         Attrs.push_back(
6492             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6493     }
6494     assert(Attrs.size() <= 1);
6495   }
6496 
6497   /// See AbstractAttribute::manifest(...).
6498   ChangeStatus manifest(Attributor &A) override {
6499     const IRPosition &IRP = getIRPosition();
6500 
6501     // Check if we would improve the existing attributes first.
6502     SmallVector<Attribute, 4> DeducedAttrs;
6503     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6504     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6505           return IRP.hasAttr(Attr.getKindAsEnum(),
6506                              /* IgnoreSubsumingPositions */ true);
6507         }))
6508       return ChangeStatus::UNCHANGED;
6509 
6510     // Clear existing attributes.
6511     IRP.removeAttrs(AttrKinds);
6512     if (isAssumedReadNone())
6513       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6514 
6515     // Use the generic manifest method.
6516     return IRAttribute::manifest(A);
6517   }
6518 
6519   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6520   bool checkForAllAccessesToMemoryKind(
6521       function_ref<bool(const Instruction *, const Value *, AccessKind,
6522                         MemoryLocationsKind)>
6523           Pred,
6524       MemoryLocationsKind RequestedMLK) const override {
6525     if (!isValidState())
6526       return false;
6527 
6528     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6529     if (AssumedMLK == NO_LOCATIONS)
6530       return true;
6531 
6532     unsigned Idx = 0;
6533     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6534          CurMLK *= 2, ++Idx) {
6535       if (CurMLK & RequestedMLK)
6536         continue;
6537 
6538       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6539         for (const AccessInfo &AI : *Accesses)
6540           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6541             return false;
6542     }
6543 
6544     return true;
6545   }
6546 
6547   ChangeStatus indicatePessimisticFixpoint() override {
6548     // If we give up and indicate a pessimistic fixpoint this instruction will
6549     // become an access for all potential access kinds:
6550     // TODO: Add pointers for argmemonly and globals to improve the results of
6551     //       checkForAllAccessesToMemoryKind.
6552     bool Changed = false;
6553     MemoryLocationsKind KnownMLK = getKnown();
6554     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6555     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6556       if (!(CurMLK & KnownMLK))
6557         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6558                                   getAccessKindFromInst(I));
6559     return AAMemoryLocation::indicatePessimisticFixpoint();
6560   }
6561 
6562 protected:
6563   /// Helper struct to tie together an instruction that has a read or write
6564   /// effect with the pointer it accesses (if any).
6565   struct AccessInfo {
6566 
6567     /// The instruction that caused the access.
6568     const Instruction *I;
6569 
6570     /// The base pointer that is accessed, or null if unknown.
6571     const Value *Ptr;
6572 
6573     /// The kind of access (read/write/read+write).
6574     AccessKind Kind;
6575 
6576     bool operator==(const AccessInfo &RHS) const {
6577       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6578     }
6579     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6580       if (LHS.I != RHS.I)
6581         return LHS.I < RHS.I;
6582       if (LHS.Ptr != RHS.Ptr)
6583         return LHS.Ptr < RHS.Ptr;
6584       if (LHS.Kind != RHS.Kind)
6585         return LHS.Kind < RHS.Kind;
6586       return false;
6587     }
6588   };
6589 
6590   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6591   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6592   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6593   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6594 
6595   /// Categorize the pointer arguments of CB that might access memory in
6596   /// AccessedLoc and update the state and access map accordingly.
6597   void
6598   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6599                                      AAMemoryLocation::StateType &AccessedLocs,
6600                                      bool &Changed);
6601 
6602   /// Return the kind(s) of location that may be accessed by \p V.
6603   AAMemoryLocation::MemoryLocationsKind
6604   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6605 
6606   /// Return the access kind as determined by \p I.
6607   AccessKind getAccessKindFromInst(const Instruction *I) {
6608     AccessKind AK = READ_WRITE;
6609     if (I) {
6610       AK = I->mayReadFromMemory() ? READ : NONE;
6611       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6612     }
6613     return AK;
6614   }
6615 
6616   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6617   /// an access of kind \p AK to a \p MLK memory location with the access
6618   /// pointer \p Ptr.
6619   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6620                                  MemoryLocationsKind MLK, const Instruction *I,
6621                                  const Value *Ptr, bool &Changed,
6622                                  AccessKind AK = READ_WRITE) {
6623 
6624     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6625     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6626     if (!Accesses)
6627       Accesses = new (Allocator) AccessSet();
6628     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6629     State.removeAssumedBits(MLK);
6630   }
6631 
6632   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6633   /// arguments, and update the state and access map accordingly.
6634   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6635                           AAMemoryLocation::StateType &State, bool &Changed);
6636 
6637   /// Used to allocate access sets.
6638   BumpPtrAllocator &Allocator;
6639 
6640   /// The set of IR attributes AAMemoryLocation deals with.
6641   static const Attribute::AttrKind AttrKinds[4];
6642 };
6643 
6644 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6645     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6646     Attribute::InaccessibleMemOrArgMemOnly};
6647 
6648 void AAMemoryLocationImpl::categorizePtrValue(
6649     Attributor &A, const Instruction &I, const Value &Ptr,
6650     AAMemoryLocation::StateType &State, bool &Changed) {
6651   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6652                     << Ptr << " ["
6653                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6654 
6655   auto StripGEPCB = [](Value *V) -> Value * {
6656     auto *GEP = dyn_cast<GEPOperator>(V);
6657     while (GEP) {
6658       V = GEP->getPointerOperand();
6659       GEP = dyn_cast<GEPOperator>(V);
6660     }
6661     return V;
6662   };
6663 
6664   auto VisitValueCB = [&](Value &V, const Instruction *,
6665                           AAMemoryLocation::StateType &T,
6666                           bool Stripped) -> bool {
6667     // TODO: recognize the TBAA used for constant accesses.
6668     MemoryLocationsKind MLK = NO_LOCATIONS;
6669     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6670     if (isa<UndefValue>(V))
6671       return true;
6672     if (auto *Arg = dyn_cast<Argument>(&V)) {
6673       if (Arg->hasByValAttr())
6674         MLK = NO_LOCAL_MEM;
6675       else
6676         MLK = NO_ARGUMENT_MEM;
6677     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6678       // Reading constant memory is not treated as a read "effect" by the
6679       // function attr pass so we won't neither. Constants defined by TBAA are
6680       // similar. (We know we do not write it because it is constant.)
6681       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6682         if (GVar->isConstant())
6683           return true;
6684 
6685       if (GV->hasLocalLinkage())
6686         MLK = NO_GLOBAL_INTERNAL_MEM;
6687       else
6688         MLK = NO_GLOBAL_EXTERNAL_MEM;
6689     } else if (isa<ConstantPointerNull>(V) &&
6690                !NullPointerIsDefined(getAssociatedFunction(),
6691                                      V.getType()->getPointerAddressSpace())) {
6692       return true;
6693     } else if (isa<AllocaInst>(V)) {
6694       MLK = NO_LOCAL_MEM;
6695     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6696       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6697           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6698       if (NoAliasAA.isAssumedNoAlias())
6699         MLK = NO_MALLOCED_MEM;
6700       else
6701         MLK = NO_UNKOWN_MEM;
6702     } else {
6703       MLK = NO_UNKOWN_MEM;
6704     }
6705 
6706     assert(MLK != NO_LOCATIONS && "No location specified!");
6707     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6708                               getAccessKindFromInst(&I));
6709     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6710                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6711                       << "\n");
6712     return true;
6713   };
6714 
6715   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6716           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6717           /* UseValueSimplify */ true,
6718           /* MaxValues */ 32, StripGEPCB)) {
6719     LLVM_DEBUG(
6720         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6721     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6722                               getAccessKindFromInst(&I));
6723   } else {
6724     LLVM_DEBUG(
6725         dbgs()
6726         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6727         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6728   }
6729 }
6730 
6731 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6732     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6733     bool &Changed) {
6734   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6735 
6736     // Skip non-pointer arguments.
6737     const Value *ArgOp = CB.getArgOperand(ArgNo);
6738     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6739       continue;
6740 
6741     // Skip readnone arguments.
6742     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6743     const auto &ArgOpMemLocationAA =
6744         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6745 
6746     if (ArgOpMemLocationAA.isAssumedReadNone())
6747       continue;
6748 
6749     // Categorize potentially accessed pointer arguments as if there was an
6750     // access instruction with them as pointer.
6751     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6752   }
6753 }
6754 
6755 AAMemoryLocation::MemoryLocationsKind
6756 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6757                                                   bool &Changed) {
6758   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6759                     << I << "\n");
6760 
6761   AAMemoryLocation::StateType AccessedLocs;
6762   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6763 
6764   if (auto *CB = dyn_cast<CallBase>(&I)) {
6765 
6766     // First check if we assume any memory is access is visible.
6767     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6768         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6769     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6770                       << " [" << CBMemLocationAA << "]\n");
6771 
6772     if (CBMemLocationAA.isAssumedReadNone())
6773       return NO_LOCATIONS;
6774 
6775     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6776       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6777                                 Changed, getAccessKindFromInst(&I));
6778       return AccessedLocs.getAssumed();
6779     }
6780 
6781     uint32_t CBAssumedNotAccessedLocs =
6782         CBMemLocationAA.getAssumedNotAccessedLocation();
6783 
6784     // Set the argmemonly and global bit as we handle them separately below.
6785     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6786         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6787 
6788     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6789       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6790         continue;
6791       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6792                                 getAccessKindFromInst(&I));
6793     }
6794 
6795     // Now handle global memory if it might be accessed. This is slightly tricky
6796     // as NO_GLOBAL_MEM has multiple bits set.
6797     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6798     if (HasGlobalAccesses) {
6799       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6800                             AccessKind Kind, MemoryLocationsKind MLK) {
6801         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6802                                   getAccessKindFromInst(&I));
6803         return true;
6804       };
6805       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6806               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6807         return AccessedLocs.getWorstState();
6808     }
6809 
6810     LLVM_DEBUG(
6811         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6812                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6813 
6814     // Now handle argument memory if it might be accessed.
6815     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6816     if (HasArgAccesses)
6817       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6818 
6819     LLVM_DEBUG(
6820         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6821                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6822 
6823     return AccessedLocs.getAssumed();
6824   }
6825 
6826   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6827     LLVM_DEBUG(
6828         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6829                << I << " [" << *Ptr << "]\n");
6830     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6831     return AccessedLocs.getAssumed();
6832   }
6833 
6834   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6835                     << I << "\n");
6836   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6837                             getAccessKindFromInst(&I));
6838   return AccessedLocs.getAssumed();
6839 }
6840 
6841 /// An AA to represent the memory behavior function attributes.
6842 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6843   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6844       : AAMemoryLocationImpl(IRP, A) {}
6845 
6846   /// See AbstractAttribute::updateImpl(Attributor &A).
6847   virtual ChangeStatus updateImpl(Attributor &A) override {
6848 
6849     const auto &MemBehaviorAA =
6850         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6851     if (MemBehaviorAA.isAssumedReadNone()) {
6852       if (MemBehaviorAA.isKnownReadNone())
6853         return indicateOptimisticFixpoint();
6854       assert(isAssumedReadNone() &&
6855              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6856       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6857       return ChangeStatus::UNCHANGED;
6858     }
6859 
6860     // The current assumed state used to determine a change.
6861     auto AssumedState = getAssumed();
6862     bool Changed = false;
6863 
6864     auto CheckRWInst = [&](Instruction &I) {
6865       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6866       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6867                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6868       removeAssumedBits(inverseLocation(MLK, false, false));
6869       // Stop once only the valid bit set in the *not assumed location*, thus
6870       // once we don't actually exclude any memory locations in the state.
6871       return getAssumedNotAccessedLocation() != VALID_STATE;
6872     };
6873 
6874     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6875       return indicatePessimisticFixpoint();
6876 
6877     Changed |= AssumedState != getAssumed();
6878     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6879   }
6880 
6881   /// See AbstractAttribute::trackStatistics()
6882   void trackStatistics() const override {
6883     if (isAssumedReadNone())
6884       STATS_DECLTRACK_FN_ATTR(readnone)
6885     else if (isAssumedArgMemOnly())
6886       STATS_DECLTRACK_FN_ATTR(argmemonly)
6887     else if (isAssumedInaccessibleMemOnly())
6888       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6889     else if (isAssumedInaccessibleOrArgMemOnly())
6890       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6891   }
6892 };
6893 
6894 /// AAMemoryLocation attribute for call sites.
6895 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6896   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6897       : AAMemoryLocationImpl(IRP, A) {}
6898 
6899   /// See AbstractAttribute::initialize(...).
6900   void initialize(Attributor &A) override {
6901     AAMemoryLocationImpl::initialize(A);
6902     Function *F = getAssociatedFunction();
6903     if (!F || F->isDeclaration())
6904       indicatePessimisticFixpoint();
6905   }
6906 
6907   /// See AbstractAttribute::updateImpl(...).
6908   ChangeStatus updateImpl(Attributor &A) override {
6909     // TODO: Once we have call site specific value information we can provide
6910     //       call site specific liveness liveness information and then it makes
6911     //       sense to specialize attributes for call sites arguments instead of
6912     //       redirecting requests to the callee argument.
6913     Function *F = getAssociatedFunction();
6914     const IRPosition &FnPos = IRPosition::function(*F);
6915     auto &FnAA =
6916         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6917     bool Changed = false;
6918     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6919                           AccessKind Kind, MemoryLocationsKind MLK) {
6920       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6921                                 getAccessKindFromInst(I));
6922       return true;
6923     };
6924     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6925       return indicatePessimisticFixpoint();
6926     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6927   }
6928 
6929   /// See AbstractAttribute::trackStatistics()
6930   void trackStatistics() const override {
6931     if (isAssumedReadNone())
6932       STATS_DECLTRACK_CS_ATTR(readnone)
6933   }
6934 };
6935 
6936 /// ------------------ Value Constant Range Attribute -------------------------
6937 
6938 struct AAValueConstantRangeImpl : AAValueConstantRange {
6939   using StateType = IntegerRangeState;
6940   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6941       : AAValueConstantRange(IRP, A) {}
6942 
6943   /// See AbstractAttribute::getAsStr().
6944   const std::string getAsStr() const override {
6945     std::string Str;
6946     llvm::raw_string_ostream OS(Str);
6947     OS << "range(" << getBitWidth() << ")<";
6948     getKnown().print(OS);
6949     OS << " / ";
6950     getAssumed().print(OS);
6951     OS << ">";
6952     return OS.str();
6953   }
6954 
6955   /// Helper function to get a SCEV expr for the associated value at program
6956   /// point \p I.
6957   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6958     if (!getAnchorScope())
6959       return nullptr;
6960 
6961     ScalarEvolution *SE =
6962         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6963             *getAnchorScope());
6964 
6965     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6966         *getAnchorScope());
6967 
6968     if (!SE || !LI)
6969       return nullptr;
6970 
6971     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6972     if (!I)
6973       return S;
6974 
6975     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6976   }
6977 
6978   /// Helper function to get a range from SCEV for the associated value at
6979   /// program point \p I.
6980   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6981                                          const Instruction *I = nullptr) const {
6982     if (!getAnchorScope())
6983       return getWorstState(getBitWidth());
6984 
6985     ScalarEvolution *SE =
6986         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6987             *getAnchorScope());
6988 
6989     const SCEV *S = getSCEV(A, I);
6990     if (!SE || !S)
6991       return getWorstState(getBitWidth());
6992 
6993     return SE->getUnsignedRange(S);
6994   }
6995 
6996   /// Helper function to get a range from LVI for the associated value at
6997   /// program point \p I.
6998   ConstantRange
6999   getConstantRangeFromLVI(Attributor &A,
7000                           const Instruction *CtxI = nullptr) const {
7001     if (!getAnchorScope())
7002       return getWorstState(getBitWidth());
7003 
7004     LazyValueInfo *LVI =
7005         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7006             *getAnchorScope());
7007 
7008     if (!LVI || !CtxI)
7009       return getWorstState(getBitWidth());
7010     return LVI->getConstantRange(&getAssociatedValue(),
7011                                  const_cast<Instruction *>(CtxI));
7012   }
7013 
7014   /// See AAValueConstantRange::getKnownConstantRange(..).
7015   ConstantRange
7016   getKnownConstantRange(Attributor &A,
7017                         const Instruction *CtxI = nullptr) const override {
7018     if (!CtxI || CtxI == getCtxI())
7019       return getKnown();
7020 
7021     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7022     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7023     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7024   }
7025 
7026   /// See AAValueConstantRange::getAssumedConstantRange(..).
7027   ConstantRange
7028   getAssumedConstantRange(Attributor &A,
7029                           const Instruction *CtxI = nullptr) const override {
7030     // TODO: Make SCEV use Attributor assumption.
7031     //       We may be able to bound a variable range via assumptions in
7032     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7033     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7034 
7035     if (!CtxI || CtxI == getCtxI())
7036       return getAssumed();
7037 
7038     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7039     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7040     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7041   }
7042 
7043   /// See AbstractAttribute::initialize(..).
7044   void initialize(Attributor &A) override {
7045     // Intersect a range given by SCEV.
7046     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7047 
7048     // Intersect a range given by LVI.
7049     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7050   }
7051 
7052   /// Helper function to create MDNode for range metadata.
7053   static MDNode *
7054   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7055                             const ConstantRange &AssumedConstantRange) {
7056     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7057                                   Ty, AssumedConstantRange.getLower())),
7058                               ConstantAsMetadata::get(ConstantInt::get(
7059                                   Ty, AssumedConstantRange.getUpper()))};
7060     return MDNode::get(Ctx, LowAndHigh);
7061   }
7062 
7063   /// Return true if \p Assumed is included in \p KnownRanges.
7064   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7065 
7066     if (Assumed.isFullSet())
7067       return false;
7068 
7069     if (!KnownRanges)
7070       return true;
7071 
7072     // If multiple ranges are annotated in IR, we give up to annotate assumed
7073     // range for now.
7074 
7075     // TODO:  If there exists a known range which containts assumed range, we
7076     // can say assumed range is better.
7077     if (KnownRanges->getNumOperands() > 2)
7078       return false;
7079 
7080     ConstantInt *Lower =
7081         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7082     ConstantInt *Upper =
7083         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7084 
7085     ConstantRange Known(Lower->getValue(), Upper->getValue());
7086     return Known.contains(Assumed) && Known != Assumed;
7087   }
7088 
7089   /// Helper function to set range metadata.
7090   static bool
7091   setRangeMetadataIfisBetterRange(Instruction *I,
7092                                   const ConstantRange &AssumedConstantRange) {
7093     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7094     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7095       if (!AssumedConstantRange.isEmptySet()) {
7096         I->setMetadata(LLVMContext::MD_range,
7097                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7098                                                  AssumedConstantRange));
7099         return true;
7100       }
7101     }
7102     return false;
7103   }
7104 
7105   /// See AbstractAttribute::manifest()
7106   ChangeStatus manifest(Attributor &A) override {
7107     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7108     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7109     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7110 
7111     auto &V = getAssociatedValue();
7112     if (!AssumedConstantRange.isEmptySet() &&
7113         !AssumedConstantRange.isSingleElement()) {
7114       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7115         assert(I == getCtxI() && "Should not annotate an instruction which is "
7116                                  "not the context instruction");
7117         if (isa<CallInst>(I) || isa<LoadInst>(I))
7118           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7119             Changed = ChangeStatus::CHANGED;
7120       }
7121     }
7122 
7123     return Changed;
7124   }
7125 };
7126 
7127 struct AAValueConstantRangeArgument final
7128     : AAArgumentFromCallSiteArguments<
7129           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
7130   using Base = AAArgumentFromCallSiteArguments<
7131       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
7132   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7133       : Base(IRP, A) {}
7134 
7135   /// See AbstractAttribute::initialize(..).
7136   void initialize(Attributor &A) override {
7137     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7138       indicatePessimisticFixpoint();
7139     } else {
7140       Base::initialize(A);
7141     }
7142   }
7143 
7144   /// See AbstractAttribute::trackStatistics()
7145   void trackStatistics() const override {
7146     STATS_DECLTRACK_ARG_ATTR(value_range)
7147   }
7148 };
7149 
7150 struct AAValueConstantRangeReturned
7151     : AAReturnedFromReturnedValues<AAValueConstantRange,
7152                                    AAValueConstantRangeImpl> {
7153   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
7154                                             AAValueConstantRangeImpl>;
7155   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7156       : Base(IRP, A) {}
7157 
7158   /// See AbstractAttribute::initialize(...).
7159   void initialize(Attributor &A) override {}
7160 
7161   /// See AbstractAttribute::trackStatistics()
7162   void trackStatistics() const override {
7163     STATS_DECLTRACK_FNRET_ATTR(value_range)
7164   }
7165 };
7166 
7167 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7168   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7169       : AAValueConstantRangeImpl(IRP, A) {}
7170 
7171   /// See AbstractAttribute::initialize(...).
7172   void initialize(Attributor &A) override {
7173     AAValueConstantRangeImpl::initialize(A);
7174     Value &V = getAssociatedValue();
7175 
7176     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7177       unionAssumed(ConstantRange(C->getValue()));
7178       indicateOptimisticFixpoint();
7179       return;
7180     }
7181 
7182     if (isa<UndefValue>(&V)) {
7183       // Collapse the undef state to 0.
7184       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7185       indicateOptimisticFixpoint();
7186       return;
7187     }
7188 
7189     if (isa<CallBase>(&V))
7190       return;
7191 
7192     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7193       return;
7194     // If it is a load instruction with range metadata, use it.
7195     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7196       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7197         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7198         return;
7199       }
7200 
7201     // We can work with PHI and select instruction as we traverse their operands
7202     // during update.
7203     if (isa<SelectInst>(V) || isa<PHINode>(V))
7204       return;
7205 
7206     // Otherwise we give up.
7207     indicatePessimisticFixpoint();
7208 
7209     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7210                       << getAssociatedValue() << "\n");
7211   }
7212 
7213   bool calculateBinaryOperator(
7214       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7215       const Instruction *CtxI,
7216       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7217     Value *LHS = BinOp->getOperand(0);
7218     Value *RHS = BinOp->getOperand(1);
7219     // TODO: Allow non integers as well.
7220     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7221       return false;
7222 
7223     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7224         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
7225     QuerriedAAs.push_back(&LHSAA);
7226     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7227 
7228     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7229         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
7230     QuerriedAAs.push_back(&RHSAA);
7231     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7232 
7233     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7234 
7235     T.unionAssumed(AssumedRange);
7236 
7237     // TODO: Track a known state too.
7238 
7239     return T.isValidState();
7240   }
7241 
7242   bool calculateCastInst(
7243       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7244       const Instruction *CtxI,
7245       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7246     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7247     // TODO: Allow non integers as well.
7248     Value &OpV = *CastI->getOperand(0);
7249     if (!OpV.getType()->isIntegerTy())
7250       return false;
7251 
7252     auto &OpAA = A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV),
7253                                                   DepClassTy::REQUIRED);
7254     QuerriedAAs.push_back(&OpAA);
7255     T.unionAssumed(
7256         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7257     return T.isValidState();
7258   }
7259 
7260   bool
7261   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7262                    const Instruction *CtxI,
7263                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7264     Value *LHS = CmpI->getOperand(0);
7265     Value *RHS = CmpI->getOperand(1);
7266     // TODO: Allow non integers as well.
7267     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7268       return false;
7269 
7270     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7271         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
7272     QuerriedAAs.push_back(&LHSAA);
7273     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7274         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
7275     QuerriedAAs.push_back(&RHSAA);
7276 
7277     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7278     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7279 
7280     // If one of them is empty set, we can't decide.
7281     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7282       return true;
7283 
7284     bool MustTrue = false, MustFalse = false;
7285 
7286     auto AllowedRegion =
7287         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7288 
7289     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7290         CmpI->getPredicate(), RHSAARange);
7291 
7292     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7293       MustFalse = true;
7294 
7295     if (SatisfyingRegion.contains(LHSAARange))
7296       MustTrue = true;
7297 
7298     assert((!MustTrue || !MustFalse) &&
7299            "Either MustTrue or MustFalse should be false!");
7300 
7301     if (MustTrue)
7302       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7303     else if (MustFalse)
7304       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7305     else
7306       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7307 
7308     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7309                       << " " << RHSAA << "\n");
7310 
7311     // TODO: Track a known state too.
7312     return T.isValidState();
7313   }
7314 
7315   /// See AbstractAttribute::updateImpl(...).
7316   ChangeStatus updateImpl(Attributor &A) override {
7317     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7318                             IntegerRangeState &T, bool Stripped) -> bool {
7319       Instruction *I = dyn_cast<Instruction>(&V);
7320       if (!I || isa<CallBase>(I)) {
7321 
7322         // If the value is not instruction, we query AA to Attributor.
7323         const auto &AA = A.getAAFor<AAValueConstantRange>(
7324             *this, IRPosition::value(V), DepClassTy::REQUIRED);
7325 
7326         // Clamp operator is not used to utilize a program point CtxI.
7327         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7328 
7329         return T.isValidState();
7330       }
7331 
7332       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7333       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7334         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7335           return false;
7336       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7337         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7338           return false;
7339       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7340         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7341           return false;
7342       } else {
7343         // Give up with other instructions.
7344         // TODO: Add other instructions
7345 
7346         T.indicatePessimisticFixpoint();
7347         return false;
7348       }
7349 
7350       // Catch circular reasoning in a pessimistic way for now.
7351       // TODO: Check how the range evolves and if we stripped anything, see also
7352       //       AADereferenceable or AAAlign for similar situations.
7353       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7354         if (QueriedAA != this)
7355           continue;
7356         // If we are in a stady state we do not need to worry.
7357         if (T.getAssumed() == getState().getAssumed())
7358           continue;
7359         T.indicatePessimisticFixpoint();
7360       }
7361 
7362       return T.isValidState();
7363     };
7364 
7365     IntegerRangeState T(getBitWidth());
7366 
7367     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7368             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7369             /* UseValueSimplify */ false))
7370       return indicatePessimisticFixpoint();
7371 
7372     return clampStateAndIndicateChange(getState(), T);
7373   }
7374 
7375   /// See AbstractAttribute::trackStatistics()
7376   void trackStatistics() const override {
7377     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7378   }
7379 };
7380 
7381 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7382   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7383       : AAValueConstantRangeImpl(IRP, A) {}
7384 
7385   /// See AbstractAttribute::initialize(...).
7386   ChangeStatus updateImpl(Attributor &A) override {
7387     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7388                      "not be called");
7389   }
7390 
7391   /// See AbstractAttribute::trackStatistics()
7392   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7393 };
7394 
7395 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7396   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7397       : AAValueConstantRangeFunction(IRP, A) {}
7398 
7399   /// See AbstractAttribute::trackStatistics()
7400   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7401 };
7402 
7403 struct AAValueConstantRangeCallSiteReturned
7404     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7405                                      AAValueConstantRangeImpl> {
7406   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7407       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7408                                        AAValueConstantRangeImpl>(IRP, A) {}
7409 
7410   /// See AbstractAttribute::initialize(...).
7411   void initialize(Attributor &A) override {
7412     // If it is a load instruction with range metadata, use the metadata.
7413     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7414       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7415         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7416 
7417     AAValueConstantRangeImpl::initialize(A);
7418   }
7419 
7420   /// See AbstractAttribute::trackStatistics()
7421   void trackStatistics() const override {
7422     STATS_DECLTRACK_CSRET_ATTR(value_range)
7423   }
7424 };
7425 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7426   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7427       : AAValueConstantRangeFloating(IRP, A) {}
7428 
7429   /// See AbstractAttribute::manifest()
7430   ChangeStatus manifest(Attributor &A) override {
7431     return ChangeStatus::UNCHANGED;
7432   }
7433 
7434   /// See AbstractAttribute::trackStatistics()
7435   void trackStatistics() const override {
7436     STATS_DECLTRACK_CSARG_ATTR(value_range)
7437   }
7438 };
7439 
7440 /// ------------------ Potential Values Attribute -------------------------
7441 
7442 struct AAPotentialValuesImpl : AAPotentialValues {
7443   using StateType = PotentialConstantIntValuesState;
7444 
7445   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7446       : AAPotentialValues(IRP, A) {}
7447 
7448   /// See AbstractAttribute::getAsStr().
7449   const std::string getAsStr() const override {
7450     std::string Str;
7451     llvm::raw_string_ostream OS(Str);
7452     OS << getState();
7453     return OS.str();
7454   }
7455 
7456   /// See AbstractAttribute::updateImpl(...).
7457   ChangeStatus updateImpl(Attributor &A) override {
7458     return indicatePessimisticFixpoint();
7459   }
7460 };
7461 
7462 struct AAPotentialValuesArgument final
7463     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7464                                       PotentialConstantIntValuesState> {
7465   using Base =
7466       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7467                                       PotentialConstantIntValuesState>;
7468   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7469       : Base(IRP, A) {}
7470 
7471   /// See AbstractAttribute::initialize(..).
7472   void initialize(Attributor &A) override {
7473     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7474       indicatePessimisticFixpoint();
7475     } else {
7476       Base::initialize(A);
7477     }
7478   }
7479 
7480   /// See AbstractAttribute::trackStatistics()
7481   void trackStatistics() const override {
7482     STATS_DECLTRACK_ARG_ATTR(potential_values)
7483   }
7484 };
7485 
7486 struct AAPotentialValuesReturned
7487     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7488   using Base =
7489       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7490   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7491       : Base(IRP, A) {}
7492 
7493   /// See AbstractAttribute::trackStatistics()
7494   void trackStatistics() const override {
7495     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7496   }
7497 };
7498 
7499 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7500   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7501       : AAPotentialValuesImpl(IRP, A) {}
7502 
7503   /// See AbstractAttribute::initialize(..).
7504   void initialize(Attributor &A) override {
7505     Value &V = getAssociatedValue();
7506 
7507     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7508       unionAssumed(C->getValue());
7509       indicateOptimisticFixpoint();
7510       return;
7511     }
7512 
7513     if (isa<UndefValue>(&V)) {
7514       unionAssumedWithUndef();
7515       indicateOptimisticFixpoint();
7516       return;
7517     }
7518 
7519     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7520       return;
7521 
7522     if (isa<SelectInst>(V) || isa<PHINode>(V))
7523       return;
7524 
7525     indicatePessimisticFixpoint();
7526 
7527     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7528                       << getAssociatedValue() << "\n");
7529   }
7530 
7531   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7532                                 const APInt &RHS) {
7533     ICmpInst::Predicate Pred = ICI->getPredicate();
7534     switch (Pred) {
7535     case ICmpInst::ICMP_UGT:
7536       return LHS.ugt(RHS);
7537     case ICmpInst::ICMP_SGT:
7538       return LHS.sgt(RHS);
7539     case ICmpInst::ICMP_EQ:
7540       return LHS.eq(RHS);
7541     case ICmpInst::ICMP_UGE:
7542       return LHS.uge(RHS);
7543     case ICmpInst::ICMP_SGE:
7544       return LHS.sge(RHS);
7545     case ICmpInst::ICMP_ULT:
7546       return LHS.ult(RHS);
7547     case ICmpInst::ICMP_SLT:
7548       return LHS.slt(RHS);
7549     case ICmpInst::ICMP_NE:
7550       return LHS.ne(RHS);
7551     case ICmpInst::ICMP_ULE:
7552       return LHS.ule(RHS);
7553     case ICmpInst::ICMP_SLE:
7554       return LHS.sle(RHS);
7555     default:
7556       llvm_unreachable("Invalid ICmp predicate!");
7557     }
7558   }
7559 
7560   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7561                                  uint32_t ResultBitWidth) {
7562     Instruction::CastOps CastOp = CI->getOpcode();
7563     switch (CastOp) {
7564     default:
7565       llvm_unreachable("unsupported or not integer cast");
7566     case Instruction::Trunc:
7567       return Src.trunc(ResultBitWidth);
7568     case Instruction::SExt:
7569       return Src.sext(ResultBitWidth);
7570     case Instruction::ZExt:
7571       return Src.zext(ResultBitWidth);
7572     case Instruction::BitCast:
7573       return Src;
7574     }
7575   }
7576 
7577   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7578                                        const APInt &LHS, const APInt &RHS,
7579                                        bool &SkipOperation, bool &Unsupported) {
7580     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7581     // Unsupported is set to true when the binary operator is not supported.
7582     // SkipOperation is set to true when UB occur with the given operand pair
7583     // (LHS, RHS).
7584     // TODO: we should look at nsw and nuw keywords to handle operations
7585     //       that create poison or undef value.
7586     switch (BinOpcode) {
7587     default:
7588       Unsupported = true;
7589       return LHS;
7590     case Instruction::Add:
7591       return LHS + RHS;
7592     case Instruction::Sub:
7593       return LHS - RHS;
7594     case Instruction::Mul:
7595       return LHS * RHS;
7596     case Instruction::UDiv:
7597       if (RHS.isNullValue()) {
7598         SkipOperation = true;
7599         return LHS;
7600       }
7601       return LHS.udiv(RHS);
7602     case Instruction::SDiv:
7603       if (RHS.isNullValue()) {
7604         SkipOperation = true;
7605         return LHS;
7606       }
7607       return LHS.sdiv(RHS);
7608     case Instruction::URem:
7609       if (RHS.isNullValue()) {
7610         SkipOperation = true;
7611         return LHS;
7612       }
7613       return LHS.urem(RHS);
7614     case Instruction::SRem:
7615       if (RHS.isNullValue()) {
7616         SkipOperation = true;
7617         return LHS;
7618       }
7619       return LHS.srem(RHS);
7620     case Instruction::Shl:
7621       return LHS.shl(RHS);
7622     case Instruction::LShr:
7623       return LHS.lshr(RHS);
7624     case Instruction::AShr:
7625       return LHS.ashr(RHS);
7626     case Instruction::And:
7627       return LHS & RHS;
7628     case Instruction::Or:
7629       return LHS | RHS;
7630     case Instruction::Xor:
7631       return LHS ^ RHS;
7632     }
7633   }
7634 
7635   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7636                                            const APInt &LHS, const APInt &RHS) {
7637     bool SkipOperation = false;
7638     bool Unsupported = false;
7639     APInt Result =
7640         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7641     if (Unsupported)
7642       return false;
7643     // If SkipOperation is true, we can ignore this operand pair (L, R).
7644     if (!SkipOperation)
7645       unionAssumed(Result);
7646     return isValidState();
7647   }
7648 
7649   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7650     auto AssumedBefore = getAssumed();
7651     Value *LHS = ICI->getOperand(0);
7652     Value *RHS = ICI->getOperand(1);
7653     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7654       return indicatePessimisticFixpoint();
7655 
7656     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7657                                                 DepClassTy::REQUIRED);
7658     if (!LHSAA.isValidState())
7659       return indicatePessimisticFixpoint();
7660 
7661     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7662                                                 DepClassTy::REQUIRED);
7663     if (!RHSAA.isValidState())
7664       return indicatePessimisticFixpoint();
7665 
7666     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7667     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7668 
7669     // TODO: make use of undef flag to limit potential values aggressively.
7670     bool MaybeTrue = false, MaybeFalse = false;
7671     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7672     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7673       // The result of any comparison between undefs can be soundly replaced
7674       // with undef.
7675       unionAssumedWithUndef();
7676     } else if (LHSAA.undefIsContained()) {
7677       bool MaybeTrue = false, MaybeFalse = false;
7678       for (const APInt &R : RHSAAPVS) {
7679         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7680         MaybeTrue |= CmpResult;
7681         MaybeFalse |= !CmpResult;
7682         if (MaybeTrue & MaybeFalse)
7683           return indicatePessimisticFixpoint();
7684       }
7685     } else if (RHSAA.undefIsContained()) {
7686       for (const APInt &L : LHSAAPVS) {
7687         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7688         MaybeTrue |= CmpResult;
7689         MaybeFalse |= !CmpResult;
7690         if (MaybeTrue & MaybeFalse)
7691           return indicatePessimisticFixpoint();
7692       }
7693     } else {
7694       for (const APInt &L : LHSAAPVS) {
7695         for (const APInt &R : RHSAAPVS) {
7696           bool CmpResult = calculateICmpInst(ICI, L, R);
7697           MaybeTrue |= CmpResult;
7698           MaybeFalse |= !CmpResult;
7699           if (MaybeTrue & MaybeFalse)
7700             return indicatePessimisticFixpoint();
7701         }
7702       }
7703     }
7704     if (MaybeTrue)
7705       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7706     if (MaybeFalse)
7707       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7708     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7709                                          : ChangeStatus::CHANGED;
7710   }
7711 
7712   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7713     auto AssumedBefore = getAssumed();
7714     Value *LHS = SI->getTrueValue();
7715     Value *RHS = SI->getFalseValue();
7716     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7717       return indicatePessimisticFixpoint();
7718 
7719     // TODO: Use assumed simplified condition value
7720     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7721                                                 DepClassTy::REQUIRED);
7722     if (!LHSAA.isValidState())
7723       return indicatePessimisticFixpoint();
7724 
7725     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7726                                                 DepClassTy::REQUIRED);
7727     if (!RHSAA.isValidState())
7728       return indicatePessimisticFixpoint();
7729 
7730     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7731       // select i1 *, undef , undef => undef
7732       unionAssumedWithUndef();
7733     else {
7734       unionAssumed(LHSAA);
7735       unionAssumed(RHSAA);
7736     }
7737     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7738                                          : ChangeStatus::CHANGED;
7739   }
7740 
7741   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7742     auto AssumedBefore = getAssumed();
7743     if (!CI->isIntegerCast())
7744       return indicatePessimisticFixpoint();
7745     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7746     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7747     Value *Src = CI->getOperand(0);
7748     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7749                                                 DepClassTy::REQUIRED);
7750     if (!SrcAA.isValidState())
7751       return indicatePessimisticFixpoint();
7752     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7753     if (SrcAA.undefIsContained())
7754       unionAssumedWithUndef();
7755     else {
7756       for (const APInt &S : SrcAAPVS) {
7757         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7758         unionAssumed(T);
7759       }
7760     }
7761     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7762                                          : ChangeStatus::CHANGED;
7763   }
7764 
7765   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7766     auto AssumedBefore = getAssumed();
7767     Value *LHS = BinOp->getOperand(0);
7768     Value *RHS = BinOp->getOperand(1);
7769     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7770       return indicatePessimisticFixpoint();
7771 
7772     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7773                                                 DepClassTy::REQUIRED);
7774     if (!LHSAA.isValidState())
7775       return indicatePessimisticFixpoint();
7776 
7777     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7778                                                 DepClassTy::REQUIRED);
7779     if (!RHSAA.isValidState())
7780       return indicatePessimisticFixpoint();
7781 
7782     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7783     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7784     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7785 
7786     // TODO: make use of undef flag to limit potential values aggressively.
7787     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7788       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7789         return indicatePessimisticFixpoint();
7790     } else if (LHSAA.undefIsContained()) {
7791       for (const APInt &R : RHSAAPVS) {
7792         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7793           return indicatePessimisticFixpoint();
7794       }
7795     } else if (RHSAA.undefIsContained()) {
7796       for (const APInt &L : LHSAAPVS) {
7797         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7798           return indicatePessimisticFixpoint();
7799       }
7800     } else {
7801       for (const APInt &L : LHSAAPVS) {
7802         for (const APInt &R : RHSAAPVS) {
7803           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7804             return indicatePessimisticFixpoint();
7805         }
7806       }
7807     }
7808     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7809                                          : ChangeStatus::CHANGED;
7810   }
7811 
7812   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7813     auto AssumedBefore = getAssumed();
7814     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7815       Value *IncomingValue = PHI->getIncomingValue(u);
7816       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7817           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7818       if (!PotentialValuesAA.isValidState())
7819         return indicatePessimisticFixpoint();
7820       if (PotentialValuesAA.undefIsContained())
7821         unionAssumedWithUndef();
7822       else
7823         unionAssumed(PotentialValuesAA.getAssumed());
7824     }
7825     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7826                                          : ChangeStatus::CHANGED;
7827   }
7828 
7829   /// See AbstractAttribute::updateImpl(...).
7830   ChangeStatus updateImpl(Attributor &A) override {
7831     Value &V = getAssociatedValue();
7832     Instruction *I = dyn_cast<Instruction>(&V);
7833 
7834     if (auto *ICI = dyn_cast<ICmpInst>(I))
7835       return updateWithICmpInst(A, ICI);
7836 
7837     if (auto *SI = dyn_cast<SelectInst>(I))
7838       return updateWithSelectInst(A, SI);
7839 
7840     if (auto *CI = dyn_cast<CastInst>(I))
7841       return updateWithCastInst(A, CI);
7842 
7843     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7844       return updateWithBinaryOperator(A, BinOp);
7845 
7846     if (auto *PHI = dyn_cast<PHINode>(I))
7847       return updateWithPHINode(A, PHI);
7848 
7849     return indicatePessimisticFixpoint();
7850   }
7851 
7852   /// See AbstractAttribute::trackStatistics()
7853   void trackStatistics() const override {
7854     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7855   }
7856 };
7857 
7858 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7859   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7860       : AAPotentialValuesImpl(IRP, A) {}
7861 
7862   /// See AbstractAttribute::initialize(...).
7863   ChangeStatus updateImpl(Attributor &A) override {
7864     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7865                      "not be called");
7866   }
7867 
7868   /// See AbstractAttribute::trackStatistics()
7869   void trackStatistics() const override {
7870     STATS_DECLTRACK_FN_ATTR(potential_values)
7871   }
7872 };
7873 
7874 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7875   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7876       : AAPotentialValuesFunction(IRP, A) {}
7877 
7878   /// See AbstractAttribute::trackStatistics()
7879   void trackStatistics() const override {
7880     STATS_DECLTRACK_CS_ATTR(potential_values)
7881   }
7882 };
7883 
7884 struct AAPotentialValuesCallSiteReturned
7885     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7886   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7887       : AACallSiteReturnedFromReturned<AAPotentialValues,
7888                                        AAPotentialValuesImpl>(IRP, A) {}
7889 
7890   /// See AbstractAttribute::trackStatistics()
7891   void trackStatistics() const override {
7892     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7893   }
7894 };
7895 
7896 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7897   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7898       : AAPotentialValuesFloating(IRP, A) {}
7899 
7900   /// See AbstractAttribute::initialize(..).
7901   void initialize(Attributor &A) override {
7902     Value &V = getAssociatedValue();
7903 
7904     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7905       unionAssumed(C->getValue());
7906       indicateOptimisticFixpoint();
7907       return;
7908     }
7909 
7910     if (isa<UndefValue>(&V)) {
7911       unionAssumedWithUndef();
7912       indicateOptimisticFixpoint();
7913       return;
7914     }
7915   }
7916 
7917   /// See AbstractAttribute::updateImpl(...).
7918   ChangeStatus updateImpl(Attributor &A) override {
7919     Value &V = getAssociatedValue();
7920     auto AssumedBefore = getAssumed();
7921     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
7922                                              DepClassTy::REQUIRED);
7923     const auto &S = AA.getAssumed();
7924     unionAssumed(S);
7925     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7926                                          : ChangeStatus::CHANGED;
7927   }
7928 
7929   /// See AbstractAttribute::trackStatistics()
7930   void trackStatistics() const override {
7931     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7932   }
7933 };
7934 
7935 /// ------------------------ NoUndef Attribute ---------------------------------
7936 struct AANoUndefImpl : AANoUndef {
7937   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
7938 
7939   /// See AbstractAttribute::initialize(...).
7940   void initialize(Attributor &A) override {
7941     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
7942       indicateOptimisticFixpoint();
7943       return;
7944     }
7945     Value &V = getAssociatedValue();
7946     if (isa<UndefValue>(V))
7947       indicatePessimisticFixpoint();
7948     else if (isa<FreezeInst>(V))
7949       indicateOptimisticFixpoint();
7950     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
7951              isGuaranteedNotToBeUndefOrPoison(&V))
7952       indicateOptimisticFixpoint();
7953     else
7954       AANoUndef::initialize(A);
7955   }
7956 
7957   /// See followUsesInMBEC
7958   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
7959                        AANoUndef::StateType &State) {
7960     const Value *UseV = U->get();
7961     const DominatorTree *DT = nullptr;
7962     AssumptionCache *AC = nullptr;
7963     InformationCache &InfoCache = A.getInfoCache();
7964     if (Function *F = getAnchorScope()) {
7965       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
7966       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
7967     }
7968     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
7969     bool TrackUse = false;
7970     // Track use for instructions which must produce undef or poison bits when
7971     // at least one operand contains such bits.
7972     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
7973       TrackUse = true;
7974     return TrackUse;
7975   }
7976 
7977   /// See AbstractAttribute::getAsStr().
7978   const std::string getAsStr() const override {
7979     return getAssumed() ? "noundef" : "may-undef-or-poison";
7980   }
7981 
7982   ChangeStatus manifest(Attributor &A) override {
7983     // We don't manifest noundef attribute for dead positions because the
7984     // associated values with dead positions would be replaced with undef
7985     // values.
7986     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
7987       return ChangeStatus::UNCHANGED;
7988     // A position whose simplified value does not have any value is
7989     // considered to be dead. We don't manifest noundef in such positions for
7990     // the same reason above.
7991     auto &ValueSimplifyAA =
7992         A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
7993     if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
7994       return ChangeStatus::UNCHANGED;
7995     return AANoUndef::manifest(A);
7996   }
7997 };
7998 
7999 struct AANoUndefFloating : public AANoUndefImpl {
8000   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
8001       : AANoUndefImpl(IRP, A) {}
8002 
8003   /// See AbstractAttribute::initialize(...).
8004   void initialize(Attributor &A) override {
8005     AANoUndefImpl::initialize(A);
8006     if (!getState().isAtFixpoint())
8007       if (Instruction *CtxI = getCtxI())
8008         followUsesInMBEC(*this, A, getState(), *CtxI);
8009   }
8010 
8011   /// See AbstractAttribute::updateImpl(...).
8012   ChangeStatus updateImpl(Attributor &A) override {
8013     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8014                             AANoUndef::StateType &T, bool Stripped) -> bool {
8015       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
8016                                              DepClassTy::REQUIRED);
8017       if (!Stripped && this == &AA) {
8018         T.indicatePessimisticFixpoint();
8019       } else {
8020         const AANoUndef::StateType &S =
8021             static_cast<const AANoUndef::StateType &>(AA.getState());
8022         T ^= S;
8023       }
8024       return T.isValidState();
8025     };
8026 
8027     StateType T;
8028     if (!genericValueTraversal<AANoUndef, StateType>(
8029             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
8030       return indicatePessimisticFixpoint();
8031 
8032     return clampStateAndIndicateChange(getState(), T);
8033   }
8034 
8035   /// See AbstractAttribute::trackStatistics()
8036   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8037 };
8038 
8039 struct AANoUndefReturned final
8040     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
8041   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
8042       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
8043 
8044   /// See AbstractAttribute::trackStatistics()
8045   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8046 };
8047 
8048 struct AANoUndefArgument final
8049     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8050   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8051       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8052 
8053   /// See AbstractAttribute::trackStatistics()
8054   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8055 };
8056 
8057 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8058   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8059       : AANoUndefFloating(IRP, A) {}
8060 
8061   /// See AbstractAttribute::trackStatistics()
8062   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8063 };
8064 
8065 struct AANoUndefCallSiteReturned final
8066     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8067   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8068       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8069 
8070   /// See AbstractAttribute::trackStatistics()
8071   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8072 };
8073 } // namespace
8074 
8075 const char AAReturnedValues::ID = 0;
8076 const char AANoUnwind::ID = 0;
8077 const char AANoSync::ID = 0;
8078 const char AANoFree::ID = 0;
8079 const char AANonNull::ID = 0;
8080 const char AANoRecurse::ID = 0;
8081 const char AAWillReturn::ID = 0;
8082 const char AAUndefinedBehavior::ID = 0;
8083 const char AANoAlias::ID = 0;
8084 const char AAReachability::ID = 0;
8085 const char AANoReturn::ID = 0;
8086 const char AAIsDead::ID = 0;
8087 const char AADereferenceable::ID = 0;
8088 const char AAAlign::ID = 0;
8089 const char AANoCapture::ID = 0;
8090 const char AAValueSimplify::ID = 0;
8091 const char AAHeapToStack::ID = 0;
8092 const char AAPrivatizablePtr::ID = 0;
8093 const char AAMemoryBehavior::ID = 0;
8094 const char AAMemoryLocation::ID = 0;
8095 const char AAValueConstantRange::ID = 0;
8096 const char AAPotentialValues::ID = 0;
8097 const char AANoUndef::ID = 0;
8098 
8099 // Macro magic to create the static generator function for attributes that
8100 // follow the naming scheme.
8101 
8102 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8103   case IRPosition::PK:                                                         \
8104     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8105 
8106 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8107   case IRPosition::PK:                                                         \
8108     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8109     ++NumAAs;                                                                  \
8110     break;
8111 
8112 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8113   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8114     CLASS *AA = nullptr;                                                       \
8115     switch (IRP.getPositionKind()) {                                           \
8116       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8117       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8118       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8119       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8120       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8121       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8122       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8123       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8124     }                                                                          \
8125     return *AA;                                                                \
8126   }
8127 
8128 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8129   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8130     CLASS *AA = nullptr;                                                       \
8131     switch (IRP.getPositionKind()) {                                           \
8132       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8133       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8134       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8135       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8136       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8137       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8138       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8139       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8140     }                                                                          \
8141     return *AA;                                                                \
8142   }
8143 
8144 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8145   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8146     CLASS *AA = nullptr;                                                       \
8147     switch (IRP.getPositionKind()) {                                           \
8148       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8149       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8150       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8151       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8152       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8153       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8154       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8155       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8156     }                                                                          \
8157     return *AA;                                                                \
8158   }
8159 
8160 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8161   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8162     CLASS *AA = nullptr;                                                       \
8163     switch (IRP.getPositionKind()) {                                           \
8164       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8165       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8166       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8167       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8168       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8169       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8170       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8171       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8172     }                                                                          \
8173     return *AA;                                                                \
8174   }
8175 
8176 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8177   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8178     CLASS *AA = nullptr;                                                       \
8179     switch (IRP.getPositionKind()) {                                           \
8180       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8181       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8182       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8183       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8184       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8185       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8186       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8187       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8188     }                                                                          \
8189     return *AA;                                                                \
8190   }
8191 
8192 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8193 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8194 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8195 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8196 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8197 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8198 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8199 
8200 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8201 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8202 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8203 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8204 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8205 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8206 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8207 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8208 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8209 
8210 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8211 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8212 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8213 
8214 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8215 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8216 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8217 
8218 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8219 
8220 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8221 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8222 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8223 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8224 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8225 #undef SWITCH_PK_CREATE
8226 #undef SWITCH_PK_INV
8227