1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SCCIterator.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumeBundleQueries.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LazyValueInfo.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
28 #include "llvm/Analysis/ScalarEvolution.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/Instruction.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/NoFolder.h"
37 #include "llvm/Support/Alignment.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/FileSystem.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
44 #include "llvm/Transforms/Utils/Local.h"
45 #include <cassert>
46 
47 using namespace llvm;
48 
49 #define DEBUG_TYPE "attributor"
50 
51 static cl::opt<bool> ManifestInternal(
52     "attributor-manifest-internal", cl::Hidden,
53     cl::desc("Manifest Attributor internal string attributes."),
54     cl::init(false));
55 
56 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
57                                        cl::Hidden);
58 
59 template <>
60 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
61 
62 static cl::opt<unsigned, true> MaxPotentialValues(
63     "attributor-max-potential-values", cl::Hidden,
64     cl::desc("Maximum number of potential values to be "
65              "tracked for each position."),
66     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
67     cl::init(7));
68 
69 STATISTIC(NumAAs, "Number of abstract attributes created");
70 
71 // Some helper macros to deal with statistics tracking.
72 //
73 // Usage:
74 // For simple IR attribute tracking overload trackStatistics in the abstract
75 // attribute and choose the right STATS_DECLTRACK_********* macro,
76 // e.g.,:
77 //  void trackStatistics() const override {
78 //    STATS_DECLTRACK_ARG_ATTR(returned)
79 //  }
80 // If there is a single "increment" side one can use the macro
81 // STATS_DECLTRACK with a custom message. If there are multiple increment
82 // sides, STATS_DECL and STATS_TRACK can also be used separately.
83 //
84 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
85   ("Number of " #TYPE " marked '" #NAME "'")
86 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
87 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
88 #define STATS_DECL(NAME, TYPE, MSG)                                            \
89   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
90 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
91 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
92   {                                                                            \
93     STATS_DECL(NAME, TYPE, MSG)                                                \
94     STATS_TRACK(NAME, TYPE)                                                    \
95   }
96 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
97   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
98 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
99   STATS_DECLTRACK(NAME, CSArguments,                                           \
100                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
101 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
102   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
103 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
104   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
105 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
106   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
107                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
108 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
109   STATS_DECLTRACK(NAME, CSReturn,                                              \
110                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
111 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
112   STATS_DECLTRACK(NAME, Floating,                                              \
113                   ("Number of floating values known to be '" #NAME "'"))
114 
115 // Specialization of the operator<< for abstract attributes subclasses. This
116 // disambiguates situations where multiple operators are applicable.
117 namespace llvm {
118 #define PIPE_OPERATOR(CLASS)                                                   \
119   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
120     return OS << static_cast<const AbstractAttribute &>(AA);                   \
121   }
122 
123 PIPE_OPERATOR(AAIsDead)
124 PIPE_OPERATOR(AANoUnwind)
125 PIPE_OPERATOR(AANoSync)
126 PIPE_OPERATOR(AANoRecurse)
127 PIPE_OPERATOR(AAWillReturn)
128 PIPE_OPERATOR(AANoReturn)
129 PIPE_OPERATOR(AAReturnedValues)
130 PIPE_OPERATOR(AANonNull)
131 PIPE_OPERATOR(AANoAlias)
132 PIPE_OPERATOR(AADereferenceable)
133 PIPE_OPERATOR(AAAlign)
134 PIPE_OPERATOR(AANoCapture)
135 PIPE_OPERATOR(AAValueSimplify)
136 PIPE_OPERATOR(AANoFree)
137 PIPE_OPERATOR(AAHeapToStack)
138 PIPE_OPERATOR(AAReachability)
139 PIPE_OPERATOR(AAMemoryBehavior)
140 PIPE_OPERATOR(AAMemoryLocation)
141 PIPE_OPERATOR(AAValueConstantRange)
142 PIPE_OPERATOR(AAPrivatizablePtr)
143 PIPE_OPERATOR(AAUndefinedBehavior)
144 PIPE_OPERATOR(AAPotentialValues)
145 PIPE_OPERATOR(AANoUndef)
146 PIPE_OPERATOR(AACallEdges)
147 PIPE_OPERATOR(AAFunctionReachability)
148 PIPE_OPERATOR(AAPointerInfo)
149 
150 #undef PIPE_OPERATOR
151 
152 template <>
153 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
154                                                      const DerefState &R) {
155   ChangeStatus CS0 =
156       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
157   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
158   return CS0 | CS1;
159 }
160 
161 } // namespace llvm
162 
163 /// Get pointer operand of memory accessing instruction. If \p I is
164 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
165 /// is set to false and the instruction is volatile, return nullptr.
166 static const Value *getPointerOperand(const Instruction *I,
167                                       bool AllowVolatile) {
168   if (!AllowVolatile && I->isVolatile())
169     return nullptr;
170 
171   if (auto *LI = dyn_cast<LoadInst>(I)) {
172     return LI->getPointerOperand();
173   }
174 
175   if (auto *SI = dyn_cast<StoreInst>(I)) {
176     return SI->getPointerOperand();
177   }
178 
179   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
180     return CXI->getPointerOperand();
181   }
182 
183   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
184     return RMWI->getPointerOperand();
185   }
186 
187   return nullptr;
188 }
189 
190 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
191 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
192 /// getelement pointer instructions that traverse the natural type of \p Ptr if
193 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
194 /// through a cast to i8*.
195 ///
196 /// TODO: This could probably live somewhere more prominantly if it doesn't
197 ///       already exist.
198 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
199                                int64_t Offset, IRBuilder<NoFolder> &IRB,
200                                const DataLayout &DL) {
201   assert(Offset >= 0 && "Negative offset not supported yet!");
202   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
203                     << "-bytes as " << *ResTy << "\n");
204 
205   if (Offset) {
206     SmallVector<Value *, 4> Indices;
207     std::string GEPName = Ptr->getName().str() + ".0";
208 
209     // Add 0 index to look through the pointer.
210     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
211            "Offset out of bounds");
212     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
213 
214     Type *Ty = PtrElemTy;
215     do {
216       auto *STy = dyn_cast<StructType>(Ty);
217       if (!STy)
218         // Non-aggregate type, we cast and make byte-wise progress now.
219         break;
220 
221       const StructLayout *SL = DL.getStructLayout(STy);
222       if (int64_t(SL->getSizeInBytes()) < Offset)
223         break;
224 
225       uint64_t Idx = SL->getElementContainingOffset(Offset);
226       assert(Idx < STy->getNumElements() && "Offset calculation error!");
227       uint64_t Rem = Offset - SL->getElementOffset(Idx);
228       Ty = STy->getElementType(Idx);
229 
230       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
231                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
232 
233       GEPName += "." + std::to_string(Idx);
234       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
235       Offset = Rem;
236     } while (Offset);
237 
238     // Create a GEP for the indices collected above.
239     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
240 
241     // If an offset is left we use byte-wise adjustment.
242     if (Offset) {
243       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
244       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
245                           GEPName + ".b" + Twine(Offset));
246     }
247   }
248 
249   // Ensure the result has the requested type.
250   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
251 
252   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
253   return Ptr;
254 }
255 
256 /// Recursively visit all values that might become \p IRP at some point. This
257 /// will be done by looking through cast instructions, selects, phis, and calls
258 /// with the "returned" attribute. Once we cannot look through the value any
259 /// further, the callback \p VisitValueCB is invoked and passed the current
260 /// value, the \p State, and a flag to indicate if we stripped anything.
261 /// Stripped means that we unpacked the value associated with \p IRP at least
262 /// once. Note that the value used for the callback may still be the value
263 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
264 /// we will never visit more values than specified by \p MaxValues.
265 template <typename StateTy>
266 static bool genericValueTraversal(
267     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
268     StateTy &State,
269     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
270         VisitValueCB,
271     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
272     function_ref<Value *(Value *)> StripCB = nullptr) {
273 
274   const AAIsDead *LivenessAA = nullptr;
275   if (IRP.getAnchorScope())
276     LivenessAA = &A.getAAFor<AAIsDead>(
277         QueryingAA,
278         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
279         DepClassTy::NONE);
280   bool AnyDead = false;
281 
282   Value *InitialV = &IRP.getAssociatedValue();
283   using Item = std::pair<Value *, const Instruction *>;
284   SmallSet<Item, 16> Visited;
285   SmallVector<Item, 16> Worklist;
286   Worklist.push_back({InitialV, CtxI});
287 
288   int Iteration = 0;
289   do {
290     Item I = Worklist.pop_back_val();
291     Value *V = I.first;
292     CtxI = I.second;
293     if (StripCB)
294       V = StripCB(V);
295 
296     // Check if we should process the current value. To prevent endless
297     // recursion keep a record of the values we followed!
298     if (!Visited.insert(I).second)
299       continue;
300 
301     // Make sure we limit the compile time for complex expressions.
302     if (Iteration++ >= MaxValues)
303       return false;
304 
305     // Explicitly look through calls with a "returned" attribute if we do
306     // not have a pointer as stripPointerCasts only works on them.
307     Value *NewV = nullptr;
308     if (V->getType()->isPointerTy()) {
309       NewV = V->stripPointerCasts();
310     } else {
311       auto *CB = dyn_cast<CallBase>(V);
312       if (CB && CB->getCalledFunction()) {
313         for (Argument &Arg : CB->getCalledFunction()->args())
314           if (Arg.hasReturnedAttr()) {
315             NewV = CB->getArgOperand(Arg.getArgNo());
316             break;
317           }
318       }
319     }
320     if (NewV && NewV != V) {
321       Worklist.push_back({NewV, CtxI});
322       continue;
323     }
324 
325     // Look through select instructions, visit assumed potential values.
326     if (auto *SI = dyn_cast<SelectInst>(V)) {
327       bool UsedAssumedInformation = false;
328       Optional<Constant *> C = A.getAssumedConstant(
329           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
330       bool NoValueYet = !C.hasValue();
331       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
332         continue;
333       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
334         if (CI->isZero())
335           Worklist.push_back({SI->getFalseValue(), CtxI});
336         else
337           Worklist.push_back({SI->getTrueValue(), CtxI});
338         continue;
339       }
340       // We could not simplify the condition, assume both values.(
341       Worklist.push_back({SI->getTrueValue(), CtxI});
342       Worklist.push_back({SI->getFalseValue(), CtxI});
343       continue;
344     }
345 
346     // Look through phi nodes, visit all live operands.
347     if (auto *PHI = dyn_cast<PHINode>(V)) {
348       assert(LivenessAA &&
349              "Expected liveness in the presence of instructions!");
350       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
351         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
352         bool UsedAssumedInformation = false;
353         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
354                             LivenessAA, UsedAssumedInformation,
355                             /* CheckBBLivenessOnly */ true)) {
356           AnyDead = true;
357           continue;
358         }
359         Worklist.push_back(
360             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
361       }
362       continue;
363     }
364 
365     if (UseValueSimplify && !isa<Constant>(V)) {
366       bool UsedAssumedInformation = false;
367       Optional<Value *> SimpleV =
368           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
369       if (!SimpleV.hasValue())
370         continue;
371       if (!SimpleV.getValue())
372         return false;
373       Value *NewV = SimpleV.getValue();
374       if (NewV != V) {
375         Worklist.push_back({NewV, CtxI});
376         continue;
377       }
378     }
379 
380     // Once a leaf is reached we inform the user through the callback.
381     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
382       return false;
383   } while (!Worklist.empty());
384 
385   // If we actually used liveness information so we have to record a dependence.
386   if (AnyDead)
387     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
388 
389   // All values have been visited.
390   return true;
391 }
392 
393 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
394                                      SmallVectorImpl<Value *> &Objects,
395                                      const AbstractAttribute &QueryingAA,
396                                      const Instruction *CtxI) {
397   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
398   SmallPtrSet<Value *, 8> SeenObjects;
399   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
400                                      SmallVectorImpl<Value *> &Objects,
401                                      bool) -> bool {
402     if (SeenObjects.insert(&Val).second)
403       Objects.push_back(&Val);
404     return true;
405   };
406   if (!genericValueTraversal<decltype(Objects)>(
407           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
408           true, 32, StripCB))
409     return false;
410   return true;
411 }
412 
413 const Value *stripAndAccumulateMinimalOffsets(
414     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
415     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
416     bool UseAssumed = false) {
417 
418   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
419     const IRPosition &Pos = IRPosition::value(V);
420     // Only track dependence if we are going to use the assumed info.
421     const AAValueConstantRange &ValueConstantRangeAA =
422         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
423                                          UseAssumed ? DepClassTy::OPTIONAL
424                                                     : DepClassTy::NONE);
425     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
426                                      : ValueConstantRangeAA.getKnown();
427     // We can only use the lower part of the range because the upper part can
428     // be higher than what the value can really be.
429     ROffset = Range.getSignedMin();
430     return true;
431   };
432 
433   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
434                                                 AttributorAnalysis);
435 }
436 
437 static const Value *getMinimalBaseOfAccsesPointerOperand(
438     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
439     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
440   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
441   if (!Ptr)
442     return nullptr;
443   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
444   const Value *Base = stripAndAccumulateMinimalOffsets(
445       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
446 
447   BytesOffset = OffsetAPInt.getSExtValue();
448   return Base;
449 }
450 
451 static const Value *
452 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
453                                      const DataLayout &DL,
454                                      bool AllowNonInbounds = false) {
455   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
456   if (!Ptr)
457     return nullptr;
458 
459   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
460                                           AllowNonInbounds);
461 }
462 
463 /// Clamp the information known for all returned values of a function
464 /// (identified by \p QueryingAA) into \p S.
465 template <typename AAType, typename StateType = typename AAType::StateType>
466 static void clampReturnedValueStates(
467     Attributor &A, const AAType &QueryingAA, StateType &S,
468     const IRPosition::CallBaseContext *CBContext = nullptr) {
469   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
470                     << QueryingAA << " into " << S << "\n");
471 
472   assert((QueryingAA.getIRPosition().getPositionKind() ==
473               IRPosition::IRP_RETURNED ||
474           QueryingAA.getIRPosition().getPositionKind() ==
475               IRPosition::IRP_CALL_SITE_RETURNED) &&
476          "Can only clamp returned value states for a function returned or call "
477          "site returned position!");
478 
479   // Use an optional state as there might not be any return values and we want
480   // to join (IntegerState::operator&) the state of all there are.
481   Optional<StateType> T;
482 
483   // Callback for each possibly returned value.
484   auto CheckReturnValue = [&](Value &RV) -> bool {
485     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
486     const AAType &AA =
487         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
488     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
489                       << " @ " << RVPos << "\n");
490     const StateType &AAS = AA.getState();
491     if (T.hasValue())
492       *T &= AAS;
493     else
494       T = AAS;
495     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
496                       << "\n");
497     return T->isValidState();
498   };
499 
500   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
501     S.indicatePessimisticFixpoint();
502   else if (T.hasValue())
503     S ^= *T;
504 }
505 
506 /// Helper class for generic deduction: return value -> returned position.
507 template <typename AAType, typename BaseType,
508           typename StateType = typename BaseType::StateType,
509           bool PropagateCallBaseContext = false>
510 struct AAReturnedFromReturnedValues : public BaseType {
511   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
512       : BaseType(IRP, A) {}
513 
514   /// See AbstractAttribute::updateImpl(...).
515   ChangeStatus updateImpl(Attributor &A) override {
516     StateType S(StateType::getBestState(this->getState()));
517     clampReturnedValueStates<AAType, StateType>(
518         A, *this, S,
519         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
520     // TODO: If we know we visited all returned values, thus no are assumed
521     // dead, we can take the known information from the state T.
522     return clampStateAndIndicateChange<StateType>(this->getState(), S);
523   }
524 };
525 
526 /// Clamp the information known at all call sites for a given argument
527 /// (identified by \p QueryingAA) into \p S.
528 template <typename AAType, typename StateType = typename AAType::StateType>
529 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
530                                         StateType &S) {
531   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
532                     << QueryingAA << " into " << S << "\n");
533 
534   assert(QueryingAA.getIRPosition().getPositionKind() ==
535              IRPosition::IRP_ARGUMENT &&
536          "Can only clamp call site argument states for an argument position!");
537 
538   // Use an optional state as there might not be any return values and we want
539   // to join (IntegerState::operator&) the state of all there are.
540   Optional<StateType> T;
541 
542   // The argument number which is also the call site argument number.
543   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
544 
545   auto CallSiteCheck = [&](AbstractCallSite ACS) {
546     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
547     // Check if a coresponding argument was found or if it is on not associated
548     // (which can happen for callback calls).
549     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
550       return false;
551 
552     const AAType &AA =
553         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
554     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
555                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
556     const StateType &AAS = AA.getState();
557     if (T.hasValue())
558       *T &= AAS;
559     else
560       T = AAS;
561     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
562                       << "\n");
563     return T->isValidState();
564   };
565 
566   bool AllCallSitesKnown;
567   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
568                               AllCallSitesKnown))
569     S.indicatePessimisticFixpoint();
570   else if (T.hasValue())
571     S ^= *T;
572 }
573 
574 /// This function is the bridge between argument position and the call base
575 /// context.
576 template <typename AAType, typename BaseType,
577           typename StateType = typename AAType::StateType>
578 bool getArgumentStateFromCallBaseContext(Attributor &A,
579                                          BaseType &QueryingAttribute,
580                                          IRPosition &Pos, StateType &State) {
581   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
582          "Expected an 'argument' position !");
583   const CallBase *CBContext = Pos.getCallBaseContext();
584   if (!CBContext)
585     return false;
586 
587   int ArgNo = Pos.getCallSiteArgNo();
588   assert(ArgNo >= 0 && "Invalid Arg No!");
589 
590   const auto &AA = A.getAAFor<AAType>(
591       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
592       DepClassTy::REQUIRED);
593   const StateType &CBArgumentState =
594       static_cast<const StateType &>(AA.getState());
595 
596   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
597                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
598                     << "\n");
599 
600   // NOTE: If we want to do call site grouping it should happen here.
601   State ^= CBArgumentState;
602   return true;
603 }
604 
605 /// Helper class for generic deduction: call site argument -> argument position.
606 template <typename AAType, typename BaseType,
607           typename StateType = typename AAType::StateType,
608           bool BridgeCallBaseContext = false>
609 struct AAArgumentFromCallSiteArguments : public BaseType {
610   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
611       : BaseType(IRP, A) {}
612 
613   /// See AbstractAttribute::updateImpl(...).
614   ChangeStatus updateImpl(Attributor &A) override {
615     StateType S = StateType::getBestState(this->getState());
616 
617     if (BridgeCallBaseContext) {
618       bool Success =
619           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
620               A, *this, this->getIRPosition(), S);
621       if (Success)
622         return clampStateAndIndicateChange<StateType>(this->getState(), S);
623     }
624     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
625 
626     // TODO: If we know we visited all incoming values, thus no are assumed
627     // dead, we can take the known information from the state T.
628     return clampStateAndIndicateChange<StateType>(this->getState(), S);
629   }
630 };
631 
632 /// Helper class for generic replication: function returned -> cs returned.
633 template <typename AAType, typename BaseType,
634           typename StateType = typename BaseType::StateType,
635           bool IntroduceCallBaseContext = false>
636 struct AACallSiteReturnedFromReturned : public BaseType {
637   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
638       : BaseType(IRP, A) {}
639 
640   /// See AbstractAttribute::updateImpl(...).
641   ChangeStatus updateImpl(Attributor &A) override {
642     assert(this->getIRPosition().getPositionKind() ==
643                IRPosition::IRP_CALL_SITE_RETURNED &&
644            "Can only wrap function returned positions for call site returned "
645            "positions!");
646     auto &S = this->getState();
647 
648     const Function *AssociatedFunction =
649         this->getIRPosition().getAssociatedFunction();
650     if (!AssociatedFunction)
651       return S.indicatePessimisticFixpoint();
652 
653     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
654     if (IntroduceCallBaseContext)
655       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
656                         << CBContext << "\n");
657 
658     IRPosition FnPos = IRPosition::returned(
659         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
660     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
661     return clampStateAndIndicateChange(S, AA.getState());
662   }
663 };
664 
665 /// Helper function to accumulate uses.
666 template <class AAType, typename StateType = typename AAType::StateType>
667 static void followUsesInContext(AAType &AA, Attributor &A,
668                                 MustBeExecutedContextExplorer &Explorer,
669                                 const Instruction *CtxI,
670                                 SetVector<const Use *> &Uses,
671                                 StateType &State) {
672   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
673   for (unsigned u = 0; u < Uses.size(); ++u) {
674     const Use *U = Uses[u];
675     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
676       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
677       if (Found && AA.followUseInMBEC(A, U, UserI, State))
678         for (const Use &Us : UserI->uses())
679           Uses.insert(&Us);
680     }
681   }
682 }
683 
684 /// Use the must-be-executed-context around \p I to add information into \p S.
685 /// The AAType class is required to have `followUseInMBEC` method with the
686 /// following signature and behaviour:
687 ///
688 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
689 /// U - Underlying use.
690 /// I - The user of the \p U.
691 /// Returns true if the value should be tracked transitively.
692 ///
693 template <class AAType, typename StateType = typename AAType::StateType>
694 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
695                              Instruction &CtxI) {
696 
697   // Container for (transitive) uses of the associated value.
698   SetVector<const Use *> Uses;
699   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
700     Uses.insert(&U);
701 
702   MustBeExecutedContextExplorer &Explorer =
703       A.getInfoCache().getMustBeExecutedContextExplorer();
704 
705   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
706 
707   if (S.isAtFixpoint())
708     return;
709 
710   SmallVector<const BranchInst *, 4> BrInsts;
711   auto Pred = [&](const Instruction *I) {
712     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
713       if (Br->isConditional())
714         BrInsts.push_back(Br);
715     return true;
716   };
717 
718   // Here, accumulate conditional branch instructions in the context. We
719   // explore the child paths and collect the known states. The disjunction of
720   // those states can be merged to its own state. Let ParentState_i be a state
721   // to indicate the known information for an i-th branch instruction in the
722   // context. ChildStates are created for its successors respectively.
723   //
724   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
725   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
726   //      ...
727   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
728   //
729   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
730   //
731   // FIXME: Currently, recursive branches are not handled. For example, we
732   // can't deduce that ptr must be dereferenced in below function.
733   //
734   // void f(int a, int c, int *ptr) {
735   //    if(a)
736   //      if (b) {
737   //        *ptr = 0;
738   //      } else {
739   //        *ptr = 1;
740   //      }
741   //    else {
742   //      if (b) {
743   //        *ptr = 0;
744   //      } else {
745   //        *ptr = 1;
746   //      }
747   //    }
748   // }
749 
750   Explorer.checkForAllContext(&CtxI, Pred);
751   for (const BranchInst *Br : BrInsts) {
752     StateType ParentState;
753 
754     // The known state of the parent state is a conjunction of children's
755     // known states so it is initialized with a best state.
756     ParentState.indicateOptimisticFixpoint();
757 
758     for (const BasicBlock *BB : Br->successors()) {
759       StateType ChildState;
760 
761       size_t BeforeSize = Uses.size();
762       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
763 
764       // Erase uses which only appear in the child.
765       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
766         It = Uses.erase(It);
767 
768       ParentState &= ChildState;
769     }
770 
771     // Use only known state.
772     S += ParentState;
773   }
774 }
775 
776 /// ------------------------ PointerInfo ---------------------------------------
777 
778 namespace llvm {
779 namespace AA {
780 namespace PointerInfo {
781 
782 /// An access kind description as used by AAPointerInfo.
783 struct OffsetAndSize;
784 
785 struct State;
786 
787 } // namespace PointerInfo
788 } // namespace AA
789 
790 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
791 template <>
792 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
793   using Access = AAPointerInfo::Access;
794   static inline Access getEmptyKey();
795   static inline Access getTombstoneKey();
796   static unsigned getHashValue(const Access &A);
797   static bool isEqual(const Access &LHS, const Access &RHS);
798 };
799 
800 /// Helper that allows OffsetAndSize as a key in a DenseMap.
801 template <>
802 struct DenseMapInfo<AA::PointerInfo ::OffsetAndSize>
803     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
804 
805 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
806 /// but the instruction
807 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
808   using Base = DenseMapInfo<Instruction *>;
809   using Access = AAPointerInfo::Access;
810   static inline Access getEmptyKey();
811   static inline Access getTombstoneKey();
812   static unsigned getHashValue(const Access &A);
813   static bool isEqual(const Access &LHS, const Access &RHS);
814 };
815 
816 } // namespace llvm
817 
818 /// Helper to represent an access offset and size, with logic to deal with
819 /// uncertainty and check for overlapping accesses.
820 struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
821   using BaseTy = std::pair<int64_t, int64_t>;
822   OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
823   OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
824   int64_t getOffset() const { return first; }
825   int64_t getSize() const { return second; }
826   static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
827 
828   /// Return true if this offset and size pair might describe an address that
829   /// overlaps with \p OAS.
830   bool mayOverlap(const OffsetAndSize &OAS) const {
831     // Any unknown value and we are giving up -> overlap.
832     if (OAS.getOffset() == OffsetAndSize::Unknown ||
833         OAS.getSize() == OffsetAndSize::Unknown ||
834         getOffset() == OffsetAndSize::Unknown ||
835         getSize() == OffsetAndSize::Unknown)
836       return true;
837 
838     // Check if one offset point is in the other interval [offset, offset+size].
839     return OAS.getOffset() + OAS.getSize() > getOffset() &&
840            OAS.getOffset() < getOffset() + getSize();
841   }
842 
843   /// Constant used to represent unknown offset or sizes.
844   static constexpr int64_t Unknown = 1 << 31;
845 };
846 
847 /// Implementation of the DenseMapInfo.
848 ///
849 ///{
850 inline llvm::AccessAsInstructionInfo::Access
851 llvm::AccessAsInstructionInfo::getEmptyKey() {
852   return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
853 }
854 inline llvm::AccessAsInstructionInfo::Access
855 llvm::AccessAsInstructionInfo::getTombstoneKey() {
856   return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
857                 nullptr);
858 }
859 unsigned llvm::AccessAsInstructionInfo::getHashValue(
860     const llvm::AccessAsInstructionInfo::Access &A) {
861   return Base::getHashValue(A.getRemoteInst());
862 }
863 bool llvm::AccessAsInstructionInfo::isEqual(
864     const llvm::AccessAsInstructionInfo::Access &LHS,
865     const llvm::AccessAsInstructionInfo::Access &RHS) {
866   return LHS.getRemoteInst() == RHS.getRemoteInst();
867 }
868 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
869 llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
870   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
871                                nullptr);
872 }
873 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
874 llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
875   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
876                                nullptr);
877 }
878 
879 unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
880     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
881   return detail::combineHashValue(
882              DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
883              (A.isWrittenValueYetUndetermined()
884                   ? ~0
885                   : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
886          A.getKind();
887 }
888 
889 bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
890     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
891     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
892   return LHS == RHS;
893 }
894 ///}
895 
896 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
897 struct AA::PointerInfo::State : public AbstractState {
898 
899   /// Return the best possible representable state.
900   static State getBestState(const State &SIS) { return State(); }
901 
902   /// Return the worst possible representable state.
903   static State getWorstState(const State &SIS) {
904     State R;
905     R.indicatePessimisticFixpoint();
906     return R;
907   }
908 
909   State() {}
910   State(const State &SIS) : AccessBins(SIS.AccessBins) {}
911   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
912 
913   const State &getAssumed() const { return *this; }
914 
915   /// See AbstractState::isValidState().
916   bool isValidState() const override { return BS.isValidState(); }
917 
918   /// See AbstractState::isAtFixpoint().
919   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
920 
921   /// See AbstractState::indicateOptimisticFixpoint().
922   ChangeStatus indicateOptimisticFixpoint() override {
923     BS.indicateOptimisticFixpoint();
924     return ChangeStatus::UNCHANGED;
925   }
926 
927   /// See AbstractState::indicatePessimisticFixpoint().
928   ChangeStatus indicatePessimisticFixpoint() override {
929     BS.indicatePessimisticFixpoint();
930     return ChangeStatus::CHANGED;
931   }
932 
933   State &operator=(const State &R) {
934     if (this == &R)
935       return *this;
936     BS = R.BS;
937     AccessBins = R.AccessBins;
938     return *this;
939   }
940 
941   State &operator=(State &&R) {
942     if (this == &R)
943       return *this;
944     std::swap(BS, R.BS);
945     std::swap(AccessBins, R.AccessBins);
946     return *this;
947   }
948 
949   bool operator==(const State &R) const {
950     if (BS != R.BS)
951       return false;
952     if (AccessBins.size() != R.AccessBins.size())
953       return false;
954     auto It = begin(), RIt = R.begin(), E = end();
955     while (It != E) {
956       if (It->getFirst() != RIt->getFirst())
957         return false;
958       auto &Accs = It->getSecond();
959       auto &RAccs = RIt->getSecond();
960       if (Accs.size() != RAccs.size())
961         return false;
962       auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
963       while (AccIt != AccE) {
964         if (*AccIt != *RAccIt)
965           return false;
966         ++AccIt;
967         ++RAccIt;
968       }
969       ++It;
970       ++RIt;
971     }
972     return true;
973   }
974   bool operator!=(const State &R) const { return !(*this == R); }
975 
976   /// We store accesses in a set with the instruction as key.
977   using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
978 
979   /// We store all accesses in bins denoted by their offset and size.
980   using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
981 
982   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
983   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
984 
985 protected:
986   /// The bins with all the accesses for the associated pointer.
987   DenseMap<OffsetAndSize, Accesses> AccessBins;
988 
989   /// Add a new access to the state at offset \p Offset and with size \p Size.
990   /// The access is associated with \p I, writes \p Content (if anything), and
991   /// is of kind \p Kind.
992   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
993   ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
994                          Optional<Value *> Content,
995                          AAPointerInfo::AccessKind Kind, Type *Ty,
996                          Instruction *RemoteI = nullptr,
997                          Accesses *BinPtr = nullptr) {
998     OffsetAndSize Key{Offset, Size};
999     Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
1000     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1001     // Check if we have an access for this instruction in this bin, if not,
1002     // simply add it.
1003     auto It = Bin.find(Acc);
1004     if (It == Bin.end()) {
1005       Bin.insert(Acc);
1006       return ChangeStatus::CHANGED;
1007     }
1008     // If the existing access is the same as then new one, nothing changed.
1009     AAPointerInfo::Access Before = *It;
1010     // The new one will be combined with the existing one.
1011     *It &= Acc;
1012     return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1013   }
1014 
1015   /// See AAPointerInfo::forallInterferingAccesses.
1016   bool forallInterferingAccesses(
1017       Instruction &I,
1018       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1019     if (!isValidState())
1020       return false;
1021     // First find the offset and size of I.
1022     OffsetAndSize OAS(-1, -1);
1023     for (auto &It : AccessBins) {
1024       for (auto &Access : It.getSecond()) {
1025         if (Access.getRemoteInst() == &I) {
1026           OAS = It.getFirst();
1027           break;
1028         }
1029       }
1030       if (OAS.getSize() != -1)
1031         break;
1032     }
1033     if (OAS.getSize() == -1)
1034       return true;
1035 
1036     // Now that we have an offset and size, find all overlapping ones and use
1037     // the callback on the accesses.
1038     for (auto &It : AccessBins) {
1039       OffsetAndSize ItOAS = It.getFirst();
1040       if (!OAS.mayOverlap(ItOAS))
1041         continue;
1042       for (auto &Access : It.getSecond())
1043         if (!CB(Access, OAS == ItOAS))
1044           return false;
1045     }
1046     return true;
1047   }
1048 
1049 private:
1050   /// State to track fixpoint and validity.
1051   BooleanState BS;
1052 };
1053 
1054 struct AAPointerInfoImpl
1055     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1056   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1057   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1058 
1059   /// See AbstractAttribute::initialize(...).
1060   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1061 
1062   /// See AbstractAttribute::getAsStr().
1063   const std::string getAsStr() const override {
1064     return std::string("PointerInfo ") +
1065            (isValidState() ? (std::string("#") +
1066                               std::to_string(AccessBins.size()) + " bins")
1067                            : "<invalid>");
1068   }
1069 
1070   /// See AbstractAttribute::manifest(...).
1071   ChangeStatus manifest(Attributor &A) override {
1072     return AAPointerInfo::manifest(A);
1073   }
1074 
1075   bool forallInterferingAccesses(
1076       LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1077       const override {
1078     return State::forallInterferingAccesses(LI, CB);
1079   }
1080   bool forallInterferingAccesses(
1081       StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1082       const override {
1083     return State::forallInterferingAccesses(SI, CB);
1084   }
1085 
1086   ChangeStatus translateAndAddCalleeState(Attributor &A,
1087                                           const AAPointerInfo &CalleeAA,
1088                                           int64_t CallArgOffset, CallBase &CB) {
1089     using namespace AA::PointerInfo;
1090     if (!CalleeAA.getState().isValidState() || !isValidState())
1091       return indicatePessimisticFixpoint();
1092 
1093     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1094     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1095 
1096     // Combine the accesses bin by bin.
1097     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1098     for (auto &It : CalleeImplAA.getState()) {
1099       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1100       if (CallArgOffset != OffsetAndSize::Unknown)
1101         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1102                             It.first.getSize());
1103       Accesses &Bin = AccessBins[OAS];
1104       for (const AAPointerInfo::Access &RAcc : It.second) {
1105         if (IsByval && !RAcc.isRead())
1106           continue;
1107         bool UsedAssumedInformation = false;
1108         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1109             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1110         AccessKind AK =
1111             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1112                                                  : AccessKind::AK_READ_WRITE));
1113         Changed =
1114             Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
1115                                 RAcc.getType(), RAcc.getRemoteInst(), &Bin);
1116       }
1117     }
1118     return Changed;
1119   }
1120 
1121   /// Statistic tracking for all AAPointerInfo implementations.
1122   /// See AbstractAttribute::trackStatistics().
1123   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1124 };
1125 
1126 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1127   using AccessKind = AAPointerInfo::AccessKind;
1128   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1129       : AAPointerInfoImpl(IRP, A) {}
1130 
1131   /// See AbstractAttribute::initialize(...).
1132   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1133 
1134   /// Deal with an access and signal if it was handled successfully.
1135   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1136                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1137                     ChangeStatus &Changed, Type *Ty,
1138                     int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1139     using namespace AA::PointerInfo;
1140     // No need to find a size if one is given or the offset is unknown.
1141     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1142         Ty) {
1143       const DataLayout &DL = A.getDataLayout();
1144       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1145       if (!AccessSize.isScalable())
1146         Size = AccessSize.getFixedSize();
1147     }
1148     Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1149     return true;
1150   };
1151 
1152   /// Helper struct, will support ranges eventually.
1153   struct OffsetInfo {
1154     int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
1155 
1156     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1157   };
1158 
1159   /// See AbstractAttribute::updateImpl(...).
1160   ChangeStatus updateImpl(Attributor &A) override {
1161     using namespace AA::PointerInfo;
1162     State S = getState();
1163     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1164     Value &AssociatedValue = getAssociatedValue();
1165 
1166     const DataLayout &DL = A.getDataLayout();
1167     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1168     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1169 
1170     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1171                                      bool &Follow) {
1172       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1173       UsrOI = PtrOI;
1174       Follow = true;
1175       return true;
1176     };
1177 
1178     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1179       Value *CurPtr = U.get();
1180       User *Usr = U.getUser();
1181       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1182                         << *Usr << "\n");
1183 
1184       OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1185 
1186       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1187         if (CE->isCast())
1188           return HandlePassthroughUser(Usr, PtrOI, Follow);
1189         if (CE->isCompare())
1190           return true;
1191         if (!CE->isGEPWithNoNotionalOverIndexing()) {
1192           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1193                             << "\n");
1194           return false;
1195         }
1196       }
1197       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1198         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1199         UsrOI = PtrOI;
1200 
1201         // TODO: Use range information.
1202         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1203             !GEP->hasAllConstantIndices()) {
1204           UsrOI.Offset = OffsetAndSize::Unknown;
1205           Follow = true;
1206           return true;
1207         }
1208 
1209         SmallVector<Value *, 8> Indices;
1210         for (Use &Idx : llvm::make_range(GEP->idx_begin(), GEP->idx_end())) {
1211           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1212             Indices.push_back(CIdx);
1213             continue;
1214           }
1215 
1216           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1217                             << " : " << *Idx << "\n");
1218           return false;
1219         }
1220         UsrOI.Offset = PtrOI.Offset +
1221                        DL.getIndexedOffsetInType(
1222                            CurPtr->getType()->getPointerElementType(), Indices);
1223         Follow = true;
1224         return true;
1225       }
1226       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1227         return HandlePassthroughUser(Usr, PtrOI, Follow);
1228 
1229       // For PHIs we need to take care of the recurrence explicitly as the value
1230       // might change while we iterate through a loop. For now, we give up if
1231       // the PHI is not invariant.
1232       if (isa<PHINode>(Usr)) {
1233         // Check if the PHI is invariant (so far).
1234         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1235         if (UsrOI == PtrOI)
1236           return true;
1237 
1238         // Check if the PHI operand has already an unknown offset as we can't
1239         // improve on that anymore.
1240         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1241           UsrOI = PtrOI;
1242           Follow = true;
1243           return true;
1244         }
1245 
1246         // Check if the PHI operand is not dependent on the PHI itself.
1247         APInt Offset(DL.getIndexTypeSizeInBits(AssociatedValue.getType()), 0);
1248         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1249                                     DL, Offset, /* AllowNonInbounds */ true)) {
1250           if (Offset != PtrOI.Offset) {
1251             LLVM_DEBUG(dbgs()
1252                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1253                        << *CurPtr << " in " << *Usr << "\n");
1254             return false;
1255           }
1256           return HandlePassthroughUser(Usr, PtrOI, Follow);
1257         }
1258 
1259         // TODO: Approximate in case we know the direction of the recurrence.
1260         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1261                           << *CurPtr << " in " << *Usr << "\n");
1262         UsrOI = PtrOI;
1263         UsrOI.Offset = OffsetAndSize::Unknown;
1264         Follow = true;
1265         return true;
1266       }
1267 
1268       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1269         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1270                             AccessKind::AK_READ, PtrOI.Offset, Changed,
1271                             LoadI->getType());
1272       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1273         if (StoreI->getValueOperand() == CurPtr) {
1274           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1275                             << *StoreI << "\n");
1276           return false;
1277         }
1278         bool UsedAssumedInformation = false;
1279         Optional<Value *> Content = A.getAssumedSimplified(
1280             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1281         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1282                             PtrOI.Offset, Changed,
1283                             StoreI->getValueOperand()->getType());
1284       }
1285       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1286         if (CB->isLifetimeStartOrEnd())
1287           return true;
1288         if (CB->isArgOperand(&U)) {
1289           unsigned ArgNo = CB->getArgOperandNo(&U);
1290           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1291               *this, IRPosition::callsite_argument(*CB, ArgNo),
1292               DepClassTy::REQUIRED);
1293           Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
1294                     Changed;
1295           return true;
1296         }
1297         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1298                           << "\n");
1299         // TODO: Allow some call uses
1300         return false;
1301       }
1302 
1303       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1304       return false;
1305     };
1306     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1307                            /* CheckBBLivenessOnly */ true))
1308       return indicatePessimisticFixpoint();
1309 
1310     LLVM_DEBUG({
1311       dbgs() << "Accesses by bin after update:\n";
1312       for (auto &It : AccessBins) {
1313         dbgs() << "[" << It.first.getOffset() << "-"
1314                << It.first.getOffset() + It.first.getSize()
1315                << "] : " << It.getSecond().size() << "\n";
1316         for (auto &Acc : It.getSecond()) {
1317           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1318                  << "\n";
1319           if (Acc.getLocalInst() != Acc.getRemoteInst())
1320             dbgs() << "     -->                         "
1321                    << *Acc.getRemoteInst() << "\n";
1322           if (!Acc.isWrittenValueYetUndetermined())
1323             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1324         }
1325       }
1326     });
1327 
1328     return Changed;
1329   }
1330 
1331   /// See AbstractAttribute::trackStatistics()
1332   void trackStatistics() const override {
1333     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1334   }
1335 };
1336 
1337 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1338   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1339       : AAPointerInfoImpl(IRP, A) {}
1340 
1341   /// See AbstractAttribute::updateImpl(...).
1342   ChangeStatus updateImpl(Attributor &A) override {
1343     return indicatePessimisticFixpoint();
1344   }
1345 
1346   /// See AbstractAttribute::trackStatistics()
1347   void trackStatistics() const override {
1348     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1349   }
1350 };
1351 
1352 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1353   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1354       : AAPointerInfoFloating(IRP, A) {}
1355 
1356   /// See AbstractAttribute::initialize(...).
1357   void initialize(Attributor &A) override {
1358     AAPointerInfoFloating::initialize(A);
1359     if (getAnchorScope()->isDeclaration())
1360       indicatePessimisticFixpoint();
1361   }
1362 
1363   /// See AbstractAttribute::trackStatistics()
1364   void trackStatistics() const override {
1365     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1366   }
1367 };
1368 
1369 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1370   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1371       : AAPointerInfoFloating(IRP, A) {}
1372 
1373   /// See AbstractAttribute::updateImpl(...).
1374   ChangeStatus updateImpl(Attributor &A) override {
1375     using namespace AA::PointerInfo;
1376     // We handle memory intrinsics explicitly, at least the first (=
1377     // destination) and second (=source) arguments as we know how they are
1378     // accessed.
1379     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1380       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1381       int64_t LengthVal = OffsetAndSize::Unknown;
1382       if (Length)
1383         LengthVal = Length->getSExtValue();
1384       Value &Ptr = getAssociatedValue();
1385       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1386       ChangeStatus Changed;
1387       if (ArgNo == 0) {
1388         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1389                      nullptr, LengthVal);
1390       } else if (ArgNo == 1) {
1391         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1392                      nullptr, LengthVal);
1393       } else {
1394         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1395                           << *MI << "\n");
1396         return indicatePessimisticFixpoint();
1397       }
1398       return Changed;
1399     }
1400 
1401     // TODO: Once we have call site specific value information we can provide
1402     //       call site specific liveness information and then it makes
1403     //       sense to specialize attributes for call sites arguments instead of
1404     //       redirecting requests to the callee argument.
1405     Argument *Arg = getAssociatedArgument();
1406     if (!Arg)
1407       return indicatePessimisticFixpoint();
1408     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1409     auto &ArgAA =
1410         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1411     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1412   }
1413 
1414   /// See AbstractAttribute::trackStatistics()
1415   void trackStatistics() const override {
1416     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1417   }
1418 };
1419 
1420 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1421   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1422       : AAPointerInfoFloating(IRP, A) {}
1423 
1424   /// See AbstractAttribute::trackStatistics()
1425   void trackStatistics() const override {
1426     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1427   }
1428 };
1429 
1430 /// -----------------------NoUnwind Function Attribute--------------------------
1431 
1432 struct AANoUnwindImpl : AANoUnwind {
1433   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1434 
1435   const std::string getAsStr() const override {
1436     return getAssumed() ? "nounwind" : "may-unwind";
1437   }
1438 
1439   /// See AbstractAttribute::updateImpl(...).
1440   ChangeStatus updateImpl(Attributor &A) override {
1441     auto Opcodes = {
1442         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1443         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1444         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1445 
1446     auto CheckForNoUnwind = [&](Instruction &I) {
1447       if (!I.mayThrow())
1448         return true;
1449 
1450       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1451         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1452             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1453         return NoUnwindAA.isAssumedNoUnwind();
1454       }
1455       return false;
1456     };
1457 
1458     bool UsedAssumedInformation = false;
1459     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1460                                    UsedAssumedInformation))
1461       return indicatePessimisticFixpoint();
1462 
1463     return ChangeStatus::UNCHANGED;
1464   }
1465 };
1466 
1467 struct AANoUnwindFunction final : public AANoUnwindImpl {
1468   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1469       : AANoUnwindImpl(IRP, A) {}
1470 
1471   /// See AbstractAttribute::trackStatistics()
1472   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1473 };
1474 
1475 /// NoUnwind attribute deduction for a call sites.
1476 struct AANoUnwindCallSite final : AANoUnwindImpl {
1477   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1478       : AANoUnwindImpl(IRP, A) {}
1479 
1480   /// See AbstractAttribute::initialize(...).
1481   void initialize(Attributor &A) override {
1482     AANoUnwindImpl::initialize(A);
1483     Function *F = getAssociatedFunction();
1484     if (!F || F->isDeclaration())
1485       indicatePessimisticFixpoint();
1486   }
1487 
1488   /// See AbstractAttribute::updateImpl(...).
1489   ChangeStatus updateImpl(Attributor &A) override {
1490     // TODO: Once we have call site specific value information we can provide
1491     //       call site specific liveness information and then it makes
1492     //       sense to specialize attributes for call sites arguments instead of
1493     //       redirecting requests to the callee argument.
1494     Function *F = getAssociatedFunction();
1495     const IRPosition &FnPos = IRPosition::function(*F);
1496     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1497     return clampStateAndIndicateChange(getState(), FnAA.getState());
1498   }
1499 
1500   /// See AbstractAttribute::trackStatistics()
1501   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1502 };
1503 
1504 /// --------------------- Function Return Values -------------------------------
1505 
1506 /// "Attribute" that collects all potential returned values and the return
1507 /// instructions that they arise from.
1508 ///
1509 /// If there is a unique returned value R, the manifest method will:
1510 ///   - mark R with the "returned" attribute, if R is an argument.
1511 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1512 
1513   /// Mapping of values potentially returned by the associated function to the
1514   /// return instructions that might return them.
1515   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1516 
1517   /// State flags
1518   ///
1519   ///{
1520   bool IsFixed = false;
1521   bool IsValidState = true;
1522   ///}
1523 
1524 public:
1525   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1526       : AAReturnedValues(IRP, A) {}
1527 
1528   /// See AbstractAttribute::initialize(...).
1529   void initialize(Attributor &A) override {
1530     // Reset the state.
1531     IsFixed = false;
1532     IsValidState = true;
1533     ReturnedValues.clear();
1534 
1535     Function *F = getAssociatedFunction();
1536     if (!F || F->isDeclaration()) {
1537       indicatePessimisticFixpoint();
1538       return;
1539     }
1540     assert(!F->getReturnType()->isVoidTy() &&
1541            "Did not expect a void return type!");
1542 
1543     // The map from instruction opcodes to those instructions in the function.
1544     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1545 
1546     // Look through all arguments, if one is marked as returned we are done.
1547     for (Argument &Arg : F->args()) {
1548       if (Arg.hasReturnedAttr()) {
1549         auto &ReturnInstSet = ReturnedValues[&Arg];
1550         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1551           for (Instruction *RI : *Insts)
1552             ReturnInstSet.insert(cast<ReturnInst>(RI));
1553 
1554         indicateOptimisticFixpoint();
1555         return;
1556       }
1557     }
1558 
1559     if (!A.isFunctionIPOAmendable(*F))
1560       indicatePessimisticFixpoint();
1561   }
1562 
1563   /// See AbstractAttribute::manifest(...).
1564   ChangeStatus manifest(Attributor &A) override;
1565 
1566   /// See AbstractAttribute::getState(...).
1567   AbstractState &getState() override { return *this; }
1568 
1569   /// See AbstractAttribute::getState(...).
1570   const AbstractState &getState() const override { return *this; }
1571 
1572   /// See AbstractAttribute::updateImpl(Attributor &A).
1573   ChangeStatus updateImpl(Attributor &A) override;
1574 
1575   llvm::iterator_range<iterator> returned_values() override {
1576     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1577   }
1578 
1579   llvm::iterator_range<const_iterator> returned_values() const override {
1580     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1581   }
1582 
1583   /// Return the number of potential return values, -1 if unknown.
1584   size_t getNumReturnValues() const override {
1585     return isValidState() ? ReturnedValues.size() : -1;
1586   }
1587 
1588   /// Return an assumed unique return value if a single candidate is found. If
1589   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1590   /// Optional::NoneType.
1591   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1592 
1593   /// See AbstractState::checkForAllReturnedValues(...).
1594   bool checkForAllReturnedValuesAndReturnInsts(
1595       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1596       const override;
1597 
1598   /// Pretty print the attribute similar to the IR representation.
1599   const std::string getAsStr() const override;
1600 
1601   /// See AbstractState::isAtFixpoint().
1602   bool isAtFixpoint() const override { return IsFixed; }
1603 
1604   /// See AbstractState::isValidState().
1605   bool isValidState() const override { return IsValidState; }
1606 
1607   /// See AbstractState::indicateOptimisticFixpoint(...).
1608   ChangeStatus indicateOptimisticFixpoint() override {
1609     IsFixed = true;
1610     return ChangeStatus::UNCHANGED;
1611   }
1612 
1613   ChangeStatus indicatePessimisticFixpoint() override {
1614     IsFixed = true;
1615     IsValidState = false;
1616     return ChangeStatus::CHANGED;
1617   }
1618 };
1619 
1620 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1621   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1622 
1623   // Bookkeeping.
1624   assert(isValidState());
1625   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1626                   "Number of function with known return values");
1627 
1628   // Check if we have an assumed unique return value that we could manifest.
1629   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1630 
1631   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1632     return Changed;
1633 
1634   // Bookkeeping.
1635   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1636                   "Number of function with unique return");
1637   // If the assumed unique return value is an argument, annotate it.
1638   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1639     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1640             getAssociatedFunction()->getReturnType())) {
1641       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1642       Changed = IRAttribute::manifest(A);
1643     }
1644   }
1645   return Changed;
1646 }
1647 
1648 const std::string AAReturnedValuesImpl::getAsStr() const {
1649   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1650          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1651 }
1652 
1653 Optional<Value *>
1654 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1655   // If checkForAllReturnedValues provides a unique value, ignoring potential
1656   // undef values that can also be present, it is assumed to be the actual
1657   // return value and forwarded to the caller of this method. If there are
1658   // multiple, a nullptr is returned indicating there cannot be a unique
1659   // returned value.
1660   Optional<Value *> UniqueRV;
1661   Type *Ty = getAssociatedFunction()->getReturnType();
1662 
1663   auto Pred = [&](Value &RV) -> bool {
1664     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1665     return UniqueRV != Optional<Value *>(nullptr);
1666   };
1667 
1668   if (!A.checkForAllReturnedValues(Pred, *this))
1669     UniqueRV = nullptr;
1670 
1671   return UniqueRV;
1672 }
1673 
1674 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1675     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1676     const {
1677   if (!isValidState())
1678     return false;
1679 
1680   // Check all returned values but ignore call sites as long as we have not
1681   // encountered an overdefined one during an update.
1682   for (auto &It : ReturnedValues) {
1683     Value *RV = It.first;
1684     if (!Pred(*RV, It.second))
1685       return false;
1686   }
1687 
1688   return true;
1689 }
1690 
1691 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1692   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1693 
1694   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1695                            bool) -> bool {
1696     bool UsedAssumedInformation = false;
1697     Optional<Value *> SimpleRetVal =
1698         A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1699     if (!SimpleRetVal.hasValue())
1700       return true;
1701     if (!SimpleRetVal.getValue())
1702       return false;
1703     Value *RetVal = *SimpleRetVal;
1704     assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1705            "Assumed returned value should be valid in function scope!");
1706     if (ReturnedValues[RetVal].insert(&Ret))
1707       Changed = ChangeStatus::CHANGED;
1708     return true;
1709   };
1710 
1711   auto ReturnInstCB = [&](Instruction &I) {
1712     ReturnInst &Ret = cast<ReturnInst>(I);
1713     return genericValueTraversal<ReturnInst>(
1714         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1715         &I);
1716   };
1717 
1718   // Discover returned values from all live returned instructions in the
1719   // associated function.
1720   bool UsedAssumedInformation = false;
1721   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1722                                  UsedAssumedInformation))
1723     return indicatePessimisticFixpoint();
1724   return Changed;
1725 }
1726 
1727 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1728   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1729       : AAReturnedValuesImpl(IRP, A) {}
1730 
1731   /// See AbstractAttribute::trackStatistics()
1732   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1733 };
1734 
1735 /// Returned values information for a call sites.
1736 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1737   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1738       : AAReturnedValuesImpl(IRP, A) {}
1739 
1740   /// See AbstractAttribute::initialize(...).
1741   void initialize(Attributor &A) override {
1742     // TODO: Once we have call site specific value information we can provide
1743     //       call site specific liveness information and then it makes
1744     //       sense to specialize attributes for call sites instead of
1745     //       redirecting requests to the callee.
1746     llvm_unreachable("Abstract attributes for returned values are not "
1747                      "supported for call sites yet!");
1748   }
1749 
1750   /// See AbstractAttribute::updateImpl(...).
1751   ChangeStatus updateImpl(Attributor &A) override {
1752     return indicatePessimisticFixpoint();
1753   }
1754 
1755   /// See AbstractAttribute::trackStatistics()
1756   void trackStatistics() const override {}
1757 };
1758 
1759 /// ------------------------ NoSync Function Attribute -------------------------
1760 
1761 struct AANoSyncImpl : AANoSync {
1762   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1763 
1764   const std::string getAsStr() const override {
1765     return getAssumed() ? "nosync" : "may-sync";
1766   }
1767 
1768   /// See AbstractAttribute::updateImpl(...).
1769   ChangeStatus updateImpl(Attributor &A) override;
1770 
1771   /// Helper function used to determine whether an instruction is non-relaxed
1772   /// atomic. In other words, if an atomic instruction does not have unordered
1773   /// or monotonic ordering
1774   static bool isNonRelaxedAtomic(Instruction *I);
1775 
1776   /// Helper function specific for intrinsics which are potentially volatile
1777   static bool isNoSyncIntrinsic(Instruction *I);
1778 };
1779 
1780 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1781   if (!I->isAtomic())
1782     return false;
1783 
1784   if (auto *FI = dyn_cast<FenceInst>(I))
1785     // All legal orderings for fence are stronger than monotonic.
1786     return FI->getSyncScopeID() != SyncScope::SingleThread;
1787   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1788     // Unordered is not a legal ordering for cmpxchg.
1789     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1790             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1791   }
1792 
1793   AtomicOrdering Ordering;
1794   switch (I->getOpcode()) {
1795   case Instruction::AtomicRMW:
1796     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1797     break;
1798   case Instruction::Store:
1799     Ordering = cast<StoreInst>(I)->getOrdering();
1800     break;
1801   case Instruction::Load:
1802     Ordering = cast<LoadInst>(I)->getOrdering();
1803     break;
1804   default:
1805     llvm_unreachable(
1806         "New atomic operations need to be known in the attributor.");
1807   }
1808 
1809   return (Ordering != AtomicOrdering::Unordered &&
1810           Ordering != AtomicOrdering::Monotonic);
1811 }
1812 
1813 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1814 /// which would be nosync except that they have a volatile flag.  All other
1815 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1816 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1817   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1818     return !MI->isVolatile();
1819   return false;
1820 }
1821 
1822 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1823 
1824   auto CheckRWInstForNoSync = [&](Instruction &I) {
1825     /// We are looking for volatile instructions or Non-Relaxed atomics.
1826 
1827     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1828       if (CB->hasFnAttr(Attribute::NoSync))
1829         return true;
1830 
1831       if (isNoSyncIntrinsic(&I))
1832         return true;
1833 
1834       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1835           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1836       return NoSyncAA.isAssumedNoSync();
1837     }
1838 
1839     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1840       return true;
1841 
1842     return false;
1843   };
1844 
1845   auto CheckForNoSync = [&](Instruction &I) {
1846     // At this point we handled all read/write effects and they are all
1847     // nosync, so they can be skipped.
1848     if (I.mayReadOrWriteMemory())
1849       return true;
1850 
1851     // non-convergent and readnone imply nosync.
1852     return !cast<CallBase>(I).isConvergent();
1853   };
1854 
1855   bool UsedAssumedInformation = false;
1856   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1857                                           UsedAssumedInformation) ||
1858       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1859                                          UsedAssumedInformation))
1860     return indicatePessimisticFixpoint();
1861 
1862   return ChangeStatus::UNCHANGED;
1863 }
1864 
1865 struct AANoSyncFunction final : public AANoSyncImpl {
1866   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1867       : AANoSyncImpl(IRP, A) {}
1868 
1869   /// See AbstractAttribute::trackStatistics()
1870   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1871 };
1872 
1873 /// NoSync attribute deduction for a call sites.
1874 struct AANoSyncCallSite final : AANoSyncImpl {
1875   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1876       : AANoSyncImpl(IRP, A) {}
1877 
1878   /// See AbstractAttribute::initialize(...).
1879   void initialize(Attributor &A) override {
1880     AANoSyncImpl::initialize(A);
1881     Function *F = getAssociatedFunction();
1882     if (!F || F->isDeclaration())
1883       indicatePessimisticFixpoint();
1884   }
1885 
1886   /// See AbstractAttribute::updateImpl(...).
1887   ChangeStatus updateImpl(Attributor &A) override {
1888     // TODO: Once we have call site specific value information we can provide
1889     //       call site specific liveness information and then it makes
1890     //       sense to specialize attributes for call sites arguments instead of
1891     //       redirecting requests to the callee argument.
1892     Function *F = getAssociatedFunction();
1893     const IRPosition &FnPos = IRPosition::function(*F);
1894     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1895     return clampStateAndIndicateChange(getState(), FnAA.getState());
1896   }
1897 
1898   /// See AbstractAttribute::trackStatistics()
1899   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1900 };
1901 
1902 /// ------------------------ No-Free Attributes ----------------------------
1903 
1904 struct AANoFreeImpl : public AANoFree {
1905   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1906 
1907   /// See AbstractAttribute::updateImpl(...).
1908   ChangeStatus updateImpl(Attributor &A) override {
1909     auto CheckForNoFree = [&](Instruction &I) {
1910       const auto &CB = cast<CallBase>(I);
1911       if (CB.hasFnAttr(Attribute::NoFree))
1912         return true;
1913 
1914       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1915           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1916       return NoFreeAA.isAssumedNoFree();
1917     };
1918 
1919     bool UsedAssumedInformation = false;
1920     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1921                                            UsedAssumedInformation))
1922       return indicatePessimisticFixpoint();
1923     return ChangeStatus::UNCHANGED;
1924   }
1925 
1926   /// See AbstractAttribute::getAsStr().
1927   const std::string getAsStr() const override {
1928     return getAssumed() ? "nofree" : "may-free";
1929   }
1930 };
1931 
1932 struct AANoFreeFunction final : public AANoFreeImpl {
1933   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1934       : AANoFreeImpl(IRP, A) {}
1935 
1936   /// See AbstractAttribute::trackStatistics()
1937   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1938 };
1939 
1940 /// NoFree attribute deduction for a call sites.
1941 struct AANoFreeCallSite final : AANoFreeImpl {
1942   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1943       : AANoFreeImpl(IRP, A) {}
1944 
1945   /// See AbstractAttribute::initialize(...).
1946   void initialize(Attributor &A) override {
1947     AANoFreeImpl::initialize(A);
1948     Function *F = getAssociatedFunction();
1949     if (!F || F->isDeclaration())
1950       indicatePessimisticFixpoint();
1951   }
1952 
1953   /// See AbstractAttribute::updateImpl(...).
1954   ChangeStatus updateImpl(Attributor &A) override {
1955     // TODO: Once we have call site specific value information we can provide
1956     //       call site specific liveness information and then it makes
1957     //       sense to specialize attributes for call sites arguments instead of
1958     //       redirecting requests to the callee argument.
1959     Function *F = getAssociatedFunction();
1960     const IRPosition &FnPos = IRPosition::function(*F);
1961     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1962     return clampStateAndIndicateChange(getState(), FnAA.getState());
1963   }
1964 
1965   /// See AbstractAttribute::trackStatistics()
1966   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1967 };
1968 
1969 /// NoFree attribute for floating values.
1970 struct AANoFreeFloating : AANoFreeImpl {
1971   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1972       : AANoFreeImpl(IRP, A) {}
1973 
1974   /// See AbstractAttribute::trackStatistics()
1975   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1976 
1977   /// See Abstract Attribute::updateImpl(...).
1978   ChangeStatus updateImpl(Attributor &A) override {
1979     const IRPosition &IRP = getIRPosition();
1980 
1981     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1982         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1983     if (NoFreeAA.isAssumedNoFree())
1984       return ChangeStatus::UNCHANGED;
1985 
1986     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1987     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1988       Instruction *UserI = cast<Instruction>(U.getUser());
1989       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1990         if (CB->isBundleOperand(&U))
1991           return false;
1992         if (!CB->isArgOperand(&U))
1993           return true;
1994         unsigned ArgNo = CB->getArgOperandNo(&U);
1995 
1996         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1997             *this, IRPosition::callsite_argument(*CB, ArgNo),
1998             DepClassTy::REQUIRED);
1999         return NoFreeArg.isAssumedNoFree();
2000       }
2001 
2002       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2003           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2004         Follow = true;
2005         return true;
2006       }
2007       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2008           isa<ReturnInst>(UserI))
2009         return true;
2010 
2011       // Unknown user.
2012       return false;
2013     };
2014     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2015       return indicatePessimisticFixpoint();
2016 
2017     return ChangeStatus::UNCHANGED;
2018   }
2019 };
2020 
2021 /// NoFree attribute for a call site argument.
2022 struct AANoFreeArgument final : AANoFreeFloating {
2023   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2024       : AANoFreeFloating(IRP, A) {}
2025 
2026   /// See AbstractAttribute::trackStatistics()
2027   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2028 };
2029 
2030 /// NoFree attribute for call site arguments.
2031 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2032   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2033       : AANoFreeFloating(IRP, A) {}
2034 
2035   /// See AbstractAttribute::updateImpl(...).
2036   ChangeStatus updateImpl(Attributor &A) override {
2037     // TODO: Once we have call site specific value information we can provide
2038     //       call site specific liveness information and then it makes
2039     //       sense to specialize attributes for call sites arguments instead of
2040     //       redirecting requests to the callee argument.
2041     Argument *Arg = getAssociatedArgument();
2042     if (!Arg)
2043       return indicatePessimisticFixpoint();
2044     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2045     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2046     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2047   }
2048 
2049   /// See AbstractAttribute::trackStatistics()
2050   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2051 };
2052 
2053 /// NoFree attribute for function return value.
2054 struct AANoFreeReturned final : AANoFreeFloating {
2055   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2056       : AANoFreeFloating(IRP, A) {
2057     llvm_unreachable("NoFree is not applicable to function returns!");
2058   }
2059 
2060   /// See AbstractAttribute::initialize(...).
2061   void initialize(Attributor &A) override {
2062     llvm_unreachable("NoFree is not applicable to function returns!");
2063   }
2064 
2065   /// See AbstractAttribute::updateImpl(...).
2066   ChangeStatus updateImpl(Attributor &A) override {
2067     llvm_unreachable("NoFree is not applicable to function returns!");
2068   }
2069 
2070   /// See AbstractAttribute::trackStatistics()
2071   void trackStatistics() const override {}
2072 };
2073 
2074 /// NoFree attribute deduction for a call site return value.
2075 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2076   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2077       : AANoFreeFloating(IRP, A) {}
2078 
2079   ChangeStatus manifest(Attributor &A) override {
2080     return ChangeStatus::UNCHANGED;
2081   }
2082   /// See AbstractAttribute::trackStatistics()
2083   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2084 };
2085 
2086 /// ------------------------ NonNull Argument Attribute ------------------------
2087 static int64_t getKnownNonNullAndDerefBytesForUse(
2088     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2089     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2090   TrackUse = false;
2091 
2092   const Value *UseV = U->get();
2093   if (!UseV->getType()->isPointerTy())
2094     return 0;
2095 
2096   // We need to follow common pointer manipulation uses to the accesses they
2097   // feed into. We can try to be smart to avoid looking through things we do not
2098   // like for now, e.g., non-inbounds GEPs.
2099   if (isa<CastInst>(I)) {
2100     TrackUse = true;
2101     return 0;
2102   }
2103 
2104   if (isa<GetElementPtrInst>(I)) {
2105     TrackUse = true;
2106     return 0;
2107   }
2108 
2109   Type *PtrTy = UseV->getType();
2110   const Function *F = I->getFunction();
2111   bool NullPointerIsDefined =
2112       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2113   const DataLayout &DL = A.getInfoCache().getDL();
2114   if (const auto *CB = dyn_cast<CallBase>(I)) {
2115     if (CB->isBundleOperand(U)) {
2116       if (RetainedKnowledge RK = getKnowledgeFromUse(
2117               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2118         IsNonNull |=
2119             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2120         return RK.ArgValue;
2121       }
2122       return 0;
2123     }
2124 
2125     if (CB->isCallee(U)) {
2126       IsNonNull |= !NullPointerIsDefined;
2127       return 0;
2128     }
2129 
2130     unsigned ArgNo = CB->getArgOperandNo(U);
2131     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2132     // As long as we only use known information there is no need to track
2133     // dependences here.
2134     auto &DerefAA =
2135         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2136     IsNonNull |= DerefAA.isKnownNonNull();
2137     return DerefAA.getKnownDereferenceableBytes();
2138   }
2139 
2140   int64_t Offset;
2141   const Value *Base =
2142       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2143   if (Base) {
2144     if (Base == &AssociatedValue &&
2145         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2146       int64_t DerefBytes =
2147           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2148 
2149       IsNonNull |= !NullPointerIsDefined;
2150       return std::max(int64_t(0), DerefBytes);
2151     }
2152   }
2153 
2154   /// Corner case when an offset is 0.
2155   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2156                                               /*AllowNonInbounds*/ true);
2157   if (Base) {
2158     if (Offset == 0 && Base == &AssociatedValue &&
2159         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2160       int64_t DerefBytes =
2161           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2162       IsNonNull |= !NullPointerIsDefined;
2163       return std::max(int64_t(0), DerefBytes);
2164     }
2165   }
2166 
2167   return 0;
2168 }
2169 
2170 struct AANonNullImpl : AANonNull {
2171   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2172       : AANonNull(IRP, A),
2173         NullIsDefined(NullPointerIsDefined(
2174             getAnchorScope(),
2175             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2176 
2177   /// See AbstractAttribute::initialize(...).
2178   void initialize(Attributor &A) override {
2179     Value &V = getAssociatedValue();
2180     if (!NullIsDefined &&
2181         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2182                 /* IgnoreSubsumingPositions */ false, &A)) {
2183       indicateOptimisticFixpoint();
2184       return;
2185     }
2186 
2187     if (isa<ConstantPointerNull>(V)) {
2188       indicatePessimisticFixpoint();
2189       return;
2190     }
2191 
2192     AANonNull::initialize(A);
2193 
2194     bool CanBeNull, CanBeFreed;
2195     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2196                                          CanBeFreed)) {
2197       if (!CanBeNull) {
2198         indicateOptimisticFixpoint();
2199         return;
2200       }
2201     }
2202 
2203     if (isa<GlobalValue>(&getAssociatedValue())) {
2204       indicatePessimisticFixpoint();
2205       return;
2206     }
2207 
2208     if (Instruction *CtxI = getCtxI())
2209       followUsesInMBEC(*this, A, getState(), *CtxI);
2210   }
2211 
2212   /// See followUsesInMBEC
2213   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2214                        AANonNull::StateType &State) {
2215     bool IsNonNull = false;
2216     bool TrackUse = false;
2217     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2218                                        IsNonNull, TrackUse);
2219     State.setKnown(IsNonNull);
2220     return TrackUse;
2221   }
2222 
2223   /// See AbstractAttribute::getAsStr().
2224   const std::string getAsStr() const override {
2225     return getAssumed() ? "nonnull" : "may-null";
2226   }
2227 
2228   /// Flag to determine if the underlying value can be null and still allow
2229   /// valid accesses.
2230   const bool NullIsDefined;
2231 };
2232 
2233 /// NonNull attribute for a floating value.
2234 struct AANonNullFloating : public AANonNullImpl {
2235   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2236       : AANonNullImpl(IRP, A) {}
2237 
2238   /// See AbstractAttribute::updateImpl(...).
2239   ChangeStatus updateImpl(Attributor &A) override {
2240     const DataLayout &DL = A.getDataLayout();
2241 
2242     DominatorTree *DT = nullptr;
2243     AssumptionCache *AC = nullptr;
2244     InformationCache &InfoCache = A.getInfoCache();
2245     if (const Function *Fn = getAnchorScope()) {
2246       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2247       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2248     }
2249 
2250     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2251                             AANonNull::StateType &T, bool Stripped) -> bool {
2252       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2253                                              DepClassTy::REQUIRED);
2254       if (!Stripped && this == &AA) {
2255         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2256           T.indicatePessimisticFixpoint();
2257       } else {
2258         // Use abstract attribute information.
2259         const AANonNull::StateType &NS = AA.getState();
2260         T ^= NS;
2261       }
2262       return T.isValidState();
2263     };
2264 
2265     StateType T;
2266     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2267                                           VisitValueCB, getCtxI()))
2268       return indicatePessimisticFixpoint();
2269 
2270     return clampStateAndIndicateChange(getState(), T);
2271   }
2272 
2273   /// See AbstractAttribute::trackStatistics()
2274   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2275 };
2276 
2277 /// NonNull attribute for function return value.
2278 struct AANonNullReturned final
2279     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2280   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2281       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2282 
2283   /// See AbstractAttribute::getAsStr().
2284   const std::string getAsStr() const override {
2285     return getAssumed() ? "nonnull" : "may-null";
2286   }
2287 
2288   /// See AbstractAttribute::trackStatistics()
2289   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2290 };
2291 
2292 /// NonNull attribute for function argument.
2293 struct AANonNullArgument final
2294     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2295   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2296       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2297 
2298   /// See AbstractAttribute::trackStatistics()
2299   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2300 };
2301 
2302 struct AANonNullCallSiteArgument final : AANonNullFloating {
2303   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2304       : AANonNullFloating(IRP, A) {}
2305 
2306   /// See AbstractAttribute::trackStatistics()
2307   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2308 };
2309 
2310 /// NonNull attribute for a call site return position.
2311 struct AANonNullCallSiteReturned final
2312     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2313   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2314       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2315 
2316   /// See AbstractAttribute::trackStatistics()
2317   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2318 };
2319 
2320 /// ------------------------ No-Recurse Attributes ----------------------------
2321 
2322 struct AANoRecurseImpl : public AANoRecurse {
2323   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2324 
2325   /// See AbstractAttribute::getAsStr()
2326   const std::string getAsStr() const override {
2327     return getAssumed() ? "norecurse" : "may-recurse";
2328   }
2329 };
2330 
2331 struct AANoRecurseFunction final : AANoRecurseImpl {
2332   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2333       : AANoRecurseImpl(IRP, A) {}
2334 
2335   /// See AbstractAttribute::initialize(...).
2336   void initialize(Attributor &A) override {
2337     AANoRecurseImpl::initialize(A);
2338     if (const Function *F = getAnchorScope())
2339       if (A.getInfoCache().getSccSize(*F) != 1)
2340         indicatePessimisticFixpoint();
2341   }
2342 
2343   /// See AbstractAttribute::updateImpl(...).
2344   ChangeStatus updateImpl(Attributor &A) override {
2345 
2346     // If all live call sites are known to be no-recurse, we are as well.
2347     auto CallSitePred = [&](AbstractCallSite ACS) {
2348       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2349           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2350           DepClassTy::NONE);
2351       return NoRecurseAA.isKnownNoRecurse();
2352     };
2353     bool AllCallSitesKnown;
2354     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2355       // If we know all call sites and all are known no-recurse, we are done.
2356       // If all known call sites, which might not be all that exist, are known
2357       // to be no-recurse, we are not done but we can continue to assume
2358       // no-recurse. If one of the call sites we have not visited will become
2359       // live, another update is triggered.
2360       if (AllCallSitesKnown)
2361         indicateOptimisticFixpoint();
2362       return ChangeStatus::UNCHANGED;
2363     }
2364 
2365     // If the above check does not hold anymore we look at the calls.
2366     auto CheckForNoRecurse = [&](Instruction &I) {
2367       const auto &CB = cast<CallBase>(I);
2368       if (CB.hasFnAttr(Attribute::NoRecurse))
2369         return true;
2370 
2371       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2372           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2373       if (!NoRecurseAA.isAssumedNoRecurse())
2374         return false;
2375 
2376       // Recursion to the same function
2377       if (CB.getCalledFunction() == getAnchorScope())
2378         return false;
2379 
2380       return true;
2381     };
2382 
2383     bool UsedAssumedInformation = false;
2384     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2385                                            UsedAssumedInformation))
2386       return indicatePessimisticFixpoint();
2387     return ChangeStatus::UNCHANGED;
2388   }
2389 
2390   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2391 };
2392 
2393 /// NoRecurse attribute deduction for a call sites.
2394 struct AANoRecurseCallSite final : AANoRecurseImpl {
2395   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2396       : AANoRecurseImpl(IRP, A) {}
2397 
2398   /// See AbstractAttribute::initialize(...).
2399   void initialize(Attributor &A) override {
2400     AANoRecurseImpl::initialize(A);
2401     Function *F = getAssociatedFunction();
2402     if (!F || F->isDeclaration())
2403       indicatePessimisticFixpoint();
2404   }
2405 
2406   /// See AbstractAttribute::updateImpl(...).
2407   ChangeStatus updateImpl(Attributor &A) override {
2408     // TODO: Once we have call site specific value information we can provide
2409     //       call site specific liveness information and then it makes
2410     //       sense to specialize attributes for call sites arguments instead of
2411     //       redirecting requests to the callee argument.
2412     Function *F = getAssociatedFunction();
2413     const IRPosition &FnPos = IRPosition::function(*F);
2414     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2415     return clampStateAndIndicateChange(getState(), FnAA.getState());
2416   }
2417 
2418   /// See AbstractAttribute::trackStatistics()
2419   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2420 };
2421 
2422 /// -------------------- Undefined-Behavior Attributes ------------------------
2423 
2424 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2425   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2426       : AAUndefinedBehavior(IRP, A) {}
2427 
2428   /// See AbstractAttribute::updateImpl(...).
2429   // through a pointer (i.e. also branches etc.)
2430   ChangeStatus updateImpl(Attributor &A) override {
2431     const size_t UBPrevSize = KnownUBInsts.size();
2432     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2433 
2434     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2435       // Lang ref now states volatile store is not UB, let's skip them.
2436       if (I.isVolatile() && I.mayWriteToMemory())
2437         return true;
2438 
2439       // Skip instructions that are already saved.
2440       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2441         return true;
2442 
2443       // If we reach here, we know we have an instruction
2444       // that accesses memory through a pointer operand,
2445       // for which getPointerOperand() should give it to us.
2446       Value *PtrOp =
2447           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2448       assert(PtrOp &&
2449              "Expected pointer operand of memory accessing instruction");
2450 
2451       // Either we stopped and the appropriate action was taken,
2452       // or we got back a simplified value to continue.
2453       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2454       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2455         return true;
2456       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2457 
2458       // A memory access through a pointer is considered UB
2459       // only if the pointer has constant null value.
2460       // TODO: Expand it to not only check constant values.
2461       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2462         AssumedNoUBInsts.insert(&I);
2463         return true;
2464       }
2465       const Type *PtrTy = PtrOpVal->getType();
2466 
2467       // Because we only consider instructions inside functions,
2468       // assume that a parent function exists.
2469       const Function *F = I.getFunction();
2470 
2471       // A memory access using constant null pointer is only considered UB
2472       // if null pointer is _not_ defined for the target platform.
2473       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2474         AssumedNoUBInsts.insert(&I);
2475       else
2476         KnownUBInsts.insert(&I);
2477       return true;
2478     };
2479 
2480     auto InspectBrInstForUB = [&](Instruction &I) {
2481       // A conditional branch instruction is considered UB if it has `undef`
2482       // condition.
2483 
2484       // Skip instructions that are already saved.
2485       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2486         return true;
2487 
2488       // We know we have a branch instruction.
2489       auto *BrInst = cast<BranchInst>(&I);
2490 
2491       // Unconditional branches are never considered UB.
2492       if (BrInst->isUnconditional())
2493         return true;
2494 
2495       // Either we stopped and the appropriate action was taken,
2496       // or we got back a simplified value to continue.
2497       Optional<Value *> SimplifiedCond =
2498           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2499       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2500         return true;
2501       AssumedNoUBInsts.insert(&I);
2502       return true;
2503     };
2504 
2505     auto InspectCallSiteForUB = [&](Instruction &I) {
2506       // Check whether a callsite always cause UB or not
2507 
2508       // Skip instructions that are already saved.
2509       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2510         return true;
2511 
2512       // Check nonnull and noundef argument attribute violation for each
2513       // callsite.
2514       CallBase &CB = cast<CallBase>(I);
2515       Function *Callee = CB.getCalledFunction();
2516       if (!Callee)
2517         return true;
2518       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2519         // If current argument is known to be simplified to null pointer and the
2520         // corresponding argument position is known to have nonnull attribute,
2521         // the argument is poison. Furthermore, if the argument is poison and
2522         // the position is known to have noundef attriubte, this callsite is
2523         // considered UB.
2524         if (idx >= Callee->arg_size())
2525           break;
2526         Value *ArgVal = CB.getArgOperand(idx);
2527         if (!ArgVal)
2528           continue;
2529         // Here, we handle three cases.
2530         //   (1) Not having a value means it is dead. (we can replace the value
2531         //       with undef)
2532         //   (2) Simplified to undef. The argument violate noundef attriubte.
2533         //   (3) Simplified to null pointer where known to be nonnull.
2534         //       The argument is a poison value and violate noundef attribute.
2535         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2536         auto &NoUndefAA =
2537             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2538         if (!NoUndefAA.isKnownNoUndef())
2539           continue;
2540         bool UsedAssumedInformation = false;
2541         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2542             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2543         if (UsedAssumedInformation)
2544           continue;
2545         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2546           return true;
2547         if (!SimplifiedVal.hasValue() ||
2548             isa<UndefValue>(*SimplifiedVal.getValue())) {
2549           KnownUBInsts.insert(&I);
2550           continue;
2551         }
2552         if (!ArgVal->getType()->isPointerTy() ||
2553             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2554           continue;
2555         auto &NonNullAA =
2556             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2557         if (NonNullAA.isKnownNonNull())
2558           KnownUBInsts.insert(&I);
2559       }
2560       return true;
2561     };
2562 
2563     auto InspectReturnInstForUB =
2564         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2565           // Check if a return instruction always cause UB or not
2566           // Note: It is guaranteed that the returned position of the anchor
2567           //       scope has noundef attribute when this is called.
2568           //       We also ensure the return position is not "assumed dead"
2569           //       because the returned value was then potentially simplified to
2570           //       `undef` in AAReturnedValues without removing the `noundef`
2571           //       attribute yet.
2572 
2573           // When the returned position has noundef attriubte, UB occur in the
2574           // following cases.
2575           //   (1) Returned value is known to be undef.
2576           //   (2) The value is known to be a null pointer and the returned
2577           //       position has nonnull attribute (because the returned value is
2578           //       poison).
2579           bool FoundUB = false;
2580           if (isa<UndefValue>(V)) {
2581             FoundUB = true;
2582           } else {
2583             if (isa<ConstantPointerNull>(V)) {
2584               auto &NonNullAA = A.getAAFor<AANonNull>(
2585                   *this, IRPosition::returned(*getAnchorScope()),
2586                   DepClassTy::NONE);
2587               if (NonNullAA.isKnownNonNull())
2588                 FoundUB = true;
2589             }
2590           }
2591 
2592           if (FoundUB)
2593             for (ReturnInst *RI : RetInsts)
2594               KnownUBInsts.insert(RI);
2595           return true;
2596         };
2597 
2598     bool UsedAssumedInformation = false;
2599     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2600                               {Instruction::Load, Instruction::Store,
2601                                Instruction::AtomicCmpXchg,
2602                                Instruction::AtomicRMW},
2603                               UsedAssumedInformation,
2604                               /* CheckBBLivenessOnly */ true);
2605     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2606                               UsedAssumedInformation,
2607                               /* CheckBBLivenessOnly */ true);
2608     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2609                                       UsedAssumedInformation);
2610 
2611     // If the returned position of the anchor scope has noundef attriubte, check
2612     // all returned instructions.
2613     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2614       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2615       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2616         auto &RetPosNoUndefAA =
2617             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2618         if (RetPosNoUndefAA.isKnownNoUndef())
2619           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2620                                                     *this);
2621       }
2622     }
2623 
2624     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2625         UBPrevSize != KnownUBInsts.size())
2626       return ChangeStatus::CHANGED;
2627     return ChangeStatus::UNCHANGED;
2628   }
2629 
2630   bool isKnownToCauseUB(Instruction *I) const override {
2631     return KnownUBInsts.count(I);
2632   }
2633 
2634   bool isAssumedToCauseUB(Instruction *I) const override {
2635     // In simple words, if an instruction is not in the assumed to _not_
2636     // cause UB, then it is assumed UB (that includes those
2637     // in the KnownUBInsts set). The rest is boilerplate
2638     // is to ensure that it is one of the instructions we test
2639     // for UB.
2640 
2641     switch (I->getOpcode()) {
2642     case Instruction::Load:
2643     case Instruction::Store:
2644     case Instruction::AtomicCmpXchg:
2645     case Instruction::AtomicRMW:
2646       return !AssumedNoUBInsts.count(I);
2647     case Instruction::Br: {
2648       auto BrInst = cast<BranchInst>(I);
2649       if (BrInst->isUnconditional())
2650         return false;
2651       return !AssumedNoUBInsts.count(I);
2652     } break;
2653     default:
2654       return false;
2655     }
2656     return false;
2657   }
2658 
2659   ChangeStatus manifest(Attributor &A) override {
2660     if (KnownUBInsts.empty())
2661       return ChangeStatus::UNCHANGED;
2662     for (Instruction *I : KnownUBInsts)
2663       A.changeToUnreachableAfterManifest(I);
2664     return ChangeStatus::CHANGED;
2665   }
2666 
2667   /// See AbstractAttribute::getAsStr()
2668   const std::string getAsStr() const override {
2669     return getAssumed() ? "undefined-behavior" : "no-ub";
2670   }
2671 
2672   /// Note: The correctness of this analysis depends on the fact that the
2673   /// following 2 sets will stop changing after some point.
2674   /// "Change" here means that their size changes.
2675   /// The size of each set is monotonically increasing
2676   /// (we only add items to them) and it is upper bounded by the number of
2677   /// instructions in the processed function (we can never save more
2678   /// elements in either set than this number). Hence, at some point,
2679   /// they will stop increasing.
2680   /// Consequently, at some point, both sets will have stopped
2681   /// changing, effectively making the analysis reach a fixpoint.
2682 
2683   /// Note: These 2 sets are disjoint and an instruction can be considered
2684   /// one of 3 things:
2685   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2686   ///    the KnownUBInsts set.
2687   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2688   ///    has a reason to assume it).
2689   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2690   ///    could not find a reason to assume or prove that it can cause UB,
2691   ///    hence it assumes it doesn't. We have a set for these instructions
2692   ///    so that we don't reprocess them in every update.
2693   ///    Note however that instructions in this set may cause UB.
2694 
2695 protected:
2696   /// A set of all live instructions _known_ to cause UB.
2697   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2698 
2699 private:
2700   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2701   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2702 
2703   // Should be called on updates in which if we're processing an instruction
2704   // \p I that depends on a value \p V, one of the following has to happen:
2705   // - If the value is assumed, then stop.
2706   // - If the value is known but undef, then consider it UB.
2707   // - Otherwise, do specific processing with the simplified value.
2708   // We return None in the first 2 cases to signify that an appropriate
2709   // action was taken and the caller should stop.
2710   // Otherwise, we return the simplified value that the caller should
2711   // use for specific processing.
2712   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2713                                          Instruction *I) {
2714     bool UsedAssumedInformation = false;
2715     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2716         IRPosition::value(*V), *this, UsedAssumedInformation);
2717     if (!UsedAssumedInformation) {
2718       // Don't depend on assumed values.
2719       if (!SimplifiedV.hasValue()) {
2720         // If it is known (which we tested above) but it doesn't have a value,
2721         // then we can assume `undef` and hence the instruction is UB.
2722         KnownUBInsts.insert(I);
2723         return llvm::None;
2724       }
2725       if (!SimplifiedV.getValue())
2726         return nullptr;
2727       V = *SimplifiedV;
2728     }
2729     if (isa<UndefValue>(V)) {
2730       KnownUBInsts.insert(I);
2731       return llvm::None;
2732     }
2733     return V;
2734   }
2735 };
2736 
2737 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2738   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2739       : AAUndefinedBehaviorImpl(IRP, A) {}
2740 
2741   /// See AbstractAttribute::trackStatistics()
2742   void trackStatistics() const override {
2743     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2744                "Number of instructions known to have UB");
2745     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2746         KnownUBInsts.size();
2747   }
2748 };
2749 
2750 /// ------------------------ Will-Return Attributes ----------------------------
2751 
2752 // Helper function that checks whether a function has any cycle which we don't
2753 // know if it is bounded or not.
2754 // Loops with maximum trip count are considered bounded, any other cycle not.
2755 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2756   ScalarEvolution *SE =
2757       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2758   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2759   // If either SCEV or LoopInfo is not available for the function then we assume
2760   // any cycle to be unbounded cycle.
2761   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2762   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2763   if (!SE || !LI) {
2764     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2765       if (SCCI.hasCycle())
2766         return true;
2767     return false;
2768   }
2769 
2770   // If there's irreducible control, the function may contain non-loop cycles.
2771   if (mayContainIrreducibleControl(F, LI))
2772     return true;
2773 
2774   // Any loop that does not have a max trip count is considered unbounded cycle.
2775   for (auto *L : LI->getLoopsInPreorder()) {
2776     if (!SE->getSmallConstantMaxTripCount(L))
2777       return true;
2778   }
2779   return false;
2780 }
2781 
2782 struct AAWillReturnImpl : public AAWillReturn {
2783   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2784       : AAWillReturn(IRP, A) {}
2785 
2786   /// See AbstractAttribute::initialize(...).
2787   void initialize(Attributor &A) override {
2788     AAWillReturn::initialize(A);
2789 
2790     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2791       indicateOptimisticFixpoint();
2792       return;
2793     }
2794   }
2795 
2796   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2797   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2798     // Check for `mustprogress` in the scope and the associated function which
2799     // might be different if this is a call site.
2800     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2801         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2802       return false;
2803 
2804     const auto &MemAA =
2805         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2806     if (!MemAA.isAssumedReadOnly())
2807       return false;
2808     if (KnownOnly && !MemAA.isKnownReadOnly())
2809       return false;
2810     if (!MemAA.isKnownReadOnly())
2811       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2812 
2813     return true;
2814   }
2815 
2816   /// See AbstractAttribute::updateImpl(...).
2817   ChangeStatus updateImpl(Attributor &A) override {
2818     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2819       return ChangeStatus::UNCHANGED;
2820 
2821     auto CheckForWillReturn = [&](Instruction &I) {
2822       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2823       const auto &WillReturnAA =
2824           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2825       if (WillReturnAA.isKnownWillReturn())
2826         return true;
2827       if (!WillReturnAA.isAssumedWillReturn())
2828         return false;
2829       const auto &NoRecurseAA =
2830           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2831       return NoRecurseAA.isAssumedNoRecurse();
2832     };
2833 
2834     bool UsedAssumedInformation = false;
2835     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2836                                            UsedAssumedInformation))
2837       return indicatePessimisticFixpoint();
2838 
2839     return ChangeStatus::UNCHANGED;
2840   }
2841 
2842   /// See AbstractAttribute::getAsStr()
2843   const std::string getAsStr() const override {
2844     return getAssumed() ? "willreturn" : "may-noreturn";
2845   }
2846 };
2847 
2848 struct AAWillReturnFunction final : AAWillReturnImpl {
2849   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2850       : AAWillReturnImpl(IRP, A) {}
2851 
2852   /// See AbstractAttribute::initialize(...).
2853   void initialize(Attributor &A) override {
2854     AAWillReturnImpl::initialize(A);
2855 
2856     Function *F = getAnchorScope();
2857     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2858       indicatePessimisticFixpoint();
2859   }
2860 
2861   /// See AbstractAttribute::trackStatistics()
2862   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2863 };
2864 
2865 /// WillReturn attribute deduction for a call sites.
2866 struct AAWillReturnCallSite final : AAWillReturnImpl {
2867   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2868       : AAWillReturnImpl(IRP, A) {}
2869 
2870   /// See AbstractAttribute::initialize(...).
2871   void initialize(Attributor &A) override {
2872     AAWillReturnImpl::initialize(A);
2873     Function *F = getAssociatedFunction();
2874     if (!F || !A.isFunctionIPOAmendable(*F))
2875       indicatePessimisticFixpoint();
2876   }
2877 
2878   /// See AbstractAttribute::updateImpl(...).
2879   ChangeStatus updateImpl(Attributor &A) override {
2880     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2881       return ChangeStatus::UNCHANGED;
2882 
2883     // TODO: Once we have call site specific value information we can provide
2884     //       call site specific liveness information and then it makes
2885     //       sense to specialize attributes for call sites arguments instead of
2886     //       redirecting requests to the callee argument.
2887     Function *F = getAssociatedFunction();
2888     const IRPosition &FnPos = IRPosition::function(*F);
2889     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2890     return clampStateAndIndicateChange(getState(), FnAA.getState());
2891   }
2892 
2893   /// See AbstractAttribute::trackStatistics()
2894   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2895 };
2896 
2897 /// -------------------AAReachability Attribute--------------------------
2898 
2899 struct AAReachabilityImpl : AAReachability {
2900   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2901       : AAReachability(IRP, A) {}
2902 
2903   const std::string getAsStr() const override {
2904     // TODO: Return the number of reachable queries.
2905     return "reachable";
2906   }
2907 
2908   /// See AbstractAttribute::updateImpl(...).
2909   ChangeStatus updateImpl(Attributor &A) override {
2910     return ChangeStatus::UNCHANGED;
2911   }
2912 };
2913 
2914 struct AAReachabilityFunction final : public AAReachabilityImpl {
2915   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2916       : AAReachabilityImpl(IRP, A) {}
2917 
2918   /// See AbstractAttribute::trackStatistics()
2919   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2920 };
2921 
2922 /// ------------------------ NoAlias Argument Attribute ------------------------
2923 
2924 struct AANoAliasImpl : AANoAlias {
2925   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2926     assert(getAssociatedType()->isPointerTy() &&
2927            "Noalias is a pointer attribute");
2928   }
2929 
2930   const std::string getAsStr() const override {
2931     return getAssumed() ? "noalias" : "may-alias";
2932   }
2933 };
2934 
2935 /// NoAlias attribute for a floating value.
2936 struct AANoAliasFloating final : AANoAliasImpl {
2937   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2938       : AANoAliasImpl(IRP, A) {}
2939 
2940   /// See AbstractAttribute::initialize(...).
2941   void initialize(Attributor &A) override {
2942     AANoAliasImpl::initialize(A);
2943     Value *Val = &getAssociatedValue();
2944     do {
2945       CastInst *CI = dyn_cast<CastInst>(Val);
2946       if (!CI)
2947         break;
2948       Value *Base = CI->getOperand(0);
2949       if (!Base->hasOneUse())
2950         break;
2951       Val = Base;
2952     } while (true);
2953 
2954     if (!Val->getType()->isPointerTy()) {
2955       indicatePessimisticFixpoint();
2956       return;
2957     }
2958 
2959     if (isa<AllocaInst>(Val))
2960       indicateOptimisticFixpoint();
2961     else if (isa<ConstantPointerNull>(Val) &&
2962              !NullPointerIsDefined(getAnchorScope(),
2963                                    Val->getType()->getPointerAddressSpace()))
2964       indicateOptimisticFixpoint();
2965     else if (Val != &getAssociatedValue()) {
2966       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2967           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2968       if (ValNoAliasAA.isKnownNoAlias())
2969         indicateOptimisticFixpoint();
2970     }
2971   }
2972 
2973   /// See AbstractAttribute::updateImpl(...).
2974   ChangeStatus updateImpl(Attributor &A) override {
2975     // TODO: Implement this.
2976     return indicatePessimisticFixpoint();
2977   }
2978 
2979   /// See AbstractAttribute::trackStatistics()
2980   void trackStatistics() const override {
2981     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2982   }
2983 };
2984 
2985 /// NoAlias attribute for an argument.
2986 struct AANoAliasArgument final
2987     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2988   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2989   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2990 
2991   /// See AbstractAttribute::initialize(...).
2992   void initialize(Attributor &A) override {
2993     Base::initialize(A);
2994     // See callsite argument attribute and callee argument attribute.
2995     if (hasAttr({Attribute::ByVal}))
2996       indicateOptimisticFixpoint();
2997   }
2998 
2999   /// See AbstractAttribute::update(...).
3000   ChangeStatus updateImpl(Attributor &A) override {
3001     // We have to make sure no-alias on the argument does not break
3002     // synchronization when this is a callback argument, see also [1] below.
3003     // If synchronization cannot be affected, we delegate to the base updateImpl
3004     // function, otherwise we give up for now.
3005 
3006     // If the function is no-sync, no-alias cannot break synchronization.
3007     const auto &NoSyncAA =
3008         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3009                              DepClassTy::OPTIONAL);
3010     if (NoSyncAA.isAssumedNoSync())
3011       return Base::updateImpl(A);
3012 
3013     // If the argument is read-only, no-alias cannot break synchronization.
3014     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3015         *this, getIRPosition(), DepClassTy::OPTIONAL);
3016     if (MemBehaviorAA.isAssumedReadOnly())
3017       return Base::updateImpl(A);
3018 
3019     // If the argument is never passed through callbacks, no-alias cannot break
3020     // synchronization.
3021     bool AllCallSitesKnown;
3022     if (A.checkForAllCallSites(
3023             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3024             true, AllCallSitesKnown))
3025       return Base::updateImpl(A);
3026 
3027     // TODO: add no-alias but make sure it doesn't break synchronization by
3028     // introducing fake uses. See:
3029     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3030     //     International Workshop on OpenMP 2018,
3031     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3032 
3033     return indicatePessimisticFixpoint();
3034   }
3035 
3036   /// See AbstractAttribute::trackStatistics()
3037   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3038 };
3039 
3040 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3041   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3042       : AANoAliasImpl(IRP, A) {}
3043 
3044   /// See AbstractAttribute::initialize(...).
3045   void initialize(Attributor &A) override {
3046     // See callsite argument attribute and callee argument attribute.
3047     const auto &CB = cast<CallBase>(getAnchorValue());
3048     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3049       indicateOptimisticFixpoint();
3050     Value &Val = getAssociatedValue();
3051     if (isa<ConstantPointerNull>(Val) &&
3052         !NullPointerIsDefined(getAnchorScope(),
3053                               Val.getType()->getPointerAddressSpace()))
3054       indicateOptimisticFixpoint();
3055   }
3056 
3057   /// Determine if the underlying value may alias with the call site argument
3058   /// \p OtherArgNo of \p ICS (= the underlying call site).
3059   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3060                             const AAMemoryBehavior &MemBehaviorAA,
3061                             const CallBase &CB, unsigned OtherArgNo) {
3062     // We do not need to worry about aliasing with the underlying IRP.
3063     if (this->getCalleeArgNo() == (int)OtherArgNo)
3064       return false;
3065 
3066     // If it is not a pointer or pointer vector we do not alias.
3067     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3068     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3069       return false;
3070 
3071     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3072         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3073 
3074     // If the argument is readnone, there is no read-write aliasing.
3075     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3076       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3077       return false;
3078     }
3079 
3080     // If the argument is readonly and the underlying value is readonly, there
3081     // is no read-write aliasing.
3082     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3083     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3084       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3085       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3086       return false;
3087     }
3088 
3089     // We have to utilize actual alias analysis queries so we need the object.
3090     if (!AAR)
3091       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3092 
3093     // Try to rule it out at the call site.
3094     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3095     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3096                          "callsite arguments: "
3097                       << getAssociatedValue() << " " << *ArgOp << " => "
3098                       << (IsAliasing ? "" : "no-") << "alias \n");
3099 
3100     return IsAliasing;
3101   }
3102 
3103   bool
3104   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3105                                          const AAMemoryBehavior &MemBehaviorAA,
3106                                          const AANoAlias &NoAliasAA) {
3107     // We can deduce "noalias" if the following conditions hold.
3108     // (i)   Associated value is assumed to be noalias in the definition.
3109     // (ii)  Associated value is assumed to be no-capture in all the uses
3110     //       possibly executed before this callsite.
3111     // (iii) There is no other pointer argument which could alias with the
3112     //       value.
3113 
3114     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3115     if (!AssociatedValueIsNoAliasAtDef) {
3116       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3117                         << " is not no-alias at the definition\n");
3118       return false;
3119     }
3120 
3121     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3122 
3123     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3124     const Function *ScopeFn = VIRP.getAnchorScope();
3125     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3126     // Check whether the value is captured in the scope using AANoCapture.
3127     //      Look at CFG and check only uses possibly executed before this
3128     //      callsite.
3129     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3130       Instruction *UserI = cast<Instruction>(U.getUser());
3131 
3132       // If UserI is the curr instruction and there is a single potential use of
3133       // the value in UserI we allow the use.
3134       // TODO: We should inspect the operands and allow those that cannot alias
3135       //       with the value.
3136       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3137         return true;
3138 
3139       if (ScopeFn) {
3140         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3141             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3142 
3143         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3144           return true;
3145 
3146         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3147           if (CB->isArgOperand(&U)) {
3148 
3149             unsigned ArgNo = CB->getArgOperandNo(&U);
3150 
3151             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3152                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3153                 DepClassTy::OPTIONAL);
3154 
3155             if (NoCaptureAA.isAssumedNoCapture())
3156               return true;
3157           }
3158         }
3159       }
3160 
3161       // For cases which can potentially have more users
3162       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3163           isa<SelectInst>(U)) {
3164         Follow = true;
3165         return true;
3166       }
3167 
3168       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3169       return false;
3170     };
3171 
3172     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3173       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3174         LLVM_DEBUG(
3175             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3176                    << " cannot be noalias as it is potentially captured\n");
3177         return false;
3178       }
3179     }
3180     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3181 
3182     // Check there is no other pointer argument which could alias with the
3183     // value passed at this call site.
3184     // TODO: AbstractCallSite
3185     const auto &CB = cast<CallBase>(getAnchorValue());
3186     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3187       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3188         return false;
3189 
3190     return true;
3191   }
3192 
3193   /// See AbstractAttribute::updateImpl(...).
3194   ChangeStatus updateImpl(Attributor &A) override {
3195     // If the argument is readnone we are done as there are no accesses via the
3196     // argument.
3197     auto &MemBehaviorAA =
3198         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3199     if (MemBehaviorAA.isAssumedReadNone()) {
3200       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3201       return ChangeStatus::UNCHANGED;
3202     }
3203 
3204     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3205     const auto &NoAliasAA =
3206         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3207 
3208     AAResults *AAR = nullptr;
3209     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3210                                                NoAliasAA)) {
3211       LLVM_DEBUG(
3212           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3213       return ChangeStatus::UNCHANGED;
3214     }
3215 
3216     return indicatePessimisticFixpoint();
3217   }
3218 
3219   /// See AbstractAttribute::trackStatistics()
3220   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3221 };
3222 
3223 /// NoAlias attribute for function return value.
3224 struct AANoAliasReturned final : AANoAliasImpl {
3225   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3226       : AANoAliasImpl(IRP, A) {}
3227 
3228   /// See AbstractAttribute::initialize(...).
3229   void initialize(Attributor &A) override {
3230     AANoAliasImpl::initialize(A);
3231     Function *F = getAssociatedFunction();
3232     if (!F || F->isDeclaration())
3233       indicatePessimisticFixpoint();
3234   }
3235 
3236   /// See AbstractAttribute::updateImpl(...).
3237   virtual ChangeStatus updateImpl(Attributor &A) override {
3238 
3239     auto CheckReturnValue = [&](Value &RV) -> bool {
3240       if (Constant *C = dyn_cast<Constant>(&RV))
3241         if (C->isNullValue() || isa<UndefValue>(C))
3242           return true;
3243 
3244       /// For now, we can only deduce noalias if we have call sites.
3245       /// FIXME: add more support.
3246       if (!isa<CallBase>(&RV))
3247         return false;
3248 
3249       const IRPosition &RVPos = IRPosition::value(RV);
3250       const auto &NoAliasAA =
3251           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3252       if (!NoAliasAA.isAssumedNoAlias())
3253         return false;
3254 
3255       const auto &NoCaptureAA =
3256           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3257       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3258     };
3259 
3260     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3261       return indicatePessimisticFixpoint();
3262 
3263     return ChangeStatus::UNCHANGED;
3264   }
3265 
3266   /// See AbstractAttribute::trackStatistics()
3267   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3268 };
3269 
3270 /// NoAlias attribute deduction for a call site return value.
3271 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3272   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3273       : AANoAliasImpl(IRP, A) {}
3274 
3275   /// See AbstractAttribute::initialize(...).
3276   void initialize(Attributor &A) override {
3277     AANoAliasImpl::initialize(A);
3278     Function *F = getAssociatedFunction();
3279     if (!F || F->isDeclaration())
3280       indicatePessimisticFixpoint();
3281   }
3282 
3283   /// See AbstractAttribute::updateImpl(...).
3284   ChangeStatus updateImpl(Attributor &A) override {
3285     // TODO: Once we have call site specific value information we can provide
3286     //       call site specific liveness information and then it makes
3287     //       sense to specialize attributes for call sites arguments instead of
3288     //       redirecting requests to the callee argument.
3289     Function *F = getAssociatedFunction();
3290     const IRPosition &FnPos = IRPosition::returned(*F);
3291     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3292     return clampStateAndIndicateChange(getState(), FnAA.getState());
3293   }
3294 
3295   /// See AbstractAttribute::trackStatistics()
3296   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3297 };
3298 
3299 /// -------------------AAIsDead Function Attribute-----------------------
3300 
3301 struct AAIsDeadValueImpl : public AAIsDead {
3302   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3303 
3304   /// See AAIsDead::isAssumedDead().
3305   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3306 
3307   /// See AAIsDead::isKnownDead().
3308   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3309 
3310   /// See AAIsDead::isAssumedDead(BasicBlock *).
3311   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3312 
3313   /// See AAIsDead::isKnownDead(BasicBlock *).
3314   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3315 
3316   /// See AAIsDead::isAssumedDead(Instruction *I).
3317   bool isAssumedDead(const Instruction *I) const override {
3318     return I == getCtxI() && isAssumedDead();
3319   }
3320 
3321   /// See AAIsDead::isKnownDead(Instruction *I).
3322   bool isKnownDead(const Instruction *I) const override {
3323     return isAssumedDead(I) && isKnownDead();
3324   }
3325 
3326   /// See AbstractAttribute::getAsStr().
3327   const std::string getAsStr() const override {
3328     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3329   }
3330 
3331   /// Check if all uses are assumed dead.
3332   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3333     // Callers might not check the type, void has no uses.
3334     if (V.getType()->isVoidTy())
3335       return true;
3336 
3337     // If we replace a value with a constant there are no uses left afterwards.
3338     if (!isa<Constant>(V)) {
3339       bool UsedAssumedInformation = false;
3340       Optional<Constant *> C =
3341           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3342       if (!C.hasValue() || *C)
3343         return true;
3344     }
3345 
3346     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3347     // Explicitly set the dependence class to required because we want a long
3348     // chain of N dependent instructions to be considered live as soon as one is
3349     // without going through N update cycles. This is not required for
3350     // correctness.
3351     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3352                              DepClassTy::REQUIRED);
3353   }
3354 
3355   /// Determine if \p I is assumed to be side-effect free.
3356   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3357     if (!I || wouldInstructionBeTriviallyDead(I))
3358       return true;
3359 
3360     auto *CB = dyn_cast<CallBase>(I);
3361     if (!CB || isa<IntrinsicInst>(CB))
3362       return false;
3363 
3364     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3365     const auto &NoUnwindAA =
3366         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3367     if (!NoUnwindAA.isAssumedNoUnwind())
3368       return false;
3369     if (!NoUnwindAA.isKnownNoUnwind())
3370       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3371 
3372     const auto &MemBehaviorAA =
3373         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3374     if (MemBehaviorAA.isAssumedReadOnly()) {
3375       if (!MemBehaviorAA.isKnownReadOnly())
3376         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3377       return true;
3378     }
3379     return false;
3380   }
3381 };
3382 
3383 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3384   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3385       : AAIsDeadValueImpl(IRP, A) {}
3386 
3387   /// See AbstractAttribute::initialize(...).
3388   void initialize(Attributor &A) override {
3389     if (isa<UndefValue>(getAssociatedValue())) {
3390       indicatePessimisticFixpoint();
3391       return;
3392     }
3393 
3394     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3395     if (!isAssumedSideEffectFree(A, I)) {
3396       if (!isa_and_nonnull<StoreInst>(I))
3397         indicatePessimisticFixpoint();
3398       else
3399         removeAssumedBits(HAS_NO_EFFECT);
3400     }
3401   }
3402 
3403   bool isDeadStore(Attributor &A, StoreInst &SI) {
3404     // Lang ref now states volatile store is not UB/dead, let's skip them.
3405     if (SI.isVolatile())
3406       return false;
3407 
3408     bool UsedAssumedInformation = false;
3409     SmallSetVector<Value *, 4> PotentialCopies;
3410     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3411                                              UsedAssumedInformation))
3412       return false;
3413     return llvm::all_of(PotentialCopies, [&](Value *V) {
3414       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3415                              UsedAssumedInformation);
3416     });
3417   }
3418 
3419   /// See AbstractAttribute::updateImpl(...).
3420   ChangeStatus updateImpl(Attributor &A) override {
3421     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3422     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3423       if (!isDeadStore(A, *SI))
3424         return indicatePessimisticFixpoint();
3425     } else {
3426       if (!isAssumedSideEffectFree(A, I))
3427         return indicatePessimisticFixpoint();
3428       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3429         return indicatePessimisticFixpoint();
3430     }
3431     return ChangeStatus::UNCHANGED;
3432   }
3433 
3434   /// See AbstractAttribute::manifest(...).
3435   ChangeStatus manifest(Attributor &A) override {
3436     Value &V = getAssociatedValue();
3437     if (auto *I = dyn_cast<Instruction>(&V)) {
3438       // If we get here we basically know the users are all dead. We check if
3439       // isAssumedSideEffectFree returns true here again because it might not be
3440       // the case and only the users are dead but the instruction (=call) is
3441       // still needed.
3442       if (isa<StoreInst>(I) ||
3443           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3444         A.deleteAfterManifest(*I);
3445         return ChangeStatus::CHANGED;
3446       }
3447     }
3448     if (V.use_empty())
3449       return ChangeStatus::UNCHANGED;
3450 
3451     bool UsedAssumedInformation = false;
3452     Optional<Constant *> C =
3453         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3454     if (C.hasValue() && C.getValue())
3455       return ChangeStatus::UNCHANGED;
3456 
3457     // Replace the value with undef as it is dead but keep droppable uses around
3458     // as they provide information we don't want to give up on just yet.
3459     UndefValue &UV = *UndefValue::get(V.getType());
3460     bool AnyChange =
3461         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3462     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3463   }
3464 
3465   /// See AbstractAttribute::trackStatistics()
3466   void trackStatistics() const override {
3467     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3468   }
3469 };
3470 
3471 struct AAIsDeadArgument : public AAIsDeadFloating {
3472   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3473       : AAIsDeadFloating(IRP, A) {}
3474 
3475   /// See AbstractAttribute::initialize(...).
3476   void initialize(Attributor &A) override {
3477     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3478       indicatePessimisticFixpoint();
3479   }
3480 
3481   /// See AbstractAttribute::manifest(...).
3482   ChangeStatus manifest(Attributor &A) override {
3483     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3484     Argument &Arg = *getAssociatedArgument();
3485     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3486       if (A.registerFunctionSignatureRewrite(
3487               Arg, /* ReplacementTypes */ {},
3488               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3489               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3490         Arg.dropDroppableUses();
3491         return ChangeStatus::CHANGED;
3492       }
3493     return Changed;
3494   }
3495 
3496   /// See AbstractAttribute::trackStatistics()
3497   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3498 };
3499 
3500 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3501   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3502       : AAIsDeadValueImpl(IRP, A) {}
3503 
3504   /// See AbstractAttribute::initialize(...).
3505   void initialize(Attributor &A) override {
3506     if (isa<UndefValue>(getAssociatedValue()))
3507       indicatePessimisticFixpoint();
3508   }
3509 
3510   /// See AbstractAttribute::updateImpl(...).
3511   ChangeStatus updateImpl(Attributor &A) override {
3512     // TODO: Once we have call site specific value information we can provide
3513     //       call site specific liveness information and then it makes
3514     //       sense to specialize attributes for call sites arguments instead of
3515     //       redirecting requests to the callee argument.
3516     Argument *Arg = getAssociatedArgument();
3517     if (!Arg)
3518       return indicatePessimisticFixpoint();
3519     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3520     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3521     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3522   }
3523 
3524   /// See AbstractAttribute::manifest(...).
3525   ChangeStatus manifest(Attributor &A) override {
3526     CallBase &CB = cast<CallBase>(getAnchorValue());
3527     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3528     assert(!isa<UndefValue>(U.get()) &&
3529            "Expected undef values to be filtered out!");
3530     UndefValue &UV = *UndefValue::get(U->getType());
3531     if (A.changeUseAfterManifest(U, UV))
3532       return ChangeStatus::CHANGED;
3533     return ChangeStatus::UNCHANGED;
3534   }
3535 
3536   /// See AbstractAttribute::trackStatistics()
3537   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3538 };
3539 
3540 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3541   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3542       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3543 
3544   /// See AAIsDead::isAssumedDead().
3545   bool isAssumedDead() const override {
3546     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3547   }
3548 
3549   /// See AbstractAttribute::initialize(...).
3550   void initialize(Attributor &A) override {
3551     if (isa<UndefValue>(getAssociatedValue())) {
3552       indicatePessimisticFixpoint();
3553       return;
3554     }
3555 
3556     // We track this separately as a secondary state.
3557     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3558   }
3559 
3560   /// See AbstractAttribute::updateImpl(...).
3561   ChangeStatus updateImpl(Attributor &A) override {
3562     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3563     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3564       IsAssumedSideEffectFree = false;
3565       Changed = ChangeStatus::CHANGED;
3566     }
3567     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3568       return indicatePessimisticFixpoint();
3569     return Changed;
3570   }
3571 
3572   /// See AbstractAttribute::trackStatistics()
3573   void trackStatistics() const override {
3574     if (IsAssumedSideEffectFree)
3575       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3576     else
3577       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3578   }
3579 
3580   /// See AbstractAttribute::getAsStr().
3581   const std::string getAsStr() const override {
3582     return isAssumedDead()
3583                ? "assumed-dead"
3584                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3585   }
3586 
3587 private:
3588   bool IsAssumedSideEffectFree;
3589 };
3590 
3591 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3592   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3593       : AAIsDeadValueImpl(IRP, A) {}
3594 
3595   /// See AbstractAttribute::updateImpl(...).
3596   ChangeStatus updateImpl(Attributor &A) override {
3597 
3598     bool UsedAssumedInformation = false;
3599     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3600                               {Instruction::Ret}, UsedAssumedInformation);
3601 
3602     auto PredForCallSite = [&](AbstractCallSite ACS) {
3603       if (ACS.isCallbackCall() || !ACS.getInstruction())
3604         return false;
3605       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3606     };
3607 
3608     bool AllCallSitesKnown;
3609     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3610                                 AllCallSitesKnown))
3611       return indicatePessimisticFixpoint();
3612 
3613     return ChangeStatus::UNCHANGED;
3614   }
3615 
3616   /// See AbstractAttribute::manifest(...).
3617   ChangeStatus manifest(Attributor &A) override {
3618     // TODO: Rewrite the signature to return void?
3619     bool AnyChange = false;
3620     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3621     auto RetInstPred = [&](Instruction &I) {
3622       ReturnInst &RI = cast<ReturnInst>(I);
3623       if (!isa<UndefValue>(RI.getReturnValue()))
3624         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3625       return true;
3626     };
3627     bool UsedAssumedInformation = false;
3628     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3629                               UsedAssumedInformation);
3630     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3631   }
3632 
3633   /// See AbstractAttribute::trackStatistics()
3634   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3635 };
3636 
3637 struct AAIsDeadFunction : public AAIsDead {
3638   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3639 
3640   /// See AbstractAttribute::initialize(...).
3641   void initialize(Attributor &A) override {
3642     const Function *F = getAnchorScope();
3643     if (F && !F->isDeclaration()) {
3644       // We only want to compute liveness once. If the function is not part of
3645       // the SCC, skip it.
3646       if (A.isRunOn(*const_cast<Function *>(F))) {
3647         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3648         assumeLive(A, F->getEntryBlock());
3649       } else {
3650         indicatePessimisticFixpoint();
3651       }
3652     }
3653   }
3654 
3655   /// See AbstractAttribute::getAsStr().
3656   const std::string getAsStr() const override {
3657     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3658            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3659            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3660            std::to_string(KnownDeadEnds.size()) + "]";
3661   }
3662 
3663   /// See AbstractAttribute::manifest(...).
3664   ChangeStatus manifest(Attributor &A) override {
3665     assert(getState().isValidState() &&
3666            "Attempted to manifest an invalid state!");
3667 
3668     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3669     Function &F = *getAnchorScope();
3670 
3671     if (AssumedLiveBlocks.empty()) {
3672       A.deleteAfterManifest(F);
3673       return ChangeStatus::CHANGED;
3674     }
3675 
3676     // Flag to determine if we can change an invoke to a call assuming the
3677     // callee is nounwind. This is not possible if the personality of the
3678     // function allows to catch asynchronous exceptions.
3679     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3680 
3681     KnownDeadEnds.set_union(ToBeExploredFrom);
3682     for (const Instruction *DeadEndI : KnownDeadEnds) {
3683       auto *CB = dyn_cast<CallBase>(DeadEndI);
3684       if (!CB)
3685         continue;
3686       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3687           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3688       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3689       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3690         continue;
3691 
3692       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3693         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3694       else
3695         A.changeToUnreachableAfterManifest(
3696             const_cast<Instruction *>(DeadEndI->getNextNode()));
3697       HasChanged = ChangeStatus::CHANGED;
3698     }
3699 
3700     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3701     for (BasicBlock &BB : F)
3702       if (!AssumedLiveBlocks.count(&BB)) {
3703         A.deleteAfterManifest(BB);
3704         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3705       }
3706 
3707     return HasChanged;
3708   }
3709 
3710   /// See AbstractAttribute::updateImpl(...).
3711   ChangeStatus updateImpl(Attributor &A) override;
3712 
3713   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3714     return !AssumedLiveEdges.count(std::make_pair(From, To));
3715   }
3716 
3717   /// See AbstractAttribute::trackStatistics()
3718   void trackStatistics() const override {}
3719 
3720   /// Returns true if the function is assumed dead.
3721   bool isAssumedDead() const override { return false; }
3722 
3723   /// See AAIsDead::isKnownDead().
3724   bool isKnownDead() const override { return false; }
3725 
3726   /// See AAIsDead::isAssumedDead(BasicBlock *).
3727   bool isAssumedDead(const BasicBlock *BB) const override {
3728     assert(BB->getParent() == getAnchorScope() &&
3729            "BB must be in the same anchor scope function.");
3730 
3731     if (!getAssumed())
3732       return false;
3733     return !AssumedLiveBlocks.count(BB);
3734   }
3735 
3736   /// See AAIsDead::isKnownDead(BasicBlock *).
3737   bool isKnownDead(const BasicBlock *BB) const override {
3738     return getKnown() && isAssumedDead(BB);
3739   }
3740 
3741   /// See AAIsDead::isAssumed(Instruction *I).
3742   bool isAssumedDead(const Instruction *I) const override {
3743     assert(I->getParent()->getParent() == getAnchorScope() &&
3744            "Instruction must be in the same anchor scope function.");
3745 
3746     if (!getAssumed())
3747       return false;
3748 
3749     // If it is not in AssumedLiveBlocks then it for sure dead.
3750     // Otherwise, it can still be after noreturn call in a live block.
3751     if (!AssumedLiveBlocks.count(I->getParent()))
3752       return true;
3753 
3754     // If it is not after a liveness barrier it is live.
3755     const Instruction *PrevI = I->getPrevNode();
3756     while (PrevI) {
3757       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3758         return true;
3759       PrevI = PrevI->getPrevNode();
3760     }
3761     return false;
3762   }
3763 
3764   /// See AAIsDead::isKnownDead(Instruction *I).
3765   bool isKnownDead(const Instruction *I) const override {
3766     return getKnown() && isAssumedDead(I);
3767   }
3768 
3769   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3770   /// that internal function called from \p BB should now be looked at.
3771   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3772     if (!AssumedLiveBlocks.insert(&BB).second)
3773       return false;
3774 
3775     // We assume that all of BB is (probably) live now and if there are calls to
3776     // internal functions we will assume that those are now live as well. This
3777     // is a performance optimization for blocks with calls to a lot of internal
3778     // functions. It can however cause dead functions to be treated as live.
3779     for (const Instruction &I : BB)
3780       if (const auto *CB = dyn_cast<CallBase>(&I))
3781         if (const Function *F = CB->getCalledFunction())
3782           if (F->hasLocalLinkage())
3783             A.markLiveInternalFunction(*F);
3784     return true;
3785   }
3786 
3787   /// Collection of instructions that need to be explored again, e.g., we
3788   /// did assume they do not transfer control to (one of their) successors.
3789   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3790 
3791   /// Collection of instructions that are known to not transfer control.
3792   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3793 
3794   /// Collection of all assumed live edges
3795   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3796 
3797   /// Collection of all assumed live BasicBlocks.
3798   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3799 };
3800 
3801 static bool
3802 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3803                         AbstractAttribute &AA,
3804                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3805   const IRPosition &IPos = IRPosition::callsite_function(CB);
3806 
3807   const auto &NoReturnAA =
3808       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3809   if (NoReturnAA.isAssumedNoReturn())
3810     return !NoReturnAA.isKnownNoReturn();
3811   if (CB.isTerminator())
3812     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3813   else
3814     AliveSuccessors.push_back(CB.getNextNode());
3815   return false;
3816 }
3817 
3818 static bool
3819 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3820                         AbstractAttribute &AA,
3821                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3822   bool UsedAssumedInformation =
3823       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3824 
3825   // First, determine if we can change an invoke to a call assuming the
3826   // callee is nounwind. This is not possible if the personality of the
3827   // function allows to catch asynchronous exceptions.
3828   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3829     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3830   } else {
3831     const IRPosition &IPos = IRPosition::callsite_function(II);
3832     const auto &AANoUnw =
3833         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3834     if (AANoUnw.isAssumedNoUnwind()) {
3835       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3836     } else {
3837       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3838     }
3839   }
3840   return UsedAssumedInformation;
3841 }
3842 
3843 static bool
3844 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3845                         AbstractAttribute &AA,
3846                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3847   bool UsedAssumedInformation = false;
3848   if (BI.getNumSuccessors() == 1) {
3849     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3850   } else {
3851     Optional<Constant *> C =
3852         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3853     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3854       // No value yet, assume both edges are dead.
3855     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3856       const BasicBlock *SuccBB =
3857           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3858       AliveSuccessors.push_back(&SuccBB->front());
3859     } else {
3860       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3861       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3862       UsedAssumedInformation = false;
3863     }
3864   }
3865   return UsedAssumedInformation;
3866 }
3867 
3868 static bool
3869 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3870                         AbstractAttribute &AA,
3871                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3872   bool UsedAssumedInformation = false;
3873   Optional<Constant *> C =
3874       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3875   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3876     // No value yet, assume all edges are dead.
3877   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3878     for (auto &CaseIt : SI.cases()) {
3879       if (CaseIt.getCaseValue() == C.getValue()) {
3880         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3881         return UsedAssumedInformation;
3882       }
3883     }
3884     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3885     return UsedAssumedInformation;
3886   } else {
3887     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3888       AliveSuccessors.push_back(&SuccBB->front());
3889   }
3890   return UsedAssumedInformation;
3891 }
3892 
3893 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3894   ChangeStatus Change = ChangeStatus::UNCHANGED;
3895 
3896   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3897                     << getAnchorScope()->size() << "] BBs and "
3898                     << ToBeExploredFrom.size() << " exploration points and "
3899                     << KnownDeadEnds.size() << " known dead ends\n");
3900 
3901   // Copy and clear the list of instructions we need to explore from. It is
3902   // refilled with instructions the next update has to look at.
3903   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3904                                                ToBeExploredFrom.end());
3905   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3906 
3907   SmallVector<const Instruction *, 8> AliveSuccessors;
3908   while (!Worklist.empty()) {
3909     const Instruction *I = Worklist.pop_back_val();
3910     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3911 
3912     // Fast forward for uninteresting instructions. We could look for UB here
3913     // though.
3914     while (!I->isTerminator() && !isa<CallBase>(I))
3915       I = I->getNextNode();
3916 
3917     AliveSuccessors.clear();
3918 
3919     bool UsedAssumedInformation = false;
3920     switch (I->getOpcode()) {
3921     // TODO: look for (assumed) UB to backwards propagate "deadness".
3922     default:
3923       assert(I->isTerminator() &&
3924              "Expected non-terminators to be handled already!");
3925       for (const BasicBlock *SuccBB : successors(I->getParent()))
3926         AliveSuccessors.push_back(&SuccBB->front());
3927       break;
3928     case Instruction::Call:
3929       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3930                                                        *this, AliveSuccessors);
3931       break;
3932     case Instruction::Invoke:
3933       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3934                                                        *this, AliveSuccessors);
3935       break;
3936     case Instruction::Br:
3937       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3938                                                        *this, AliveSuccessors);
3939       break;
3940     case Instruction::Switch:
3941       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3942                                                        *this, AliveSuccessors);
3943       break;
3944     }
3945 
3946     if (UsedAssumedInformation) {
3947       NewToBeExploredFrom.insert(I);
3948     } else if (AliveSuccessors.empty() ||
3949                (I->isTerminator() &&
3950                 AliveSuccessors.size() < I->getNumSuccessors())) {
3951       if (KnownDeadEnds.insert(I))
3952         Change = ChangeStatus::CHANGED;
3953     }
3954 
3955     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3956                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3957                       << UsedAssumedInformation << "\n");
3958 
3959     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3960       if (!I->isTerminator()) {
3961         assert(AliveSuccessors.size() == 1 &&
3962                "Non-terminator expected to have a single successor!");
3963         Worklist.push_back(AliveSuccessor);
3964       } else {
3965         // record the assumed live edge
3966         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3967         if (AssumedLiveEdges.insert(Edge).second)
3968           Change = ChangeStatus::CHANGED;
3969         if (assumeLive(A, *AliveSuccessor->getParent()))
3970           Worklist.push_back(AliveSuccessor);
3971       }
3972     }
3973   }
3974 
3975   // Check if the content of ToBeExploredFrom changed, ignore the order.
3976   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3977       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3978         return !ToBeExploredFrom.count(I);
3979       })) {
3980     Change = ChangeStatus::CHANGED;
3981     ToBeExploredFrom = std::move(NewToBeExploredFrom);
3982   }
3983 
3984   // If we know everything is live there is no need to query for liveness.
3985   // Instead, indicating a pessimistic fixpoint will cause the state to be
3986   // "invalid" and all queries to be answered conservatively without lookups.
3987   // To be in this state we have to (1) finished the exploration and (3) not
3988   // discovered any non-trivial dead end and (2) not ruled unreachable code
3989   // dead.
3990   if (ToBeExploredFrom.empty() &&
3991       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3992       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3993         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3994       }))
3995     return indicatePessimisticFixpoint();
3996   return Change;
3997 }
3998 
3999 /// Liveness information for a call sites.
4000 struct AAIsDeadCallSite final : AAIsDeadFunction {
4001   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4002       : AAIsDeadFunction(IRP, A) {}
4003 
4004   /// See AbstractAttribute::initialize(...).
4005   void initialize(Attributor &A) override {
4006     // TODO: Once we have call site specific value information we can provide
4007     //       call site specific liveness information and then it makes
4008     //       sense to specialize attributes for call sites instead of
4009     //       redirecting requests to the callee.
4010     llvm_unreachable("Abstract attributes for liveness are not "
4011                      "supported for call sites yet!");
4012   }
4013 
4014   /// See AbstractAttribute::updateImpl(...).
4015   ChangeStatus updateImpl(Attributor &A) override {
4016     return indicatePessimisticFixpoint();
4017   }
4018 
4019   /// See AbstractAttribute::trackStatistics()
4020   void trackStatistics() const override {}
4021 };
4022 
4023 /// -------------------- Dereferenceable Argument Attribute --------------------
4024 
4025 struct AADereferenceableImpl : AADereferenceable {
4026   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4027       : AADereferenceable(IRP, A) {}
4028   using StateType = DerefState;
4029 
4030   /// See AbstractAttribute::initialize(...).
4031   void initialize(Attributor &A) override {
4032     SmallVector<Attribute, 4> Attrs;
4033     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4034              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4035     for (const Attribute &Attr : Attrs)
4036       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4037 
4038     const IRPosition &IRP = this->getIRPosition();
4039     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4040 
4041     bool CanBeNull, CanBeFreed;
4042     takeKnownDerefBytesMaximum(
4043         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4044             A.getDataLayout(), CanBeNull, CanBeFreed));
4045 
4046     bool IsFnInterface = IRP.isFnInterfaceKind();
4047     Function *FnScope = IRP.getAnchorScope();
4048     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4049       indicatePessimisticFixpoint();
4050       return;
4051     }
4052 
4053     if (Instruction *CtxI = getCtxI())
4054       followUsesInMBEC(*this, A, getState(), *CtxI);
4055   }
4056 
4057   /// See AbstractAttribute::getState()
4058   /// {
4059   StateType &getState() override { return *this; }
4060   const StateType &getState() const override { return *this; }
4061   /// }
4062 
4063   /// Helper function for collecting accessed bytes in must-be-executed-context
4064   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4065                               DerefState &State) {
4066     const Value *UseV = U->get();
4067     if (!UseV->getType()->isPointerTy())
4068       return;
4069 
4070     Type *PtrTy = UseV->getType();
4071     const DataLayout &DL = A.getDataLayout();
4072     int64_t Offset;
4073     if (const Value *Base = getBasePointerOfAccessPointerOperand(
4074             I, Offset, DL, /*AllowNonInbounds*/ true)) {
4075       if (Base == &getAssociatedValue() &&
4076           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4077         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4078         State.addAccessedBytes(Offset, Size);
4079       }
4080     }
4081   }
4082 
4083   /// See followUsesInMBEC
4084   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4085                        AADereferenceable::StateType &State) {
4086     bool IsNonNull = false;
4087     bool TrackUse = false;
4088     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4089         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4090     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4091                       << " for instruction " << *I << "\n");
4092 
4093     addAccessedBytesForUse(A, U, I, State);
4094     State.takeKnownDerefBytesMaximum(DerefBytes);
4095     return TrackUse;
4096   }
4097 
4098   /// See AbstractAttribute::manifest(...).
4099   ChangeStatus manifest(Attributor &A) override {
4100     ChangeStatus Change = AADereferenceable::manifest(A);
4101     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4102       removeAttrs({Attribute::DereferenceableOrNull});
4103       return ChangeStatus::CHANGED;
4104     }
4105     return Change;
4106   }
4107 
4108   void getDeducedAttributes(LLVMContext &Ctx,
4109                             SmallVectorImpl<Attribute> &Attrs) const override {
4110     // TODO: Add *_globally support
4111     if (isAssumedNonNull())
4112       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4113           Ctx, getAssumedDereferenceableBytes()));
4114     else
4115       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4116           Ctx, getAssumedDereferenceableBytes()));
4117   }
4118 
4119   /// See AbstractAttribute::getAsStr().
4120   const std::string getAsStr() const override {
4121     if (!getAssumedDereferenceableBytes())
4122       return "unknown-dereferenceable";
4123     return std::string("dereferenceable") +
4124            (isAssumedNonNull() ? "" : "_or_null") +
4125            (isAssumedGlobal() ? "_globally" : "") + "<" +
4126            std::to_string(getKnownDereferenceableBytes()) + "-" +
4127            std::to_string(getAssumedDereferenceableBytes()) + ">";
4128   }
4129 };
4130 
4131 /// Dereferenceable attribute for a floating value.
4132 struct AADereferenceableFloating : AADereferenceableImpl {
4133   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4134       : AADereferenceableImpl(IRP, A) {}
4135 
4136   /// See AbstractAttribute::updateImpl(...).
4137   ChangeStatus updateImpl(Attributor &A) override {
4138     const DataLayout &DL = A.getDataLayout();
4139 
4140     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4141                             bool Stripped) -> bool {
4142       unsigned IdxWidth =
4143           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4144       APInt Offset(IdxWidth, 0);
4145       const Value *Base =
4146           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4147 
4148       const auto &AA = A.getAAFor<AADereferenceable>(
4149           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4150       int64_t DerefBytes = 0;
4151       if (!Stripped && this == &AA) {
4152         // Use IR information if we did not strip anything.
4153         // TODO: track globally.
4154         bool CanBeNull, CanBeFreed;
4155         DerefBytes =
4156             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4157         T.GlobalState.indicatePessimisticFixpoint();
4158       } else {
4159         const DerefState &DS = AA.getState();
4160         DerefBytes = DS.DerefBytesState.getAssumed();
4161         T.GlobalState &= DS.GlobalState;
4162       }
4163 
4164       // For now we do not try to "increase" dereferenceability due to negative
4165       // indices as we first have to come up with code to deal with loops and
4166       // for overflows of the dereferenceable bytes.
4167       int64_t OffsetSExt = Offset.getSExtValue();
4168       if (OffsetSExt < 0)
4169         OffsetSExt = 0;
4170 
4171       T.takeAssumedDerefBytesMinimum(
4172           std::max(int64_t(0), DerefBytes - OffsetSExt));
4173 
4174       if (this == &AA) {
4175         if (!Stripped) {
4176           // If nothing was stripped IR information is all we got.
4177           T.takeKnownDerefBytesMaximum(
4178               std::max(int64_t(0), DerefBytes - OffsetSExt));
4179           T.indicatePessimisticFixpoint();
4180         } else if (OffsetSExt > 0) {
4181           // If something was stripped but there is circular reasoning we look
4182           // for the offset. If it is positive we basically decrease the
4183           // dereferenceable bytes in a circluar loop now, which will simply
4184           // drive them down to the known value in a very slow way which we
4185           // can accelerate.
4186           T.indicatePessimisticFixpoint();
4187         }
4188       }
4189 
4190       return T.isValidState();
4191     };
4192 
4193     DerefState T;
4194     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4195                                            VisitValueCB, getCtxI()))
4196       return indicatePessimisticFixpoint();
4197 
4198     return clampStateAndIndicateChange(getState(), T);
4199   }
4200 
4201   /// See AbstractAttribute::trackStatistics()
4202   void trackStatistics() const override {
4203     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4204   }
4205 };
4206 
4207 /// Dereferenceable attribute for a return value.
4208 struct AADereferenceableReturned final
4209     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4210   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4211       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4212             IRP, A) {}
4213 
4214   /// See AbstractAttribute::trackStatistics()
4215   void trackStatistics() const override {
4216     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4217   }
4218 };
4219 
4220 /// Dereferenceable attribute for an argument
4221 struct AADereferenceableArgument final
4222     : AAArgumentFromCallSiteArguments<AADereferenceable,
4223                                       AADereferenceableImpl> {
4224   using Base =
4225       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4226   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4227       : Base(IRP, A) {}
4228 
4229   /// See AbstractAttribute::trackStatistics()
4230   void trackStatistics() const override {
4231     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4232   }
4233 };
4234 
4235 /// Dereferenceable attribute for a call site argument.
4236 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4237   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4238       : AADereferenceableFloating(IRP, A) {}
4239 
4240   /// See AbstractAttribute::trackStatistics()
4241   void trackStatistics() const override {
4242     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4243   }
4244 };
4245 
4246 /// Dereferenceable attribute deduction for a call site return value.
4247 struct AADereferenceableCallSiteReturned final
4248     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4249   using Base =
4250       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4251   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4252       : Base(IRP, A) {}
4253 
4254   /// See AbstractAttribute::trackStatistics()
4255   void trackStatistics() const override {
4256     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4257   }
4258 };
4259 
4260 // ------------------------ Align Argument Attribute ------------------------
4261 
4262 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4263                                     Value &AssociatedValue, const Use *U,
4264                                     const Instruction *I, bool &TrackUse) {
4265   // We need to follow common pointer manipulation uses to the accesses they
4266   // feed into.
4267   if (isa<CastInst>(I)) {
4268     // Follow all but ptr2int casts.
4269     TrackUse = !isa<PtrToIntInst>(I);
4270     return 0;
4271   }
4272   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4273     if (GEP->hasAllConstantIndices())
4274       TrackUse = true;
4275     return 0;
4276   }
4277 
4278   MaybeAlign MA;
4279   if (const auto *CB = dyn_cast<CallBase>(I)) {
4280     if (CB->isBundleOperand(U) || CB->isCallee(U))
4281       return 0;
4282 
4283     unsigned ArgNo = CB->getArgOperandNo(U);
4284     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4285     // As long as we only use known information there is no need to track
4286     // dependences here.
4287     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4288     MA = MaybeAlign(AlignAA.getKnownAlign());
4289   }
4290 
4291   const DataLayout &DL = A.getDataLayout();
4292   const Value *UseV = U->get();
4293   if (auto *SI = dyn_cast<StoreInst>(I)) {
4294     if (SI->getPointerOperand() == UseV)
4295       MA = SI->getAlign();
4296   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4297     if (LI->getPointerOperand() == UseV)
4298       MA = LI->getAlign();
4299   }
4300 
4301   if (!MA || *MA <= QueryingAA.getKnownAlign())
4302     return 0;
4303 
4304   unsigned Alignment = MA->value();
4305   int64_t Offset;
4306 
4307   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4308     if (Base == &AssociatedValue) {
4309       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4310       // So we can say that the maximum power of two which is a divisor of
4311       // gcd(Offset, Alignment) is an alignment.
4312 
4313       uint32_t gcd =
4314           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4315       Alignment = llvm::PowerOf2Floor(gcd);
4316     }
4317   }
4318 
4319   return Alignment;
4320 }
4321 
4322 struct AAAlignImpl : AAAlign {
4323   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4324 
4325   /// See AbstractAttribute::initialize(...).
4326   void initialize(Attributor &A) override {
4327     SmallVector<Attribute, 4> Attrs;
4328     getAttrs({Attribute::Alignment}, Attrs);
4329     for (const Attribute &Attr : Attrs)
4330       takeKnownMaximum(Attr.getValueAsInt());
4331 
4332     Value &V = getAssociatedValue();
4333     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4334     //       use of the function pointer. This was caused by D73131. We want to
4335     //       avoid this for function pointers especially because we iterate
4336     //       their uses and int2ptr is not handled. It is not a correctness
4337     //       problem though!
4338     if (!V.getType()->getPointerElementType()->isFunctionTy())
4339       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4340 
4341     if (getIRPosition().isFnInterfaceKind() &&
4342         (!getAnchorScope() ||
4343          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4344       indicatePessimisticFixpoint();
4345       return;
4346     }
4347 
4348     if (Instruction *CtxI = getCtxI())
4349       followUsesInMBEC(*this, A, getState(), *CtxI);
4350   }
4351 
4352   /// See AbstractAttribute::manifest(...).
4353   ChangeStatus manifest(Attributor &A) override {
4354     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4355 
4356     // Check for users that allow alignment annotations.
4357     Value &AssociatedValue = getAssociatedValue();
4358     for (const Use &U : AssociatedValue.uses()) {
4359       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4360         if (SI->getPointerOperand() == &AssociatedValue)
4361           if (SI->getAlignment() < getAssumedAlign()) {
4362             STATS_DECLTRACK(AAAlign, Store,
4363                             "Number of times alignment added to a store");
4364             SI->setAlignment(Align(getAssumedAlign()));
4365             LoadStoreChanged = ChangeStatus::CHANGED;
4366           }
4367       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4368         if (LI->getPointerOperand() == &AssociatedValue)
4369           if (LI->getAlignment() < getAssumedAlign()) {
4370             LI->setAlignment(Align(getAssumedAlign()));
4371             STATS_DECLTRACK(AAAlign, Load,
4372                             "Number of times alignment added to a load");
4373             LoadStoreChanged = ChangeStatus::CHANGED;
4374           }
4375       }
4376     }
4377 
4378     ChangeStatus Changed = AAAlign::manifest(A);
4379 
4380     Align InheritAlign =
4381         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4382     if (InheritAlign >= getAssumedAlign())
4383       return LoadStoreChanged;
4384     return Changed | LoadStoreChanged;
4385   }
4386 
4387   // TODO: Provide a helper to determine the implied ABI alignment and check in
4388   //       the existing manifest method and a new one for AAAlignImpl that value
4389   //       to avoid making the alignment explicit if it did not improve.
4390 
4391   /// See AbstractAttribute::getDeducedAttributes
4392   virtual void
4393   getDeducedAttributes(LLVMContext &Ctx,
4394                        SmallVectorImpl<Attribute> &Attrs) const override {
4395     if (getAssumedAlign() > 1)
4396       Attrs.emplace_back(
4397           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4398   }
4399 
4400   /// See followUsesInMBEC
4401   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4402                        AAAlign::StateType &State) {
4403     bool TrackUse = false;
4404 
4405     unsigned int KnownAlign =
4406         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4407     State.takeKnownMaximum(KnownAlign);
4408 
4409     return TrackUse;
4410   }
4411 
4412   /// See AbstractAttribute::getAsStr().
4413   const std::string getAsStr() const override {
4414     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4415                                 "-" + std::to_string(getAssumedAlign()) + ">")
4416                              : "unknown-align";
4417   }
4418 };
4419 
4420 /// Align attribute for a floating value.
4421 struct AAAlignFloating : AAAlignImpl {
4422   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4423 
4424   /// See AbstractAttribute::updateImpl(...).
4425   ChangeStatus updateImpl(Attributor &A) override {
4426     const DataLayout &DL = A.getDataLayout();
4427 
4428     auto VisitValueCB = [&](Value &V, const Instruction *,
4429                             AAAlign::StateType &T, bool Stripped) -> bool {
4430       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4431                                            DepClassTy::REQUIRED);
4432       if (!Stripped && this == &AA) {
4433         int64_t Offset;
4434         unsigned Alignment = 1;
4435         if (const Value *Base =
4436                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4437           Align PA = Base->getPointerAlignment(DL);
4438           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4439           // So we can say that the maximum power of two which is a divisor of
4440           // gcd(Offset, Alignment) is an alignment.
4441 
4442           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4443                                                uint32_t(PA.value()));
4444           Alignment = llvm::PowerOf2Floor(gcd);
4445         } else {
4446           Alignment = V.getPointerAlignment(DL).value();
4447         }
4448         // Use only IR information if we did not strip anything.
4449         T.takeKnownMaximum(Alignment);
4450         T.indicatePessimisticFixpoint();
4451       } else {
4452         // Use abstract attribute information.
4453         const AAAlign::StateType &DS = AA.getState();
4454         T ^= DS;
4455       }
4456       return T.isValidState();
4457     };
4458 
4459     StateType T;
4460     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4461                                           VisitValueCB, getCtxI()))
4462       return indicatePessimisticFixpoint();
4463 
4464     // TODO: If we know we visited all incoming values, thus no are assumed
4465     // dead, we can take the known information from the state T.
4466     return clampStateAndIndicateChange(getState(), T);
4467   }
4468 
4469   /// See AbstractAttribute::trackStatistics()
4470   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4471 };
4472 
4473 /// Align attribute for function return value.
4474 struct AAAlignReturned final
4475     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4476   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4477   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4478 
4479   /// See AbstractAttribute::initialize(...).
4480   void initialize(Attributor &A) override {
4481     Base::initialize(A);
4482     Function *F = getAssociatedFunction();
4483     if (!F || F->isDeclaration())
4484       indicatePessimisticFixpoint();
4485   }
4486 
4487   /// See AbstractAttribute::trackStatistics()
4488   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4489 };
4490 
4491 /// Align attribute for function argument.
4492 struct AAAlignArgument final
4493     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4494   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4495   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4496 
4497   /// See AbstractAttribute::manifest(...).
4498   ChangeStatus manifest(Attributor &A) override {
4499     // If the associated argument is involved in a must-tail call we give up
4500     // because we would need to keep the argument alignments of caller and
4501     // callee in-sync. Just does not seem worth the trouble right now.
4502     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4503       return ChangeStatus::UNCHANGED;
4504     return Base::manifest(A);
4505   }
4506 
4507   /// See AbstractAttribute::trackStatistics()
4508   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4509 };
4510 
4511 struct AAAlignCallSiteArgument final : AAAlignFloating {
4512   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4513       : AAAlignFloating(IRP, A) {}
4514 
4515   /// See AbstractAttribute::manifest(...).
4516   ChangeStatus manifest(Attributor &A) override {
4517     // If the associated argument is involved in a must-tail call we give up
4518     // because we would need to keep the argument alignments of caller and
4519     // callee in-sync. Just does not seem worth the trouble right now.
4520     if (Argument *Arg = getAssociatedArgument())
4521       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4522         return ChangeStatus::UNCHANGED;
4523     ChangeStatus Changed = AAAlignImpl::manifest(A);
4524     Align InheritAlign =
4525         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4526     if (InheritAlign >= getAssumedAlign())
4527       Changed = ChangeStatus::UNCHANGED;
4528     return Changed;
4529   }
4530 
4531   /// See AbstractAttribute::updateImpl(Attributor &A).
4532   ChangeStatus updateImpl(Attributor &A) override {
4533     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4534     if (Argument *Arg = getAssociatedArgument()) {
4535       // We only take known information from the argument
4536       // so we do not need to track a dependence.
4537       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4538           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4539       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4540     }
4541     return Changed;
4542   }
4543 
4544   /// See AbstractAttribute::trackStatistics()
4545   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4546 };
4547 
4548 /// Align attribute deduction for a call site return value.
4549 struct AAAlignCallSiteReturned final
4550     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4551   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4552   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4553       : Base(IRP, A) {}
4554 
4555   /// See AbstractAttribute::initialize(...).
4556   void initialize(Attributor &A) override {
4557     Base::initialize(A);
4558     Function *F = getAssociatedFunction();
4559     if (!F || F->isDeclaration())
4560       indicatePessimisticFixpoint();
4561   }
4562 
4563   /// See AbstractAttribute::trackStatistics()
4564   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4565 };
4566 
4567 /// ------------------ Function No-Return Attribute ----------------------------
4568 struct AANoReturnImpl : public AANoReturn {
4569   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4570 
4571   /// See AbstractAttribute::initialize(...).
4572   void initialize(Attributor &A) override {
4573     AANoReturn::initialize(A);
4574     Function *F = getAssociatedFunction();
4575     if (!F || F->isDeclaration())
4576       indicatePessimisticFixpoint();
4577   }
4578 
4579   /// See AbstractAttribute::getAsStr().
4580   const std::string getAsStr() const override {
4581     return getAssumed() ? "noreturn" : "may-return";
4582   }
4583 
4584   /// See AbstractAttribute::updateImpl(Attributor &A).
4585   virtual ChangeStatus updateImpl(Attributor &A) override {
4586     auto CheckForNoReturn = [](Instruction &) { return false; };
4587     bool UsedAssumedInformation = false;
4588     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4589                                    {(unsigned)Instruction::Ret},
4590                                    UsedAssumedInformation))
4591       return indicatePessimisticFixpoint();
4592     return ChangeStatus::UNCHANGED;
4593   }
4594 };
4595 
4596 struct AANoReturnFunction final : AANoReturnImpl {
4597   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4598       : AANoReturnImpl(IRP, A) {}
4599 
4600   /// See AbstractAttribute::trackStatistics()
4601   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4602 };
4603 
4604 /// NoReturn attribute deduction for a call sites.
4605 struct AANoReturnCallSite final : AANoReturnImpl {
4606   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4607       : AANoReturnImpl(IRP, A) {}
4608 
4609   /// See AbstractAttribute::initialize(...).
4610   void initialize(Attributor &A) override {
4611     AANoReturnImpl::initialize(A);
4612     if (Function *F = getAssociatedFunction()) {
4613       const IRPosition &FnPos = IRPosition::function(*F);
4614       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4615       if (!FnAA.isAssumedNoReturn())
4616         indicatePessimisticFixpoint();
4617     }
4618   }
4619 
4620   /// See AbstractAttribute::updateImpl(...).
4621   ChangeStatus updateImpl(Attributor &A) override {
4622     // TODO: Once we have call site specific value information we can provide
4623     //       call site specific liveness information and then it makes
4624     //       sense to specialize attributes for call sites arguments instead of
4625     //       redirecting requests to the callee argument.
4626     Function *F = getAssociatedFunction();
4627     const IRPosition &FnPos = IRPosition::function(*F);
4628     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4629     return clampStateAndIndicateChange(getState(), FnAA.getState());
4630   }
4631 
4632   /// See AbstractAttribute::trackStatistics()
4633   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4634 };
4635 
4636 /// ----------------------- Variable Capturing ---------------------------------
4637 
4638 /// A class to hold the state of for no-capture attributes.
4639 struct AANoCaptureImpl : public AANoCapture {
4640   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4641 
4642   /// See AbstractAttribute::initialize(...).
4643   void initialize(Attributor &A) override {
4644     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4645       indicateOptimisticFixpoint();
4646       return;
4647     }
4648     Function *AnchorScope = getAnchorScope();
4649     if (isFnInterfaceKind() &&
4650         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4651       indicatePessimisticFixpoint();
4652       return;
4653     }
4654 
4655     // You cannot "capture" null in the default address space.
4656     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4657         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4658       indicateOptimisticFixpoint();
4659       return;
4660     }
4661 
4662     const Function *F =
4663         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4664 
4665     // Check what state the associated function can actually capture.
4666     if (F)
4667       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4668     else
4669       indicatePessimisticFixpoint();
4670   }
4671 
4672   /// See AbstractAttribute::updateImpl(...).
4673   ChangeStatus updateImpl(Attributor &A) override;
4674 
4675   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4676   virtual void
4677   getDeducedAttributes(LLVMContext &Ctx,
4678                        SmallVectorImpl<Attribute> &Attrs) const override {
4679     if (!isAssumedNoCaptureMaybeReturned())
4680       return;
4681 
4682     if (isArgumentPosition()) {
4683       if (isAssumedNoCapture())
4684         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4685       else if (ManifestInternal)
4686         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4687     }
4688   }
4689 
4690   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4691   /// depending on the ability of the function associated with \p IRP to capture
4692   /// state in memory and through "returning/throwing", respectively.
4693   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4694                                                    const Function &F,
4695                                                    BitIntegerState &State) {
4696     // TODO: Once we have memory behavior attributes we should use them here.
4697 
4698     // If we know we cannot communicate or write to memory, we do not care about
4699     // ptr2int anymore.
4700     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4701         F.getReturnType()->isVoidTy()) {
4702       State.addKnownBits(NO_CAPTURE);
4703       return;
4704     }
4705 
4706     // A function cannot capture state in memory if it only reads memory, it can
4707     // however return/throw state and the state might be influenced by the
4708     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4709     if (F.onlyReadsMemory())
4710       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4711 
4712     // A function cannot communicate state back if it does not through
4713     // exceptions and doesn not return values.
4714     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4715       State.addKnownBits(NOT_CAPTURED_IN_RET);
4716 
4717     // Check existing "returned" attributes.
4718     int ArgNo = IRP.getCalleeArgNo();
4719     if (F.doesNotThrow() && ArgNo >= 0) {
4720       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4721         if (F.hasParamAttribute(u, Attribute::Returned)) {
4722           if (u == unsigned(ArgNo))
4723             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4724           else if (F.onlyReadsMemory())
4725             State.addKnownBits(NO_CAPTURE);
4726           else
4727             State.addKnownBits(NOT_CAPTURED_IN_RET);
4728           break;
4729         }
4730     }
4731   }
4732 
4733   /// See AbstractState::getAsStr().
4734   const std::string getAsStr() const override {
4735     if (isKnownNoCapture())
4736       return "known not-captured";
4737     if (isAssumedNoCapture())
4738       return "assumed not-captured";
4739     if (isKnownNoCaptureMaybeReturned())
4740       return "known not-captured-maybe-returned";
4741     if (isAssumedNoCaptureMaybeReturned())
4742       return "assumed not-captured-maybe-returned";
4743     return "assumed-captured";
4744   }
4745 };
4746 
4747 /// Attributor-aware capture tracker.
4748 struct AACaptureUseTracker final : public CaptureTracker {
4749 
4750   /// Create a capture tracker that can lookup in-flight abstract attributes
4751   /// through the Attributor \p A.
4752   ///
4753   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4754   /// search is stopped. If a use leads to a return instruction,
4755   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4756   /// If a use leads to a ptr2int which may capture the value,
4757   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4758   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4759   /// set. All values in \p PotentialCopies are later tracked as well. For every
4760   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4761   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4762   /// conservatively set to true.
4763   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4764                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4765                       SmallSetVector<Value *, 4> &PotentialCopies,
4766                       unsigned &RemainingUsesToExplore)
4767       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4768         PotentialCopies(PotentialCopies),
4769         RemainingUsesToExplore(RemainingUsesToExplore) {}
4770 
4771   /// Determine if \p V maybe captured. *Also updates the state!*
4772   bool valueMayBeCaptured(const Value *V) {
4773     if (V->getType()->isPointerTy()) {
4774       PointerMayBeCaptured(V, this);
4775     } else {
4776       State.indicatePessimisticFixpoint();
4777     }
4778     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4779   }
4780 
4781   /// See CaptureTracker::tooManyUses().
4782   void tooManyUses() override {
4783     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4784   }
4785 
4786   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4787     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4788       return true;
4789     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4790         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4791     return DerefAA.getAssumedDereferenceableBytes();
4792   }
4793 
4794   /// See CaptureTracker::captured(...).
4795   bool captured(const Use *U) override {
4796     Instruction *UInst = cast<Instruction>(U->getUser());
4797     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4798                       << "\n");
4799 
4800     // Because we may reuse the tracker multiple times we keep track of the
4801     // number of explored uses ourselves as well.
4802     if (RemainingUsesToExplore-- == 0) {
4803       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4804       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4805                           /* Return */ true);
4806     }
4807 
4808     // Deal with ptr2int by following uses.
4809     if (isa<PtrToIntInst>(UInst)) {
4810       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4811       return valueMayBeCaptured(UInst);
4812     }
4813 
4814     // For stores we check if we can follow the value through memory or not.
4815     if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4816       if (SI->isVolatile())
4817         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4818                             /* Return */ false);
4819       bool UsedAssumedInformation = false;
4820       if (!AA::getPotentialCopiesOfStoredValue(
4821               A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4822         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4823                             /* Return */ false);
4824       // Not captured directly, potential copies will be checked.
4825       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4826                           /* Return */ false);
4827     }
4828 
4829     // Explicitly catch return instructions.
4830     if (isa<ReturnInst>(UInst)) {
4831       if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4832         return isCapturedIn(/* Memory */ false, /* Integer */ false,
4833                             /* Return */ true);
4834       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4835                           /* Return */ true);
4836     }
4837 
4838     // For now we only use special logic for call sites. However, the tracker
4839     // itself knows about a lot of other non-capturing cases already.
4840     auto *CB = dyn_cast<CallBase>(UInst);
4841     if (!CB || !CB->isArgOperand(U))
4842       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4843                           /* Return */ true);
4844 
4845     unsigned ArgNo = CB->getArgOperandNo(U);
4846     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4847     // If we have a abstract no-capture attribute for the argument we can use
4848     // it to justify a non-capture attribute here. This allows recursion!
4849     auto &ArgNoCaptureAA =
4850         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4851     if (ArgNoCaptureAA.isAssumedNoCapture())
4852       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4853                           /* Return */ false);
4854     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4855       addPotentialCopy(*CB);
4856       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4857                           /* Return */ false);
4858     }
4859 
4860     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4861     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4862                         /* Return */ true);
4863   }
4864 
4865   /// Register \p CS as potential copy of the value we are checking.
4866   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4867 
4868   /// See CaptureTracker::shouldExplore(...).
4869   bool shouldExplore(const Use *U) override {
4870     // Check liveness and ignore droppable users.
4871     bool UsedAssumedInformation = false;
4872     return !U->getUser()->isDroppable() &&
4873            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4874                             UsedAssumedInformation);
4875   }
4876 
4877   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4878   /// \p CapturedInRet, then return the appropriate value for use in the
4879   /// CaptureTracker::captured() interface.
4880   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4881                     bool CapturedInRet) {
4882     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4883                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4884     if (CapturedInMem)
4885       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4886     if (CapturedInInt)
4887       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4888     if (CapturedInRet)
4889       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4890     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4891   }
4892 
4893 private:
4894   /// The attributor providing in-flight abstract attributes.
4895   Attributor &A;
4896 
4897   /// The abstract attribute currently updated.
4898   AANoCapture &NoCaptureAA;
4899 
4900   /// The abstract liveness state.
4901   const AAIsDead &IsDeadAA;
4902 
4903   /// The state currently updated.
4904   AANoCapture::StateType &State;
4905 
4906   /// Set of potential copies of the tracked value.
4907   SmallSetVector<Value *, 4> &PotentialCopies;
4908 
4909   /// Global counter to limit the number of explored uses.
4910   unsigned &RemainingUsesToExplore;
4911 };
4912 
4913 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4914   const IRPosition &IRP = getIRPosition();
4915   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4916                                   : &IRP.getAssociatedValue();
4917   if (!V)
4918     return indicatePessimisticFixpoint();
4919 
4920   const Function *F =
4921       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4922   assert(F && "Expected a function!");
4923   const IRPosition &FnPos = IRPosition::function(*F);
4924   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4925 
4926   AANoCapture::StateType T;
4927 
4928   // Readonly means we cannot capture through memory.
4929   const auto &FnMemAA =
4930       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4931   if (FnMemAA.isAssumedReadOnly()) {
4932     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4933     if (FnMemAA.isKnownReadOnly())
4934       addKnownBits(NOT_CAPTURED_IN_MEM);
4935     else
4936       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4937   }
4938 
4939   // Make sure all returned values are different than the underlying value.
4940   // TODO: we could do this in a more sophisticated way inside
4941   //       AAReturnedValues, e.g., track all values that escape through returns
4942   //       directly somehow.
4943   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4944     bool SeenConstant = false;
4945     for (auto &It : RVAA.returned_values()) {
4946       if (isa<Constant>(It.first)) {
4947         if (SeenConstant)
4948           return false;
4949         SeenConstant = true;
4950       } else if (!isa<Argument>(It.first) ||
4951                  It.first == getAssociatedArgument())
4952         return false;
4953     }
4954     return true;
4955   };
4956 
4957   const auto &NoUnwindAA =
4958       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4959   if (NoUnwindAA.isAssumedNoUnwind()) {
4960     bool IsVoidTy = F->getReturnType()->isVoidTy();
4961     const AAReturnedValues *RVAA =
4962         IsVoidTy ? nullptr
4963                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4964 
4965                                                  DepClassTy::OPTIONAL);
4966     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4967       T.addKnownBits(NOT_CAPTURED_IN_RET);
4968       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4969         return ChangeStatus::UNCHANGED;
4970       if (NoUnwindAA.isKnownNoUnwind() &&
4971           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4972         addKnownBits(NOT_CAPTURED_IN_RET);
4973         if (isKnown(NOT_CAPTURED_IN_MEM))
4974           return indicateOptimisticFixpoint();
4975       }
4976     }
4977   }
4978 
4979   // Use the CaptureTracker interface and logic with the specialized tracker,
4980   // defined in AACaptureUseTracker, that can look at in-flight abstract
4981   // attributes and directly updates the assumed state.
4982   SmallSetVector<Value *, 4> PotentialCopies;
4983   unsigned RemainingUsesToExplore =
4984       getDefaultMaxUsesToExploreForCaptureTracking();
4985   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4986                               RemainingUsesToExplore);
4987 
4988   // Check all potential copies of the associated value until we can assume
4989   // none will be captured or we have to assume at least one might be.
4990   unsigned Idx = 0;
4991   PotentialCopies.insert(V);
4992   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4993     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4994 
4995   AANoCapture::StateType &S = getState();
4996   auto Assumed = S.getAssumed();
4997   S.intersectAssumedBits(T.getAssumed());
4998   if (!isAssumedNoCaptureMaybeReturned())
4999     return indicatePessimisticFixpoint();
5000   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5001                                    : ChangeStatus::CHANGED;
5002 }
5003 
5004 /// NoCapture attribute for function arguments.
5005 struct AANoCaptureArgument final : AANoCaptureImpl {
5006   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5007       : AANoCaptureImpl(IRP, A) {}
5008 
5009   /// See AbstractAttribute::trackStatistics()
5010   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5011 };
5012 
5013 /// NoCapture attribute for call site arguments.
5014 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5015   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5016       : AANoCaptureImpl(IRP, A) {}
5017 
5018   /// See AbstractAttribute::initialize(...).
5019   void initialize(Attributor &A) override {
5020     if (Argument *Arg = getAssociatedArgument())
5021       if (Arg->hasByValAttr())
5022         indicateOptimisticFixpoint();
5023     AANoCaptureImpl::initialize(A);
5024   }
5025 
5026   /// See AbstractAttribute::updateImpl(...).
5027   ChangeStatus updateImpl(Attributor &A) override {
5028     // TODO: Once we have call site specific value information we can provide
5029     //       call site specific liveness information and then it makes
5030     //       sense to specialize attributes for call sites arguments instead of
5031     //       redirecting requests to the callee argument.
5032     Argument *Arg = getAssociatedArgument();
5033     if (!Arg)
5034       return indicatePessimisticFixpoint();
5035     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5036     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5037     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5038   }
5039 
5040   /// See AbstractAttribute::trackStatistics()
5041   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5042 };
5043 
5044 /// NoCapture attribute for floating values.
5045 struct AANoCaptureFloating final : AANoCaptureImpl {
5046   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5047       : AANoCaptureImpl(IRP, A) {}
5048 
5049   /// See AbstractAttribute::trackStatistics()
5050   void trackStatistics() const override {
5051     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5052   }
5053 };
5054 
5055 /// NoCapture attribute for function return value.
5056 struct AANoCaptureReturned final : AANoCaptureImpl {
5057   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5058       : AANoCaptureImpl(IRP, A) {
5059     llvm_unreachable("NoCapture is not applicable to function returns!");
5060   }
5061 
5062   /// See AbstractAttribute::initialize(...).
5063   void initialize(Attributor &A) override {
5064     llvm_unreachable("NoCapture is not applicable to function returns!");
5065   }
5066 
5067   /// See AbstractAttribute::updateImpl(...).
5068   ChangeStatus updateImpl(Attributor &A) override {
5069     llvm_unreachable("NoCapture is not applicable to function returns!");
5070   }
5071 
5072   /// See AbstractAttribute::trackStatistics()
5073   void trackStatistics() const override {}
5074 };
5075 
5076 /// NoCapture attribute deduction for a call site return value.
5077 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5078   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5079       : AANoCaptureImpl(IRP, A) {}
5080 
5081   /// See AbstractAttribute::initialize(...).
5082   void initialize(Attributor &A) override {
5083     const Function *F = getAnchorScope();
5084     // Check what state the associated function can actually capture.
5085     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5086   }
5087 
5088   /// See AbstractAttribute::trackStatistics()
5089   void trackStatistics() const override {
5090     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5091   }
5092 };
5093 
5094 /// ------------------ Value Simplify Attribute ----------------------------
5095 
5096 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5097   // FIXME: Add a typecast support.
5098   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5099       SimplifiedAssociatedValue, Other, Ty);
5100   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5101     return false;
5102 
5103   LLVM_DEBUG({
5104     if (SimplifiedAssociatedValue.hasValue())
5105       dbgs() << "[ValueSimplify] is assumed to be "
5106              << **SimplifiedAssociatedValue << "\n";
5107     else
5108       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5109   });
5110   return true;
5111 }
5112 
5113 struct AAValueSimplifyImpl : AAValueSimplify {
5114   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5115       : AAValueSimplify(IRP, A) {}
5116 
5117   /// See AbstractAttribute::initialize(...).
5118   void initialize(Attributor &A) override {
5119     if (getAssociatedValue().getType()->isVoidTy())
5120       indicatePessimisticFixpoint();
5121     if (A.hasSimplificationCallback(getIRPosition()))
5122       indicatePessimisticFixpoint();
5123   }
5124 
5125   /// See AbstractAttribute::getAsStr().
5126   const std::string getAsStr() const override {
5127     LLVM_DEBUG({
5128       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5129       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5130         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5131     });
5132     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5133                           : "not-simple";
5134   }
5135 
5136   /// See AbstractAttribute::trackStatistics()
5137   void trackStatistics() const override {}
5138 
5139   /// See AAValueSimplify::getAssumedSimplifiedValue()
5140   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5141     return SimplifiedAssociatedValue;
5142   }
5143 
5144   /// Return a value we can use as replacement for the associated one, or
5145   /// nullptr if we don't have one that makes sense.
5146   Value *getReplacementValue(Attributor &A) const {
5147     Value *NewV;
5148     NewV = SimplifiedAssociatedValue.hasValue()
5149                ? SimplifiedAssociatedValue.getValue()
5150                : UndefValue::get(getAssociatedType());
5151     if (!NewV)
5152       return nullptr;
5153     NewV = AA::getWithType(*NewV, *getAssociatedType());
5154     if (!NewV || NewV == &getAssociatedValue())
5155       return nullptr;
5156     const Instruction *CtxI = getCtxI();
5157     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5158       return nullptr;
5159     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5160       return nullptr;
5161     return NewV;
5162   }
5163 
5164   /// Helper function for querying AAValueSimplify and updating candicate.
5165   /// \param IRP The value position we are trying to unify with SimplifiedValue
5166   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5167                       const IRPosition &IRP, bool Simplify = true) {
5168     bool UsedAssumedInformation = false;
5169     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5170     if (Simplify)
5171       QueryingValueSimplified =
5172           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5173     return unionAssumed(QueryingValueSimplified);
5174   }
5175 
5176   /// Returns a candidate is found or not
5177   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5178     if (!getAssociatedValue().getType()->isIntegerTy())
5179       return false;
5180 
5181     // This will also pass the call base context.
5182     const auto &AA =
5183         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5184 
5185     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5186 
5187     if (!COpt.hasValue()) {
5188       SimplifiedAssociatedValue = llvm::None;
5189       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5190       return true;
5191     }
5192     if (auto *C = COpt.getValue()) {
5193       SimplifiedAssociatedValue = C;
5194       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5195       return true;
5196     }
5197     return false;
5198   }
5199 
5200   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5201     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5202       return true;
5203     if (askSimplifiedValueFor<AAPotentialValues>(A))
5204       return true;
5205     return false;
5206   }
5207 
5208   /// See AbstractAttribute::manifest(...).
5209   ChangeStatus manifest(Attributor &A) override {
5210     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5211     if (getAssociatedValue().user_empty())
5212       return Changed;
5213 
5214     if (auto *NewV = getReplacementValue(A)) {
5215       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5216                         << *NewV << " :: " << *this << "\n");
5217       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5218         Changed = ChangeStatus::CHANGED;
5219     }
5220 
5221     return Changed | AAValueSimplify::manifest(A);
5222   }
5223 
5224   /// See AbstractState::indicatePessimisticFixpoint(...).
5225   ChangeStatus indicatePessimisticFixpoint() override {
5226     SimplifiedAssociatedValue = &getAssociatedValue();
5227     return AAValueSimplify::indicatePessimisticFixpoint();
5228   }
5229 
5230   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5231                          LoadInst &L, function_ref<bool(Value &)> Union) {
5232     auto UnionWrapper = [&](Value &V, Value &Obj) {
5233       if (isa<AllocaInst>(Obj))
5234         return Union(V);
5235       if (!AA::isDynamicallyUnique(A, AA, V))
5236         return false;
5237       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5238         return false;
5239       return Union(V);
5240     };
5241 
5242     Value &Ptr = *L.getPointerOperand();
5243     SmallVector<Value *, 8> Objects;
5244     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5245       return false;
5246 
5247     for (Value *Obj : Objects) {
5248       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5249       if (isa<UndefValue>(Obj))
5250         continue;
5251       if (isa<ConstantPointerNull>(Obj)) {
5252         // A null pointer access can be undefined but any offset from null may
5253         // be OK. We do not try to optimize the latter.
5254         bool UsedAssumedInformation = false;
5255         if (!NullPointerIsDefined(L.getFunction(),
5256                                   Ptr.getType()->getPointerAddressSpace()) &&
5257             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5258           continue;
5259         return false;
5260       }
5261       if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
5262         return false;
5263       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
5264       if (!InitialVal || !Union(*InitialVal))
5265         return false;
5266 
5267       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5268                            "propagation, checking accesses next.\n");
5269 
5270       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5271         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5272         if (!Acc.isWrite())
5273           return true;
5274         if (Acc.isWrittenValueYetUndetermined())
5275           return true;
5276         Value *Content = Acc.getWrittenValue();
5277         if (!Content)
5278           return false;
5279         Value *CastedContent =
5280             AA::getWithType(*Content, *AA.getAssociatedType());
5281         if (!CastedContent)
5282           return false;
5283         if (IsExact)
5284           return UnionWrapper(*CastedContent, *Obj);
5285         if (auto *C = dyn_cast<Constant>(CastedContent))
5286           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5287             return UnionWrapper(*CastedContent, *Obj);
5288         return false;
5289       };
5290 
5291       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5292                                            DepClassTy::REQUIRED);
5293       if (!PI.forallInterferingAccesses(L, CheckAccess))
5294         return false;
5295     }
5296     return true;
5297   }
5298 };
5299 
5300 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5301   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5302       : AAValueSimplifyImpl(IRP, A) {}
5303 
5304   void initialize(Attributor &A) override {
5305     AAValueSimplifyImpl::initialize(A);
5306     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5307       indicatePessimisticFixpoint();
5308     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5309                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5310                 /* IgnoreSubsumingPositions */ true))
5311       indicatePessimisticFixpoint();
5312 
5313     // FIXME: This is a hack to prevent us from propagating function poiner in
5314     // the new pass manager CGSCC pass as it creates call edges the
5315     // CallGraphUpdater cannot handle yet.
5316     Value &V = getAssociatedValue();
5317     if (V.getType()->isPointerTy() &&
5318         V.getType()->getPointerElementType()->isFunctionTy() &&
5319         !A.isModulePass())
5320       indicatePessimisticFixpoint();
5321   }
5322 
5323   /// See AbstractAttribute::updateImpl(...).
5324   ChangeStatus updateImpl(Attributor &A) override {
5325     // Byval is only replacable if it is readonly otherwise we would write into
5326     // the replaced value and not the copy that byval creates implicitly.
5327     Argument *Arg = getAssociatedArgument();
5328     if (Arg->hasByValAttr()) {
5329       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5330       //       there is no race by not copying a constant byval.
5331       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5332                                                        DepClassTy::REQUIRED);
5333       if (!MemAA.isAssumedReadOnly())
5334         return indicatePessimisticFixpoint();
5335     }
5336 
5337     auto Before = SimplifiedAssociatedValue;
5338 
5339     auto PredForCallSite = [&](AbstractCallSite ACS) {
5340       const IRPosition &ACSArgPos =
5341           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5342       // Check if a coresponding argument was found or if it is on not
5343       // associated (which can happen for callback calls).
5344       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5345         return false;
5346 
5347       // Simplify the argument operand explicitly and check if the result is
5348       // valid in the current scope. This avoids refering to simplified values
5349       // in other functions, e.g., we don't want to say a an argument in a
5350       // static function is actually an argument in a different function.
5351       bool UsedAssumedInformation = false;
5352       Optional<Constant *> SimpleArgOp =
5353           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5354       if (!SimpleArgOp.hasValue())
5355         return true;
5356       if (!SimpleArgOp.getValue())
5357         return false;
5358       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5359         return false;
5360       return unionAssumed(*SimpleArgOp);
5361     };
5362 
5363     // Generate a answer specific to a call site context.
5364     bool Success;
5365     bool AllCallSitesKnown;
5366     if (hasCallBaseContext() &&
5367         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5368       Success = PredForCallSite(
5369           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5370     else
5371       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5372                                        AllCallSitesKnown);
5373 
5374     if (!Success)
5375       if (!askSimplifiedValueForOtherAAs(A))
5376         return indicatePessimisticFixpoint();
5377 
5378     // If a candicate was found in this update, return CHANGED.
5379     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5380                                                : ChangeStatus ::CHANGED;
5381   }
5382 
5383   /// See AbstractAttribute::trackStatistics()
5384   void trackStatistics() const override {
5385     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5386   }
5387 };
5388 
5389 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5390   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5391       : AAValueSimplifyImpl(IRP, A) {}
5392 
5393   /// See AAValueSimplify::getAssumedSimplifiedValue()
5394   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5395     if (!isValidState())
5396       return nullptr;
5397     return SimplifiedAssociatedValue;
5398   }
5399 
5400   /// See AbstractAttribute::updateImpl(...).
5401   ChangeStatus updateImpl(Attributor &A) override {
5402     auto Before = SimplifiedAssociatedValue;
5403 
5404     auto PredForReturned = [&](Value &V) {
5405       return checkAndUpdate(A, *this,
5406                             IRPosition::value(V, getCallBaseContext()));
5407     };
5408 
5409     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5410       if (!askSimplifiedValueForOtherAAs(A))
5411         return indicatePessimisticFixpoint();
5412 
5413     // If a candicate was found in this update, return CHANGED.
5414     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5415                                                : ChangeStatus ::CHANGED;
5416   }
5417 
5418   ChangeStatus manifest(Attributor &A) override {
5419     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5420 
5421     if (auto *NewV = getReplacementValue(A)) {
5422       auto PredForReturned =
5423           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5424             for (ReturnInst *RI : RetInsts) {
5425               Value *ReturnedVal = RI->getReturnValue();
5426               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5427                 return true;
5428               assert(RI->getFunction() == getAnchorScope() &&
5429                      "ReturnInst in wrong function!");
5430               LLVM_DEBUG(dbgs()
5431                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5432                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5433               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5434                 Changed = ChangeStatus::CHANGED;
5435             }
5436             return true;
5437           };
5438       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5439     }
5440 
5441     return Changed | AAValueSimplify::manifest(A);
5442   }
5443 
5444   /// See AbstractAttribute::trackStatistics()
5445   void trackStatistics() const override {
5446     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5447   }
5448 };
5449 
5450 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5451   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5452       : AAValueSimplifyImpl(IRP, A) {}
5453 
5454   /// See AbstractAttribute::initialize(...).
5455   void initialize(Attributor &A) override {
5456     AAValueSimplifyImpl::initialize(A);
5457     Value &V = getAnchorValue();
5458 
5459     // TODO: add other stuffs
5460     if (isa<Constant>(V))
5461       indicatePessimisticFixpoint();
5462   }
5463 
5464   /// Check if \p Cmp is a comparison we can simplify.
5465   ///
5466   /// We handle multiple cases, one in which at least one operand is an
5467   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5468   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5469   /// will be updated.
5470   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5471     auto Union = [&](Value &V) {
5472       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5473           SimplifiedAssociatedValue, &V, V.getType());
5474       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5475     };
5476 
5477     Value *LHS = Cmp.getOperand(0);
5478     Value *RHS = Cmp.getOperand(1);
5479 
5480     // Simplify the operands first.
5481     bool UsedAssumedInformation = false;
5482     const auto &SimplifiedLHS =
5483         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5484                                *this, UsedAssumedInformation);
5485     if (!SimplifiedLHS.hasValue())
5486       return true;
5487     if (!SimplifiedLHS.getValue())
5488       return false;
5489     LHS = *SimplifiedLHS;
5490 
5491     const auto &SimplifiedRHS =
5492         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5493                                *this, UsedAssumedInformation);
5494     if (!SimplifiedRHS.hasValue())
5495       return true;
5496     if (!SimplifiedRHS.getValue())
5497       return false;
5498     RHS = *SimplifiedRHS;
5499 
5500     LLVMContext &Ctx = Cmp.getContext();
5501     // Handle the trivial case first in which we don't even need to think about
5502     // null or non-null.
5503     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5504       Constant *NewVal =
5505           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5506       if (!Union(*NewVal))
5507         return false;
5508       if (!UsedAssumedInformation)
5509         indicateOptimisticFixpoint();
5510       return true;
5511     }
5512 
5513     // From now on we only handle equalities (==, !=).
5514     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5515     if (!ICmp || !ICmp->isEquality())
5516       return false;
5517 
5518     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5519     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5520     if (!LHSIsNull && !RHSIsNull)
5521       return false;
5522 
5523     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5524     // non-nullptr operand and if we assume it's non-null we can conclude the
5525     // result of the comparison.
5526     assert((LHSIsNull || RHSIsNull) &&
5527            "Expected nullptr versus non-nullptr comparison at this point");
5528 
5529     // The index is the operand that we assume is not null.
5530     unsigned PtrIdx = LHSIsNull;
5531     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5532         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5533         DepClassTy::REQUIRED);
5534     if (!PtrNonNullAA.isAssumedNonNull())
5535       return false;
5536     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5537 
5538     // The new value depends on the predicate, true for != and false for ==.
5539     Constant *NewVal = ConstantInt::get(
5540         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5541     if (!Union(*NewVal))
5542       return false;
5543 
5544     if (!UsedAssumedInformation)
5545       indicateOptimisticFixpoint();
5546 
5547     return true;
5548   }
5549 
5550   bool updateWithLoad(Attributor &A, LoadInst &L) {
5551     auto Union = [&](Value &V) {
5552       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5553           SimplifiedAssociatedValue, &V, L.getType());
5554       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5555     };
5556     return handleLoad(A, *this, L, Union);
5557   }
5558 
5559   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5560   /// simplify any operand of the instruction \p I. Return true if successful,
5561   /// in that case SimplifiedAssociatedValue will be updated.
5562   bool handleGenericInst(Attributor &A, Instruction &I) {
5563     bool SomeSimplified = false;
5564     bool UsedAssumedInformation = false;
5565 
5566     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5567     int Idx = 0;
5568     for (Value *Op : I.operands()) {
5569       const auto &SimplifiedOp =
5570           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5571                                  *this, UsedAssumedInformation);
5572       // If we are not sure about any operand we are not sure about the entire
5573       // instruction, we'll wait.
5574       if (!SimplifiedOp.hasValue())
5575         return true;
5576 
5577       if (SimplifiedOp.getValue())
5578         NewOps[Idx] = SimplifiedOp.getValue();
5579       else
5580         NewOps[Idx] = Op;
5581 
5582       SomeSimplified |= (NewOps[Idx] != Op);
5583       ++Idx;
5584     }
5585 
5586     // We won't bother with the InstSimplify interface if we didn't simplify any
5587     // operand ourselves.
5588     if (!SomeSimplified)
5589       return false;
5590 
5591     InformationCache &InfoCache = A.getInfoCache();
5592     Function *F = I.getFunction();
5593     const auto *DT =
5594         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5595     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5596     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5597     OptimizationRemarkEmitter *ORE = nullptr;
5598 
5599     const DataLayout &DL = I.getModule()->getDataLayout();
5600     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5601     if (Value *SimplifiedI =
5602             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5603       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5604           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5605       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5606     }
5607     return false;
5608   }
5609 
5610   /// See AbstractAttribute::updateImpl(...).
5611   ChangeStatus updateImpl(Attributor &A) override {
5612     auto Before = SimplifiedAssociatedValue;
5613 
5614     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5615                             bool Stripped) -> bool {
5616       auto &AA = A.getAAFor<AAValueSimplify>(
5617           *this, IRPosition::value(V, getCallBaseContext()),
5618           DepClassTy::REQUIRED);
5619       if (!Stripped && this == &AA) {
5620 
5621         if (auto *I = dyn_cast<Instruction>(&V)) {
5622           if (auto *LI = dyn_cast<LoadInst>(&V))
5623             if (updateWithLoad(A, *LI))
5624               return true;
5625           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5626             if (handleCmp(A, *Cmp))
5627               return true;
5628           if (handleGenericInst(A, *I))
5629             return true;
5630         }
5631         // TODO: Look the instruction and check recursively.
5632 
5633         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5634                           << "\n");
5635         return false;
5636       }
5637       return checkAndUpdate(A, *this,
5638                             IRPosition::value(V, getCallBaseContext()));
5639     };
5640 
5641     bool Dummy = false;
5642     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5643                                      VisitValueCB, getCtxI(),
5644                                      /* UseValueSimplify */ false))
5645       if (!askSimplifiedValueForOtherAAs(A))
5646         return indicatePessimisticFixpoint();
5647 
5648     // If a candicate was found in this update, return CHANGED.
5649     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5650                                                : ChangeStatus ::CHANGED;
5651   }
5652 
5653   /// See AbstractAttribute::trackStatistics()
5654   void trackStatistics() const override {
5655     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5656   }
5657 };
5658 
5659 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5660   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5661       : AAValueSimplifyImpl(IRP, A) {}
5662 
5663   /// See AbstractAttribute::initialize(...).
5664   void initialize(Attributor &A) override {
5665     SimplifiedAssociatedValue = nullptr;
5666     indicateOptimisticFixpoint();
5667   }
5668   /// See AbstractAttribute::initialize(...).
5669   ChangeStatus updateImpl(Attributor &A) override {
5670     llvm_unreachable(
5671         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5672   }
5673   /// See AbstractAttribute::trackStatistics()
5674   void trackStatistics() const override {
5675     STATS_DECLTRACK_FN_ATTR(value_simplify)
5676   }
5677 };
5678 
5679 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5680   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5681       : AAValueSimplifyFunction(IRP, A) {}
5682   /// See AbstractAttribute::trackStatistics()
5683   void trackStatistics() const override {
5684     STATS_DECLTRACK_CS_ATTR(value_simplify)
5685   }
5686 };
5687 
5688 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5689   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5690       : AAValueSimplifyImpl(IRP, A) {}
5691 
5692   void initialize(Attributor &A) override {
5693     AAValueSimplifyImpl::initialize(A);
5694     if (!getAssociatedFunction())
5695       indicatePessimisticFixpoint();
5696   }
5697 
5698   /// See AbstractAttribute::updateImpl(...).
5699   ChangeStatus updateImpl(Attributor &A) override {
5700     auto Before = SimplifiedAssociatedValue;
5701     auto &RetAA = A.getAAFor<AAReturnedValues>(
5702         *this, IRPosition::function(*getAssociatedFunction()),
5703         DepClassTy::REQUIRED);
5704     auto PredForReturned =
5705         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5706           bool UsedAssumedInformation = false;
5707           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5708               &RetVal, *cast<CallBase>(getCtxI()), *this,
5709               UsedAssumedInformation);
5710           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5711               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5712           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5713         };
5714     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5715       if (!askSimplifiedValueForOtherAAs(A))
5716         return indicatePessimisticFixpoint();
5717     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5718                                                : ChangeStatus ::CHANGED;
5719   }
5720 
5721   void trackStatistics() const override {
5722     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5723   }
5724 };
5725 
5726 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5727   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5728       : AAValueSimplifyFloating(IRP, A) {}
5729 
5730   /// See AbstractAttribute::manifest(...).
5731   ChangeStatus manifest(Attributor &A) override {
5732     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5733 
5734     if (auto *NewV = getReplacementValue(A)) {
5735       Use &U = cast<CallBase>(&getAnchorValue())
5736                    ->getArgOperandUse(getCallSiteArgNo());
5737       if (A.changeUseAfterManifest(U, *NewV))
5738         Changed = ChangeStatus::CHANGED;
5739     }
5740 
5741     return Changed | AAValueSimplify::manifest(A);
5742   }
5743 
5744   void trackStatistics() const override {
5745     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5746   }
5747 };
5748 
5749 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5750 struct AAHeapToStackFunction final : public AAHeapToStack {
5751 
5752   struct AllocationInfo {
5753     /// The call that allocates the memory.
5754     CallBase *const CB;
5755 
5756     /// The kind of allocation.
5757     const enum class AllocationKind {
5758       MALLOC,
5759       CALLOC,
5760       ALIGNED_ALLOC,
5761     } Kind;
5762 
5763     /// The library function id for the allocation.
5764     LibFunc LibraryFunctionId = NotLibFunc;
5765 
5766     /// The status wrt. a rewrite.
5767     enum {
5768       STACK_DUE_TO_USE,
5769       STACK_DUE_TO_FREE,
5770       INVALID,
5771     } Status = STACK_DUE_TO_USE;
5772 
5773     /// Flag to indicate if we encountered a use that might free this allocation
5774     /// but which is not in the deallocation infos.
5775     bool HasPotentiallyFreeingUnknownUses = false;
5776 
5777     /// The set of free calls that use this allocation.
5778     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5779   };
5780 
5781   struct DeallocationInfo {
5782     /// The call that deallocates the memory.
5783     CallBase *const CB;
5784 
5785     /// Flag to indicate if we don't know all objects this deallocation might
5786     /// free.
5787     bool MightFreeUnknownObjects = false;
5788 
5789     /// The set of allocation calls that are potentially freed.
5790     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5791   };
5792 
5793   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5794       : AAHeapToStack(IRP, A) {}
5795 
5796   ~AAHeapToStackFunction() {
5797     // Ensure we call the destructor so we release any memory allocated in the
5798     // sets.
5799     for (auto &It : AllocationInfos)
5800       It.getSecond()->~AllocationInfo();
5801     for (auto &It : DeallocationInfos)
5802       It.getSecond()->~DeallocationInfo();
5803   }
5804 
5805   void initialize(Attributor &A) override {
5806     AAHeapToStack::initialize(A);
5807 
5808     const Function *F = getAnchorScope();
5809     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5810 
5811     auto AllocationIdentifierCB = [&](Instruction &I) {
5812       CallBase *CB = dyn_cast<CallBase>(&I);
5813       if (!CB)
5814         return true;
5815       if (isFreeCall(CB, TLI)) {
5816         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5817         return true;
5818       }
5819       bool IsMalloc = isMallocLikeFn(CB, TLI);
5820       bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
5821       bool IsCalloc =
5822           !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
5823       if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
5824         return true;
5825       auto Kind =
5826           IsMalloc ? AllocationInfo::AllocationKind::MALLOC
5827                    : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
5828                                : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
5829 
5830       AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
5831       AllocationInfos[CB] = AI;
5832       TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5833       return true;
5834     };
5835 
5836     bool UsedAssumedInformation = false;
5837     bool Success = A.checkForAllCallLikeInstructions(
5838         AllocationIdentifierCB, *this, UsedAssumedInformation,
5839         /* CheckBBLivenessOnly */ false,
5840         /* CheckPotentiallyDead */ true);
5841     (void)Success;
5842     assert(Success && "Did not expect the call base visit callback to fail!");
5843   }
5844 
5845   const std::string getAsStr() const override {
5846     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5847     for (const auto &It : AllocationInfos) {
5848       if (It.second->Status == AllocationInfo::INVALID)
5849         ++NumInvalidMallocs;
5850       else
5851         ++NumH2SMallocs;
5852     }
5853     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5854            std::to_string(NumInvalidMallocs);
5855   }
5856 
5857   /// See AbstractAttribute::trackStatistics().
5858   void trackStatistics() const override {
5859     STATS_DECL(
5860         MallocCalls, Function,
5861         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5862     for (auto &It : AllocationInfos)
5863       if (It.second->Status != AllocationInfo::INVALID)
5864         ++BUILD_STAT_NAME(MallocCalls, Function);
5865   }
5866 
5867   bool isAssumedHeapToStack(const CallBase &CB) const override {
5868     if (isValidState())
5869       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5870         return AI->Status != AllocationInfo::INVALID;
5871     return false;
5872   }
5873 
5874   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5875     if (!isValidState())
5876       return false;
5877 
5878     for (auto &It : AllocationInfos) {
5879       AllocationInfo &AI = *It.second;
5880       if (AI.Status == AllocationInfo::INVALID)
5881         continue;
5882 
5883       if (AI.PotentialFreeCalls.count(&CB))
5884         return true;
5885     }
5886 
5887     return false;
5888   }
5889 
5890   ChangeStatus manifest(Attributor &A) override {
5891     assert(getState().isValidState() &&
5892            "Attempted to manifest an invalid state!");
5893 
5894     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5895     Function *F = getAnchorScope();
5896     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5897 
5898     for (auto &It : AllocationInfos) {
5899       AllocationInfo &AI = *It.second;
5900       if (AI.Status == AllocationInfo::INVALID)
5901         continue;
5902 
5903       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5904         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5905         A.deleteAfterManifest(*FreeCall);
5906         HasChanged = ChangeStatus::CHANGED;
5907       }
5908 
5909       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5910                         << "\n");
5911 
5912       auto Remark = [&](OptimizationRemark OR) {
5913         LibFunc IsAllocShared;
5914         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5915           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5916             return OR << "Moving globalized variable to the stack.";
5917         return OR << "Moving memory allocation from the heap to the stack.";
5918       };
5919       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5920         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5921       else
5922         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5923 
5924       Value *Size;
5925       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5926       if (SizeAPI.hasValue()) {
5927         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5928       } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5929         auto *Num = AI.CB->getOperand(0);
5930         auto *SizeT = AI.CB->getOperand(1);
5931         IRBuilder<> B(AI.CB);
5932         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5933       } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5934         Size = AI.CB->getOperand(1);
5935       } else {
5936         Size = AI.CB->getOperand(0);
5937       }
5938 
5939       Align Alignment(1);
5940       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5941         Optional<APInt> AlignmentAPI =
5942             getAPInt(A, *this, *AI.CB->getArgOperand(0));
5943         assert(AlignmentAPI.hasValue() &&
5944                "Expected an alignment during manifest!");
5945         Alignment =
5946             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
5947       }
5948 
5949       unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
5950       Instruction *Alloca =
5951           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5952                          "", AI.CB->getNextNode());
5953 
5954       if (Alloca->getType() != AI.CB->getType())
5955         Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
5956                                  Alloca->getNextNode());
5957 
5958       A.changeValueAfterManifest(*AI.CB, *Alloca);
5959 
5960       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5961         auto *NBB = II->getNormalDest();
5962         BranchInst::Create(NBB, AI.CB->getParent());
5963         A.deleteAfterManifest(*AI.CB);
5964       } else {
5965         A.deleteAfterManifest(*AI.CB);
5966       }
5967 
5968       // Zero out the allocated memory if it was a calloc.
5969       if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5970         auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
5971                                    Alloca->getNextNode());
5972         Value *Ops[] = {
5973             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5974             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5975 
5976         Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
5977         Module *M = F->getParent();
5978         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5979         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5980       }
5981       HasChanged = ChangeStatus::CHANGED;
5982     }
5983 
5984     return HasChanged;
5985   }
5986 
5987   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5988                            Value &V) {
5989     bool UsedAssumedInformation = false;
5990     Optional<Constant *> SimpleV =
5991         A.getAssumedConstant(V, AA, UsedAssumedInformation);
5992     if (!SimpleV.hasValue())
5993       return APInt(64, 0);
5994     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
5995       return CI->getValue();
5996     return llvm::None;
5997   }
5998 
5999   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6000                           AllocationInfo &AI) {
6001 
6002     if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
6003       return getAPInt(A, AA, *AI.CB->getArgOperand(0));
6004 
6005     if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6006       // Only if the alignment is also constant we return a size.
6007       return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
6008                  ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
6009                  : llvm::None;
6010 
6011     assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
6012            "Expected only callocs are left");
6013     Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
6014     Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
6015     if (!Num.hasValue() || !Size.hasValue())
6016       return llvm::None;
6017     bool Overflow = false;
6018     Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
6019     return Overflow ? llvm::None : Size;
6020   }
6021 
6022   /// Collection of all malloc-like calls in a function with associated
6023   /// information.
6024   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
6025 
6026   /// Collection of all free-like calls in a function with associated
6027   /// information.
6028   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
6029 
6030   ChangeStatus updateImpl(Attributor &A) override;
6031 };
6032 
6033 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6034   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6035   const Function *F = getAnchorScope();
6036 
6037   const auto &LivenessAA =
6038       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6039 
6040   MustBeExecutedContextExplorer &Explorer =
6041       A.getInfoCache().getMustBeExecutedContextExplorer();
6042 
6043   bool StackIsAccessibleByOtherThreads =
6044       A.getInfoCache().stackIsAccessibleByOtherThreads();
6045 
6046   // Flag to ensure we update our deallocation information at most once per
6047   // updateImpl call and only if we use the free check reasoning.
6048   bool HasUpdatedFrees = false;
6049 
6050   auto UpdateFrees = [&]() {
6051     HasUpdatedFrees = true;
6052 
6053     for (auto &It : DeallocationInfos) {
6054       DeallocationInfo &DI = *It.second;
6055       // For now we cannot use deallocations that have unknown inputs, skip
6056       // them.
6057       if (DI.MightFreeUnknownObjects)
6058         continue;
6059 
6060       // No need to analyze dead calls, ignore them instead.
6061       bool UsedAssumedInformation = false;
6062       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6063                           /* CheckBBLivenessOnly */ true))
6064         continue;
6065 
6066       // Use the optimistic version to get the freed objects, ignoring dead
6067       // branches etc.
6068       SmallVector<Value *, 8> Objects;
6069       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6070                                            *this, DI.CB)) {
6071         LLVM_DEBUG(
6072             dbgs()
6073             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6074         DI.MightFreeUnknownObjects = true;
6075         continue;
6076       }
6077 
6078       // Check each object explicitly.
6079       for (auto *Obj : Objects) {
6080         // Free of null and undef can be ignored as no-ops (or UB in the latter
6081         // case).
6082         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6083           continue;
6084 
6085         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6086         if (!ObjCB) {
6087           LLVM_DEBUG(dbgs()
6088                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6089           DI.MightFreeUnknownObjects = true;
6090           continue;
6091         }
6092 
6093         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6094         if (!AI) {
6095           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6096                             << "\n");
6097           DI.MightFreeUnknownObjects = true;
6098           continue;
6099         }
6100 
6101         DI.PotentialAllocationCalls.insert(ObjCB);
6102       }
6103     }
6104   };
6105 
6106   auto FreeCheck = [&](AllocationInfo &AI) {
6107     // If the stack is not accessible by other threads, the "must-free" logic
6108     // doesn't apply as the pointer could be shared and needs to be places in
6109     // "shareable" memory.
6110     if (!StackIsAccessibleByOtherThreads) {
6111       auto &NoSyncAA =
6112           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6113       if (!NoSyncAA.isAssumedNoSync()) {
6114         LLVM_DEBUG(
6115             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6116                       "other threads and function is not nosync:\n");
6117         return false;
6118       }
6119     }
6120     if (!HasUpdatedFrees)
6121       UpdateFrees();
6122 
6123     // TODO: Allow multi exit functions that have different free calls.
6124     if (AI.PotentialFreeCalls.size() != 1) {
6125       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6126                         << AI.PotentialFreeCalls.size() << "\n");
6127       return false;
6128     }
6129     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6130     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6131     if (!DI) {
6132       LLVM_DEBUG(
6133           dbgs() << "[H2S] unique free call was not known as deallocation call "
6134                  << *UniqueFree << "\n");
6135       return false;
6136     }
6137     if (DI->MightFreeUnknownObjects) {
6138       LLVM_DEBUG(
6139           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6140       return false;
6141     }
6142     if (DI->PotentialAllocationCalls.size() > 1) {
6143       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6144                         << DI->PotentialAllocationCalls.size()
6145                         << " different allocations\n");
6146       return false;
6147     }
6148     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6149       LLVM_DEBUG(
6150           dbgs()
6151           << "[H2S] unique free call not known to free this allocation but "
6152           << **DI->PotentialAllocationCalls.begin() << "\n");
6153       return false;
6154     }
6155     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6156     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6157       LLVM_DEBUG(
6158           dbgs()
6159           << "[H2S] unique free call might not be executed with the allocation "
6160           << *UniqueFree << "\n");
6161       return false;
6162     }
6163     return true;
6164   };
6165 
6166   auto UsesCheck = [&](AllocationInfo &AI) {
6167     bool ValidUsesOnly = true;
6168 
6169     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6170       Instruction *UserI = cast<Instruction>(U.getUser());
6171       if (isa<LoadInst>(UserI))
6172         return true;
6173       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6174         if (SI->getValueOperand() == U.get()) {
6175           LLVM_DEBUG(dbgs()
6176                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6177           ValidUsesOnly = false;
6178         } else {
6179           // A store into the malloc'ed memory is fine.
6180         }
6181         return true;
6182       }
6183       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6184         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6185           return true;
6186         if (DeallocationInfos.count(CB)) {
6187           AI.PotentialFreeCalls.insert(CB);
6188           return true;
6189         }
6190 
6191         unsigned ArgNo = CB->getArgOperandNo(&U);
6192 
6193         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6194             *this, IRPosition::callsite_argument(*CB, ArgNo),
6195             DepClassTy::OPTIONAL);
6196 
6197         // If a call site argument use is nofree, we are fine.
6198         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6199             *this, IRPosition::callsite_argument(*CB, ArgNo),
6200             DepClassTy::OPTIONAL);
6201 
6202         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6203         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6204         if (MaybeCaptured ||
6205             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6206              MaybeFreed)) {
6207           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6208 
6209           // Emit a missed remark if this is missed OpenMP globalization.
6210           auto Remark = [&](OptimizationRemarkMissed ORM) {
6211             return ORM
6212                    << "Could not move globalized variable to the stack. "
6213                       "Variable is potentially captured in call. Mark "
6214                       "parameter as `__attribute__((noescape))` to override.";
6215           };
6216 
6217           if (ValidUsesOnly &&
6218               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6219             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6220 
6221           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6222           ValidUsesOnly = false;
6223         }
6224         return true;
6225       }
6226 
6227       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6228           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6229         Follow = true;
6230         return true;
6231       }
6232       // Unknown user for which we can not track uses further (in a way that
6233       // makes sense).
6234       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6235       ValidUsesOnly = false;
6236       return true;
6237     };
6238     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6239       return false;
6240     return ValidUsesOnly;
6241   };
6242 
6243   // The actual update starts here. We look at all allocations and depending on
6244   // their status perform the appropriate check(s).
6245   for (auto &It : AllocationInfos) {
6246     AllocationInfo &AI = *It.second;
6247     if (AI.Status == AllocationInfo::INVALID)
6248       continue;
6249 
6250     if (MaxHeapToStackSize == -1) {
6251       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6252         if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
6253           LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6254                             << "\n");
6255           AI.Status = AllocationInfo::INVALID;
6256           Changed = ChangeStatus::CHANGED;
6257           continue;
6258         }
6259     } else {
6260       Optional<APInt> Size = getSize(A, *this, AI);
6261       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6262         LLVM_DEBUG({
6263           if (!Size.hasValue())
6264             dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
6265                    << "\n";
6266           else
6267             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6268                    << MaxHeapToStackSize << "\n";
6269         });
6270 
6271         AI.Status = AllocationInfo::INVALID;
6272         Changed = ChangeStatus::CHANGED;
6273         continue;
6274       }
6275     }
6276 
6277     switch (AI.Status) {
6278     case AllocationInfo::STACK_DUE_TO_USE:
6279       if (UsesCheck(AI))
6280         continue;
6281       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6282       LLVM_FALLTHROUGH;
6283     case AllocationInfo::STACK_DUE_TO_FREE:
6284       if (FreeCheck(AI))
6285         continue;
6286       AI.Status = AllocationInfo::INVALID;
6287       Changed = ChangeStatus::CHANGED;
6288       continue;
6289     case AllocationInfo::INVALID:
6290       llvm_unreachable("Invalid allocations should never reach this point!");
6291     };
6292   }
6293 
6294   return Changed;
6295 }
6296 
6297 /// ----------------------- Privatizable Pointers ------------------------------
6298 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6299   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6300       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6301 
6302   ChangeStatus indicatePessimisticFixpoint() override {
6303     AAPrivatizablePtr::indicatePessimisticFixpoint();
6304     PrivatizableType = nullptr;
6305     return ChangeStatus::CHANGED;
6306   }
6307 
6308   /// Identify the type we can chose for a private copy of the underlying
6309   /// argument. None means it is not clear yet, nullptr means there is none.
6310   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6311 
6312   /// Return a privatizable type that encloses both T0 and T1.
6313   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6314   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6315     if (!T0.hasValue())
6316       return T1;
6317     if (!T1.hasValue())
6318       return T0;
6319     if (T0 == T1)
6320       return T0;
6321     return nullptr;
6322   }
6323 
6324   Optional<Type *> getPrivatizableType() const override {
6325     return PrivatizableType;
6326   }
6327 
6328   const std::string getAsStr() const override {
6329     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6330   }
6331 
6332 protected:
6333   Optional<Type *> PrivatizableType;
6334 };
6335 
6336 // TODO: Do this for call site arguments (probably also other values) as well.
6337 
6338 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6339   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6340       : AAPrivatizablePtrImpl(IRP, A) {}
6341 
6342   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6343   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6344     // If this is a byval argument and we know all the call sites (so we can
6345     // rewrite them), there is no need to check them explicitly.
6346     bool AllCallSitesKnown;
6347     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6348         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6349                                true, AllCallSitesKnown))
6350       return getAssociatedValue().getType()->getPointerElementType();
6351 
6352     Optional<Type *> Ty;
6353     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6354 
6355     // Make sure the associated call site argument has the same type at all call
6356     // sites and it is an allocation we know is safe to privatize, for now that
6357     // means we only allow alloca instructions.
6358     // TODO: We can additionally analyze the accesses in the callee to  create
6359     //       the type from that information instead. That is a little more
6360     //       involved and will be done in a follow up patch.
6361     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6362       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6363       // Check if a coresponding argument was found or if it is one not
6364       // associated (which can happen for callback calls).
6365       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6366         return false;
6367 
6368       // Check that all call sites agree on a type.
6369       auto &PrivCSArgAA =
6370           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6371       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6372 
6373       LLVM_DEBUG({
6374         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6375         if (CSTy.hasValue() && CSTy.getValue())
6376           CSTy.getValue()->print(dbgs());
6377         else if (CSTy.hasValue())
6378           dbgs() << "<nullptr>";
6379         else
6380           dbgs() << "<none>";
6381       });
6382 
6383       Ty = combineTypes(Ty, CSTy);
6384 
6385       LLVM_DEBUG({
6386         dbgs() << " : New Type: ";
6387         if (Ty.hasValue() && Ty.getValue())
6388           Ty.getValue()->print(dbgs());
6389         else if (Ty.hasValue())
6390           dbgs() << "<nullptr>";
6391         else
6392           dbgs() << "<none>";
6393         dbgs() << "\n";
6394       });
6395 
6396       return !Ty.hasValue() || Ty.getValue();
6397     };
6398 
6399     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6400       return nullptr;
6401     return Ty;
6402   }
6403 
6404   /// See AbstractAttribute::updateImpl(...).
6405   ChangeStatus updateImpl(Attributor &A) override {
6406     PrivatizableType = identifyPrivatizableType(A);
6407     if (!PrivatizableType.hasValue())
6408       return ChangeStatus::UNCHANGED;
6409     if (!PrivatizableType.getValue())
6410       return indicatePessimisticFixpoint();
6411 
6412     // The dependence is optional so we don't give up once we give up on the
6413     // alignment.
6414     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6415                         DepClassTy::OPTIONAL);
6416 
6417     // Avoid arguments with padding for now.
6418     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6419         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6420                                                 A.getInfoCache().getDL())) {
6421       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6422       return indicatePessimisticFixpoint();
6423     }
6424 
6425     // Verify callee and caller agree on how the promoted argument would be
6426     // passed.
6427     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
6428     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
6429     // which doesn't require the arguments ArgumentPromotion wanted to pass.
6430     Function &Fn = *getIRPosition().getAnchorScope();
6431     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
6432     ArgsToPromote.insert(getAssociatedArgument());
6433     const auto *TTI =
6434         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6435     if (!TTI ||
6436         !ArgumentPromotionPass::areFunctionArgsABICompatible(
6437             Fn, *TTI, ArgsToPromote, Dummy) ||
6438         ArgsToPromote.empty()) {
6439       LLVM_DEBUG(
6440           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6441                  << Fn.getName() << "\n");
6442       return indicatePessimisticFixpoint();
6443     }
6444 
6445     // Collect the types that will replace the privatizable type in the function
6446     // signature.
6447     SmallVector<Type *, 16> ReplacementTypes;
6448     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6449 
6450     // Register a rewrite of the argument.
6451     Argument *Arg = getAssociatedArgument();
6452     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6453       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6454       return indicatePessimisticFixpoint();
6455     }
6456 
6457     unsigned ArgNo = Arg->getArgNo();
6458 
6459     // Helper to check if for the given call site the associated argument is
6460     // passed to a callback where the privatization would be different.
6461     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6462       SmallVector<const Use *, 4> CallbackUses;
6463       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6464       for (const Use *U : CallbackUses) {
6465         AbstractCallSite CBACS(U);
6466         assert(CBACS && CBACS.isCallbackCall());
6467         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6468           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6469 
6470           LLVM_DEBUG({
6471             dbgs()
6472                 << "[AAPrivatizablePtr] Argument " << *Arg
6473                 << "check if can be privatized in the context of its parent ("
6474                 << Arg->getParent()->getName()
6475                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6476                    "callback ("
6477                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6478                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6479                 << CBACS.getCallArgOperand(CBArg) << " vs "
6480                 << CB.getArgOperand(ArgNo) << "\n"
6481                 << "[AAPrivatizablePtr] " << CBArg << " : "
6482                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6483           });
6484 
6485           if (CBArgNo != int(ArgNo))
6486             continue;
6487           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6488               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6489           if (CBArgPrivAA.isValidState()) {
6490             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6491             if (!CBArgPrivTy.hasValue())
6492               continue;
6493             if (CBArgPrivTy.getValue() == PrivatizableType)
6494               continue;
6495           }
6496 
6497           LLVM_DEBUG({
6498             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6499                    << " cannot be privatized in the context of its parent ("
6500                    << Arg->getParent()->getName()
6501                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6502                       "callback ("
6503                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6504                    << ").\n[AAPrivatizablePtr] for which the argument "
6505                       "privatization is not compatible.\n";
6506           });
6507           return false;
6508         }
6509       }
6510       return true;
6511     };
6512 
6513     // Helper to check if for the given call site the associated argument is
6514     // passed to a direct call where the privatization would be different.
6515     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6516       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6517       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6518       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6519              "Expected a direct call operand for callback call operand");
6520 
6521       LLVM_DEBUG({
6522         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6523                << " check if be privatized in the context of its parent ("
6524                << Arg->getParent()->getName()
6525                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6526                   "direct call of ("
6527                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6528                << ").\n";
6529       });
6530 
6531       Function *DCCallee = DC->getCalledFunction();
6532       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6533         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6534             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6535             DepClassTy::REQUIRED);
6536         if (DCArgPrivAA.isValidState()) {
6537           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6538           if (!DCArgPrivTy.hasValue())
6539             return true;
6540           if (DCArgPrivTy.getValue() == PrivatizableType)
6541             return true;
6542         }
6543       }
6544 
6545       LLVM_DEBUG({
6546         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6547                << " cannot be privatized in the context of its parent ("
6548                << Arg->getParent()->getName()
6549                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6550                   "direct call of ("
6551                << ACS.getInstruction()->getCalledFunction()->getName()
6552                << ").\n[AAPrivatizablePtr] for which the argument "
6553                   "privatization is not compatible.\n";
6554       });
6555       return false;
6556     };
6557 
6558     // Helper to check if the associated argument is used at the given abstract
6559     // call site in a way that is incompatible with the privatization assumed
6560     // here.
6561     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6562       if (ACS.isDirectCall())
6563         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6564       if (ACS.isCallbackCall())
6565         return IsCompatiblePrivArgOfDirectCS(ACS);
6566       return false;
6567     };
6568 
6569     bool AllCallSitesKnown;
6570     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6571                                 AllCallSitesKnown))
6572       return indicatePessimisticFixpoint();
6573 
6574     return ChangeStatus::UNCHANGED;
6575   }
6576 
6577   /// Given a type to private \p PrivType, collect the constituates (which are
6578   /// used) in \p ReplacementTypes.
6579   static void
6580   identifyReplacementTypes(Type *PrivType,
6581                            SmallVectorImpl<Type *> &ReplacementTypes) {
6582     // TODO: For now we expand the privatization type to the fullest which can
6583     //       lead to dead arguments that need to be removed later.
6584     assert(PrivType && "Expected privatizable type!");
6585 
6586     // Traverse the type, extract constituate types on the outermost level.
6587     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6588       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6589         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6590     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6591       ReplacementTypes.append(PrivArrayType->getNumElements(),
6592                               PrivArrayType->getElementType());
6593     } else {
6594       ReplacementTypes.push_back(PrivType);
6595     }
6596   }
6597 
6598   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6599   /// The values needed are taken from the arguments of \p F starting at
6600   /// position \p ArgNo.
6601   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6602                                    unsigned ArgNo, Instruction &IP) {
6603     assert(PrivType && "Expected privatizable type!");
6604 
6605     IRBuilder<NoFolder> IRB(&IP);
6606     const DataLayout &DL = F.getParent()->getDataLayout();
6607 
6608     // Traverse the type, build GEPs and stores.
6609     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6610       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6611       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6612         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6613         Value *Ptr =
6614             constructPointer(PointeeTy, PrivType, &Base,
6615                              PrivStructLayout->getElementOffset(u), IRB, DL);
6616         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6617       }
6618     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6619       Type *PointeeTy = PrivArrayType->getElementType();
6620       Type *PointeePtrTy = PointeeTy->getPointerTo();
6621       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6622       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6623         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6624                                       u * PointeeTySize, IRB, DL);
6625         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6626       }
6627     } else {
6628       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6629     }
6630   }
6631 
6632   /// Extract values from \p Base according to the type \p PrivType at the
6633   /// call position \p ACS. The values are appended to \p ReplacementValues.
6634   void createReplacementValues(Align Alignment, Type *PrivType,
6635                                AbstractCallSite ACS, Value *Base,
6636                                SmallVectorImpl<Value *> &ReplacementValues) {
6637     assert(Base && "Expected base value!");
6638     assert(PrivType && "Expected privatizable type!");
6639     Instruction *IP = ACS.getInstruction();
6640 
6641     IRBuilder<NoFolder> IRB(IP);
6642     const DataLayout &DL = IP->getModule()->getDataLayout();
6643 
6644     if (Base->getType()->getPointerElementType() != PrivType)
6645       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
6646                                                  "", ACS.getInstruction());
6647 
6648     // Traverse the type, build GEPs and loads.
6649     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6650       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6651       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6652         Type *PointeeTy = PrivStructType->getElementType(u);
6653         Value *Ptr =
6654             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6655                              PrivStructLayout->getElementOffset(u), IRB, DL);
6656         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6657         L->setAlignment(Alignment);
6658         ReplacementValues.push_back(L);
6659       }
6660     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6661       Type *PointeeTy = PrivArrayType->getElementType();
6662       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6663       Type *PointeePtrTy = PointeeTy->getPointerTo();
6664       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6665         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6666                                       u * PointeeTySize, IRB, DL);
6667         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6668         L->setAlignment(Alignment);
6669         ReplacementValues.push_back(L);
6670       }
6671     } else {
6672       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6673       L->setAlignment(Alignment);
6674       ReplacementValues.push_back(L);
6675     }
6676   }
6677 
6678   /// See AbstractAttribute::manifest(...)
6679   ChangeStatus manifest(Attributor &A) override {
6680     if (!PrivatizableType.hasValue())
6681       return ChangeStatus::UNCHANGED;
6682     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6683 
6684     // Collect all tail calls in the function as we cannot allow new allocas to
6685     // escape into tail recursion.
6686     // TODO: Be smarter about new allocas escaping into tail calls.
6687     SmallVector<CallInst *, 16> TailCalls;
6688     bool UsedAssumedInformation = false;
6689     if (!A.checkForAllInstructions(
6690             [&](Instruction &I) {
6691               CallInst &CI = cast<CallInst>(I);
6692               if (CI.isTailCall())
6693                 TailCalls.push_back(&CI);
6694               return true;
6695             },
6696             *this, {Instruction::Call}, UsedAssumedInformation))
6697       return ChangeStatus::UNCHANGED;
6698 
6699     Argument *Arg = getAssociatedArgument();
6700     // Query AAAlign attribute for alignment of associated argument to
6701     // determine the best alignment of loads.
6702     const auto &AlignAA =
6703         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6704 
6705     // Callback to repair the associated function. A new alloca is placed at the
6706     // beginning and initialized with the values passed through arguments. The
6707     // new alloca replaces the use of the old pointer argument.
6708     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6709         [=](const Attributor::ArgumentReplacementInfo &ARI,
6710             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6711           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6712           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6713           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
6714                                            Arg->getName() + ".priv", IP);
6715           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6716                                ArgIt->getArgNo(), *IP);
6717 
6718           if (AI->getType() != Arg->getType())
6719             AI =
6720                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
6721           Arg->replaceAllUsesWith(AI);
6722 
6723           for (CallInst *CI : TailCalls)
6724             CI->setTailCall(false);
6725         };
6726 
6727     // Callback to repair a call site of the associated function. The elements
6728     // of the privatizable type are loaded prior to the call and passed to the
6729     // new function version.
6730     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6731         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6732                       AbstractCallSite ACS,
6733                       SmallVectorImpl<Value *> &NewArgOperands) {
6734           // When no alignment is specified for the load instruction,
6735           // natural alignment is assumed.
6736           createReplacementValues(
6737               assumeAligned(AlignAA.getAssumedAlign()),
6738               PrivatizableType.getValue(), ACS,
6739               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6740               NewArgOperands);
6741         };
6742 
6743     // Collect the types that will replace the privatizable type in the function
6744     // signature.
6745     SmallVector<Type *, 16> ReplacementTypes;
6746     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6747 
6748     // Register a rewrite of the argument.
6749     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6750                                            std::move(FnRepairCB),
6751                                            std::move(ACSRepairCB)))
6752       return ChangeStatus::CHANGED;
6753     return ChangeStatus::UNCHANGED;
6754   }
6755 
6756   /// See AbstractAttribute::trackStatistics()
6757   void trackStatistics() const override {
6758     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6759   }
6760 };
6761 
6762 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6763   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6764       : AAPrivatizablePtrImpl(IRP, A) {}
6765 
6766   /// See AbstractAttribute::initialize(...).
6767   virtual void initialize(Attributor &A) override {
6768     // TODO: We can privatize more than arguments.
6769     indicatePessimisticFixpoint();
6770   }
6771 
6772   ChangeStatus updateImpl(Attributor &A) override {
6773     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6774                      "updateImpl will not be called");
6775   }
6776 
6777   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6778   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6779     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6780     if (!Obj) {
6781       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6782       return nullptr;
6783     }
6784 
6785     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6786       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6787         if (CI->isOne())
6788           return Obj->getType()->getPointerElementType();
6789     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6790       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6791           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6792       if (PrivArgAA.isAssumedPrivatizablePtr())
6793         return Obj->getType()->getPointerElementType();
6794     }
6795 
6796     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6797                          "alloca nor privatizable argument: "
6798                       << *Obj << "!\n");
6799     return nullptr;
6800   }
6801 
6802   /// See AbstractAttribute::trackStatistics()
6803   void trackStatistics() const override {
6804     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6805   }
6806 };
6807 
6808 struct AAPrivatizablePtrCallSiteArgument final
6809     : public AAPrivatizablePtrFloating {
6810   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6811       : AAPrivatizablePtrFloating(IRP, A) {}
6812 
6813   /// See AbstractAttribute::initialize(...).
6814   void initialize(Attributor &A) override {
6815     if (getIRPosition().hasAttr(Attribute::ByVal))
6816       indicateOptimisticFixpoint();
6817   }
6818 
6819   /// See AbstractAttribute::updateImpl(...).
6820   ChangeStatus updateImpl(Attributor &A) override {
6821     PrivatizableType = identifyPrivatizableType(A);
6822     if (!PrivatizableType.hasValue())
6823       return ChangeStatus::UNCHANGED;
6824     if (!PrivatizableType.getValue())
6825       return indicatePessimisticFixpoint();
6826 
6827     const IRPosition &IRP = getIRPosition();
6828     auto &NoCaptureAA =
6829         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6830     if (!NoCaptureAA.isAssumedNoCapture()) {
6831       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6832       return indicatePessimisticFixpoint();
6833     }
6834 
6835     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6836     if (!NoAliasAA.isAssumedNoAlias()) {
6837       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6838       return indicatePessimisticFixpoint();
6839     }
6840 
6841     const auto &MemBehaviorAA =
6842         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
6843     if (!MemBehaviorAA.isAssumedReadOnly()) {
6844       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6845       return indicatePessimisticFixpoint();
6846     }
6847 
6848     return ChangeStatus::UNCHANGED;
6849   }
6850 
6851   /// See AbstractAttribute::trackStatistics()
6852   void trackStatistics() const override {
6853     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6854   }
6855 };
6856 
6857 struct AAPrivatizablePtrCallSiteReturned final
6858     : public AAPrivatizablePtrFloating {
6859   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6860       : AAPrivatizablePtrFloating(IRP, A) {}
6861 
6862   /// See AbstractAttribute::initialize(...).
6863   void initialize(Attributor &A) override {
6864     // TODO: We can privatize more than arguments.
6865     indicatePessimisticFixpoint();
6866   }
6867 
6868   /// See AbstractAttribute::trackStatistics()
6869   void trackStatistics() const override {
6870     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6871   }
6872 };
6873 
6874 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6875   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6876       : AAPrivatizablePtrFloating(IRP, A) {}
6877 
6878   /// See AbstractAttribute::initialize(...).
6879   void initialize(Attributor &A) override {
6880     // TODO: We can privatize more than arguments.
6881     indicatePessimisticFixpoint();
6882   }
6883 
6884   /// See AbstractAttribute::trackStatistics()
6885   void trackStatistics() const override {
6886     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6887   }
6888 };
6889 
6890 /// -------------------- Memory Behavior Attributes ----------------------------
6891 /// Includes read-none, read-only, and write-only.
6892 /// ----------------------------------------------------------------------------
6893 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6894   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6895       : AAMemoryBehavior(IRP, A) {}
6896 
6897   /// See AbstractAttribute::initialize(...).
6898   void initialize(Attributor &A) override {
6899     intersectAssumedBits(BEST_STATE);
6900     getKnownStateFromValue(getIRPosition(), getState());
6901     AAMemoryBehavior::initialize(A);
6902   }
6903 
6904   /// Return the memory behavior information encoded in the IR for \p IRP.
6905   static void getKnownStateFromValue(const IRPosition &IRP,
6906                                      BitIntegerState &State,
6907                                      bool IgnoreSubsumingPositions = false) {
6908     SmallVector<Attribute, 2> Attrs;
6909     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6910     for (const Attribute &Attr : Attrs) {
6911       switch (Attr.getKindAsEnum()) {
6912       case Attribute::ReadNone:
6913         State.addKnownBits(NO_ACCESSES);
6914         break;
6915       case Attribute::ReadOnly:
6916         State.addKnownBits(NO_WRITES);
6917         break;
6918       case Attribute::WriteOnly:
6919         State.addKnownBits(NO_READS);
6920         break;
6921       default:
6922         llvm_unreachable("Unexpected attribute!");
6923       }
6924     }
6925 
6926     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6927       if (!I->mayReadFromMemory())
6928         State.addKnownBits(NO_READS);
6929       if (!I->mayWriteToMemory())
6930         State.addKnownBits(NO_WRITES);
6931     }
6932   }
6933 
6934   /// See AbstractAttribute::getDeducedAttributes(...).
6935   void getDeducedAttributes(LLVMContext &Ctx,
6936                             SmallVectorImpl<Attribute> &Attrs) const override {
6937     assert(Attrs.size() == 0);
6938     if (isAssumedReadNone())
6939       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6940     else if (isAssumedReadOnly())
6941       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6942     else if (isAssumedWriteOnly())
6943       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6944     assert(Attrs.size() <= 1);
6945   }
6946 
6947   /// See AbstractAttribute::manifest(...).
6948   ChangeStatus manifest(Attributor &A) override {
6949     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6950       return ChangeStatus::UNCHANGED;
6951 
6952     const IRPosition &IRP = getIRPosition();
6953 
6954     // Check if we would improve the existing attributes first.
6955     SmallVector<Attribute, 4> DeducedAttrs;
6956     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6957     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6958           return IRP.hasAttr(Attr.getKindAsEnum(),
6959                              /* IgnoreSubsumingPositions */ true);
6960         }))
6961       return ChangeStatus::UNCHANGED;
6962 
6963     // Clear existing attributes.
6964     IRP.removeAttrs(AttrKinds);
6965 
6966     // Use the generic manifest method.
6967     return IRAttribute::manifest(A);
6968   }
6969 
6970   /// See AbstractState::getAsStr().
6971   const std::string getAsStr() const override {
6972     if (isAssumedReadNone())
6973       return "readnone";
6974     if (isAssumedReadOnly())
6975       return "readonly";
6976     if (isAssumedWriteOnly())
6977       return "writeonly";
6978     return "may-read/write";
6979   }
6980 
6981   /// The set of IR attributes AAMemoryBehavior deals with.
6982   static const Attribute::AttrKind AttrKinds[3];
6983 };
6984 
6985 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6986     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6987 
6988 /// Memory behavior attribute for a floating value.
6989 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6990   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6991       : AAMemoryBehaviorImpl(IRP, A) {}
6992 
6993   /// See AbstractAttribute::updateImpl(...).
6994   ChangeStatus updateImpl(Attributor &A) override;
6995 
6996   /// See AbstractAttribute::trackStatistics()
6997   void trackStatistics() const override {
6998     if (isAssumedReadNone())
6999       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7000     else if (isAssumedReadOnly())
7001       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7002     else if (isAssumedWriteOnly())
7003       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7004   }
7005 
7006 private:
7007   /// Return true if users of \p UserI might access the underlying
7008   /// variable/location described by \p U and should therefore be analyzed.
7009   bool followUsersOfUseIn(Attributor &A, const Use &U,
7010                           const Instruction *UserI);
7011 
7012   /// Update the state according to the effect of use \p U in \p UserI.
7013   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7014 };
7015 
7016 /// Memory behavior attribute for function argument.
7017 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7018   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7019       : AAMemoryBehaviorFloating(IRP, A) {}
7020 
7021   /// See AbstractAttribute::initialize(...).
7022   void initialize(Attributor &A) override {
7023     intersectAssumedBits(BEST_STATE);
7024     const IRPosition &IRP = getIRPosition();
7025     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7026     // can query it when we use has/getAttr. That would allow us to reuse the
7027     // initialize of the base class here.
7028     bool HasByVal =
7029         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7030     getKnownStateFromValue(IRP, getState(),
7031                            /* IgnoreSubsumingPositions */ HasByVal);
7032 
7033     // Initialize the use vector with all direct uses of the associated value.
7034     Argument *Arg = getAssociatedArgument();
7035     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7036       indicatePessimisticFixpoint();
7037   }
7038 
7039   ChangeStatus manifest(Attributor &A) override {
7040     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7041     if (!getAssociatedValue().getType()->isPointerTy())
7042       return ChangeStatus::UNCHANGED;
7043 
7044     // TODO: From readattrs.ll: "inalloca parameters are always
7045     //                           considered written"
7046     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7047       removeKnownBits(NO_WRITES);
7048       removeAssumedBits(NO_WRITES);
7049     }
7050     return AAMemoryBehaviorFloating::manifest(A);
7051   }
7052 
7053   /// See AbstractAttribute::trackStatistics()
7054   void trackStatistics() const override {
7055     if (isAssumedReadNone())
7056       STATS_DECLTRACK_ARG_ATTR(readnone)
7057     else if (isAssumedReadOnly())
7058       STATS_DECLTRACK_ARG_ATTR(readonly)
7059     else if (isAssumedWriteOnly())
7060       STATS_DECLTRACK_ARG_ATTR(writeonly)
7061   }
7062 };
7063 
7064 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7065   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7066       : AAMemoryBehaviorArgument(IRP, A) {}
7067 
7068   /// See AbstractAttribute::initialize(...).
7069   void initialize(Attributor &A) override {
7070     // If we don't have an associated attribute this is either a variadic call
7071     // or an indirect call, either way, nothing to do here.
7072     Argument *Arg = getAssociatedArgument();
7073     if (!Arg) {
7074       indicatePessimisticFixpoint();
7075       return;
7076     }
7077     if (Arg->hasByValAttr()) {
7078       addKnownBits(NO_WRITES);
7079       removeKnownBits(NO_READS);
7080       removeAssumedBits(NO_READS);
7081     }
7082     AAMemoryBehaviorArgument::initialize(A);
7083     if (getAssociatedFunction()->isDeclaration())
7084       indicatePessimisticFixpoint();
7085   }
7086 
7087   /// See AbstractAttribute::updateImpl(...).
7088   ChangeStatus updateImpl(Attributor &A) override {
7089     // TODO: Once we have call site specific value information we can provide
7090     //       call site specific liveness liveness information and then it makes
7091     //       sense to specialize attributes for call sites arguments instead of
7092     //       redirecting requests to the callee argument.
7093     Argument *Arg = getAssociatedArgument();
7094     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7095     auto &ArgAA =
7096         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7097     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7098   }
7099 
7100   /// See AbstractAttribute::trackStatistics()
7101   void trackStatistics() const override {
7102     if (isAssumedReadNone())
7103       STATS_DECLTRACK_CSARG_ATTR(readnone)
7104     else if (isAssumedReadOnly())
7105       STATS_DECLTRACK_CSARG_ATTR(readonly)
7106     else if (isAssumedWriteOnly())
7107       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7108   }
7109 };
7110 
7111 /// Memory behavior attribute for a call site return position.
7112 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7113   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7114       : AAMemoryBehaviorFloating(IRP, A) {}
7115 
7116   /// See AbstractAttribute::initialize(...).
7117   void initialize(Attributor &A) override {
7118     AAMemoryBehaviorImpl::initialize(A);
7119     Function *F = getAssociatedFunction();
7120     if (!F || F->isDeclaration())
7121       indicatePessimisticFixpoint();
7122   }
7123 
7124   /// See AbstractAttribute::manifest(...).
7125   ChangeStatus manifest(Attributor &A) override {
7126     // We do not annotate returned values.
7127     return ChangeStatus::UNCHANGED;
7128   }
7129 
7130   /// See AbstractAttribute::trackStatistics()
7131   void trackStatistics() const override {}
7132 };
7133 
7134 /// An AA to represent the memory behavior function attributes.
7135 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7136   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7137       : AAMemoryBehaviorImpl(IRP, A) {}
7138 
7139   /// See AbstractAttribute::updateImpl(Attributor &A).
7140   virtual ChangeStatus updateImpl(Attributor &A) override;
7141 
7142   /// See AbstractAttribute::manifest(...).
7143   ChangeStatus manifest(Attributor &A) override {
7144     Function &F = cast<Function>(getAnchorValue());
7145     if (isAssumedReadNone()) {
7146       F.removeFnAttr(Attribute::ArgMemOnly);
7147       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7148       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7149     }
7150     return AAMemoryBehaviorImpl::manifest(A);
7151   }
7152 
7153   /// See AbstractAttribute::trackStatistics()
7154   void trackStatistics() const override {
7155     if (isAssumedReadNone())
7156       STATS_DECLTRACK_FN_ATTR(readnone)
7157     else if (isAssumedReadOnly())
7158       STATS_DECLTRACK_FN_ATTR(readonly)
7159     else if (isAssumedWriteOnly())
7160       STATS_DECLTRACK_FN_ATTR(writeonly)
7161   }
7162 };
7163 
7164 /// AAMemoryBehavior attribute for call sites.
7165 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7166   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7167       : AAMemoryBehaviorImpl(IRP, A) {}
7168 
7169   /// See AbstractAttribute::initialize(...).
7170   void initialize(Attributor &A) override {
7171     AAMemoryBehaviorImpl::initialize(A);
7172     Function *F = getAssociatedFunction();
7173     if (!F || F->isDeclaration())
7174       indicatePessimisticFixpoint();
7175   }
7176 
7177   /// See AbstractAttribute::updateImpl(...).
7178   ChangeStatus updateImpl(Attributor &A) override {
7179     // TODO: Once we have call site specific value information we can provide
7180     //       call site specific liveness liveness information and then it makes
7181     //       sense to specialize attributes for call sites arguments instead of
7182     //       redirecting requests to the callee argument.
7183     Function *F = getAssociatedFunction();
7184     const IRPosition &FnPos = IRPosition::function(*F);
7185     auto &FnAA =
7186         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7187     return clampStateAndIndicateChange(getState(), FnAA.getState());
7188   }
7189 
7190   /// See AbstractAttribute::trackStatistics()
7191   void trackStatistics() const override {
7192     if (isAssumedReadNone())
7193       STATS_DECLTRACK_CS_ATTR(readnone)
7194     else if (isAssumedReadOnly())
7195       STATS_DECLTRACK_CS_ATTR(readonly)
7196     else if (isAssumedWriteOnly())
7197       STATS_DECLTRACK_CS_ATTR(writeonly)
7198   }
7199 };
7200 
7201 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7202 
7203   // The current assumed state used to determine a change.
7204   auto AssumedState = getAssumed();
7205 
7206   auto CheckRWInst = [&](Instruction &I) {
7207     // If the instruction has an own memory behavior state, use it to restrict
7208     // the local state. No further analysis is required as the other memory
7209     // state is as optimistic as it gets.
7210     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7211       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7212           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7213       intersectAssumedBits(MemBehaviorAA.getAssumed());
7214       return !isAtFixpoint();
7215     }
7216 
7217     // Remove access kind modifiers if necessary.
7218     if (I.mayReadFromMemory())
7219       removeAssumedBits(NO_READS);
7220     if (I.mayWriteToMemory())
7221       removeAssumedBits(NO_WRITES);
7222     return !isAtFixpoint();
7223   };
7224 
7225   bool UsedAssumedInformation = false;
7226   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7227                                           UsedAssumedInformation))
7228     return indicatePessimisticFixpoint();
7229 
7230   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7231                                         : ChangeStatus::UNCHANGED;
7232 }
7233 
7234 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7235 
7236   const IRPosition &IRP = getIRPosition();
7237   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7238   AAMemoryBehavior::StateType &S = getState();
7239 
7240   // First, check the function scope. We take the known information and we avoid
7241   // work if the assumed information implies the current assumed information for
7242   // this attribute. This is a valid for all but byval arguments.
7243   Argument *Arg = IRP.getAssociatedArgument();
7244   AAMemoryBehavior::base_t FnMemAssumedState =
7245       AAMemoryBehavior::StateType::getWorstState();
7246   if (!Arg || !Arg->hasByValAttr()) {
7247     const auto &FnMemAA =
7248         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7249     FnMemAssumedState = FnMemAA.getAssumed();
7250     S.addKnownBits(FnMemAA.getKnown());
7251     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7252       return ChangeStatus::UNCHANGED;
7253   }
7254 
7255   // The current assumed state used to determine a change.
7256   auto AssumedState = S.getAssumed();
7257 
7258   // Make sure the value is not captured (except through "return"), if
7259   // it is, any information derived would be irrelevant anyway as we cannot
7260   // check the potential aliases introduced by the capture. However, no need
7261   // to fall back to anythign less optimistic than the function state.
7262   const auto &ArgNoCaptureAA =
7263       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7264   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7265     S.intersectAssumedBits(FnMemAssumedState);
7266     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7267                                           : ChangeStatus::UNCHANGED;
7268   }
7269 
7270   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7271   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7272     Instruction *UserI = cast<Instruction>(U.getUser());
7273     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7274                       << " \n");
7275 
7276     // Droppable users, e.g., llvm::assume does not actually perform any action.
7277     if (UserI->isDroppable())
7278       return true;
7279 
7280     // Check if the users of UserI should also be visited.
7281     Follow = followUsersOfUseIn(A, U, UserI);
7282 
7283     // If UserI might touch memory we analyze the use in detail.
7284     if (UserI->mayReadOrWriteMemory())
7285       analyzeUseIn(A, U, UserI);
7286 
7287     return !isAtFixpoint();
7288   };
7289 
7290   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7291     return indicatePessimisticFixpoint();
7292 
7293   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7294                                         : ChangeStatus::UNCHANGED;
7295 }
7296 
7297 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7298                                                   const Instruction *UserI) {
7299   // The loaded value is unrelated to the pointer argument, no need to
7300   // follow the users of the load.
7301   if (isa<LoadInst>(UserI))
7302     return false;
7303 
7304   // By default we follow all uses assuming UserI might leak information on U,
7305   // we have special handling for call sites operands though.
7306   const auto *CB = dyn_cast<CallBase>(UserI);
7307   if (!CB || !CB->isArgOperand(&U))
7308     return true;
7309 
7310   // If the use is a call argument known not to be captured, the users of
7311   // the call do not need to be visited because they have to be unrelated to
7312   // the input. Note that this check is not trivial even though we disallow
7313   // general capturing of the underlying argument. The reason is that the
7314   // call might the argument "through return", which we allow and for which we
7315   // need to check call users.
7316   if (U.get()->getType()->isPointerTy()) {
7317     unsigned ArgNo = CB->getArgOperandNo(&U);
7318     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7319         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7320     return !ArgNoCaptureAA.isAssumedNoCapture();
7321   }
7322 
7323   return true;
7324 }
7325 
7326 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7327                                             const Instruction *UserI) {
7328   assert(UserI->mayReadOrWriteMemory());
7329 
7330   switch (UserI->getOpcode()) {
7331   default:
7332     // TODO: Handle all atomics and other side-effect operations we know of.
7333     break;
7334   case Instruction::Load:
7335     // Loads cause the NO_READS property to disappear.
7336     removeAssumedBits(NO_READS);
7337     return;
7338 
7339   case Instruction::Store:
7340     // Stores cause the NO_WRITES property to disappear if the use is the
7341     // pointer operand. Note that we do assume that capturing was taken care of
7342     // somewhere else.
7343     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7344       removeAssumedBits(NO_WRITES);
7345     return;
7346 
7347   case Instruction::Call:
7348   case Instruction::CallBr:
7349   case Instruction::Invoke: {
7350     // For call sites we look at the argument memory behavior attribute (this
7351     // could be recursive!) in order to restrict our own state.
7352     const auto *CB = cast<CallBase>(UserI);
7353 
7354     // Give up on operand bundles.
7355     if (CB->isBundleOperand(&U)) {
7356       indicatePessimisticFixpoint();
7357       return;
7358     }
7359 
7360     // Calling a function does read the function pointer, maybe write it if the
7361     // function is self-modifying.
7362     if (CB->isCallee(&U)) {
7363       removeAssumedBits(NO_READS);
7364       break;
7365     }
7366 
7367     // Adjust the possible access behavior based on the information on the
7368     // argument.
7369     IRPosition Pos;
7370     if (U.get()->getType()->isPointerTy())
7371       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7372     else
7373       Pos = IRPosition::callsite_function(*CB);
7374     const auto &MemBehaviorAA =
7375         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7376     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7377     // and at least "known".
7378     intersectAssumedBits(MemBehaviorAA.getAssumed());
7379     return;
7380   }
7381   };
7382 
7383   // Generally, look at the "may-properties" and adjust the assumed state if we
7384   // did not trigger special handling before.
7385   if (UserI->mayReadFromMemory())
7386     removeAssumedBits(NO_READS);
7387   if (UserI->mayWriteToMemory())
7388     removeAssumedBits(NO_WRITES);
7389 }
7390 
7391 /// -------------------- Memory Locations Attributes ---------------------------
7392 /// Includes read-none, argmemonly, inaccessiblememonly,
7393 /// inaccessiblememorargmemonly
7394 /// ----------------------------------------------------------------------------
7395 
7396 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7397     AAMemoryLocation::MemoryLocationsKind MLK) {
7398   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7399     return "all memory";
7400   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7401     return "no memory";
7402   std::string S = "memory:";
7403   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7404     S += "stack,";
7405   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7406     S += "constant,";
7407   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7408     S += "internal global,";
7409   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7410     S += "external global,";
7411   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7412     S += "argument,";
7413   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7414     S += "inaccessible,";
7415   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7416     S += "malloced,";
7417   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7418     S += "unknown,";
7419   S.pop_back();
7420   return S;
7421 }
7422 
7423 namespace {
7424 struct AAMemoryLocationImpl : public AAMemoryLocation {
7425 
7426   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7427       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7428     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7429       AccessKind2Accesses[u] = nullptr;
7430   }
7431 
7432   ~AAMemoryLocationImpl() {
7433     // The AccessSets are allocated via a BumpPtrAllocator, we call
7434     // the destructor manually.
7435     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7436       if (AccessKind2Accesses[u])
7437         AccessKind2Accesses[u]->~AccessSet();
7438   }
7439 
7440   /// See AbstractAttribute::initialize(...).
7441   void initialize(Attributor &A) override {
7442     intersectAssumedBits(BEST_STATE);
7443     getKnownStateFromValue(A, getIRPosition(), getState());
7444     AAMemoryLocation::initialize(A);
7445   }
7446 
7447   /// Return the memory behavior information encoded in the IR for \p IRP.
7448   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7449                                      BitIntegerState &State,
7450                                      bool IgnoreSubsumingPositions = false) {
7451     // For internal functions we ignore `argmemonly` and
7452     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7453     // constant propagation. It is unclear if this is the best way but it is
7454     // unlikely this will cause real performance problems. If we are deriving
7455     // attributes for the anchor function we even remove the attribute in
7456     // addition to ignoring it.
7457     bool UseArgMemOnly = true;
7458     Function *AnchorFn = IRP.getAnchorScope();
7459     if (AnchorFn && A.isRunOn(*AnchorFn))
7460       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7461 
7462     SmallVector<Attribute, 2> Attrs;
7463     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7464     for (const Attribute &Attr : Attrs) {
7465       switch (Attr.getKindAsEnum()) {
7466       case Attribute::ReadNone:
7467         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7468         break;
7469       case Attribute::InaccessibleMemOnly:
7470         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7471         break;
7472       case Attribute::ArgMemOnly:
7473         if (UseArgMemOnly)
7474           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7475         else
7476           IRP.removeAttrs({Attribute::ArgMemOnly});
7477         break;
7478       case Attribute::InaccessibleMemOrArgMemOnly:
7479         if (UseArgMemOnly)
7480           State.addKnownBits(inverseLocation(
7481               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7482         else
7483           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7484         break;
7485       default:
7486         llvm_unreachable("Unexpected attribute!");
7487       }
7488     }
7489   }
7490 
7491   /// See AbstractAttribute::getDeducedAttributes(...).
7492   void getDeducedAttributes(LLVMContext &Ctx,
7493                             SmallVectorImpl<Attribute> &Attrs) const override {
7494     assert(Attrs.size() == 0);
7495     if (isAssumedReadNone()) {
7496       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7497     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7498       if (isAssumedInaccessibleMemOnly())
7499         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7500       else if (isAssumedArgMemOnly())
7501         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7502       else if (isAssumedInaccessibleOrArgMemOnly())
7503         Attrs.push_back(
7504             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7505     }
7506     assert(Attrs.size() <= 1);
7507   }
7508 
7509   /// See AbstractAttribute::manifest(...).
7510   ChangeStatus manifest(Attributor &A) override {
7511     const IRPosition &IRP = getIRPosition();
7512 
7513     // Check if we would improve the existing attributes first.
7514     SmallVector<Attribute, 4> DeducedAttrs;
7515     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7516     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7517           return IRP.hasAttr(Attr.getKindAsEnum(),
7518                              /* IgnoreSubsumingPositions */ true);
7519         }))
7520       return ChangeStatus::UNCHANGED;
7521 
7522     // Clear existing attributes.
7523     IRP.removeAttrs(AttrKinds);
7524     if (isAssumedReadNone())
7525       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7526 
7527     // Use the generic manifest method.
7528     return IRAttribute::manifest(A);
7529   }
7530 
7531   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7532   bool checkForAllAccessesToMemoryKind(
7533       function_ref<bool(const Instruction *, const Value *, AccessKind,
7534                         MemoryLocationsKind)>
7535           Pred,
7536       MemoryLocationsKind RequestedMLK) const override {
7537     if (!isValidState())
7538       return false;
7539 
7540     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7541     if (AssumedMLK == NO_LOCATIONS)
7542       return true;
7543 
7544     unsigned Idx = 0;
7545     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7546          CurMLK *= 2, ++Idx) {
7547       if (CurMLK & RequestedMLK)
7548         continue;
7549 
7550       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7551         for (const AccessInfo &AI : *Accesses)
7552           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7553             return false;
7554     }
7555 
7556     return true;
7557   }
7558 
7559   ChangeStatus indicatePessimisticFixpoint() override {
7560     // If we give up and indicate a pessimistic fixpoint this instruction will
7561     // become an access for all potential access kinds:
7562     // TODO: Add pointers for argmemonly and globals to improve the results of
7563     //       checkForAllAccessesToMemoryKind.
7564     bool Changed = false;
7565     MemoryLocationsKind KnownMLK = getKnown();
7566     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7567     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7568       if (!(CurMLK & KnownMLK))
7569         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7570                                   getAccessKindFromInst(I));
7571     return AAMemoryLocation::indicatePessimisticFixpoint();
7572   }
7573 
7574 protected:
7575   /// Helper struct to tie together an instruction that has a read or write
7576   /// effect with the pointer it accesses (if any).
7577   struct AccessInfo {
7578 
7579     /// The instruction that caused the access.
7580     const Instruction *I;
7581 
7582     /// The base pointer that is accessed, or null if unknown.
7583     const Value *Ptr;
7584 
7585     /// The kind of access (read/write/read+write).
7586     AccessKind Kind;
7587 
7588     bool operator==(const AccessInfo &RHS) const {
7589       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7590     }
7591     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7592       if (LHS.I != RHS.I)
7593         return LHS.I < RHS.I;
7594       if (LHS.Ptr != RHS.Ptr)
7595         return LHS.Ptr < RHS.Ptr;
7596       if (LHS.Kind != RHS.Kind)
7597         return LHS.Kind < RHS.Kind;
7598       return false;
7599     }
7600   };
7601 
7602   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7603   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7604   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7605   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7606 
7607   /// Categorize the pointer arguments of CB that might access memory in
7608   /// AccessedLoc and update the state and access map accordingly.
7609   void
7610   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7611                                      AAMemoryLocation::StateType &AccessedLocs,
7612                                      bool &Changed);
7613 
7614   /// Return the kind(s) of location that may be accessed by \p V.
7615   AAMemoryLocation::MemoryLocationsKind
7616   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7617 
7618   /// Return the access kind as determined by \p I.
7619   AccessKind getAccessKindFromInst(const Instruction *I) {
7620     AccessKind AK = READ_WRITE;
7621     if (I) {
7622       AK = I->mayReadFromMemory() ? READ : NONE;
7623       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7624     }
7625     return AK;
7626   }
7627 
7628   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7629   /// an access of kind \p AK to a \p MLK memory location with the access
7630   /// pointer \p Ptr.
7631   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7632                                  MemoryLocationsKind MLK, const Instruction *I,
7633                                  const Value *Ptr, bool &Changed,
7634                                  AccessKind AK = READ_WRITE) {
7635 
7636     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7637     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7638     if (!Accesses)
7639       Accesses = new (Allocator) AccessSet();
7640     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7641     State.removeAssumedBits(MLK);
7642   }
7643 
7644   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7645   /// arguments, and update the state and access map accordingly.
7646   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7647                           AAMemoryLocation::StateType &State, bool &Changed);
7648 
7649   /// Used to allocate access sets.
7650   BumpPtrAllocator &Allocator;
7651 
7652   /// The set of IR attributes AAMemoryLocation deals with.
7653   static const Attribute::AttrKind AttrKinds[4];
7654 };
7655 
7656 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7657     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7658     Attribute::InaccessibleMemOrArgMemOnly};
7659 
7660 void AAMemoryLocationImpl::categorizePtrValue(
7661     Attributor &A, const Instruction &I, const Value &Ptr,
7662     AAMemoryLocation::StateType &State, bool &Changed) {
7663   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7664                     << Ptr << " ["
7665                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7666 
7667   SmallVector<Value *, 8> Objects;
7668   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7669     LLVM_DEBUG(
7670         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7671     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7672                               getAccessKindFromInst(&I));
7673     return;
7674   }
7675 
7676   for (Value *Obj : Objects) {
7677     // TODO: recognize the TBAA used for constant accesses.
7678     MemoryLocationsKind MLK = NO_LOCATIONS;
7679     assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7680     if (isa<UndefValue>(Obj))
7681       continue;
7682     if (isa<Argument>(Obj)) {
7683       // TODO: For now we do not treat byval arguments as local copies performed
7684       // on the call edge, though, we should. To make that happen we need to
7685       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7686       // would also allow us to mark functions only accessing byval arguments as
7687       // readnone again, atguably their acceses have no effect outside of the
7688       // function, like accesses to allocas.
7689       MLK = NO_ARGUMENT_MEM;
7690     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7691       // Reading constant memory is not treated as a read "effect" by the
7692       // function attr pass so we won't neither. Constants defined by TBAA are
7693       // similar. (We know we do not write it because it is constant.)
7694       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7695         if (GVar->isConstant())
7696           continue;
7697 
7698       if (GV->hasLocalLinkage())
7699         MLK = NO_GLOBAL_INTERNAL_MEM;
7700       else
7701         MLK = NO_GLOBAL_EXTERNAL_MEM;
7702     } else if (isa<ConstantPointerNull>(Obj) &&
7703                !NullPointerIsDefined(getAssociatedFunction(),
7704                                      Ptr.getType()->getPointerAddressSpace())) {
7705       continue;
7706     } else if (isa<AllocaInst>(Obj)) {
7707       MLK = NO_LOCAL_MEM;
7708     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7709       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7710           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7711       if (NoAliasAA.isAssumedNoAlias())
7712         MLK = NO_MALLOCED_MEM;
7713       else
7714         MLK = NO_UNKOWN_MEM;
7715     } else {
7716       MLK = NO_UNKOWN_MEM;
7717     }
7718 
7719     assert(MLK != NO_LOCATIONS && "No location specified!");
7720     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7721                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7722                       << "\n");
7723     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7724                               getAccessKindFromInst(&I));
7725   }
7726 
7727   LLVM_DEBUG(
7728       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7729              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7730 }
7731 
7732 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7733     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7734     bool &Changed) {
7735   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7736 
7737     // Skip non-pointer arguments.
7738     const Value *ArgOp = CB.getArgOperand(ArgNo);
7739     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7740       continue;
7741 
7742     // Skip readnone arguments.
7743     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7744     const auto &ArgOpMemLocationAA =
7745         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7746 
7747     if (ArgOpMemLocationAA.isAssumedReadNone())
7748       continue;
7749 
7750     // Categorize potentially accessed pointer arguments as if there was an
7751     // access instruction with them as pointer.
7752     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7753   }
7754 }
7755 
7756 AAMemoryLocation::MemoryLocationsKind
7757 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7758                                                   bool &Changed) {
7759   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7760                     << I << "\n");
7761 
7762   AAMemoryLocation::StateType AccessedLocs;
7763   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7764 
7765   if (auto *CB = dyn_cast<CallBase>(&I)) {
7766 
7767     // First check if we assume any memory is access is visible.
7768     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7769         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7770     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7771                       << " [" << CBMemLocationAA << "]\n");
7772 
7773     if (CBMemLocationAA.isAssumedReadNone())
7774       return NO_LOCATIONS;
7775 
7776     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7777       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7778                                 Changed, getAccessKindFromInst(&I));
7779       return AccessedLocs.getAssumed();
7780     }
7781 
7782     uint32_t CBAssumedNotAccessedLocs =
7783         CBMemLocationAA.getAssumedNotAccessedLocation();
7784 
7785     // Set the argmemonly and global bit as we handle them separately below.
7786     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7787         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7788 
7789     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7790       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7791         continue;
7792       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7793                                 getAccessKindFromInst(&I));
7794     }
7795 
7796     // Now handle global memory if it might be accessed. This is slightly tricky
7797     // as NO_GLOBAL_MEM has multiple bits set.
7798     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7799     if (HasGlobalAccesses) {
7800       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7801                             AccessKind Kind, MemoryLocationsKind MLK) {
7802         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7803                                   getAccessKindFromInst(&I));
7804         return true;
7805       };
7806       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7807               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7808         return AccessedLocs.getWorstState();
7809     }
7810 
7811     LLVM_DEBUG(
7812         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7813                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7814 
7815     // Now handle argument memory if it might be accessed.
7816     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7817     if (HasArgAccesses)
7818       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7819 
7820     LLVM_DEBUG(
7821         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7822                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7823 
7824     return AccessedLocs.getAssumed();
7825   }
7826 
7827   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7828     LLVM_DEBUG(
7829         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7830                << I << " [" << *Ptr << "]\n");
7831     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7832     return AccessedLocs.getAssumed();
7833   }
7834 
7835   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7836                     << I << "\n");
7837   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7838                             getAccessKindFromInst(&I));
7839   return AccessedLocs.getAssumed();
7840 }
7841 
7842 /// An AA to represent the memory behavior function attributes.
7843 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7844   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7845       : AAMemoryLocationImpl(IRP, A) {}
7846 
7847   /// See AbstractAttribute::updateImpl(Attributor &A).
7848   virtual ChangeStatus updateImpl(Attributor &A) override {
7849 
7850     const auto &MemBehaviorAA =
7851         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7852     if (MemBehaviorAA.isAssumedReadNone()) {
7853       if (MemBehaviorAA.isKnownReadNone())
7854         return indicateOptimisticFixpoint();
7855       assert(isAssumedReadNone() &&
7856              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7857       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7858       return ChangeStatus::UNCHANGED;
7859     }
7860 
7861     // The current assumed state used to determine a change.
7862     auto AssumedState = getAssumed();
7863     bool Changed = false;
7864 
7865     auto CheckRWInst = [&](Instruction &I) {
7866       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7867       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7868                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7869       removeAssumedBits(inverseLocation(MLK, false, false));
7870       // Stop once only the valid bit set in the *not assumed location*, thus
7871       // once we don't actually exclude any memory locations in the state.
7872       return getAssumedNotAccessedLocation() != VALID_STATE;
7873     };
7874 
7875     bool UsedAssumedInformation = false;
7876     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7877                                             UsedAssumedInformation))
7878       return indicatePessimisticFixpoint();
7879 
7880     Changed |= AssumedState != getAssumed();
7881     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7882   }
7883 
7884   /// See AbstractAttribute::trackStatistics()
7885   void trackStatistics() const override {
7886     if (isAssumedReadNone())
7887       STATS_DECLTRACK_FN_ATTR(readnone)
7888     else if (isAssumedArgMemOnly())
7889       STATS_DECLTRACK_FN_ATTR(argmemonly)
7890     else if (isAssumedInaccessibleMemOnly())
7891       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7892     else if (isAssumedInaccessibleOrArgMemOnly())
7893       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7894   }
7895 };
7896 
7897 /// AAMemoryLocation attribute for call sites.
7898 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7899   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7900       : AAMemoryLocationImpl(IRP, A) {}
7901 
7902   /// See AbstractAttribute::initialize(...).
7903   void initialize(Attributor &A) override {
7904     AAMemoryLocationImpl::initialize(A);
7905     Function *F = getAssociatedFunction();
7906     if (!F || F->isDeclaration())
7907       indicatePessimisticFixpoint();
7908   }
7909 
7910   /// See AbstractAttribute::updateImpl(...).
7911   ChangeStatus updateImpl(Attributor &A) override {
7912     // TODO: Once we have call site specific value information we can provide
7913     //       call site specific liveness liveness information and then it makes
7914     //       sense to specialize attributes for call sites arguments instead of
7915     //       redirecting requests to the callee argument.
7916     Function *F = getAssociatedFunction();
7917     const IRPosition &FnPos = IRPosition::function(*F);
7918     auto &FnAA =
7919         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7920     bool Changed = false;
7921     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7922                           AccessKind Kind, MemoryLocationsKind MLK) {
7923       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7924                                 getAccessKindFromInst(I));
7925       return true;
7926     };
7927     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7928       return indicatePessimisticFixpoint();
7929     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7930   }
7931 
7932   /// See AbstractAttribute::trackStatistics()
7933   void trackStatistics() const override {
7934     if (isAssumedReadNone())
7935       STATS_DECLTRACK_CS_ATTR(readnone)
7936   }
7937 };
7938 
7939 /// ------------------ Value Constant Range Attribute -------------------------
7940 
7941 struct AAValueConstantRangeImpl : AAValueConstantRange {
7942   using StateType = IntegerRangeState;
7943   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7944       : AAValueConstantRange(IRP, A) {}
7945 
7946   /// See AbstractAttribute::initialize(..).
7947   void initialize(Attributor &A) override {
7948     if (A.hasSimplificationCallback(getIRPosition())) {
7949       indicatePessimisticFixpoint();
7950       return;
7951     }
7952 
7953     // Intersect a range given by SCEV.
7954     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7955 
7956     // Intersect a range given by LVI.
7957     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7958   }
7959 
7960   /// See AbstractAttribute::getAsStr().
7961   const std::string getAsStr() const override {
7962     std::string Str;
7963     llvm::raw_string_ostream OS(Str);
7964     OS << "range(" << getBitWidth() << ")<";
7965     getKnown().print(OS);
7966     OS << " / ";
7967     getAssumed().print(OS);
7968     OS << ">";
7969     return OS.str();
7970   }
7971 
7972   /// Helper function to get a SCEV expr for the associated value at program
7973   /// point \p I.
7974   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7975     if (!getAnchorScope())
7976       return nullptr;
7977 
7978     ScalarEvolution *SE =
7979         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7980             *getAnchorScope());
7981 
7982     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7983         *getAnchorScope());
7984 
7985     if (!SE || !LI)
7986       return nullptr;
7987 
7988     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7989     if (!I)
7990       return S;
7991 
7992     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7993   }
7994 
7995   /// Helper function to get a range from SCEV for the associated value at
7996   /// program point \p I.
7997   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7998                                          const Instruction *I = nullptr) const {
7999     if (!getAnchorScope())
8000       return getWorstState(getBitWidth());
8001 
8002     ScalarEvolution *SE =
8003         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8004             *getAnchorScope());
8005 
8006     const SCEV *S = getSCEV(A, I);
8007     if (!SE || !S)
8008       return getWorstState(getBitWidth());
8009 
8010     return SE->getUnsignedRange(S);
8011   }
8012 
8013   /// Helper function to get a range from LVI for the associated value at
8014   /// program point \p I.
8015   ConstantRange
8016   getConstantRangeFromLVI(Attributor &A,
8017                           const Instruction *CtxI = nullptr) const {
8018     if (!getAnchorScope())
8019       return getWorstState(getBitWidth());
8020 
8021     LazyValueInfo *LVI =
8022         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8023             *getAnchorScope());
8024 
8025     if (!LVI || !CtxI)
8026       return getWorstState(getBitWidth());
8027     return LVI->getConstantRange(&getAssociatedValue(),
8028                                  const_cast<Instruction *>(CtxI));
8029   }
8030 
8031   /// Return true if \p CtxI is valid for querying outside analyses.
8032   /// This basically makes sure we do not ask intra-procedural analysis
8033   /// about a context in the wrong function or a context that violates
8034   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8035   /// if the original context of this AA is OK or should be considered invalid.
8036   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8037                                                const Instruction *CtxI,
8038                                                bool AllowAACtxI) const {
8039     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8040       return false;
8041 
8042     // Our context might be in a different function, neither intra-procedural
8043     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8044     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8045       return false;
8046 
8047     // If the context is not dominated by the value there are paths to the
8048     // context that do not define the value. This cannot be handled by
8049     // LazyValueInfo so we need to bail.
8050     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8051       InformationCache &InfoCache = A.getInfoCache();
8052       const DominatorTree *DT =
8053           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8054               *I->getFunction());
8055       return DT && DT->dominates(I, CtxI);
8056     }
8057 
8058     return true;
8059   }
8060 
8061   /// See AAValueConstantRange::getKnownConstantRange(..).
8062   ConstantRange
8063   getKnownConstantRange(Attributor &A,
8064                         const Instruction *CtxI = nullptr) const override {
8065     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8066                                                  /* AllowAACtxI */ false))
8067       return getKnown();
8068 
8069     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8070     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8071     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8072   }
8073 
8074   /// See AAValueConstantRange::getAssumedConstantRange(..).
8075   ConstantRange
8076   getAssumedConstantRange(Attributor &A,
8077                           const Instruction *CtxI = nullptr) const override {
8078     // TODO: Make SCEV use Attributor assumption.
8079     //       We may be able to bound a variable range via assumptions in
8080     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8081     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8082     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8083                                                  /* AllowAACtxI */ false))
8084       return getAssumed();
8085 
8086     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8087     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8088     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8089   }
8090 
8091   /// Helper function to create MDNode for range metadata.
8092   static MDNode *
8093   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8094                             const ConstantRange &AssumedConstantRange) {
8095     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8096                                   Ty, AssumedConstantRange.getLower())),
8097                               ConstantAsMetadata::get(ConstantInt::get(
8098                                   Ty, AssumedConstantRange.getUpper()))};
8099     return MDNode::get(Ctx, LowAndHigh);
8100   }
8101 
8102   /// Return true if \p Assumed is included in \p KnownRanges.
8103   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8104 
8105     if (Assumed.isFullSet())
8106       return false;
8107 
8108     if (!KnownRanges)
8109       return true;
8110 
8111     // If multiple ranges are annotated in IR, we give up to annotate assumed
8112     // range for now.
8113 
8114     // TODO:  If there exists a known range which containts assumed range, we
8115     // can say assumed range is better.
8116     if (KnownRanges->getNumOperands() > 2)
8117       return false;
8118 
8119     ConstantInt *Lower =
8120         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8121     ConstantInt *Upper =
8122         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8123 
8124     ConstantRange Known(Lower->getValue(), Upper->getValue());
8125     return Known.contains(Assumed) && Known != Assumed;
8126   }
8127 
8128   /// Helper function to set range metadata.
8129   static bool
8130   setRangeMetadataIfisBetterRange(Instruction *I,
8131                                   const ConstantRange &AssumedConstantRange) {
8132     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8133     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8134       if (!AssumedConstantRange.isEmptySet()) {
8135         I->setMetadata(LLVMContext::MD_range,
8136                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8137                                                  AssumedConstantRange));
8138         return true;
8139       }
8140     }
8141     return false;
8142   }
8143 
8144   /// See AbstractAttribute::manifest()
8145   ChangeStatus manifest(Attributor &A) override {
8146     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8147     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8148     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8149 
8150     auto &V = getAssociatedValue();
8151     if (!AssumedConstantRange.isEmptySet() &&
8152         !AssumedConstantRange.isSingleElement()) {
8153       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8154         assert(I == getCtxI() && "Should not annotate an instruction which is "
8155                                  "not the context instruction");
8156         if (isa<CallInst>(I) || isa<LoadInst>(I))
8157           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8158             Changed = ChangeStatus::CHANGED;
8159       }
8160     }
8161 
8162     return Changed;
8163   }
8164 };
8165 
8166 struct AAValueConstantRangeArgument final
8167     : AAArgumentFromCallSiteArguments<
8168           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8169           true /* BridgeCallBaseContext */> {
8170   using Base = AAArgumentFromCallSiteArguments<
8171       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8172       true /* BridgeCallBaseContext */>;
8173   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8174       : Base(IRP, A) {}
8175 
8176   /// See AbstractAttribute::initialize(..).
8177   void initialize(Attributor &A) override {
8178     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8179       indicatePessimisticFixpoint();
8180     } else {
8181       Base::initialize(A);
8182     }
8183   }
8184 
8185   /// See AbstractAttribute::trackStatistics()
8186   void trackStatistics() const override {
8187     STATS_DECLTRACK_ARG_ATTR(value_range)
8188   }
8189 };
8190 
8191 struct AAValueConstantRangeReturned
8192     : AAReturnedFromReturnedValues<AAValueConstantRange,
8193                                    AAValueConstantRangeImpl,
8194                                    AAValueConstantRangeImpl::StateType,
8195                                    /* PropogateCallBaseContext */ true> {
8196   using Base =
8197       AAReturnedFromReturnedValues<AAValueConstantRange,
8198                                    AAValueConstantRangeImpl,
8199                                    AAValueConstantRangeImpl::StateType,
8200                                    /* PropogateCallBaseContext */ true>;
8201   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8202       : Base(IRP, A) {}
8203 
8204   /// See AbstractAttribute::initialize(...).
8205   void initialize(Attributor &A) override {}
8206 
8207   /// See AbstractAttribute::trackStatistics()
8208   void trackStatistics() const override {
8209     STATS_DECLTRACK_FNRET_ATTR(value_range)
8210   }
8211 };
8212 
8213 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8214   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8215       : AAValueConstantRangeImpl(IRP, A) {}
8216 
8217   /// See AbstractAttribute::initialize(...).
8218   void initialize(Attributor &A) override {
8219     AAValueConstantRangeImpl::initialize(A);
8220     if (isAtFixpoint())
8221       return;
8222 
8223     Value &V = getAssociatedValue();
8224 
8225     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8226       unionAssumed(ConstantRange(C->getValue()));
8227       indicateOptimisticFixpoint();
8228       return;
8229     }
8230 
8231     if (isa<UndefValue>(&V)) {
8232       // Collapse the undef state to 0.
8233       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8234       indicateOptimisticFixpoint();
8235       return;
8236     }
8237 
8238     if (isa<CallBase>(&V))
8239       return;
8240 
8241     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8242       return;
8243 
8244     // If it is a load instruction with range metadata, use it.
8245     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8246       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8247         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8248         return;
8249       }
8250 
8251     // We can work with PHI and select instruction as we traverse their operands
8252     // during update.
8253     if (isa<SelectInst>(V) || isa<PHINode>(V))
8254       return;
8255 
8256     // Otherwise we give up.
8257     indicatePessimisticFixpoint();
8258 
8259     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8260                       << getAssociatedValue() << "\n");
8261   }
8262 
8263   bool calculateBinaryOperator(
8264       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8265       const Instruction *CtxI,
8266       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8267     Value *LHS = BinOp->getOperand(0);
8268     Value *RHS = BinOp->getOperand(1);
8269 
8270     // Simplify the operands first.
8271     bool UsedAssumedInformation = false;
8272     const auto &SimplifiedLHS =
8273         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8274                                *this, UsedAssumedInformation);
8275     if (!SimplifiedLHS.hasValue())
8276       return true;
8277     if (!SimplifiedLHS.getValue())
8278       return false;
8279     LHS = *SimplifiedLHS;
8280 
8281     const auto &SimplifiedRHS =
8282         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8283                                *this, UsedAssumedInformation);
8284     if (!SimplifiedRHS.hasValue())
8285       return true;
8286     if (!SimplifiedRHS.getValue())
8287       return false;
8288     RHS = *SimplifiedRHS;
8289 
8290     // TODO: Allow non integers as well.
8291     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8292       return false;
8293 
8294     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8295         *this, IRPosition::value(*LHS, getCallBaseContext()),
8296         DepClassTy::REQUIRED);
8297     QuerriedAAs.push_back(&LHSAA);
8298     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8299 
8300     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8301         *this, IRPosition::value(*RHS, getCallBaseContext()),
8302         DepClassTy::REQUIRED);
8303     QuerriedAAs.push_back(&RHSAA);
8304     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8305 
8306     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8307 
8308     T.unionAssumed(AssumedRange);
8309 
8310     // TODO: Track a known state too.
8311 
8312     return T.isValidState();
8313   }
8314 
8315   bool calculateCastInst(
8316       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8317       const Instruction *CtxI,
8318       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8319     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8320     // TODO: Allow non integers as well.
8321     Value *OpV = CastI->getOperand(0);
8322 
8323     // Simplify the operand first.
8324     bool UsedAssumedInformation = false;
8325     const auto &SimplifiedOpV =
8326         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8327                                *this, UsedAssumedInformation);
8328     if (!SimplifiedOpV.hasValue())
8329       return true;
8330     if (!SimplifiedOpV.getValue())
8331       return false;
8332     OpV = *SimplifiedOpV;
8333 
8334     if (!OpV->getType()->isIntegerTy())
8335       return false;
8336 
8337     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8338         *this, IRPosition::value(*OpV, getCallBaseContext()),
8339         DepClassTy::REQUIRED);
8340     QuerriedAAs.push_back(&OpAA);
8341     T.unionAssumed(
8342         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8343     return T.isValidState();
8344   }
8345 
8346   bool
8347   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8348                    const Instruction *CtxI,
8349                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8350     Value *LHS = CmpI->getOperand(0);
8351     Value *RHS = CmpI->getOperand(1);
8352 
8353     // Simplify the operands first.
8354     bool UsedAssumedInformation = false;
8355     const auto &SimplifiedLHS =
8356         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8357                                *this, UsedAssumedInformation);
8358     if (!SimplifiedLHS.hasValue())
8359       return true;
8360     if (!SimplifiedLHS.getValue())
8361       return false;
8362     LHS = *SimplifiedLHS;
8363 
8364     const auto &SimplifiedRHS =
8365         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8366                                *this, UsedAssumedInformation);
8367     if (!SimplifiedRHS.hasValue())
8368       return true;
8369     if (!SimplifiedRHS.getValue())
8370       return false;
8371     RHS = *SimplifiedRHS;
8372 
8373     // TODO: Allow non integers as well.
8374     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8375       return false;
8376 
8377     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8378         *this, IRPosition::value(*LHS, getCallBaseContext()),
8379         DepClassTy::REQUIRED);
8380     QuerriedAAs.push_back(&LHSAA);
8381     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8382         *this, IRPosition::value(*RHS, getCallBaseContext()),
8383         DepClassTy::REQUIRED);
8384     QuerriedAAs.push_back(&RHSAA);
8385     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8386     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8387 
8388     // If one of them is empty set, we can't decide.
8389     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8390       return true;
8391 
8392     bool MustTrue = false, MustFalse = false;
8393 
8394     auto AllowedRegion =
8395         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8396 
8397     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8398       MustFalse = true;
8399 
8400     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8401       MustTrue = true;
8402 
8403     assert((!MustTrue || !MustFalse) &&
8404            "Either MustTrue or MustFalse should be false!");
8405 
8406     if (MustTrue)
8407       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8408     else if (MustFalse)
8409       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8410     else
8411       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8412 
8413     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8414                       << " " << RHSAA << "\n");
8415 
8416     // TODO: Track a known state too.
8417     return T.isValidState();
8418   }
8419 
8420   /// See AbstractAttribute::updateImpl(...).
8421   ChangeStatus updateImpl(Attributor &A) override {
8422     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8423                             IntegerRangeState &T, bool Stripped) -> bool {
8424       Instruction *I = dyn_cast<Instruction>(&V);
8425       if (!I || isa<CallBase>(I)) {
8426 
8427         // Simplify the operand first.
8428         bool UsedAssumedInformation = false;
8429         const auto &SimplifiedOpV =
8430             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8431                                    *this, UsedAssumedInformation);
8432         if (!SimplifiedOpV.hasValue())
8433           return true;
8434         if (!SimplifiedOpV.getValue())
8435           return false;
8436         Value *VPtr = *SimplifiedOpV;
8437 
8438         // If the value is not instruction, we query AA to Attributor.
8439         const auto &AA = A.getAAFor<AAValueConstantRange>(
8440             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8441             DepClassTy::REQUIRED);
8442 
8443         // Clamp operator is not used to utilize a program point CtxI.
8444         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8445 
8446         return T.isValidState();
8447       }
8448 
8449       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8450       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8451         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8452           return false;
8453       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8454         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8455           return false;
8456       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8457         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8458           return false;
8459       } else {
8460         // Give up with other instructions.
8461         // TODO: Add other instructions
8462 
8463         T.indicatePessimisticFixpoint();
8464         return false;
8465       }
8466 
8467       // Catch circular reasoning in a pessimistic way for now.
8468       // TODO: Check how the range evolves and if we stripped anything, see also
8469       //       AADereferenceable or AAAlign for similar situations.
8470       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8471         if (QueriedAA != this)
8472           continue;
8473         // If we are in a stady state we do not need to worry.
8474         if (T.getAssumed() == getState().getAssumed())
8475           continue;
8476         T.indicatePessimisticFixpoint();
8477       }
8478 
8479       return T.isValidState();
8480     };
8481 
8482     IntegerRangeState T(getBitWidth());
8483 
8484     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8485                                                   VisitValueCB, getCtxI(),
8486                                                   /* UseValueSimplify */ false))
8487       return indicatePessimisticFixpoint();
8488 
8489     return clampStateAndIndicateChange(getState(), T);
8490   }
8491 
8492   /// See AbstractAttribute::trackStatistics()
8493   void trackStatistics() const override {
8494     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8495   }
8496 };
8497 
8498 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8499   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8500       : AAValueConstantRangeImpl(IRP, A) {}
8501 
8502   /// See AbstractAttribute::initialize(...).
8503   ChangeStatus updateImpl(Attributor &A) override {
8504     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8505                      "not be called");
8506   }
8507 
8508   /// See AbstractAttribute::trackStatistics()
8509   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8510 };
8511 
8512 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8513   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8514       : AAValueConstantRangeFunction(IRP, A) {}
8515 
8516   /// See AbstractAttribute::trackStatistics()
8517   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8518 };
8519 
8520 struct AAValueConstantRangeCallSiteReturned
8521     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8522                                      AAValueConstantRangeImpl,
8523                                      AAValueConstantRangeImpl::StateType,
8524                                      /* IntroduceCallBaseContext */ true> {
8525   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8526       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8527                                        AAValueConstantRangeImpl,
8528                                        AAValueConstantRangeImpl::StateType,
8529                                        /* IntroduceCallBaseContext */ true>(IRP,
8530                                                                             A) {
8531   }
8532 
8533   /// See AbstractAttribute::initialize(...).
8534   void initialize(Attributor &A) override {
8535     // If it is a load instruction with range metadata, use the metadata.
8536     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8537       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8538         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8539 
8540     AAValueConstantRangeImpl::initialize(A);
8541   }
8542 
8543   /// See AbstractAttribute::trackStatistics()
8544   void trackStatistics() const override {
8545     STATS_DECLTRACK_CSRET_ATTR(value_range)
8546   }
8547 };
8548 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8549   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8550       : AAValueConstantRangeFloating(IRP, A) {}
8551 
8552   /// See AbstractAttribute::manifest()
8553   ChangeStatus manifest(Attributor &A) override {
8554     return ChangeStatus::UNCHANGED;
8555   }
8556 
8557   /// See AbstractAttribute::trackStatistics()
8558   void trackStatistics() const override {
8559     STATS_DECLTRACK_CSARG_ATTR(value_range)
8560   }
8561 };
8562 
8563 /// ------------------ Potential Values Attribute -------------------------
8564 
8565 struct AAPotentialValuesImpl : AAPotentialValues {
8566   using StateType = PotentialConstantIntValuesState;
8567 
8568   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8569       : AAPotentialValues(IRP, A) {}
8570 
8571   /// See AbstractAttribute::initialize(..).
8572   void initialize(Attributor &A) override {
8573     if (A.hasSimplificationCallback(getIRPosition()))
8574       indicatePessimisticFixpoint();
8575     else
8576       AAPotentialValues::initialize(A);
8577   }
8578 
8579   /// See AbstractAttribute::getAsStr().
8580   const std::string getAsStr() const override {
8581     std::string Str;
8582     llvm::raw_string_ostream OS(Str);
8583     OS << getState();
8584     return OS.str();
8585   }
8586 
8587   /// See AbstractAttribute::updateImpl(...).
8588   ChangeStatus updateImpl(Attributor &A) override {
8589     return indicatePessimisticFixpoint();
8590   }
8591 };
8592 
8593 struct AAPotentialValuesArgument final
8594     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8595                                       PotentialConstantIntValuesState> {
8596   using Base =
8597       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8598                                       PotentialConstantIntValuesState>;
8599   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8600       : Base(IRP, A) {}
8601 
8602   /// See AbstractAttribute::initialize(..).
8603   void initialize(Attributor &A) override {
8604     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8605       indicatePessimisticFixpoint();
8606     } else {
8607       Base::initialize(A);
8608     }
8609   }
8610 
8611   /// See AbstractAttribute::trackStatistics()
8612   void trackStatistics() const override {
8613     STATS_DECLTRACK_ARG_ATTR(potential_values)
8614   }
8615 };
8616 
8617 struct AAPotentialValuesReturned
8618     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8619   using Base =
8620       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8621   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8622       : Base(IRP, A) {}
8623 
8624   /// See AbstractAttribute::trackStatistics()
8625   void trackStatistics() const override {
8626     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8627   }
8628 };
8629 
8630 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8631   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8632       : AAPotentialValuesImpl(IRP, A) {}
8633 
8634   /// See AbstractAttribute::initialize(..).
8635   void initialize(Attributor &A) override {
8636     AAPotentialValuesImpl::initialize(A);
8637     if (isAtFixpoint())
8638       return;
8639 
8640     Value &V = getAssociatedValue();
8641 
8642     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8643       unionAssumed(C->getValue());
8644       indicateOptimisticFixpoint();
8645       return;
8646     }
8647 
8648     if (isa<UndefValue>(&V)) {
8649       unionAssumedWithUndef();
8650       indicateOptimisticFixpoint();
8651       return;
8652     }
8653 
8654     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8655       return;
8656 
8657     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8658       return;
8659 
8660     indicatePessimisticFixpoint();
8661 
8662     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8663                       << getAssociatedValue() << "\n");
8664   }
8665 
8666   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8667                                 const APInt &RHS) {
8668     ICmpInst::Predicate Pred = ICI->getPredicate();
8669     switch (Pred) {
8670     case ICmpInst::ICMP_UGT:
8671       return LHS.ugt(RHS);
8672     case ICmpInst::ICMP_SGT:
8673       return LHS.sgt(RHS);
8674     case ICmpInst::ICMP_EQ:
8675       return LHS.eq(RHS);
8676     case ICmpInst::ICMP_UGE:
8677       return LHS.uge(RHS);
8678     case ICmpInst::ICMP_SGE:
8679       return LHS.sge(RHS);
8680     case ICmpInst::ICMP_ULT:
8681       return LHS.ult(RHS);
8682     case ICmpInst::ICMP_SLT:
8683       return LHS.slt(RHS);
8684     case ICmpInst::ICMP_NE:
8685       return LHS.ne(RHS);
8686     case ICmpInst::ICMP_ULE:
8687       return LHS.ule(RHS);
8688     case ICmpInst::ICMP_SLE:
8689       return LHS.sle(RHS);
8690     default:
8691       llvm_unreachable("Invalid ICmp predicate!");
8692     }
8693   }
8694 
8695   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8696                                  uint32_t ResultBitWidth) {
8697     Instruction::CastOps CastOp = CI->getOpcode();
8698     switch (CastOp) {
8699     default:
8700       llvm_unreachable("unsupported or not integer cast");
8701     case Instruction::Trunc:
8702       return Src.trunc(ResultBitWidth);
8703     case Instruction::SExt:
8704       return Src.sext(ResultBitWidth);
8705     case Instruction::ZExt:
8706       return Src.zext(ResultBitWidth);
8707     case Instruction::BitCast:
8708       return Src;
8709     }
8710   }
8711 
8712   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8713                                        const APInt &LHS, const APInt &RHS,
8714                                        bool &SkipOperation, bool &Unsupported) {
8715     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8716     // Unsupported is set to true when the binary operator is not supported.
8717     // SkipOperation is set to true when UB occur with the given operand pair
8718     // (LHS, RHS).
8719     // TODO: we should look at nsw and nuw keywords to handle operations
8720     //       that create poison or undef value.
8721     switch (BinOpcode) {
8722     default:
8723       Unsupported = true;
8724       return LHS;
8725     case Instruction::Add:
8726       return LHS + RHS;
8727     case Instruction::Sub:
8728       return LHS - RHS;
8729     case Instruction::Mul:
8730       return LHS * RHS;
8731     case Instruction::UDiv:
8732       if (RHS.isZero()) {
8733         SkipOperation = true;
8734         return LHS;
8735       }
8736       return LHS.udiv(RHS);
8737     case Instruction::SDiv:
8738       if (RHS.isZero()) {
8739         SkipOperation = true;
8740         return LHS;
8741       }
8742       return LHS.sdiv(RHS);
8743     case Instruction::URem:
8744       if (RHS.isZero()) {
8745         SkipOperation = true;
8746         return LHS;
8747       }
8748       return LHS.urem(RHS);
8749     case Instruction::SRem:
8750       if (RHS.isZero()) {
8751         SkipOperation = true;
8752         return LHS;
8753       }
8754       return LHS.srem(RHS);
8755     case Instruction::Shl:
8756       return LHS.shl(RHS);
8757     case Instruction::LShr:
8758       return LHS.lshr(RHS);
8759     case Instruction::AShr:
8760       return LHS.ashr(RHS);
8761     case Instruction::And:
8762       return LHS & RHS;
8763     case Instruction::Or:
8764       return LHS | RHS;
8765     case Instruction::Xor:
8766       return LHS ^ RHS;
8767     }
8768   }
8769 
8770   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8771                                            const APInt &LHS, const APInt &RHS) {
8772     bool SkipOperation = false;
8773     bool Unsupported = false;
8774     APInt Result =
8775         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8776     if (Unsupported)
8777       return false;
8778     // If SkipOperation is true, we can ignore this operand pair (L, R).
8779     if (!SkipOperation)
8780       unionAssumed(Result);
8781     return isValidState();
8782   }
8783 
8784   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8785     auto AssumedBefore = getAssumed();
8786     Value *LHS = ICI->getOperand(0);
8787     Value *RHS = ICI->getOperand(1);
8788 
8789     // Simplify the operands first.
8790     bool UsedAssumedInformation = false;
8791     const auto &SimplifiedLHS =
8792         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8793                                *this, UsedAssumedInformation);
8794     if (!SimplifiedLHS.hasValue())
8795       return ChangeStatus::UNCHANGED;
8796     if (!SimplifiedLHS.getValue())
8797       return indicatePessimisticFixpoint();
8798     LHS = *SimplifiedLHS;
8799 
8800     const auto &SimplifiedRHS =
8801         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8802                                *this, UsedAssumedInformation);
8803     if (!SimplifiedRHS.hasValue())
8804       return ChangeStatus::UNCHANGED;
8805     if (!SimplifiedRHS.getValue())
8806       return indicatePessimisticFixpoint();
8807     RHS = *SimplifiedRHS;
8808 
8809     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8810       return indicatePessimisticFixpoint();
8811 
8812     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8813                                                 DepClassTy::REQUIRED);
8814     if (!LHSAA.isValidState())
8815       return indicatePessimisticFixpoint();
8816 
8817     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8818                                                 DepClassTy::REQUIRED);
8819     if (!RHSAA.isValidState())
8820       return indicatePessimisticFixpoint();
8821 
8822     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8823     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8824 
8825     // TODO: make use of undef flag to limit potential values aggressively.
8826     bool MaybeTrue = false, MaybeFalse = false;
8827     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8828     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8829       // The result of any comparison between undefs can be soundly replaced
8830       // with undef.
8831       unionAssumedWithUndef();
8832     } else if (LHSAA.undefIsContained()) {
8833       for (const APInt &R : RHSAAPVS) {
8834         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8835         MaybeTrue |= CmpResult;
8836         MaybeFalse |= !CmpResult;
8837         if (MaybeTrue & MaybeFalse)
8838           return indicatePessimisticFixpoint();
8839       }
8840     } else if (RHSAA.undefIsContained()) {
8841       for (const APInt &L : LHSAAPVS) {
8842         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8843         MaybeTrue |= CmpResult;
8844         MaybeFalse |= !CmpResult;
8845         if (MaybeTrue & MaybeFalse)
8846           return indicatePessimisticFixpoint();
8847       }
8848     } else {
8849       for (const APInt &L : LHSAAPVS) {
8850         for (const APInt &R : RHSAAPVS) {
8851           bool CmpResult = calculateICmpInst(ICI, L, R);
8852           MaybeTrue |= CmpResult;
8853           MaybeFalse |= !CmpResult;
8854           if (MaybeTrue & MaybeFalse)
8855             return indicatePessimisticFixpoint();
8856         }
8857       }
8858     }
8859     if (MaybeTrue)
8860       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8861     if (MaybeFalse)
8862       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8863     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8864                                          : ChangeStatus::CHANGED;
8865   }
8866 
8867   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8868     auto AssumedBefore = getAssumed();
8869     Value *LHS = SI->getTrueValue();
8870     Value *RHS = SI->getFalseValue();
8871 
8872     // Simplify the operands first.
8873     bool UsedAssumedInformation = false;
8874     const auto &SimplifiedLHS =
8875         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8876                                *this, UsedAssumedInformation);
8877     if (!SimplifiedLHS.hasValue())
8878       return ChangeStatus::UNCHANGED;
8879     if (!SimplifiedLHS.getValue())
8880       return indicatePessimisticFixpoint();
8881     LHS = *SimplifiedLHS;
8882 
8883     const auto &SimplifiedRHS =
8884         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8885                                *this, UsedAssumedInformation);
8886     if (!SimplifiedRHS.hasValue())
8887       return ChangeStatus::UNCHANGED;
8888     if (!SimplifiedRHS.getValue())
8889       return indicatePessimisticFixpoint();
8890     RHS = *SimplifiedRHS;
8891 
8892     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8893       return indicatePessimisticFixpoint();
8894 
8895     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8896                                                   UsedAssumedInformation);
8897 
8898     // Check if we only need one operand.
8899     bool OnlyLeft = false, OnlyRight = false;
8900     if (C.hasValue() && *C && (*C)->isOneValue())
8901       OnlyLeft = true;
8902     else if (C.hasValue() && *C && (*C)->isZeroValue())
8903       OnlyRight = true;
8904 
8905     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
8906     if (!OnlyRight) {
8907       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8908                                              DepClassTy::REQUIRED);
8909       if (!LHSAA->isValidState())
8910         return indicatePessimisticFixpoint();
8911     }
8912     if (!OnlyLeft) {
8913       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8914                                              DepClassTy::REQUIRED);
8915       if (!RHSAA->isValidState())
8916         return indicatePessimisticFixpoint();
8917     }
8918 
8919     if (!LHSAA || !RHSAA) {
8920       // select (true/false), lhs, rhs
8921       auto *OpAA = LHSAA ? LHSAA : RHSAA;
8922 
8923       if (OpAA->undefIsContained())
8924         unionAssumedWithUndef();
8925       else
8926         unionAssumed(*OpAA);
8927 
8928     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
8929       // select i1 *, undef , undef => undef
8930       unionAssumedWithUndef();
8931     } else {
8932       unionAssumed(*LHSAA);
8933       unionAssumed(*RHSAA);
8934     }
8935     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8936                                          : ChangeStatus::CHANGED;
8937   }
8938 
8939   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8940     auto AssumedBefore = getAssumed();
8941     if (!CI->isIntegerCast())
8942       return indicatePessimisticFixpoint();
8943     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8944     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8945     Value *Src = CI->getOperand(0);
8946 
8947     // Simplify the operand first.
8948     bool UsedAssumedInformation = false;
8949     const auto &SimplifiedSrc =
8950         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
8951                                *this, UsedAssumedInformation);
8952     if (!SimplifiedSrc.hasValue())
8953       return ChangeStatus::UNCHANGED;
8954     if (!SimplifiedSrc.getValue())
8955       return indicatePessimisticFixpoint();
8956     Src = *SimplifiedSrc;
8957 
8958     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
8959                                                 DepClassTy::REQUIRED);
8960     if (!SrcAA.isValidState())
8961       return indicatePessimisticFixpoint();
8962     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
8963     if (SrcAA.undefIsContained())
8964       unionAssumedWithUndef();
8965     else {
8966       for (const APInt &S : SrcAAPVS) {
8967         APInt T = calculateCastInst(CI, S, ResultBitWidth);
8968         unionAssumed(T);
8969       }
8970     }
8971     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8972                                          : ChangeStatus::CHANGED;
8973   }
8974 
8975   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
8976     auto AssumedBefore = getAssumed();
8977     Value *LHS = BinOp->getOperand(0);
8978     Value *RHS = BinOp->getOperand(1);
8979 
8980     // Simplify the operands first.
8981     bool UsedAssumedInformation = false;
8982     const auto &SimplifiedLHS =
8983         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8984                                *this, UsedAssumedInformation);
8985     if (!SimplifiedLHS.hasValue())
8986       return ChangeStatus::UNCHANGED;
8987     if (!SimplifiedLHS.getValue())
8988       return indicatePessimisticFixpoint();
8989     LHS = *SimplifiedLHS;
8990 
8991     const auto &SimplifiedRHS =
8992         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8993                                *this, UsedAssumedInformation);
8994     if (!SimplifiedRHS.hasValue())
8995       return ChangeStatus::UNCHANGED;
8996     if (!SimplifiedRHS.getValue())
8997       return indicatePessimisticFixpoint();
8998     RHS = *SimplifiedRHS;
8999 
9000     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9001       return indicatePessimisticFixpoint();
9002 
9003     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9004                                                 DepClassTy::REQUIRED);
9005     if (!LHSAA.isValidState())
9006       return indicatePessimisticFixpoint();
9007 
9008     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9009                                                 DepClassTy::REQUIRED);
9010     if (!RHSAA.isValidState())
9011       return indicatePessimisticFixpoint();
9012 
9013     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9014     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9015     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9016 
9017     // TODO: make use of undef flag to limit potential values aggressively.
9018     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9019       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9020         return indicatePessimisticFixpoint();
9021     } else if (LHSAA.undefIsContained()) {
9022       for (const APInt &R : RHSAAPVS) {
9023         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9024           return indicatePessimisticFixpoint();
9025       }
9026     } else if (RHSAA.undefIsContained()) {
9027       for (const APInt &L : LHSAAPVS) {
9028         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9029           return indicatePessimisticFixpoint();
9030       }
9031     } else {
9032       for (const APInt &L : LHSAAPVS) {
9033         for (const APInt &R : RHSAAPVS) {
9034           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9035             return indicatePessimisticFixpoint();
9036         }
9037       }
9038     }
9039     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9040                                          : ChangeStatus::CHANGED;
9041   }
9042 
9043   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9044     auto AssumedBefore = getAssumed();
9045     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9046       Value *IncomingValue = PHI->getIncomingValue(u);
9047 
9048       // Simplify the operand first.
9049       bool UsedAssumedInformation = false;
9050       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9051           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9052           UsedAssumedInformation);
9053       if (!SimplifiedIncomingValue.hasValue())
9054         continue;
9055       if (!SimplifiedIncomingValue.getValue())
9056         return indicatePessimisticFixpoint();
9057       IncomingValue = *SimplifiedIncomingValue;
9058 
9059       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9060           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9061       if (!PotentialValuesAA.isValidState())
9062         return indicatePessimisticFixpoint();
9063       if (PotentialValuesAA.undefIsContained())
9064         unionAssumedWithUndef();
9065       else
9066         unionAssumed(PotentialValuesAA.getAssumed());
9067     }
9068     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9069                                          : ChangeStatus::CHANGED;
9070   }
9071 
9072   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
9073     if (!L.getType()->isIntegerTy())
9074       return indicatePessimisticFixpoint();
9075 
9076     auto Union = [&](Value &V) {
9077       if (isa<UndefValue>(V)) {
9078         unionAssumedWithUndef();
9079         return true;
9080       }
9081       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
9082         unionAssumed(CI->getValue());
9083         return true;
9084       }
9085       return false;
9086     };
9087     auto AssumedBefore = getAssumed();
9088 
9089     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
9090       return indicatePessimisticFixpoint();
9091 
9092     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9093                                          : ChangeStatus::CHANGED;
9094   }
9095 
9096   /// See AbstractAttribute::updateImpl(...).
9097   ChangeStatus updateImpl(Attributor &A) override {
9098     Value &V = getAssociatedValue();
9099     Instruction *I = dyn_cast<Instruction>(&V);
9100 
9101     if (auto *ICI = dyn_cast<ICmpInst>(I))
9102       return updateWithICmpInst(A, ICI);
9103 
9104     if (auto *SI = dyn_cast<SelectInst>(I))
9105       return updateWithSelectInst(A, SI);
9106 
9107     if (auto *CI = dyn_cast<CastInst>(I))
9108       return updateWithCastInst(A, CI);
9109 
9110     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9111       return updateWithBinaryOperator(A, BinOp);
9112 
9113     if (auto *PHI = dyn_cast<PHINode>(I))
9114       return updateWithPHINode(A, PHI);
9115 
9116     if (auto *L = dyn_cast<LoadInst>(I))
9117       return updateWithLoad(A, *L);
9118 
9119     return indicatePessimisticFixpoint();
9120   }
9121 
9122   /// See AbstractAttribute::trackStatistics()
9123   void trackStatistics() const override {
9124     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9125   }
9126 };
9127 
9128 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9129   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9130       : AAPotentialValuesImpl(IRP, A) {}
9131 
9132   /// See AbstractAttribute::initialize(...).
9133   ChangeStatus updateImpl(Attributor &A) override {
9134     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9135                      "not be called");
9136   }
9137 
9138   /// See AbstractAttribute::trackStatistics()
9139   void trackStatistics() const override {
9140     STATS_DECLTRACK_FN_ATTR(potential_values)
9141   }
9142 };
9143 
9144 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9145   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9146       : AAPotentialValuesFunction(IRP, A) {}
9147 
9148   /// See AbstractAttribute::trackStatistics()
9149   void trackStatistics() const override {
9150     STATS_DECLTRACK_CS_ATTR(potential_values)
9151   }
9152 };
9153 
9154 struct AAPotentialValuesCallSiteReturned
9155     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9156   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9157       : AACallSiteReturnedFromReturned<AAPotentialValues,
9158                                        AAPotentialValuesImpl>(IRP, A) {}
9159 
9160   /// See AbstractAttribute::trackStatistics()
9161   void trackStatistics() const override {
9162     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9163   }
9164 };
9165 
9166 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9167   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9168       : AAPotentialValuesFloating(IRP, A) {}
9169 
9170   /// See AbstractAttribute::initialize(..).
9171   void initialize(Attributor &A) override {
9172     AAPotentialValuesImpl::initialize(A);
9173     if (isAtFixpoint())
9174       return;
9175 
9176     Value &V = getAssociatedValue();
9177 
9178     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9179       unionAssumed(C->getValue());
9180       indicateOptimisticFixpoint();
9181       return;
9182     }
9183 
9184     if (isa<UndefValue>(&V)) {
9185       unionAssumedWithUndef();
9186       indicateOptimisticFixpoint();
9187       return;
9188     }
9189   }
9190 
9191   /// See AbstractAttribute::updateImpl(...).
9192   ChangeStatus updateImpl(Attributor &A) override {
9193     Value &V = getAssociatedValue();
9194     auto AssumedBefore = getAssumed();
9195     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9196                                              DepClassTy::REQUIRED);
9197     const auto &S = AA.getAssumed();
9198     unionAssumed(S);
9199     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9200                                          : ChangeStatus::CHANGED;
9201   }
9202 
9203   /// See AbstractAttribute::trackStatistics()
9204   void trackStatistics() const override {
9205     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9206   }
9207 };
9208 
9209 /// ------------------------ NoUndef Attribute ---------------------------------
9210 struct AANoUndefImpl : AANoUndef {
9211   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9212 
9213   /// See AbstractAttribute::initialize(...).
9214   void initialize(Attributor &A) override {
9215     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9216       indicateOptimisticFixpoint();
9217       return;
9218     }
9219     Value &V = getAssociatedValue();
9220     if (isa<UndefValue>(V))
9221       indicatePessimisticFixpoint();
9222     else if (isa<FreezeInst>(V))
9223       indicateOptimisticFixpoint();
9224     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9225              isGuaranteedNotToBeUndefOrPoison(&V))
9226       indicateOptimisticFixpoint();
9227     else
9228       AANoUndef::initialize(A);
9229   }
9230 
9231   /// See followUsesInMBEC
9232   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9233                        AANoUndef::StateType &State) {
9234     const Value *UseV = U->get();
9235     const DominatorTree *DT = nullptr;
9236     AssumptionCache *AC = nullptr;
9237     InformationCache &InfoCache = A.getInfoCache();
9238     if (Function *F = getAnchorScope()) {
9239       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9240       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9241     }
9242     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9243     bool TrackUse = false;
9244     // Track use for instructions which must produce undef or poison bits when
9245     // at least one operand contains such bits.
9246     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9247       TrackUse = true;
9248     return TrackUse;
9249   }
9250 
9251   /// See AbstractAttribute::getAsStr().
9252   const std::string getAsStr() const override {
9253     return getAssumed() ? "noundef" : "may-undef-or-poison";
9254   }
9255 
9256   ChangeStatus manifest(Attributor &A) override {
9257     // We don't manifest noundef attribute for dead positions because the
9258     // associated values with dead positions would be replaced with undef
9259     // values.
9260     bool UsedAssumedInformation = false;
9261     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9262                         UsedAssumedInformation))
9263       return ChangeStatus::UNCHANGED;
9264     // A position whose simplified value does not have any value is
9265     // considered to be dead. We don't manifest noundef in such positions for
9266     // the same reason above.
9267     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9268              .hasValue())
9269       return ChangeStatus::UNCHANGED;
9270     return AANoUndef::manifest(A);
9271   }
9272 };
9273 
9274 struct AANoUndefFloating : public AANoUndefImpl {
9275   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9276       : AANoUndefImpl(IRP, A) {}
9277 
9278   /// See AbstractAttribute::initialize(...).
9279   void initialize(Attributor &A) override {
9280     AANoUndefImpl::initialize(A);
9281     if (!getState().isAtFixpoint())
9282       if (Instruction *CtxI = getCtxI())
9283         followUsesInMBEC(*this, A, getState(), *CtxI);
9284   }
9285 
9286   /// See AbstractAttribute::updateImpl(...).
9287   ChangeStatus updateImpl(Attributor &A) override {
9288     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9289                             AANoUndef::StateType &T, bool Stripped) -> bool {
9290       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9291                                              DepClassTy::REQUIRED);
9292       if (!Stripped && this == &AA) {
9293         T.indicatePessimisticFixpoint();
9294       } else {
9295         const AANoUndef::StateType &S =
9296             static_cast<const AANoUndef::StateType &>(AA.getState());
9297         T ^= S;
9298       }
9299       return T.isValidState();
9300     };
9301 
9302     StateType T;
9303     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9304                                           VisitValueCB, getCtxI()))
9305       return indicatePessimisticFixpoint();
9306 
9307     return clampStateAndIndicateChange(getState(), T);
9308   }
9309 
9310   /// See AbstractAttribute::trackStatistics()
9311   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9312 };
9313 
9314 struct AANoUndefReturned final
9315     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9316   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9317       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9318 
9319   /// See AbstractAttribute::trackStatistics()
9320   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9321 };
9322 
9323 struct AANoUndefArgument final
9324     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9325   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9326       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9327 
9328   /// See AbstractAttribute::trackStatistics()
9329   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9330 };
9331 
9332 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9333   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9334       : AANoUndefFloating(IRP, A) {}
9335 
9336   /// See AbstractAttribute::trackStatistics()
9337   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9338 };
9339 
9340 struct AANoUndefCallSiteReturned final
9341     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9342   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9343       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9344 
9345   /// See AbstractAttribute::trackStatistics()
9346   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9347 };
9348 
9349 struct AACallEdgesImpl : public AACallEdges {
9350   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9351 
9352   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9353     return CalledFunctions;
9354   }
9355 
9356   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9357 
9358   virtual bool hasNonAsmUnknownCallee() const override {
9359     return HasUnknownCalleeNonAsm;
9360   }
9361 
9362   const std::string getAsStr() const override {
9363     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9364            std::to_string(CalledFunctions.size()) + "]";
9365   }
9366 
9367   void trackStatistics() const override {}
9368 
9369 protected:
9370   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9371     if (CalledFunctions.insert(Fn)) {
9372       Change = ChangeStatus::CHANGED;
9373       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9374                         << "\n");
9375     }
9376   }
9377 
9378   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9379     if (!HasUnknownCallee)
9380       Change = ChangeStatus::CHANGED;
9381     if (NonAsm && !HasUnknownCalleeNonAsm)
9382       Change = ChangeStatus::CHANGED;
9383     HasUnknownCalleeNonAsm |= NonAsm;
9384     HasUnknownCallee = true;
9385   }
9386 
9387 private:
9388   /// Optimistic set of functions that might be called by this position.
9389   SetVector<Function *> CalledFunctions;
9390 
9391   /// Is there any call with a unknown callee.
9392   bool HasUnknownCallee = false;
9393 
9394   /// Is there any call with a unknown callee, excluding any inline asm.
9395   bool HasUnknownCalleeNonAsm = false;
9396 };
9397 
9398 struct AACallEdgesCallSite : public AACallEdgesImpl {
9399   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9400       : AACallEdgesImpl(IRP, A) {}
9401   /// See AbstractAttribute::updateImpl(...).
9402   ChangeStatus updateImpl(Attributor &A) override {
9403     ChangeStatus Change = ChangeStatus::UNCHANGED;
9404 
9405     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9406                           bool Stripped) -> bool {
9407       if (Function *Fn = dyn_cast<Function>(&V)) {
9408         addCalledFunction(Fn, Change);
9409       } else {
9410         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9411         setHasUnknownCallee(true, Change);
9412       }
9413 
9414       // Explore all values.
9415       return true;
9416     };
9417 
9418     // Process any value that we might call.
9419     auto ProcessCalledOperand = [&](Value *V) {
9420       bool DummyValue = false;
9421       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9422                                        DummyValue, VisitValue, nullptr,
9423                                        false)) {
9424         // If we haven't gone through all values, assume that there are unknown
9425         // callees.
9426         setHasUnknownCallee(true, Change);
9427       }
9428     };
9429 
9430     CallBase *CB = static_cast<CallBase *>(getCtxI());
9431 
9432     if (CB->isInlineAsm()) {
9433       setHasUnknownCallee(false, Change);
9434       return Change;
9435     }
9436 
9437     // Process callee metadata if available.
9438     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9439       for (auto &Op : MD->operands()) {
9440         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9441         if (Callee)
9442           addCalledFunction(Callee, Change);
9443       }
9444       return Change;
9445     }
9446 
9447     // The most simple case.
9448     ProcessCalledOperand(CB->getCalledOperand());
9449 
9450     // Process callback functions.
9451     SmallVector<const Use *, 4u> CallbackUses;
9452     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9453     for (const Use *U : CallbackUses)
9454       ProcessCalledOperand(U->get());
9455 
9456     return Change;
9457   }
9458 };
9459 
9460 struct AACallEdgesFunction : public AACallEdgesImpl {
9461   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9462       : AACallEdgesImpl(IRP, A) {}
9463 
9464   /// See AbstractAttribute::updateImpl(...).
9465   ChangeStatus updateImpl(Attributor &A) override {
9466     ChangeStatus Change = ChangeStatus::UNCHANGED;
9467 
9468     auto ProcessCallInst = [&](Instruction &Inst) {
9469       CallBase &CB = static_cast<CallBase &>(Inst);
9470 
9471       auto &CBEdges = A.getAAFor<AACallEdges>(
9472           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9473       if (CBEdges.hasNonAsmUnknownCallee())
9474         setHasUnknownCallee(true, Change);
9475       if (CBEdges.hasUnknownCallee())
9476         setHasUnknownCallee(false, Change);
9477 
9478       for (Function *F : CBEdges.getOptimisticEdges())
9479         addCalledFunction(F, Change);
9480 
9481       return true;
9482     };
9483 
9484     // Visit all callable instructions.
9485     bool UsedAssumedInformation = false;
9486     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9487                                            UsedAssumedInformation)) {
9488       // If we haven't looked at all call like instructions, assume that there
9489       // are unknown callees.
9490       setHasUnknownCallee(true, Change);
9491     }
9492 
9493     return Change;
9494   }
9495 };
9496 
9497 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9498 private:
9499   struct QuerySet {
9500     void markReachable(Function *Fn) {
9501       Reachable.insert(Fn);
9502       Unreachable.erase(Fn);
9503     }
9504 
9505     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9506                         ArrayRef<const AACallEdges *> AAEdgesList) {
9507       ChangeStatus Change = ChangeStatus::UNCHANGED;
9508 
9509       for (auto *AAEdges : AAEdgesList) {
9510         if (AAEdges->hasUnknownCallee()) {
9511           if (!CanReachUnknownCallee)
9512             Change = ChangeStatus::CHANGED;
9513           CanReachUnknownCallee = true;
9514           return Change;
9515         }
9516       }
9517 
9518       for (Function *Fn : make_early_inc_range(Unreachable)) {
9519         if (checkIfReachable(A, AA, AAEdgesList, Fn)) {
9520           Change = ChangeStatus::CHANGED;
9521           markReachable(Fn);
9522         }
9523       }
9524       return Change;
9525     }
9526 
9527     bool isReachable(Attributor &A, const AAFunctionReachability &AA,
9528                      ArrayRef<const AACallEdges *> AAEdgesList, Function *Fn) {
9529       // Assume that we can reach the function.
9530       // TODO: Be more specific with the unknown callee.
9531       if (CanReachUnknownCallee)
9532         return true;
9533 
9534       if (Reachable.count(Fn))
9535         return true;
9536 
9537       if (Unreachable.count(Fn))
9538         return false;
9539 
9540       // We need to assume that this function can't reach Fn to prevent
9541       // an infinite loop if this function is recursive.
9542       Unreachable.insert(Fn);
9543 
9544       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9545       if (Result)
9546         markReachable(Fn);
9547       return Result;
9548     }
9549 
9550     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9551                           ArrayRef<const AACallEdges *> AAEdgesList,
9552                           Function *Fn) const {
9553 
9554       // Handle the most trivial case first.
9555       for (auto *AAEdges : AAEdgesList) {
9556         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9557 
9558         if (Edges.count(Fn))
9559           return true;
9560       }
9561 
9562       SmallVector<const AAFunctionReachability *, 8> Deps;
9563       for (auto &AAEdges : AAEdgesList) {
9564         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9565 
9566         for (Function *Edge : Edges) {
9567           // We don't need a dependency if the result is reachable.
9568           const AAFunctionReachability &EdgeReachability =
9569               A.getAAFor<AAFunctionReachability>(
9570                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9571           Deps.push_back(&EdgeReachability);
9572 
9573           if (EdgeReachability.canReach(A, Fn))
9574             return true;
9575         }
9576       }
9577 
9578       // The result is false for now, set dependencies and leave.
9579       for (auto Dep : Deps)
9580         A.recordDependence(AA, *Dep, DepClassTy::REQUIRED);
9581 
9582       return false;
9583     }
9584 
9585     /// Set of functions that we know for sure is reachable.
9586     DenseSet<Function *> Reachable;
9587 
9588     /// Set of functions that are unreachable, but might become reachable.
9589     DenseSet<Function *> Unreachable;
9590 
9591     /// If we can reach a function with a call to a unknown function we assume
9592     /// that we can reach any function.
9593     bool CanReachUnknownCallee = false;
9594   };
9595 
9596 public:
9597   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9598       : AAFunctionReachability(IRP, A) {}
9599 
9600   bool canReach(Attributor &A, Function *Fn) const override {
9601     const AACallEdges &AAEdges =
9602         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9603 
9604     // Attributor returns attributes as const, so this function has to be
9605     // const for users of this attribute to use it without having to do
9606     // a const_cast.
9607     // This is a hack for us to be able to cache queries.
9608     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9609     bool Result =
9610         NonConstThis->WholeFunction.isReachable(A, *this, {&AAEdges}, Fn);
9611 
9612     return Result;
9613   }
9614 
9615   /// Can \p CB reach \p Fn
9616   bool canReach(Attributor &A, CallBase &CB, Function *Fn) const override {
9617     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9618         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9619 
9620     // Attributor returns attributes as const, so this function has to be
9621     // const for users of this attribute to use it without having to do
9622     // a const_cast.
9623     // This is a hack for us to be able to cache queries.
9624     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9625     QuerySet &CBQuery = NonConstThis->CBQueries[&CB];
9626 
9627     bool Result = CBQuery.isReachable(A, *this, {&AAEdges}, Fn);
9628 
9629     return Result;
9630   }
9631 
9632   /// See AbstractAttribute::updateImpl(...).
9633   ChangeStatus updateImpl(Attributor &A) override {
9634     const AACallEdges &AAEdges =
9635         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9636     ChangeStatus Change = ChangeStatus::UNCHANGED;
9637 
9638     Change |= WholeFunction.update(A, *this, {&AAEdges});
9639 
9640     for (auto CBPair : CBQueries) {
9641       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9642           *this, IRPosition::callsite_function(*CBPair.first),
9643           DepClassTy::REQUIRED);
9644 
9645       Change |= CBPair.second.update(A, *this, {&AAEdges});
9646     }
9647 
9648     return Change;
9649   }
9650 
9651   const std::string getAsStr() const override {
9652     size_t QueryCount =
9653         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9654 
9655     return "FunctionReachability [" +
9656            std::to_string(WholeFunction.Reachable.size()) + "," +
9657            std::to_string(QueryCount) + "]";
9658   }
9659 
9660   void trackStatistics() const override {}
9661 private:
9662   bool canReachUnknownCallee() const override {
9663     return WholeFunction.CanReachUnknownCallee;
9664   }
9665 
9666   /// Used to answer if a the whole function can reacha a specific function.
9667   QuerySet WholeFunction;
9668 
9669   /// Used to answer if a call base inside this function can reach a specific
9670   /// function.
9671   DenseMap<CallBase *, QuerySet> CBQueries;
9672 };
9673 
9674 } // namespace
9675 
9676 AACallGraphNode *AACallEdgeIterator::operator*() const {
9677   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
9678       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
9679 }
9680 
9681 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
9682 
9683 const char AAReturnedValues::ID = 0;
9684 const char AANoUnwind::ID = 0;
9685 const char AANoSync::ID = 0;
9686 const char AANoFree::ID = 0;
9687 const char AANonNull::ID = 0;
9688 const char AANoRecurse::ID = 0;
9689 const char AAWillReturn::ID = 0;
9690 const char AAUndefinedBehavior::ID = 0;
9691 const char AANoAlias::ID = 0;
9692 const char AAReachability::ID = 0;
9693 const char AANoReturn::ID = 0;
9694 const char AAIsDead::ID = 0;
9695 const char AADereferenceable::ID = 0;
9696 const char AAAlign::ID = 0;
9697 const char AANoCapture::ID = 0;
9698 const char AAValueSimplify::ID = 0;
9699 const char AAHeapToStack::ID = 0;
9700 const char AAPrivatizablePtr::ID = 0;
9701 const char AAMemoryBehavior::ID = 0;
9702 const char AAMemoryLocation::ID = 0;
9703 const char AAValueConstantRange::ID = 0;
9704 const char AAPotentialValues::ID = 0;
9705 const char AANoUndef::ID = 0;
9706 const char AACallEdges::ID = 0;
9707 const char AAFunctionReachability::ID = 0;
9708 const char AAPointerInfo::ID = 0;
9709 
9710 // Macro magic to create the static generator function for attributes that
9711 // follow the naming scheme.
9712 
9713 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
9714   case IRPosition::PK:                                                         \
9715     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
9716 
9717 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
9718   case IRPosition::PK:                                                         \
9719     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
9720     ++NumAAs;                                                                  \
9721     break;
9722 
9723 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
9724   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9725     CLASS *AA = nullptr;                                                       \
9726     switch (IRP.getPositionKind()) {                                           \
9727       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9728       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9729       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9730       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9731       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9732       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9733       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9734       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9735     }                                                                          \
9736     return *AA;                                                                \
9737   }
9738 
9739 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
9740   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9741     CLASS *AA = nullptr;                                                       \
9742     switch (IRP.getPositionKind()) {                                           \
9743       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9744       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
9745       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9746       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9747       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9748       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9749       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9750       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9751     }                                                                          \
9752     return *AA;                                                                \
9753   }
9754 
9755 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
9756   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9757     CLASS *AA = nullptr;                                                       \
9758     switch (IRP.getPositionKind()) {                                           \
9759       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9760       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9761       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9762       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9763       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9764       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9765       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9766       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9767     }                                                                          \
9768     return *AA;                                                                \
9769   }
9770 
9771 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
9772   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9773     CLASS *AA = nullptr;                                                       \
9774     switch (IRP.getPositionKind()) {                                           \
9775       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9776       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9777       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9778       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9779       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9780       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9781       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9782       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9783     }                                                                          \
9784     return *AA;                                                                \
9785   }
9786 
9787 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
9788   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9789     CLASS *AA = nullptr;                                                       \
9790     switch (IRP.getPositionKind()) {                                           \
9791       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9792       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9793       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9794       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9795       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9796       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9797       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9798       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9799     }                                                                          \
9800     return *AA;                                                                \
9801   }
9802 
9803 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
9804 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
9805 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
9806 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
9807 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
9808 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
9809 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
9810 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
9811 
9812 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
9813 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
9814 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
9815 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
9816 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
9817 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
9818 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
9819 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
9820 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
9821 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
9822 
9823 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
9824 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
9825 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
9826 
9827 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
9828 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
9829 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
9830 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
9831 
9832 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
9833 
9834 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
9835 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
9836 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
9837 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
9838 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
9839 #undef SWITCH_PK_CREATE
9840 #undef SWITCH_PK_INV
9841