1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/SetVector.h"
15 #include "llvm/IR/GlobalValue.h"
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/SCCIterator.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetOperations.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Analysis/AssumeBundleQueries.h"
27 #include "llvm/Analysis/AssumptionCache.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/LazyValueInfo.h"
31 #include "llvm/Analysis/MemoryBuiltins.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/ScalarEvolution.h"
34 #include "llvm/Analysis/TargetTransformInfo.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Assumptions.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/IRBuilder.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/NoFolder.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/Support/Alignment.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/GraphWriter.h"
51 #include "llvm/Support/MathExtras.h"
52 #include "llvm/Support/raw_ostream.h"
53 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
54 #include "llvm/Transforms/Utils/Cloning.h"
55 #include "llvm/Transforms/Utils/Local.h"
56 #include <cassert>
57 
58 using namespace llvm;
59 
60 #define DEBUG_TYPE "attributor"
61 
62 static cl::opt<bool> ManifestInternal(
63     "attributor-manifest-internal", cl::Hidden,
64     cl::desc("Manifest Attributor internal string attributes."),
65     cl::init(false));
66 
67 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
68                                        cl::Hidden);
69 
70 template <>
71 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
72 
73 static cl::opt<unsigned, true> MaxPotentialValues(
74     "attributor-max-potential-values", cl::Hidden,
75     cl::desc("Maximum number of potential values to be "
76              "tracked for each position."),
77     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
78     cl::init(7));
79 
80 static cl::opt<unsigned> MaxInterferingAccesses(
81     "attributor-max-interfering-accesses", cl::Hidden,
82     cl::desc("Maximum number of interfering accesses to "
83              "check before assuming all might interfere."),
84     cl::init(6));
85 
86 STATISTIC(NumAAs, "Number of abstract attributes created");
87 
88 // Some helper macros to deal with statistics tracking.
89 //
90 // Usage:
91 // For simple IR attribute tracking overload trackStatistics in the abstract
92 // attribute and choose the right STATS_DECLTRACK_********* macro,
93 // e.g.,:
94 //  void trackStatistics() const override {
95 //    STATS_DECLTRACK_ARG_ATTR(returned)
96 //  }
97 // If there is a single "increment" side one can use the macro
98 // STATS_DECLTRACK with a custom message. If there are multiple increment
99 // sides, STATS_DECL and STATS_TRACK can also be used separately.
100 //
101 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
102   ("Number of " #TYPE " marked '" #NAME "'")
103 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
104 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
105 #define STATS_DECL(NAME, TYPE, MSG)                                            \
106   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
107 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
108 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
109   {                                                                            \
110     STATS_DECL(NAME, TYPE, MSG)                                                \
111     STATS_TRACK(NAME, TYPE)                                                    \
112   }
113 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
114   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
115 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
116   STATS_DECLTRACK(NAME, CSArguments,                                           \
117                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
118 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
119   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
120 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
121   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
122 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
123   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
124                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
125 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
126   STATS_DECLTRACK(NAME, CSReturn,                                              \
127                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
128 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
129   STATS_DECLTRACK(NAME, Floating,                                              \
130                   ("Number of floating values known to be '" #NAME "'"))
131 
132 // Specialization of the operator<< for abstract attributes subclasses. This
133 // disambiguates situations where multiple operators are applicable.
134 namespace llvm {
135 #define PIPE_OPERATOR(CLASS)                                                   \
136   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
137     return OS << static_cast<const AbstractAttribute &>(AA);                   \
138   }
139 
140 PIPE_OPERATOR(AAIsDead)
141 PIPE_OPERATOR(AANoUnwind)
142 PIPE_OPERATOR(AANoSync)
143 PIPE_OPERATOR(AANoRecurse)
144 PIPE_OPERATOR(AAWillReturn)
145 PIPE_OPERATOR(AANoReturn)
146 PIPE_OPERATOR(AAReturnedValues)
147 PIPE_OPERATOR(AANonNull)
148 PIPE_OPERATOR(AANoAlias)
149 PIPE_OPERATOR(AADereferenceable)
150 PIPE_OPERATOR(AAAlign)
151 PIPE_OPERATOR(AAInstanceInfo)
152 PIPE_OPERATOR(AANoCapture)
153 PIPE_OPERATOR(AAValueSimplify)
154 PIPE_OPERATOR(AANoFree)
155 PIPE_OPERATOR(AAHeapToStack)
156 PIPE_OPERATOR(AAReachability)
157 PIPE_OPERATOR(AAMemoryBehavior)
158 PIPE_OPERATOR(AAMemoryLocation)
159 PIPE_OPERATOR(AAValueConstantRange)
160 PIPE_OPERATOR(AAPrivatizablePtr)
161 PIPE_OPERATOR(AAUndefinedBehavior)
162 PIPE_OPERATOR(AAPotentialValues)
163 PIPE_OPERATOR(AANoUndef)
164 PIPE_OPERATOR(AACallEdges)
165 PIPE_OPERATOR(AAFunctionReachability)
166 PIPE_OPERATOR(AAPointerInfo)
167 PIPE_OPERATOR(AAAssumptionInfo)
168 
169 #undef PIPE_OPERATOR
170 
171 template <>
172 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
173                                                      const DerefState &R) {
174   ChangeStatus CS0 =
175       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
176   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
177   return CS0 | CS1;
178 }
179 
180 } // namespace llvm
181 
182 /// Get pointer operand of memory accessing instruction. If \p I is
183 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
184 /// is set to false and the instruction is volatile, return nullptr.
185 static const Value *getPointerOperand(const Instruction *I,
186                                       bool AllowVolatile) {
187   if (!AllowVolatile && I->isVolatile())
188     return nullptr;
189 
190   if (auto *LI = dyn_cast<LoadInst>(I)) {
191     return LI->getPointerOperand();
192   }
193 
194   if (auto *SI = dyn_cast<StoreInst>(I)) {
195     return SI->getPointerOperand();
196   }
197 
198   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
199     return CXI->getPointerOperand();
200   }
201 
202   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
203     return RMWI->getPointerOperand();
204   }
205 
206   return nullptr;
207 }
208 
209 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
210 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
211 /// getelement pointer instructions that traverse the natural type of \p Ptr if
212 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
213 /// through a cast to i8*.
214 ///
215 /// TODO: This could probably live somewhere more prominantly if it doesn't
216 ///       already exist.
217 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
218                                int64_t Offset, IRBuilder<NoFolder> &IRB,
219                                const DataLayout &DL) {
220   assert(Offset >= 0 && "Negative offset not supported yet!");
221   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
222                     << "-bytes as " << *ResTy << "\n");
223 
224   if (Offset) {
225     Type *Ty = PtrElemTy;
226     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
227     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
228 
229     SmallVector<Value *, 4> ValIndices;
230     std::string GEPName = Ptr->getName().str();
231     for (const APInt &Index : IntIndices) {
232       ValIndices.push_back(IRB.getInt(Index));
233       GEPName += "." + std::to_string(Index.getZExtValue());
234     }
235 
236     // Create a GEP for the indices collected above.
237     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
238 
239     // If an offset is left we use byte-wise adjustment.
240     if (IntOffset != 0) {
241       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
242       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
243                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
244     }
245   }
246 
247   // Ensure the result has the requested type.
248   Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
249                                                 Ptr->getName() + ".cast");
250 
251   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
252   return Ptr;
253 }
254 
255 /// Recursively visit all values that might become \p IRP at some point. This
256 /// will be done by looking through cast instructions, selects, phis, and calls
257 /// with the "returned" attribute. Once we cannot look through the value any
258 /// further, the callback \p VisitValueCB is invoked and passed the current
259 /// value, the \p State, and a flag to indicate if we stripped anything.
260 /// Stripped means that we unpacked the value associated with \p IRP at least
261 /// once. Note that the value used for the callback may still be the value
262 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
263 /// we will never visit more values than specified by \p MaxValues.
264 /// If \p Intraprocedural is set to true only values valid in the scope of
265 /// \p CtxI will be visited and simplification into other scopes is prevented.
266 template <typename StateTy>
267 static bool genericValueTraversal(
268     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
269     StateTy &State,
270     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
271         VisitValueCB,
272     const Instruction *CtxI, bool &UsedAssumedInformation,
273     bool UseValueSimplify = true, int MaxValues = 16,
274     function_ref<Value *(Value *)> StripCB = nullptr,
275     bool Intraprocedural = false) {
276 
277   struct LivenessInfo {
278     const AAIsDead *LivenessAA = nullptr;
279     bool AnyDead = false;
280   };
281   SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
282   auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
283     LivenessInfo &LI = LivenessAAs[&F];
284     if (!LI.LivenessAA)
285       LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
286                                             DepClassTy::NONE);
287     return LI;
288   };
289 
290   Value *InitialV = &IRP.getAssociatedValue();
291   using Item = std::pair<Value *, const Instruction *>;
292   SmallSet<Item, 16> Visited;
293   SmallVector<Item, 16> Worklist;
294   Worklist.push_back({InitialV, CtxI});
295 
296   int Iteration = 0;
297   do {
298     Item I = Worklist.pop_back_val();
299     Value *V = I.first;
300     CtxI = I.second;
301     if (StripCB)
302       V = StripCB(V);
303 
304     // Check if we should process the current value. To prevent endless
305     // recursion keep a record of the values we followed!
306     if (!Visited.insert(I).second)
307       continue;
308 
309     // Make sure we limit the compile time for complex expressions.
310     if (Iteration++ >= MaxValues) {
311       LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
312                         << Iteration << "!\n");
313       return false;
314     }
315 
316     // Explicitly look through calls with a "returned" attribute if we do
317     // not have a pointer as stripPointerCasts only works on them.
318     Value *NewV = nullptr;
319     if (V->getType()->isPointerTy()) {
320       NewV = V->stripPointerCasts();
321     } else {
322       auto *CB = dyn_cast<CallBase>(V);
323       if (CB && CB->getCalledFunction()) {
324         for (Argument &Arg : CB->getCalledFunction()->args())
325           if (Arg.hasReturnedAttr()) {
326             NewV = CB->getArgOperand(Arg.getArgNo());
327             break;
328           }
329       }
330     }
331     if (NewV && NewV != V) {
332       Worklist.push_back({NewV, CtxI});
333       continue;
334     }
335 
336     // Look through select instructions, visit assumed potential values.
337     if (auto *SI = dyn_cast<SelectInst>(V)) {
338       Optional<Constant *> C = A.getAssumedConstant(
339           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
340       bool NoValueYet = !C.hasValue();
341       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
342         continue;
343       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
344         if (CI->isZero())
345           Worklist.push_back({SI->getFalseValue(), CtxI});
346         else
347           Worklist.push_back({SI->getTrueValue(), CtxI});
348         continue;
349       }
350       // We could not simplify the condition, assume both values.(
351       Worklist.push_back({SI->getTrueValue(), CtxI});
352       Worklist.push_back({SI->getFalseValue(), CtxI});
353       continue;
354     }
355 
356     // Look through phi nodes, visit all live operands.
357     if (auto *PHI = dyn_cast<PHINode>(V)) {
358       LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
359       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
360         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
361         if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
362           LI.AnyDead = true;
363           UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
364           continue;
365         }
366         Worklist.push_back(
367             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
368       }
369       continue;
370     }
371 
372     if (auto *Arg = dyn_cast<Argument>(V)) {
373       if (!Intraprocedural && !Arg->hasPassPointeeByValueCopyAttr()) {
374         SmallVector<Item> CallSiteValues;
375         bool UsedAssumedInformation = false;
376         if (A.checkForAllCallSites(
377                 [&](AbstractCallSite ACS) {
378                   // Callbacks might not have a corresponding call site operand,
379                   // stick with the argument in that case.
380                   Value *CSOp = ACS.getCallArgOperand(*Arg);
381                   if (!CSOp)
382                     return false;
383                   CallSiteValues.push_back({CSOp, ACS.getInstruction()});
384                   return true;
385                 },
386                 *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
387           Worklist.append(CallSiteValues);
388           continue;
389         }
390       }
391     }
392 
393     if (UseValueSimplify && !isa<Constant>(V)) {
394       Optional<Value *> SimpleV =
395           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
396       if (!SimpleV.hasValue())
397         continue;
398       Value *NewV = SimpleV.getValue();
399       if (NewV && NewV != V) {
400         if (!Intraprocedural || !CtxI ||
401             AA::isValidInScope(*NewV, CtxI->getFunction())) {
402           Worklist.push_back({NewV, CtxI});
403           continue;
404         }
405       }
406     }
407 
408     if (auto *LI = dyn_cast<LoadInst>(V)) {
409       bool UsedAssumedInformation = false;
410       // If we ask for the potentially loaded values from the initial pointer we
411       // will simply end up here again. The load is as far as we can make it.
412       if (LI->getPointerOperand() != InitialV) {
413         SmallSetVector<Value *, 4> PotentialCopies;
414         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
415         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
416                                            PotentialValueOrigins, QueryingAA,
417                                            UsedAssumedInformation,
418                                            /* OnlyExact */ true)) {
419           // Values have to be dynamically unique or we loose the fact that a
420           // single llvm::Value might represent two runtime values (e.g., stack
421           // locations in different recursive calls).
422           bool DynamicallyUnique =
423               llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
424                 return AA::isDynamicallyUnique(A, QueryingAA, *PC);
425               });
426           if (DynamicallyUnique &&
427               (!Intraprocedural || !CtxI ||
428                llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
429                  return AA::isValidInScope(*PC, CtxI->getFunction());
430                }))) {
431             for (auto *PotentialCopy : PotentialCopies)
432               Worklist.push_back({PotentialCopy, CtxI});
433             continue;
434           }
435         }
436       }
437     }
438 
439     // Once a leaf is reached we inform the user through the callback.
440     if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
441       LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
442                         << *V << "!\n");
443       return false;
444     }
445   } while (!Worklist.empty());
446 
447   // If we actually used liveness information so we have to record a dependence.
448   for (auto &It : LivenessAAs)
449     if (It.second.AnyDead)
450       A.recordDependence(*It.second.LivenessAA, QueryingAA,
451                          DepClassTy::OPTIONAL);
452 
453   // All values have been visited.
454   return true;
455 }
456 
457 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
458                                      SmallVectorImpl<Value *> &Objects,
459                                      const AbstractAttribute &QueryingAA,
460                                      const Instruction *CtxI,
461                                      bool &UsedAssumedInformation,
462                                      bool Intraprocedural) {
463   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
464   SmallPtrSet<Value *, 8> SeenObjects;
465   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
466                                      SmallVectorImpl<Value *> &Objects,
467                                      bool) -> bool {
468     if (SeenObjects.insert(&Val).second)
469       Objects.push_back(&Val);
470     return true;
471   };
472   if (!genericValueTraversal<decltype(Objects)>(
473           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
474           UsedAssumedInformation, true, 32, StripCB, Intraprocedural))
475     return false;
476   return true;
477 }
478 
479 static const Value *
480 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
481                           const Value *Val, const DataLayout &DL, APInt &Offset,
482                           bool GetMinOffset, bool AllowNonInbounds,
483                           bool UseAssumed = false) {
484 
485   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
486     const IRPosition &Pos = IRPosition::value(V);
487     // Only track dependence if we are going to use the assumed info.
488     const AAValueConstantRange &ValueConstantRangeAA =
489         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
490                                          UseAssumed ? DepClassTy::OPTIONAL
491                                                     : DepClassTy::NONE);
492     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
493                                      : ValueConstantRangeAA.getKnown();
494     if (Range.isFullSet())
495       return false;
496 
497     // We can only use the lower part of the range because the upper part can
498     // be higher than what the value can really be.
499     if (GetMinOffset)
500       ROffset = Range.getSignedMin();
501     else
502       ROffset = Range.getSignedMax();
503     return true;
504   };
505 
506   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
507                                                 /* AllowInvariant */ true,
508                                                 AttributorAnalysis);
509 }
510 
511 static const Value *
512 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
513                         const Value *Ptr, int64_t &BytesOffset,
514                         const DataLayout &DL, bool AllowNonInbounds = false) {
515   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
516   const Value *Base =
517       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
518                                 /* GetMinOffset */ true, AllowNonInbounds);
519 
520   BytesOffset = OffsetAPInt.getSExtValue();
521   return Base;
522 }
523 
524 /// Clamp the information known for all returned values of a function
525 /// (identified by \p QueryingAA) into \p S.
526 template <typename AAType, typename StateType = typename AAType::StateType>
527 static void clampReturnedValueStates(
528     Attributor &A, const AAType &QueryingAA, StateType &S,
529     const IRPosition::CallBaseContext *CBContext = nullptr) {
530   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
531                     << QueryingAA << " into " << S << "\n");
532 
533   assert((QueryingAA.getIRPosition().getPositionKind() ==
534               IRPosition::IRP_RETURNED ||
535           QueryingAA.getIRPosition().getPositionKind() ==
536               IRPosition::IRP_CALL_SITE_RETURNED) &&
537          "Can only clamp returned value states for a function returned or call "
538          "site returned position!");
539 
540   // Use an optional state as there might not be any return values and we want
541   // to join (IntegerState::operator&) the state of all there are.
542   Optional<StateType> T;
543 
544   // Callback for each possibly returned value.
545   auto CheckReturnValue = [&](Value &RV) -> bool {
546     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
547     const AAType &AA =
548         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
549     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
550                       << " @ " << RVPos << "\n");
551     const StateType &AAS = AA.getState();
552     if (T.hasValue())
553       *T &= AAS;
554     else
555       T = AAS;
556     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
557                       << "\n");
558     return T->isValidState();
559   };
560 
561   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
562     S.indicatePessimisticFixpoint();
563   else if (T.hasValue())
564     S ^= *T;
565 }
566 
567 namespace {
568 /// Helper class for generic deduction: return value -> returned position.
569 template <typename AAType, typename BaseType,
570           typename StateType = typename BaseType::StateType,
571           bool PropagateCallBaseContext = false>
572 struct AAReturnedFromReturnedValues : public BaseType {
573   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
574       : BaseType(IRP, A) {}
575 
576   /// See AbstractAttribute::updateImpl(...).
577   ChangeStatus updateImpl(Attributor &A) override {
578     StateType S(StateType::getBestState(this->getState()));
579     clampReturnedValueStates<AAType, StateType>(
580         A, *this, S,
581         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
582     // TODO: If we know we visited all returned values, thus no are assumed
583     // dead, we can take the known information from the state T.
584     return clampStateAndIndicateChange<StateType>(this->getState(), S);
585   }
586 };
587 
588 /// Clamp the information known at all call sites for a given argument
589 /// (identified by \p QueryingAA) into \p S.
590 template <typename AAType, typename StateType = typename AAType::StateType>
591 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
592                                         StateType &S) {
593   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
594                     << QueryingAA << " into " << S << "\n");
595 
596   assert(QueryingAA.getIRPosition().getPositionKind() ==
597              IRPosition::IRP_ARGUMENT &&
598          "Can only clamp call site argument states for an argument position!");
599 
600   // Use an optional state as there might not be any return values and we want
601   // to join (IntegerState::operator&) the state of all there are.
602   Optional<StateType> T;
603 
604   // The argument number which is also the call site argument number.
605   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
606 
607   auto CallSiteCheck = [&](AbstractCallSite ACS) {
608     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
609     // Check if a coresponding argument was found or if it is on not associated
610     // (which can happen for callback calls).
611     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
612       return false;
613 
614     const AAType &AA =
615         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
616     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
617                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
618     const StateType &AAS = AA.getState();
619     if (T.hasValue())
620       *T &= AAS;
621     else
622       T = AAS;
623     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
624                       << "\n");
625     return T->isValidState();
626   };
627 
628   bool UsedAssumedInformation = false;
629   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
630                               UsedAssumedInformation))
631     S.indicatePessimisticFixpoint();
632   else if (T.hasValue())
633     S ^= *T;
634 }
635 
636 /// This function is the bridge between argument position and the call base
637 /// context.
638 template <typename AAType, typename BaseType,
639           typename StateType = typename AAType::StateType>
640 bool getArgumentStateFromCallBaseContext(Attributor &A,
641                                          BaseType &QueryingAttribute,
642                                          IRPosition &Pos, StateType &State) {
643   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
644          "Expected an 'argument' position !");
645   const CallBase *CBContext = Pos.getCallBaseContext();
646   if (!CBContext)
647     return false;
648 
649   int ArgNo = Pos.getCallSiteArgNo();
650   assert(ArgNo >= 0 && "Invalid Arg No!");
651 
652   const auto &AA = A.getAAFor<AAType>(
653       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
654       DepClassTy::REQUIRED);
655   const StateType &CBArgumentState =
656       static_cast<const StateType &>(AA.getState());
657 
658   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
659                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
660                     << "\n");
661 
662   // NOTE: If we want to do call site grouping it should happen here.
663   State ^= CBArgumentState;
664   return true;
665 }
666 
667 /// Helper class for generic deduction: call site argument -> argument position.
668 template <typename AAType, typename BaseType,
669           typename StateType = typename AAType::StateType,
670           bool BridgeCallBaseContext = false>
671 struct AAArgumentFromCallSiteArguments : public BaseType {
672   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
673       : BaseType(IRP, A) {}
674 
675   /// See AbstractAttribute::updateImpl(...).
676   ChangeStatus updateImpl(Attributor &A) override {
677     StateType S = StateType::getBestState(this->getState());
678 
679     if (BridgeCallBaseContext) {
680       bool Success =
681           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
682               A, *this, this->getIRPosition(), S);
683       if (Success)
684         return clampStateAndIndicateChange<StateType>(this->getState(), S);
685     }
686     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
687 
688     // TODO: If we know we visited all incoming values, thus no are assumed
689     // dead, we can take the known information from the state T.
690     return clampStateAndIndicateChange<StateType>(this->getState(), S);
691   }
692 };
693 
694 /// Helper class for generic replication: function returned -> cs returned.
695 template <typename AAType, typename BaseType,
696           typename StateType = typename BaseType::StateType,
697           bool IntroduceCallBaseContext = false>
698 struct AACallSiteReturnedFromReturned : public BaseType {
699   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
700       : BaseType(IRP, A) {}
701 
702   /// See AbstractAttribute::updateImpl(...).
703   ChangeStatus updateImpl(Attributor &A) override {
704     assert(this->getIRPosition().getPositionKind() ==
705                IRPosition::IRP_CALL_SITE_RETURNED &&
706            "Can only wrap function returned positions for call site returned "
707            "positions!");
708     auto &S = this->getState();
709 
710     const Function *AssociatedFunction =
711         this->getIRPosition().getAssociatedFunction();
712     if (!AssociatedFunction)
713       return S.indicatePessimisticFixpoint();
714 
715     CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
716     if (IntroduceCallBaseContext)
717       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
718                         << CBContext << "\n");
719 
720     IRPosition FnPos = IRPosition::returned(
721         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
722     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
723     return clampStateAndIndicateChange(S, AA.getState());
724   }
725 };
726 
727 /// Helper function to accumulate uses.
728 template <class AAType, typename StateType = typename AAType::StateType>
729 static void followUsesInContext(AAType &AA, Attributor &A,
730                                 MustBeExecutedContextExplorer &Explorer,
731                                 const Instruction *CtxI,
732                                 SetVector<const Use *> &Uses,
733                                 StateType &State) {
734   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
735   for (unsigned u = 0; u < Uses.size(); ++u) {
736     const Use *U = Uses[u];
737     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
738       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
739       if (Found && AA.followUseInMBEC(A, U, UserI, State))
740         for (const Use &Us : UserI->uses())
741           Uses.insert(&Us);
742     }
743   }
744 }
745 
746 /// Use the must-be-executed-context around \p I to add information into \p S.
747 /// The AAType class is required to have `followUseInMBEC` method with the
748 /// following signature and behaviour:
749 ///
750 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
751 /// U - Underlying use.
752 /// I - The user of the \p U.
753 /// Returns true if the value should be tracked transitively.
754 ///
755 template <class AAType, typename StateType = typename AAType::StateType>
756 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
757                              Instruction &CtxI) {
758 
759   // Container for (transitive) uses of the associated value.
760   SetVector<const Use *> Uses;
761   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
762     Uses.insert(&U);
763 
764   MustBeExecutedContextExplorer &Explorer =
765       A.getInfoCache().getMustBeExecutedContextExplorer();
766 
767   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
768 
769   if (S.isAtFixpoint())
770     return;
771 
772   SmallVector<const BranchInst *, 4> BrInsts;
773   auto Pred = [&](const Instruction *I) {
774     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
775       if (Br->isConditional())
776         BrInsts.push_back(Br);
777     return true;
778   };
779 
780   // Here, accumulate conditional branch instructions in the context. We
781   // explore the child paths and collect the known states. The disjunction of
782   // those states can be merged to its own state. Let ParentState_i be a state
783   // to indicate the known information for an i-th branch instruction in the
784   // context. ChildStates are created for its successors respectively.
785   //
786   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
787   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
788   //      ...
789   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
790   //
791   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
792   //
793   // FIXME: Currently, recursive branches are not handled. For example, we
794   // can't deduce that ptr must be dereferenced in below function.
795   //
796   // void f(int a, int c, int *ptr) {
797   //    if(a)
798   //      if (b) {
799   //        *ptr = 0;
800   //      } else {
801   //        *ptr = 1;
802   //      }
803   //    else {
804   //      if (b) {
805   //        *ptr = 0;
806   //      } else {
807   //        *ptr = 1;
808   //      }
809   //    }
810   // }
811 
812   Explorer.checkForAllContext(&CtxI, Pred);
813   for (const BranchInst *Br : BrInsts) {
814     StateType ParentState;
815 
816     // The known state of the parent state is a conjunction of children's
817     // known states so it is initialized with a best state.
818     ParentState.indicateOptimisticFixpoint();
819 
820     for (const BasicBlock *BB : Br->successors()) {
821       StateType ChildState;
822 
823       size_t BeforeSize = Uses.size();
824       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
825 
826       // Erase uses which only appear in the child.
827       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
828         It = Uses.erase(It);
829 
830       ParentState &= ChildState;
831     }
832 
833     // Use only known state.
834     S += ParentState;
835   }
836 }
837 } // namespace
838 
839 /// ------------------------ PointerInfo ---------------------------------------
840 
841 namespace llvm {
842 namespace AA {
843 namespace PointerInfo {
844 
845 struct State;
846 
847 } // namespace PointerInfo
848 } // namespace AA
849 
850 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
851 template <>
852 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
853   using Access = AAPointerInfo::Access;
854   static inline Access getEmptyKey();
855   static inline Access getTombstoneKey();
856   static unsigned getHashValue(const Access &A);
857   static bool isEqual(const Access &LHS, const Access &RHS);
858 };
859 
860 /// Helper that allows OffsetAndSize as a key in a DenseMap.
861 template <>
862 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
863     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
864 
865 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
866 /// but the instruction
867 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
868   using Base = DenseMapInfo<Instruction *>;
869   using Access = AAPointerInfo::Access;
870   static inline Access getEmptyKey();
871   static inline Access getTombstoneKey();
872   static unsigned getHashValue(const Access &A);
873   static bool isEqual(const Access &LHS, const Access &RHS);
874 };
875 
876 } // namespace llvm
877 
878 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
879 struct AA::PointerInfo::State : public AbstractState {
880 
881   ~State() {
882     // We do not delete the Accesses objects but need to destroy them still.
883     for (auto &It : AccessBins)
884       It.second->~Accesses();
885   }
886 
887   /// Return the best possible representable state.
888   static State getBestState(const State &SIS) { return State(); }
889 
890   /// Return the worst possible representable state.
891   static State getWorstState(const State &SIS) {
892     State R;
893     R.indicatePessimisticFixpoint();
894     return R;
895   }
896 
897   State() = default;
898   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
899     SIS.AccessBins.clear();
900   }
901 
902   const State &getAssumed() const { return *this; }
903 
904   /// See AbstractState::isValidState().
905   bool isValidState() const override { return BS.isValidState(); }
906 
907   /// See AbstractState::isAtFixpoint().
908   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
909 
910   /// See AbstractState::indicateOptimisticFixpoint().
911   ChangeStatus indicateOptimisticFixpoint() override {
912     BS.indicateOptimisticFixpoint();
913     return ChangeStatus::UNCHANGED;
914   }
915 
916   /// See AbstractState::indicatePessimisticFixpoint().
917   ChangeStatus indicatePessimisticFixpoint() override {
918     BS.indicatePessimisticFixpoint();
919     return ChangeStatus::CHANGED;
920   }
921 
922   State &operator=(const State &R) {
923     if (this == &R)
924       return *this;
925     BS = R.BS;
926     AccessBins = R.AccessBins;
927     return *this;
928   }
929 
930   State &operator=(State &&R) {
931     if (this == &R)
932       return *this;
933     std::swap(BS, R.BS);
934     std::swap(AccessBins, R.AccessBins);
935     return *this;
936   }
937 
938   bool operator==(const State &R) const {
939     if (BS != R.BS)
940       return false;
941     if (AccessBins.size() != R.AccessBins.size())
942       return false;
943     auto It = begin(), RIt = R.begin(), E = end();
944     while (It != E) {
945       if (It->getFirst() != RIt->getFirst())
946         return false;
947       auto &Accs = It->getSecond();
948       auto &RAccs = RIt->getSecond();
949       if (Accs->size() != RAccs->size())
950         return false;
951       for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
952         if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
953           return false;
954       ++It;
955       ++RIt;
956     }
957     return true;
958   }
959   bool operator!=(const State &R) const { return !(*this == R); }
960 
961   /// We store accesses in a set with the instruction as key.
962   struct Accesses {
963     SmallVector<AAPointerInfo::Access, 4> Accesses;
964     DenseMap<const Instruction *, unsigned> Map;
965 
966     unsigned size() const { return Accesses.size(); }
967 
968     using vec_iterator = decltype(Accesses)::iterator;
969     vec_iterator begin() { return Accesses.begin(); }
970     vec_iterator end() { return Accesses.end(); }
971 
972     using iterator = decltype(Map)::const_iterator;
973     iterator find(AAPointerInfo::Access &Acc) {
974       return Map.find(Acc.getRemoteInst());
975     }
976     iterator find_end() { return Map.end(); }
977 
978     AAPointerInfo::Access &get(iterator &It) {
979       return Accesses[It->getSecond()];
980     }
981 
982     void insert(AAPointerInfo::Access &Acc) {
983       Map[Acc.getRemoteInst()] = Accesses.size();
984       Accesses.push_back(Acc);
985     }
986   };
987 
988   /// We store all accesses in bins denoted by their offset and size.
989   using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
990 
991   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
992   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
993 
994 protected:
995   /// The bins with all the accesses for the associated pointer.
996   AccessBinsTy AccessBins;
997 
998   /// Add a new access to the state at offset \p Offset and with size \p Size.
999   /// The access is associated with \p I, writes \p Content (if anything), and
1000   /// is of kind \p Kind.
1001   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
1002   ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
1003                          Instruction &I, Optional<Value *> Content,
1004                          AAPointerInfo::AccessKind Kind, Type *Ty,
1005                          Instruction *RemoteI = nullptr,
1006                          Accesses *BinPtr = nullptr) {
1007     AAPointerInfo::OffsetAndSize Key{Offset, Size};
1008     Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
1009     if (!Bin)
1010       Bin = new (A.Allocator) Accesses;
1011     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1012     // Check if we have an access for this instruction in this bin, if not,
1013     // simply add it.
1014     auto It = Bin->find(Acc);
1015     if (It == Bin->find_end()) {
1016       Bin->insert(Acc);
1017       return ChangeStatus::CHANGED;
1018     }
1019     // If the existing access is the same as then new one, nothing changed.
1020     AAPointerInfo::Access &Current = Bin->get(It);
1021     AAPointerInfo::Access Before = Current;
1022     // The new one will be combined with the existing one.
1023     Current &= Acc;
1024     return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1025   }
1026 
1027   /// See AAPointerInfo::forallInterferingAccesses.
1028   bool forallInterferingAccesses(
1029       AAPointerInfo::OffsetAndSize OAS,
1030       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1031     if (!isValidState())
1032       return false;
1033 
1034     for (auto &It : AccessBins) {
1035       AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
1036       if (!OAS.mayOverlap(ItOAS))
1037         continue;
1038       bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1039       for (auto &Access : *It.getSecond())
1040         if (!CB(Access, IsExact))
1041           return false;
1042     }
1043     return true;
1044   }
1045 
1046   /// See AAPointerInfo::forallInterferingAccesses.
1047   bool forallInterferingAccesses(
1048       Instruction &I,
1049       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1050     if (!isValidState())
1051       return false;
1052 
1053     // First find the offset and size of I.
1054     AAPointerInfo::OffsetAndSize OAS(-1, -1);
1055     for (auto &It : AccessBins) {
1056       for (auto &Access : *It.getSecond()) {
1057         if (Access.getRemoteInst() == &I) {
1058           OAS = It.getFirst();
1059           break;
1060         }
1061       }
1062       if (OAS.getSize() != -1)
1063         break;
1064     }
1065     // No access for I was found, we are done.
1066     if (OAS.getSize() == -1)
1067       return true;
1068 
1069     // Now that we have an offset and size, find all overlapping ones and use
1070     // the callback on the accesses.
1071     return forallInterferingAccesses(OAS, CB);
1072   }
1073 
1074 private:
1075   /// State to track fixpoint and validity.
1076   BooleanState BS;
1077 };
1078 
1079 namespace {
1080 struct AAPointerInfoImpl
1081     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1082   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1083   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1084 
1085   /// See AbstractAttribute::initialize(...).
1086   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1087 
1088   /// See AbstractAttribute::getAsStr().
1089   const std::string getAsStr() const override {
1090     return std::string("PointerInfo ") +
1091            (isValidState() ? (std::string("#") +
1092                               std::to_string(AccessBins.size()) + " bins")
1093                            : "<invalid>");
1094   }
1095 
1096   /// See AbstractAttribute::manifest(...).
1097   ChangeStatus manifest(Attributor &A) override {
1098     return AAPointerInfo::manifest(A);
1099   }
1100 
1101   bool forallInterferingAccesses(
1102       OffsetAndSize OAS,
1103       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1104       const override {
1105     return State::forallInterferingAccesses(OAS, CB);
1106   }
1107   bool forallInterferingAccesses(
1108       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1109       function_ref<bool(const Access &, bool)> UserCB) const override {
1110     SmallPtrSet<const Access *, 8> DominatingWrites;
1111     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1112 
1113     Function &Scope = *I.getFunction();
1114     const auto &NoSyncAA = A.getAAFor<AANoSync>(
1115         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1116     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1117         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
1118     const bool NoSync = NoSyncAA.isAssumedNoSync();
1119 
1120     // Helper to determine if we need to consider threading, which we cannot
1121     // right now. However, if the function is (assumed) nosync or the thread
1122     // executing all instructions is the main thread only we can ignore
1123     // threading.
1124     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
1125       if (NoSync)
1126         return true;
1127       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
1128         return true;
1129       return false;
1130     };
1131 
1132     // Helper to determine if the access is executed by the same thread as the
1133     // load, for now it is sufficient to avoid any potential threading effects
1134     // as we cannot deal with them anyway.
1135     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
1136       return CanIgnoreThreading(*Acc.getLocalInst());
1137     };
1138 
1139     // TODO: Use inter-procedural reachability and dominance.
1140     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1141         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1142 
1143     const bool FindInterferingWrites = I.mayReadFromMemory();
1144     const bool FindInterferingReads = I.mayWriteToMemory();
1145     const bool UseDominanceReasoning = FindInterferingWrites;
1146     const bool CanUseCFGResoning = CanIgnoreThreading(I);
1147     InformationCache &InfoCache = A.getInfoCache();
1148     const DominatorTree *DT =
1149         NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1150             ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1151                   Scope)
1152             : nullptr;
1153 
1154     enum GPUAddressSpace : unsigned {
1155       Generic = 0,
1156       Global = 1,
1157       Shared = 3,
1158       Constant = 4,
1159       Local = 5,
1160     };
1161 
1162     // Helper to check if a value has "kernel lifetime", that is it will not
1163     // outlive a GPU kernel. This is true for shared, constant, and local
1164     // globals on AMD and NVIDIA GPUs.
1165     auto HasKernelLifetime = [&](Value *V, Module &M) {
1166       Triple T(M.getTargetTriple());
1167       if (!(T.isAMDGPU() || T.isNVPTX()))
1168         return false;
1169       switch (V->getType()->getPointerAddressSpace()) {
1170       case GPUAddressSpace::Shared:
1171       case GPUAddressSpace::Constant:
1172       case GPUAddressSpace::Local:
1173         return true;
1174       default:
1175         return false;
1176       };
1177     };
1178 
1179     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1180     // to determine if we should look at reachability from the callee. For
1181     // certain pointers we know the lifetime and we do not have to step into the
1182     // callee to determine reachability as the pointer would be dead in the
1183     // callee. See the conditional initialization below.
1184     std::function<bool(const Function &)> IsLiveInCalleeCB;
1185 
1186     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1187       // If the alloca containing function is not recursive the alloca
1188       // must be dead in the callee.
1189       const Function *AIFn = AI->getFunction();
1190       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1191           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1192       if (NoRecurseAA.isAssumedNoRecurse()) {
1193         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1194       }
1195     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1196       // If the global has kernel lifetime we can stop if we reach a kernel
1197       // as it is "dead" in the (unknown) callees.
1198       if (HasKernelLifetime(GV, *GV->getParent()))
1199         IsLiveInCalleeCB = [](const Function &Fn) {
1200           return !Fn.hasFnAttribute("kernel");
1201         };
1202     }
1203 
1204     auto AccessCB = [&](const Access &Acc, bool Exact) {
1205       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1206           (!FindInterferingReads || !Acc.isRead()))
1207         return true;
1208 
1209       // For now we only filter accesses based on CFG reasoning which does not
1210       // work yet if we have threading effects, or the access is complicated.
1211       if (CanUseCFGResoning) {
1212         if ((!Acc.isWrite() ||
1213              !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1214                                          IsLiveInCalleeCB)) &&
1215             (!Acc.isRead() ||
1216              !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1217                                          IsLiveInCalleeCB)))
1218           return true;
1219         if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1220             IsSameThreadAsLoad(Acc)) {
1221           if (DT->dominates(Acc.getLocalInst(), &I))
1222             DominatingWrites.insert(&Acc);
1223         }
1224       }
1225 
1226       InterferingAccesses.push_back({&Acc, Exact});
1227       return true;
1228     };
1229     if (!State::forallInterferingAccesses(I, AccessCB))
1230       return false;
1231 
1232     // If we cannot use CFG reasoning we only filter the non-write accesses
1233     // and are done here.
1234     if (!CanUseCFGResoning) {
1235       for (auto &It : InterferingAccesses)
1236         if (!UserCB(*It.first, It.second))
1237           return false;
1238       return true;
1239     }
1240 
1241     // Helper to determine if we can skip a specific write access. This is in
1242     // the worst case quadratic as we are looking for another write that will
1243     // hide the effect of this one.
1244     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1245       if (!IsSameThreadAsLoad(Acc))
1246         return false;
1247       if (!DominatingWrites.count(&Acc))
1248         return false;
1249       for (const Access *DomAcc : DominatingWrites) {
1250         assert(Acc.getLocalInst()->getFunction() ==
1251                    DomAcc->getLocalInst()->getFunction() &&
1252                "Expected dominating writes to be in the same function!");
1253 
1254         if (DomAcc != &Acc &&
1255             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1256           return true;
1257         }
1258       }
1259       return false;
1260     };
1261 
1262     // Run the user callback on all accesses we cannot skip and return if that
1263     // succeeded for all or not.
1264     unsigned NumInterferingAccesses = InterferingAccesses.size();
1265     for (auto &It : InterferingAccesses) {
1266       if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1267           !CanSkipAccess(*It.first, It.second)) {
1268         if (!UserCB(*It.first, It.second))
1269           return false;
1270       }
1271     }
1272     return true;
1273   }
1274 
1275   ChangeStatus translateAndAddCalleeState(Attributor &A,
1276                                           const AAPointerInfo &CalleeAA,
1277                                           int64_t CallArgOffset, CallBase &CB) {
1278     using namespace AA::PointerInfo;
1279     if (!CalleeAA.getState().isValidState() || !isValidState())
1280       return indicatePessimisticFixpoint();
1281 
1282     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1283     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1284 
1285     // Combine the accesses bin by bin.
1286     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1287     for (auto &It : CalleeImplAA.getState()) {
1288       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1289       if (CallArgOffset != OffsetAndSize::Unknown)
1290         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1291                             It.first.getSize());
1292       Accesses *Bin = AccessBins[OAS];
1293       for (const AAPointerInfo::Access &RAcc : *It.second) {
1294         if (IsByval && !RAcc.isRead())
1295           continue;
1296         bool UsedAssumedInformation = false;
1297         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1298             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1299         AccessKind AK =
1300             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1301                                                  : AccessKind::AK_READ_WRITE));
1302         Changed =
1303             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1304                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1305       }
1306     }
1307     return Changed;
1308   }
1309 
1310   /// Statistic tracking for all AAPointerInfo implementations.
1311   /// See AbstractAttribute::trackStatistics().
1312   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1313 };
1314 
1315 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1316   using AccessKind = AAPointerInfo::AccessKind;
1317   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1318       : AAPointerInfoImpl(IRP, A) {}
1319 
1320   /// See AbstractAttribute::initialize(...).
1321   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1322 
1323   /// Deal with an access and signal if it was handled successfully.
1324   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1325                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1326                     ChangeStatus &Changed, Type *Ty,
1327                     int64_t Size = OffsetAndSize::Unknown) {
1328     using namespace AA::PointerInfo;
1329     // No need to find a size if one is given or the offset is unknown.
1330     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1331         Ty) {
1332       const DataLayout &DL = A.getDataLayout();
1333       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1334       if (!AccessSize.isScalable())
1335         Size = AccessSize.getFixedSize();
1336     }
1337     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1338     return true;
1339   };
1340 
1341   /// Helper struct, will support ranges eventually.
1342   struct OffsetInfo {
1343     int64_t Offset = OffsetAndSize::Unknown;
1344 
1345     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1346   };
1347 
1348   /// See AbstractAttribute::updateImpl(...).
1349   ChangeStatus updateImpl(Attributor &A) override {
1350     using namespace AA::PointerInfo;
1351     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1352     Value &AssociatedValue = getAssociatedValue();
1353 
1354     const DataLayout &DL = A.getDataLayout();
1355     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1356     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1357 
1358     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1359                                      bool &Follow) {
1360       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1361       UsrOI = PtrOI;
1362       Follow = true;
1363       return true;
1364     };
1365 
1366     const auto *TLI = getAnchorScope()
1367                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1368                                 *getAnchorScope())
1369                           : nullptr;
1370     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1371       Value *CurPtr = U.get();
1372       User *Usr = U.getUser();
1373       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1374                         << *Usr << "\n");
1375       assert(OffsetInfoMap.count(CurPtr) &&
1376              "The current pointer offset should have been seeded!");
1377 
1378       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1379         if (CE->isCast())
1380           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1381         if (CE->isCompare())
1382           return true;
1383         if (!isa<GEPOperator>(CE)) {
1384           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1385                             << "\n");
1386           return false;
1387         }
1388       }
1389       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1390         // Note the order here, the Usr access might change the map, CurPtr is
1391         // already in it though.
1392         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1393         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1394         UsrOI = PtrOI;
1395 
1396         // TODO: Use range information.
1397         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1398             !GEP->hasAllConstantIndices()) {
1399           UsrOI.Offset = OffsetAndSize::Unknown;
1400           Follow = true;
1401           return true;
1402         }
1403 
1404         SmallVector<Value *, 8> Indices;
1405         for (Use &Idx : GEP->indices()) {
1406           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1407             Indices.push_back(CIdx);
1408             continue;
1409           }
1410 
1411           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1412                             << " : " << *Idx << "\n");
1413           return false;
1414         }
1415         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1416                                           GEP->getSourceElementType(), Indices);
1417         Follow = true;
1418         return true;
1419       }
1420       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1421         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1422 
1423       // For PHIs we need to take care of the recurrence explicitly as the value
1424       // might change while we iterate through a loop. For now, we give up if
1425       // the PHI is not invariant.
1426       if (isa<PHINode>(Usr)) {
1427         // Note the order here, the Usr access might change the map, CurPtr is
1428         // already in it though.
1429         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1430         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1431         // Check if the PHI is invariant (so far).
1432         if (UsrOI == PtrOI)
1433           return true;
1434 
1435         // Check if the PHI operand has already an unknown offset as we can't
1436         // improve on that anymore.
1437         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1438           UsrOI = PtrOI;
1439           Follow = true;
1440           return true;
1441         }
1442 
1443         // Check if the PHI operand is not dependent on the PHI itself.
1444         // TODO: This is not great as we look at the pointer type. However, it
1445         // is unclear where the Offset size comes from with typeless pointers.
1446         APInt Offset(
1447             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1448             0);
1449         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1450                                     DL, Offset, /* AllowNonInbounds */ true)) {
1451           if (Offset != PtrOI.Offset) {
1452             LLVM_DEBUG(dbgs()
1453                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1454                        << *CurPtr << " in " << *Usr << "\n");
1455             return false;
1456           }
1457           return HandlePassthroughUser(Usr, PtrOI, Follow);
1458         }
1459 
1460         // TODO: Approximate in case we know the direction of the recurrence.
1461         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1462                           << *CurPtr << " in " << *Usr << "\n");
1463         UsrOI = PtrOI;
1464         UsrOI.Offset = OffsetAndSize::Unknown;
1465         Follow = true;
1466         return true;
1467       }
1468 
1469       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1470         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1471                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1472                             Changed, LoadI->getType());
1473       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1474         if (StoreI->getValueOperand() == CurPtr) {
1475           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1476                             << *StoreI << "\n");
1477           return false;
1478         }
1479         bool UsedAssumedInformation = false;
1480         Optional<Value *> Content = A.getAssumedSimplified(
1481             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1482         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1483                             OffsetInfoMap[CurPtr].Offset, Changed,
1484                             StoreI->getValueOperand()->getType());
1485       }
1486       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1487         if (CB->isLifetimeStartOrEnd())
1488           return true;
1489         if (TLI && isFreeCall(CB, TLI))
1490           return true;
1491         if (CB->isArgOperand(&U)) {
1492           unsigned ArgNo = CB->getArgOperandNo(&U);
1493           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1494               *this, IRPosition::callsite_argument(*CB, ArgNo),
1495               DepClassTy::REQUIRED);
1496           Changed = translateAndAddCalleeState(
1497                         A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
1498                     Changed;
1499           return true;
1500         }
1501         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1502                           << "\n");
1503         // TODO: Allow some call uses
1504         return false;
1505       }
1506 
1507       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1508       return false;
1509     };
1510     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1511       if (OffsetInfoMap.count(NewU))
1512         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1513       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1514       return true;
1515     };
1516     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1517                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1518                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
1519       return indicatePessimisticFixpoint();
1520 
1521     LLVM_DEBUG({
1522       dbgs() << "Accesses by bin after update:\n";
1523       for (auto &It : AccessBins) {
1524         dbgs() << "[" << It.first.getOffset() << "-"
1525                << It.first.getOffset() + It.first.getSize()
1526                << "] : " << It.getSecond()->size() << "\n";
1527         for (auto &Acc : *It.getSecond()) {
1528           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1529                  << "\n";
1530           if (Acc.getLocalInst() != Acc.getRemoteInst())
1531             dbgs() << "     -->                         "
1532                    << *Acc.getRemoteInst() << "\n";
1533           if (!Acc.isWrittenValueYetUndetermined()) {
1534             if (Acc.getWrittenValue())
1535               dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
1536             else
1537               dbgs() << "       - c: <unknown>\n";
1538           }
1539         }
1540       }
1541     });
1542 
1543     return Changed;
1544   }
1545 
1546   /// See AbstractAttribute::trackStatistics()
1547   void trackStatistics() const override {
1548     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1549   }
1550 };
1551 
1552 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1553   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1554       : AAPointerInfoImpl(IRP, A) {}
1555 
1556   /// See AbstractAttribute::updateImpl(...).
1557   ChangeStatus updateImpl(Attributor &A) override {
1558     return indicatePessimisticFixpoint();
1559   }
1560 
1561   /// See AbstractAttribute::trackStatistics()
1562   void trackStatistics() const override {
1563     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1564   }
1565 };
1566 
1567 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1568   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1569       : AAPointerInfoFloating(IRP, A) {}
1570 
1571   /// See AbstractAttribute::initialize(...).
1572   void initialize(Attributor &A) override {
1573     AAPointerInfoFloating::initialize(A);
1574     if (getAnchorScope()->isDeclaration())
1575       indicatePessimisticFixpoint();
1576   }
1577 
1578   /// See AbstractAttribute::trackStatistics()
1579   void trackStatistics() const override {
1580     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1581   }
1582 };
1583 
1584 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1585   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1586       : AAPointerInfoFloating(IRP, A) {}
1587 
1588   /// See AbstractAttribute::updateImpl(...).
1589   ChangeStatus updateImpl(Attributor &A) override {
1590     using namespace AA::PointerInfo;
1591     // We handle memory intrinsics explicitly, at least the first (=
1592     // destination) and second (=source) arguments as we know how they are
1593     // accessed.
1594     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1595       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1596       int64_t LengthVal = OffsetAndSize::Unknown;
1597       if (Length)
1598         LengthVal = Length->getSExtValue();
1599       Value &Ptr = getAssociatedValue();
1600       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1601       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1602       if (ArgNo == 0) {
1603         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1604                      nullptr, LengthVal);
1605       } else if (ArgNo == 1) {
1606         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1607                      nullptr, LengthVal);
1608       } else {
1609         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1610                           << *MI << "\n");
1611         return indicatePessimisticFixpoint();
1612       }
1613       return Changed;
1614     }
1615 
1616     // TODO: Once we have call site specific value information we can provide
1617     //       call site specific liveness information and then it makes
1618     //       sense to specialize attributes for call sites arguments instead of
1619     //       redirecting requests to the callee argument.
1620     Argument *Arg = getAssociatedArgument();
1621     if (!Arg)
1622       return indicatePessimisticFixpoint();
1623     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1624     auto &ArgAA =
1625         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1626     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1627   }
1628 
1629   /// See AbstractAttribute::trackStatistics()
1630   void trackStatistics() const override {
1631     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1632   }
1633 };
1634 
1635 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1636   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1637       : AAPointerInfoFloating(IRP, A) {}
1638 
1639   /// See AbstractAttribute::trackStatistics()
1640   void trackStatistics() const override {
1641     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1642   }
1643 };
1644 } // namespace
1645 
1646 /// -----------------------NoUnwind Function Attribute--------------------------
1647 
1648 namespace {
1649 struct AANoUnwindImpl : AANoUnwind {
1650   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1651 
1652   const std::string getAsStr() const override {
1653     return getAssumed() ? "nounwind" : "may-unwind";
1654   }
1655 
1656   /// See AbstractAttribute::updateImpl(...).
1657   ChangeStatus updateImpl(Attributor &A) override {
1658     auto Opcodes = {
1659         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1660         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1661         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1662 
1663     auto CheckForNoUnwind = [&](Instruction &I) {
1664       if (!I.mayThrow())
1665         return true;
1666 
1667       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1668         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1669             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1670         return NoUnwindAA.isAssumedNoUnwind();
1671       }
1672       return false;
1673     };
1674 
1675     bool UsedAssumedInformation = false;
1676     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1677                                    UsedAssumedInformation))
1678       return indicatePessimisticFixpoint();
1679 
1680     return ChangeStatus::UNCHANGED;
1681   }
1682 };
1683 
1684 struct AANoUnwindFunction final : public AANoUnwindImpl {
1685   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1686       : AANoUnwindImpl(IRP, A) {}
1687 
1688   /// See AbstractAttribute::trackStatistics()
1689   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1690 };
1691 
1692 /// NoUnwind attribute deduction for a call sites.
1693 struct AANoUnwindCallSite final : AANoUnwindImpl {
1694   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1695       : AANoUnwindImpl(IRP, A) {}
1696 
1697   /// See AbstractAttribute::initialize(...).
1698   void initialize(Attributor &A) override {
1699     AANoUnwindImpl::initialize(A);
1700     Function *F = getAssociatedFunction();
1701     if (!F || F->isDeclaration())
1702       indicatePessimisticFixpoint();
1703   }
1704 
1705   /// See AbstractAttribute::updateImpl(...).
1706   ChangeStatus updateImpl(Attributor &A) override {
1707     // TODO: Once we have call site specific value information we can provide
1708     //       call site specific liveness information and then it makes
1709     //       sense to specialize attributes for call sites arguments instead of
1710     //       redirecting requests to the callee argument.
1711     Function *F = getAssociatedFunction();
1712     const IRPosition &FnPos = IRPosition::function(*F);
1713     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1714     return clampStateAndIndicateChange(getState(), FnAA.getState());
1715   }
1716 
1717   /// See AbstractAttribute::trackStatistics()
1718   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1719 };
1720 } // namespace
1721 
1722 /// --------------------- Function Return Values -------------------------------
1723 
1724 namespace {
1725 /// "Attribute" that collects all potential returned values and the return
1726 /// instructions that they arise from.
1727 ///
1728 /// If there is a unique returned value R, the manifest method will:
1729 ///   - mark R with the "returned" attribute, if R is an argument.
1730 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1731 
1732   /// Mapping of values potentially returned by the associated function to the
1733   /// return instructions that might return them.
1734   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1735 
1736   /// State flags
1737   ///
1738   ///{
1739   bool IsFixed = false;
1740   bool IsValidState = true;
1741   ///}
1742 
1743 public:
1744   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1745       : AAReturnedValues(IRP, A) {}
1746 
1747   /// See AbstractAttribute::initialize(...).
1748   void initialize(Attributor &A) override {
1749     // Reset the state.
1750     IsFixed = false;
1751     IsValidState = true;
1752     ReturnedValues.clear();
1753 
1754     Function *F = getAssociatedFunction();
1755     if (!F || F->isDeclaration()) {
1756       indicatePessimisticFixpoint();
1757       return;
1758     }
1759     assert(!F->getReturnType()->isVoidTy() &&
1760            "Did not expect a void return type!");
1761 
1762     // The map from instruction opcodes to those instructions in the function.
1763     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1764 
1765     // Look through all arguments, if one is marked as returned we are done.
1766     for (Argument &Arg : F->args()) {
1767       if (Arg.hasReturnedAttr()) {
1768         auto &ReturnInstSet = ReturnedValues[&Arg];
1769         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1770           for (Instruction *RI : *Insts)
1771             ReturnInstSet.insert(cast<ReturnInst>(RI));
1772 
1773         indicateOptimisticFixpoint();
1774         return;
1775       }
1776     }
1777 
1778     if (!A.isFunctionIPOAmendable(*F))
1779       indicatePessimisticFixpoint();
1780   }
1781 
1782   /// See AbstractAttribute::manifest(...).
1783   ChangeStatus manifest(Attributor &A) override;
1784 
1785   /// See AbstractAttribute::getState(...).
1786   AbstractState &getState() override { return *this; }
1787 
1788   /// See AbstractAttribute::getState(...).
1789   const AbstractState &getState() const override { return *this; }
1790 
1791   /// See AbstractAttribute::updateImpl(Attributor &A).
1792   ChangeStatus updateImpl(Attributor &A) override;
1793 
1794   llvm::iterator_range<iterator> returned_values() override {
1795     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1796   }
1797 
1798   llvm::iterator_range<const_iterator> returned_values() const override {
1799     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1800   }
1801 
1802   /// Return the number of potential return values, -1 if unknown.
1803   size_t getNumReturnValues() const override {
1804     return isValidState() ? ReturnedValues.size() : -1;
1805   }
1806 
1807   /// Return an assumed unique return value if a single candidate is found. If
1808   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1809   /// Optional::NoneType.
1810   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1811 
1812   /// See AbstractState::checkForAllReturnedValues(...).
1813   bool checkForAllReturnedValuesAndReturnInsts(
1814       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1815       const override;
1816 
1817   /// Pretty print the attribute similar to the IR representation.
1818   const std::string getAsStr() const override;
1819 
1820   /// See AbstractState::isAtFixpoint().
1821   bool isAtFixpoint() const override { return IsFixed; }
1822 
1823   /// See AbstractState::isValidState().
1824   bool isValidState() const override { return IsValidState; }
1825 
1826   /// See AbstractState::indicateOptimisticFixpoint(...).
1827   ChangeStatus indicateOptimisticFixpoint() override {
1828     IsFixed = true;
1829     return ChangeStatus::UNCHANGED;
1830   }
1831 
1832   ChangeStatus indicatePessimisticFixpoint() override {
1833     IsFixed = true;
1834     IsValidState = false;
1835     return ChangeStatus::CHANGED;
1836   }
1837 };
1838 
1839 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1840   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1841 
1842   // Bookkeeping.
1843   assert(isValidState());
1844   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1845                   "Number of function with known return values");
1846 
1847   // Check if we have an assumed unique return value that we could manifest.
1848   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1849 
1850   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1851     return Changed;
1852 
1853   // Bookkeeping.
1854   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1855                   "Number of function with unique return");
1856   // If the assumed unique return value is an argument, annotate it.
1857   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1858     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1859             getAssociatedFunction()->getReturnType())) {
1860       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1861       Changed = IRAttribute::manifest(A);
1862     }
1863   }
1864   return Changed;
1865 }
1866 
1867 const std::string AAReturnedValuesImpl::getAsStr() const {
1868   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1869          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1870 }
1871 
1872 Optional<Value *>
1873 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1874   // If checkForAllReturnedValues provides a unique value, ignoring potential
1875   // undef values that can also be present, it is assumed to be the actual
1876   // return value and forwarded to the caller of this method. If there are
1877   // multiple, a nullptr is returned indicating there cannot be a unique
1878   // returned value.
1879   Optional<Value *> UniqueRV;
1880   Type *Ty = getAssociatedFunction()->getReturnType();
1881 
1882   auto Pred = [&](Value &RV) -> bool {
1883     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1884     return UniqueRV != Optional<Value *>(nullptr);
1885   };
1886 
1887   if (!A.checkForAllReturnedValues(Pred, *this))
1888     UniqueRV = nullptr;
1889 
1890   return UniqueRV;
1891 }
1892 
1893 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1894     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1895     const {
1896   if (!isValidState())
1897     return false;
1898 
1899   // Check all returned values but ignore call sites as long as we have not
1900   // encountered an overdefined one during an update.
1901   for (auto &It : ReturnedValues) {
1902     Value *RV = It.first;
1903     if (!Pred(*RV, It.second))
1904       return false;
1905   }
1906 
1907   return true;
1908 }
1909 
1910 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1911   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1912 
1913   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1914                            bool) -> bool {
1915     assert(AA::isValidInScope(V, Ret.getFunction()) &&
1916            "Assumed returned value should be valid in function scope!");
1917     if (ReturnedValues[&V].insert(&Ret))
1918       Changed = ChangeStatus::CHANGED;
1919     return true;
1920   };
1921 
1922   bool UsedAssumedInformation = false;
1923   auto ReturnInstCB = [&](Instruction &I) {
1924     ReturnInst &Ret = cast<ReturnInst>(I);
1925     return genericValueTraversal<ReturnInst>(
1926         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1927         &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1928         /* MaxValues */ 16,
1929         /* StripCB */ nullptr, /* Intraprocedural */ true);
1930   };
1931 
1932   // Discover returned values from all live returned instructions in the
1933   // associated function.
1934   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1935                                  UsedAssumedInformation))
1936     return indicatePessimisticFixpoint();
1937   return Changed;
1938 }
1939 
1940 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1941   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1942       : AAReturnedValuesImpl(IRP, A) {}
1943 
1944   /// See AbstractAttribute::trackStatistics()
1945   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1946 };
1947 
1948 /// Returned values information for a call sites.
1949 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1950   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1951       : AAReturnedValuesImpl(IRP, A) {}
1952 
1953   /// See AbstractAttribute::initialize(...).
1954   void initialize(Attributor &A) override {
1955     // TODO: Once we have call site specific value information we can provide
1956     //       call site specific liveness information and then it makes
1957     //       sense to specialize attributes for call sites instead of
1958     //       redirecting requests to the callee.
1959     llvm_unreachable("Abstract attributes for returned values are not "
1960                      "supported for call sites yet!");
1961   }
1962 
1963   /// See AbstractAttribute::updateImpl(...).
1964   ChangeStatus updateImpl(Attributor &A) override {
1965     return indicatePessimisticFixpoint();
1966   }
1967 
1968   /// See AbstractAttribute::trackStatistics()
1969   void trackStatistics() const override {}
1970 };
1971 } // namespace
1972 
1973 /// ------------------------ NoSync Function Attribute -------------------------
1974 
1975 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1976   if (!I->isAtomic())
1977     return false;
1978 
1979   if (auto *FI = dyn_cast<FenceInst>(I))
1980     // All legal orderings for fence are stronger than monotonic.
1981     return FI->getSyncScopeID() != SyncScope::SingleThread;
1982   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1983     // Unordered is not a legal ordering for cmpxchg.
1984     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1985             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1986   }
1987 
1988   AtomicOrdering Ordering;
1989   switch (I->getOpcode()) {
1990   case Instruction::AtomicRMW:
1991     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1992     break;
1993   case Instruction::Store:
1994     Ordering = cast<StoreInst>(I)->getOrdering();
1995     break;
1996   case Instruction::Load:
1997     Ordering = cast<LoadInst>(I)->getOrdering();
1998     break;
1999   default:
2000     llvm_unreachable(
2001         "New atomic operations need to be known in the attributor.");
2002   }
2003 
2004   return (Ordering != AtomicOrdering::Unordered &&
2005           Ordering != AtomicOrdering::Monotonic);
2006 }
2007 
2008 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2009 /// which would be nosync except that they have a volatile flag.  All other
2010 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2011 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2012   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2013     return !MI->isVolatile();
2014   return false;
2015 }
2016 
2017 namespace {
2018 struct AANoSyncImpl : AANoSync {
2019   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2020 
2021   const std::string getAsStr() const override {
2022     return getAssumed() ? "nosync" : "may-sync";
2023   }
2024 
2025   /// See AbstractAttribute::updateImpl(...).
2026   ChangeStatus updateImpl(Attributor &A) override;
2027 };
2028 
2029 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2030 
2031   auto CheckRWInstForNoSync = [&](Instruction &I) {
2032     return AA::isNoSyncInst(A, I, *this);
2033   };
2034 
2035   auto CheckForNoSync = [&](Instruction &I) {
2036     // At this point we handled all read/write effects and they are all
2037     // nosync, so they can be skipped.
2038     if (I.mayReadOrWriteMemory())
2039       return true;
2040 
2041     // non-convergent and readnone imply nosync.
2042     return !cast<CallBase>(I).isConvergent();
2043   };
2044 
2045   bool UsedAssumedInformation = false;
2046   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2047                                           UsedAssumedInformation) ||
2048       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2049                                          UsedAssumedInformation))
2050     return indicatePessimisticFixpoint();
2051 
2052   return ChangeStatus::UNCHANGED;
2053 }
2054 
2055 struct AANoSyncFunction final : public AANoSyncImpl {
2056   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2057       : AANoSyncImpl(IRP, A) {}
2058 
2059   /// See AbstractAttribute::trackStatistics()
2060   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2061 };
2062 
2063 /// NoSync attribute deduction for a call sites.
2064 struct AANoSyncCallSite final : AANoSyncImpl {
2065   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2066       : AANoSyncImpl(IRP, A) {}
2067 
2068   /// See AbstractAttribute::initialize(...).
2069   void initialize(Attributor &A) override {
2070     AANoSyncImpl::initialize(A);
2071     Function *F = getAssociatedFunction();
2072     if (!F || F->isDeclaration())
2073       indicatePessimisticFixpoint();
2074   }
2075 
2076   /// See AbstractAttribute::updateImpl(...).
2077   ChangeStatus updateImpl(Attributor &A) override {
2078     // TODO: Once we have call site specific value information we can provide
2079     //       call site specific liveness information and then it makes
2080     //       sense to specialize attributes for call sites arguments instead of
2081     //       redirecting requests to the callee argument.
2082     Function *F = getAssociatedFunction();
2083     const IRPosition &FnPos = IRPosition::function(*F);
2084     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2085     return clampStateAndIndicateChange(getState(), FnAA.getState());
2086   }
2087 
2088   /// See AbstractAttribute::trackStatistics()
2089   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2090 };
2091 } // namespace
2092 
2093 /// ------------------------ No-Free Attributes ----------------------------
2094 
2095 namespace {
2096 struct AANoFreeImpl : public AANoFree {
2097   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2098 
2099   /// See AbstractAttribute::updateImpl(...).
2100   ChangeStatus updateImpl(Attributor &A) override {
2101     auto CheckForNoFree = [&](Instruction &I) {
2102       const auto &CB = cast<CallBase>(I);
2103       if (CB.hasFnAttr(Attribute::NoFree))
2104         return true;
2105 
2106       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2107           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2108       return NoFreeAA.isAssumedNoFree();
2109     };
2110 
2111     bool UsedAssumedInformation = false;
2112     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2113                                            UsedAssumedInformation))
2114       return indicatePessimisticFixpoint();
2115     return ChangeStatus::UNCHANGED;
2116   }
2117 
2118   /// See AbstractAttribute::getAsStr().
2119   const std::string getAsStr() const override {
2120     return getAssumed() ? "nofree" : "may-free";
2121   }
2122 };
2123 
2124 struct AANoFreeFunction final : public AANoFreeImpl {
2125   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2126       : AANoFreeImpl(IRP, A) {}
2127 
2128   /// See AbstractAttribute::trackStatistics()
2129   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2130 };
2131 
2132 /// NoFree attribute deduction for a call sites.
2133 struct AANoFreeCallSite final : AANoFreeImpl {
2134   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2135       : AANoFreeImpl(IRP, A) {}
2136 
2137   /// See AbstractAttribute::initialize(...).
2138   void initialize(Attributor &A) override {
2139     AANoFreeImpl::initialize(A);
2140     Function *F = getAssociatedFunction();
2141     if (!F || F->isDeclaration())
2142       indicatePessimisticFixpoint();
2143   }
2144 
2145   /// See AbstractAttribute::updateImpl(...).
2146   ChangeStatus updateImpl(Attributor &A) override {
2147     // TODO: Once we have call site specific value information we can provide
2148     //       call site specific liveness information and then it makes
2149     //       sense to specialize attributes for call sites arguments instead of
2150     //       redirecting requests to the callee argument.
2151     Function *F = getAssociatedFunction();
2152     const IRPosition &FnPos = IRPosition::function(*F);
2153     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2154     return clampStateAndIndicateChange(getState(), FnAA.getState());
2155   }
2156 
2157   /// See AbstractAttribute::trackStatistics()
2158   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2159 };
2160 
2161 /// NoFree attribute for floating values.
2162 struct AANoFreeFloating : AANoFreeImpl {
2163   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2164       : AANoFreeImpl(IRP, A) {}
2165 
2166   /// See AbstractAttribute::trackStatistics()
2167   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2168 
2169   /// See Abstract Attribute::updateImpl(...).
2170   ChangeStatus updateImpl(Attributor &A) override {
2171     const IRPosition &IRP = getIRPosition();
2172 
2173     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2174         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2175     if (NoFreeAA.isAssumedNoFree())
2176       return ChangeStatus::UNCHANGED;
2177 
2178     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2179     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2180       Instruction *UserI = cast<Instruction>(U.getUser());
2181       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2182         if (CB->isBundleOperand(&U))
2183           return false;
2184         if (!CB->isArgOperand(&U))
2185           return true;
2186         unsigned ArgNo = CB->getArgOperandNo(&U);
2187 
2188         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2189             *this, IRPosition::callsite_argument(*CB, ArgNo),
2190             DepClassTy::REQUIRED);
2191         return NoFreeArg.isAssumedNoFree();
2192       }
2193 
2194       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2195           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2196         Follow = true;
2197         return true;
2198       }
2199       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2200           isa<ReturnInst>(UserI))
2201         return true;
2202 
2203       // Unknown user.
2204       return false;
2205     };
2206     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2207       return indicatePessimisticFixpoint();
2208 
2209     return ChangeStatus::UNCHANGED;
2210   }
2211 };
2212 
2213 /// NoFree attribute for a call site argument.
2214 struct AANoFreeArgument final : AANoFreeFloating {
2215   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2216       : AANoFreeFloating(IRP, A) {}
2217 
2218   /// See AbstractAttribute::trackStatistics()
2219   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2220 };
2221 
2222 /// NoFree attribute for call site arguments.
2223 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2224   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2225       : AANoFreeFloating(IRP, A) {}
2226 
2227   /// See AbstractAttribute::updateImpl(...).
2228   ChangeStatus updateImpl(Attributor &A) override {
2229     // TODO: Once we have call site specific value information we can provide
2230     //       call site specific liveness information and then it makes
2231     //       sense to specialize attributes for call sites arguments instead of
2232     //       redirecting requests to the callee argument.
2233     Argument *Arg = getAssociatedArgument();
2234     if (!Arg)
2235       return indicatePessimisticFixpoint();
2236     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2237     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2238     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2239   }
2240 
2241   /// See AbstractAttribute::trackStatistics()
2242   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2243 };
2244 
2245 /// NoFree attribute for function return value.
2246 struct AANoFreeReturned final : AANoFreeFloating {
2247   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2248       : AANoFreeFloating(IRP, A) {
2249     llvm_unreachable("NoFree is not applicable to function returns!");
2250   }
2251 
2252   /// See AbstractAttribute::initialize(...).
2253   void initialize(Attributor &A) override {
2254     llvm_unreachable("NoFree is not applicable to function returns!");
2255   }
2256 
2257   /// See AbstractAttribute::updateImpl(...).
2258   ChangeStatus updateImpl(Attributor &A) override {
2259     llvm_unreachable("NoFree is not applicable to function returns!");
2260   }
2261 
2262   /// See AbstractAttribute::trackStatistics()
2263   void trackStatistics() const override {}
2264 };
2265 
2266 /// NoFree attribute deduction for a call site return value.
2267 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2268   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2269       : AANoFreeFloating(IRP, A) {}
2270 
2271   ChangeStatus manifest(Attributor &A) override {
2272     return ChangeStatus::UNCHANGED;
2273   }
2274   /// See AbstractAttribute::trackStatistics()
2275   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2276 };
2277 } // namespace
2278 
2279 /// ------------------------ NonNull Argument Attribute ------------------------
2280 namespace {
2281 static int64_t getKnownNonNullAndDerefBytesForUse(
2282     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2283     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2284   TrackUse = false;
2285 
2286   const Value *UseV = U->get();
2287   if (!UseV->getType()->isPointerTy())
2288     return 0;
2289 
2290   // We need to follow common pointer manipulation uses to the accesses they
2291   // feed into. We can try to be smart to avoid looking through things we do not
2292   // like for now, e.g., non-inbounds GEPs.
2293   if (isa<CastInst>(I)) {
2294     TrackUse = true;
2295     return 0;
2296   }
2297 
2298   if (isa<GetElementPtrInst>(I)) {
2299     TrackUse = true;
2300     return 0;
2301   }
2302 
2303   Type *PtrTy = UseV->getType();
2304   const Function *F = I->getFunction();
2305   bool NullPointerIsDefined =
2306       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2307   const DataLayout &DL = A.getInfoCache().getDL();
2308   if (const auto *CB = dyn_cast<CallBase>(I)) {
2309     if (CB->isBundleOperand(U)) {
2310       if (RetainedKnowledge RK = getKnowledgeFromUse(
2311               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2312         IsNonNull |=
2313             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2314         return RK.ArgValue;
2315       }
2316       return 0;
2317     }
2318 
2319     if (CB->isCallee(U)) {
2320       IsNonNull |= !NullPointerIsDefined;
2321       return 0;
2322     }
2323 
2324     unsigned ArgNo = CB->getArgOperandNo(U);
2325     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2326     // As long as we only use known information there is no need to track
2327     // dependences here.
2328     auto &DerefAA =
2329         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2330     IsNonNull |= DerefAA.isKnownNonNull();
2331     return DerefAA.getKnownDereferenceableBytes();
2332   }
2333 
2334   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2335   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2336     return 0;
2337 
2338   int64_t Offset;
2339   const Value *Base =
2340       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2341   if (Base && Base == &AssociatedValue) {
2342     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2343     IsNonNull |= !NullPointerIsDefined;
2344     return std::max(int64_t(0), DerefBytes);
2345   }
2346 
2347   /// Corner case when an offset is 0.
2348   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2349                                           /*AllowNonInbounds*/ true);
2350   if (Base && Base == &AssociatedValue && Offset == 0) {
2351     int64_t DerefBytes = Loc->Size.getValue();
2352     IsNonNull |= !NullPointerIsDefined;
2353     return std::max(int64_t(0), DerefBytes);
2354   }
2355 
2356   return 0;
2357 }
2358 
2359 struct AANonNullImpl : AANonNull {
2360   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2361       : AANonNull(IRP, A),
2362         NullIsDefined(NullPointerIsDefined(
2363             getAnchorScope(),
2364             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2365 
2366   /// See AbstractAttribute::initialize(...).
2367   void initialize(Attributor &A) override {
2368     Value &V = getAssociatedValue();
2369     if (!NullIsDefined &&
2370         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2371                 /* IgnoreSubsumingPositions */ false, &A)) {
2372       indicateOptimisticFixpoint();
2373       return;
2374     }
2375 
2376     if (isa<ConstantPointerNull>(V)) {
2377       indicatePessimisticFixpoint();
2378       return;
2379     }
2380 
2381     AANonNull::initialize(A);
2382 
2383     bool CanBeNull, CanBeFreed;
2384     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2385                                          CanBeFreed)) {
2386       if (!CanBeNull) {
2387         indicateOptimisticFixpoint();
2388         return;
2389       }
2390     }
2391 
2392     if (isa<GlobalValue>(&getAssociatedValue())) {
2393       indicatePessimisticFixpoint();
2394       return;
2395     }
2396 
2397     if (Instruction *CtxI = getCtxI())
2398       followUsesInMBEC(*this, A, getState(), *CtxI);
2399   }
2400 
2401   /// See followUsesInMBEC
2402   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2403                        AANonNull::StateType &State) {
2404     bool IsNonNull = false;
2405     bool TrackUse = false;
2406     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2407                                        IsNonNull, TrackUse);
2408     State.setKnown(IsNonNull);
2409     return TrackUse;
2410   }
2411 
2412   /// See AbstractAttribute::getAsStr().
2413   const std::string getAsStr() const override {
2414     return getAssumed() ? "nonnull" : "may-null";
2415   }
2416 
2417   /// Flag to determine if the underlying value can be null and still allow
2418   /// valid accesses.
2419   const bool NullIsDefined;
2420 };
2421 
2422 /// NonNull attribute for a floating value.
2423 struct AANonNullFloating : public AANonNullImpl {
2424   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2425       : AANonNullImpl(IRP, A) {}
2426 
2427   /// See AbstractAttribute::updateImpl(...).
2428   ChangeStatus updateImpl(Attributor &A) override {
2429     const DataLayout &DL = A.getDataLayout();
2430 
2431     DominatorTree *DT = nullptr;
2432     AssumptionCache *AC = nullptr;
2433     InformationCache &InfoCache = A.getInfoCache();
2434     if (const Function *Fn = getAnchorScope()) {
2435       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2436       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2437     }
2438 
2439     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2440                             AANonNull::StateType &T, bool Stripped) -> bool {
2441       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2442                                              DepClassTy::REQUIRED);
2443       if (!Stripped && this == &AA) {
2444         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2445           T.indicatePessimisticFixpoint();
2446       } else {
2447         // Use abstract attribute information.
2448         const AANonNull::StateType &NS = AA.getState();
2449         T ^= NS;
2450       }
2451       return T.isValidState();
2452     };
2453 
2454     StateType T;
2455     bool UsedAssumedInformation = false;
2456     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2457                                           VisitValueCB, getCtxI(),
2458                                           UsedAssumedInformation))
2459       return indicatePessimisticFixpoint();
2460 
2461     return clampStateAndIndicateChange(getState(), T);
2462   }
2463 
2464   /// See AbstractAttribute::trackStatistics()
2465   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2466 };
2467 
2468 /// NonNull attribute for function return value.
2469 struct AANonNullReturned final
2470     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2471   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2472       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2473 
2474   /// See AbstractAttribute::getAsStr().
2475   const std::string getAsStr() const override {
2476     return getAssumed() ? "nonnull" : "may-null";
2477   }
2478 
2479   /// See AbstractAttribute::trackStatistics()
2480   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2481 };
2482 
2483 /// NonNull attribute for function argument.
2484 struct AANonNullArgument final
2485     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2486   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2487       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2488 
2489   /// See AbstractAttribute::trackStatistics()
2490   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2491 };
2492 
2493 struct AANonNullCallSiteArgument final : AANonNullFloating {
2494   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2495       : AANonNullFloating(IRP, A) {}
2496 
2497   /// See AbstractAttribute::trackStatistics()
2498   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2499 };
2500 
2501 /// NonNull attribute for a call site return position.
2502 struct AANonNullCallSiteReturned final
2503     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2504   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2505       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2506 
2507   /// See AbstractAttribute::trackStatistics()
2508   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2509 };
2510 } // namespace
2511 
2512 /// ------------------------ No-Recurse Attributes ----------------------------
2513 
2514 namespace {
2515 struct AANoRecurseImpl : public AANoRecurse {
2516   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2517 
2518   /// See AbstractAttribute::getAsStr()
2519   const std::string getAsStr() const override {
2520     return getAssumed() ? "norecurse" : "may-recurse";
2521   }
2522 };
2523 
2524 struct AANoRecurseFunction final : AANoRecurseImpl {
2525   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2526       : AANoRecurseImpl(IRP, A) {}
2527 
2528   /// See AbstractAttribute::updateImpl(...).
2529   ChangeStatus updateImpl(Attributor &A) override {
2530 
2531     // If all live call sites are known to be no-recurse, we are as well.
2532     auto CallSitePred = [&](AbstractCallSite ACS) {
2533       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2534           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2535           DepClassTy::NONE);
2536       return NoRecurseAA.isKnownNoRecurse();
2537     };
2538     bool UsedAssumedInformation = false;
2539     if (A.checkForAllCallSites(CallSitePred, *this, true,
2540                                UsedAssumedInformation)) {
2541       // If we know all call sites and all are known no-recurse, we are done.
2542       // If all known call sites, which might not be all that exist, are known
2543       // to be no-recurse, we are not done but we can continue to assume
2544       // no-recurse. If one of the call sites we have not visited will become
2545       // live, another update is triggered.
2546       if (!UsedAssumedInformation)
2547         indicateOptimisticFixpoint();
2548       return ChangeStatus::UNCHANGED;
2549     }
2550 
2551     const AAFunctionReachability &EdgeReachability =
2552         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2553                                            DepClassTy::REQUIRED);
2554     if (EdgeReachability.canReach(A, *getAnchorScope()))
2555       return indicatePessimisticFixpoint();
2556     return ChangeStatus::UNCHANGED;
2557   }
2558 
2559   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2560 };
2561 
2562 /// NoRecurse attribute deduction for a call sites.
2563 struct AANoRecurseCallSite final : AANoRecurseImpl {
2564   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2565       : AANoRecurseImpl(IRP, A) {}
2566 
2567   /// See AbstractAttribute::initialize(...).
2568   void initialize(Attributor &A) override {
2569     AANoRecurseImpl::initialize(A);
2570     Function *F = getAssociatedFunction();
2571     if (!F || F->isDeclaration())
2572       indicatePessimisticFixpoint();
2573   }
2574 
2575   /// See AbstractAttribute::updateImpl(...).
2576   ChangeStatus updateImpl(Attributor &A) override {
2577     // TODO: Once we have call site specific value information we can provide
2578     //       call site specific liveness information and then it makes
2579     //       sense to specialize attributes for call sites arguments instead of
2580     //       redirecting requests to the callee argument.
2581     Function *F = getAssociatedFunction();
2582     const IRPosition &FnPos = IRPosition::function(*F);
2583     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2584     return clampStateAndIndicateChange(getState(), FnAA.getState());
2585   }
2586 
2587   /// See AbstractAttribute::trackStatistics()
2588   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2589 };
2590 } // namespace
2591 
2592 /// -------------------- Undefined-Behavior Attributes ------------------------
2593 
2594 namespace {
2595 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2596   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2597       : AAUndefinedBehavior(IRP, A) {}
2598 
2599   /// See AbstractAttribute::updateImpl(...).
2600   // through a pointer (i.e. also branches etc.)
2601   ChangeStatus updateImpl(Attributor &A) override {
2602     const size_t UBPrevSize = KnownUBInsts.size();
2603     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2604 
2605     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2606       // Lang ref now states volatile store is not UB, let's skip them.
2607       if (I.isVolatile() && I.mayWriteToMemory())
2608         return true;
2609 
2610       // Skip instructions that are already saved.
2611       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2612         return true;
2613 
2614       // If we reach here, we know we have an instruction
2615       // that accesses memory through a pointer operand,
2616       // for which getPointerOperand() should give it to us.
2617       Value *PtrOp =
2618           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2619       assert(PtrOp &&
2620              "Expected pointer operand of memory accessing instruction");
2621 
2622       // Either we stopped and the appropriate action was taken,
2623       // or we got back a simplified value to continue.
2624       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2625       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2626         return true;
2627       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2628 
2629       // A memory access through a pointer is considered UB
2630       // only if the pointer has constant null value.
2631       // TODO: Expand it to not only check constant values.
2632       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2633         AssumedNoUBInsts.insert(&I);
2634         return true;
2635       }
2636       const Type *PtrTy = PtrOpVal->getType();
2637 
2638       // Because we only consider instructions inside functions,
2639       // assume that a parent function exists.
2640       const Function *F = I.getFunction();
2641 
2642       // A memory access using constant null pointer is only considered UB
2643       // if null pointer is _not_ defined for the target platform.
2644       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2645         AssumedNoUBInsts.insert(&I);
2646       else
2647         KnownUBInsts.insert(&I);
2648       return true;
2649     };
2650 
2651     auto InspectBrInstForUB = [&](Instruction &I) {
2652       // A conditional branch instruction is considered UB if it has `undef`
2653       // condition.
2654 
2655       // Skip instructions that are already saved.
2656       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2657         return true;
2658 
2659       // We know we have a branch instruction.
2660       auto *BrInst = cast<BranchInst>(&I);
2661 
2662       // Unconditional branches are never considered UB.
2663       if (BrInst->isUnconditional())
2664         return true;
2665 
2666       // Either we stopped and the appropriate action was taken,
2667       // or we got back a simplified value to continue.
2668       Optional<Value *> SimplifiedCond =
2669           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2670       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2671         return true;
2672       AssumedNoUBInsts.insert(&I);
2673       return true;
2674     };
2675 
2676     auto InspectCallSiteForUB = [&](Instruction &I) {
2677       // Check whether a callsite always cause UB or not
2678 
2679       // Skip instructions that are already saved.
2680       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2681         return true;
2682 
2683       // Check nonnull and noundef argument attribute violation for each
2684       // callsite.
2685       CallBase &CB = cast<CallBase>(I);
2686       Function *Callee = CB.getCalledFunction();
2687       if (!Callee)
2688         return true;
2689       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2690         // If current argument is known to be simplified to null pointer and the
2691         // corresponding argument position is known to have nonnull attribute,
2692         // the argument is poison. Furthermore, if the argument is poison and
2693         // the position is known to have noundef attriubte, this callsite is
2694         // considered UB.
2695         if (idx >= Callee->arg_size())
2696           break;
2697         Value *ArgVal = CB.getArgOperand(idx);
2698         if (!ArgVal)
2699           continue;
2700         // Here, we handle three cases.
2701         //   (1) Not having a value means it is dead. (we can replace the value
2702         //       with undef)
2703         //   (2) Simplified to undef. The argument violate noundef attriubte.
2704         //   (3) Simplified to null pointer where known to be nonnull.
2705         //       The argument is a poison value and violate noundef attribute.
2706         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2707         auto &NoUndefAA =
2708             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2709         if (!NoUndefAA.isKnownNoUndef())
2710           continue;
2711         bool UsedAssumedInformation = false;
2712         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2713             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2714         if (UsedAssumedInformation)
2715           continue;
2716         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2717           return true;
2718         if (!SimplifiedVal.hasValue() ||
2719             isa<UndefValue>(*SimplifiedVal.getValue())) {
2720           KnownUBInsts.insert(&I);
2721           continue;
2722         }
2723         if (!ArgVal->getType()->isPointerTy() ||
2724             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2725           continue;
2726         auto &NonNullAA =
2727             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2728         if (NonNullAA.isKnownNonNull())
2729           KnownUBInsts.insert(&I);
2730       }
2731       return true;
2732     };
2733 
2734     auto InspectReturnInstForUB = [&](Instruction &I) {
2735       auto &RI = cast<ReturnInst>(I);
2736       // Either we stopped and the appropriate action was taken,
2737       // or we got back a simplified return value to continue.
2738       Optional<Value *> SimplifiedRetValue =
2739           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2740       if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
2741         return true;
2742 
2743       // Check if a return instruction always cause UB or not
2744       // Note: It is guaranteed that the returned position of the anchor
2745       //       scope has noundef attribute when this is called.
2746       //       We also ensure the return position is not "assumed dead"
2747       //       because the returned value was then potentially simplified to
2748       //       `undef` in AAReturnedValues without removing the `noundef`
2749       //       attribute yet.
2750 
2751       // When the returned position has noundef attriubte, UB occurs in the
2752       // following cases.
2753       //   (1) Returned value is known to be undef.
2754       //   (2) The value is known to be a null pointer and the returned
2755       //       position has nonnull attribute (because the returned value is
2756       //       poison).
2757       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2758         auto &NonNullAA = A.getAAFor<AANonNull>(
2759             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2760         if (NonNullAA.isKnownNonNull())
2761           KnownUBInsts.insert(&I);
2762       }
2763 
2764       return true;
2765     };
2766 
2767     bool UsedAssumedInformation = false;
2768     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2769                               {Instruction::Load, Instruction::Store,
2770                                Instruction::AtomicCmpXchg,
2771                                Instruction::AtomicRMW},
2772                               UsedAssumedInformation,
2773                               /* CheckBBLivenessOnly */ true);
2774     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2775                               UsedAssumedInformation,
2776                               /* CheckBBLivenessOnly */ true);
2777     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2778                                       UsedAssumedInformation);
2779 
2780     // If the returned position of the anchor scope has noundef attriubte, check
2781     // all returned instructions.
2782     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2783       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2784       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2785         auto &RetPosNoUndefAA =
2786             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2787         if (RetPosNoUndefAA.isKnownNoUndef())
2788           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2789                                     {Instruction::Ret}, UsedAssumedInformation,
2790                                     /* CheckBBLivenessOnly */ true);
2791       }
2792     }
2793 
2794     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2795         UBPrevSize != KnownUBInsts.size())
2796       return ChangeStatus::CHANGED;
2797     return ChangeStatus::UNCHANGED;
2798   }
2799 
2800   bool isKnownToCauseUB(Instruction *I) const override {
2801     return KnownUBInsts.count(I);
2802   }
2803 
2804   bool isAssumedToCauseUB(Instruction *I) const override {
2805     // In simple words, if an instruction is not in the assumed to _not_
2806     // cause UB, then it is assumed UB (that includes those
2807     // in the KnownUBInsts set). The rest is boilerplate
2808     // is to ensure that it is one of the instructions we test
2809     // for UB.
2810 
2811     switch (I->getOpcode()) {
2812     case Instruction::Load:
2813     case Instruction::Store:
2814     case Instruction::AtomicCmpXchg:
2815     case Instruction::AtomicRMW:
2816       return !AssumedNoUBInsts.count(I);
2817     case Instruction::Br: {
2818       auto *BrInst = cast<BranchInst>(I);
2819       if (BrInst->isUnconditional())
2820         return false;
2821       return !AssumedNoUBInsts.count(I);
2822     } break;
2823     default:
2824       return false;
2825     }
2826     return false;
2827   }
2828 
2829   ChangeStatus manifest(Attributor &A) override {
2830     if (KnownUBInsts.empty())
2831       return ChangeStatus::UNCHANGED;
2832     for (Instruction *I : KnownUBInsts)
2833       A.changeToUnreachableAfterManifest(I);
2834     return ChangeStatus::CHANGED;
2835   }
2836 
2837   /// See AbstractAttribute::getAsStr()
2838   const std::string getAsStr() const override {
2839     return getAssumed() ? "undefined-behavior" : "no-ub";
2840   }
2841 
2842   /// Note: The correctness of this analysis depends on the fact that the
2843   /// following 2 sets will stop changing after some point.
2844   /// "Change" here means that their size changes.
2845   /// The size of each set is monotonically increasing
2846   /// (we only add items to them) and it is upper bounded by the number of
2847   /// instructions in the processed function (we can never save more
2848   /// elements in either set than this number). Hence, at some point,
2849   /// they will stop increasing.
2850   /// Consequently, at some point, both sets will have stopped
2851   /// changing, effectively making the analysis reach a fixpoint.
2852 
2853   /// Note: These 2 sets are disjoint and an instruction can be considered
2854   /// one of 3 things:
2855   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2856   ///    the KnownUBInsts set.
2857   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2858   ///    has a reason to assume it).
2859   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2860   ///    could not find a reason to assume or prove that it can cause UB,
2861   ///    hence it assumes it doesn't. We have a set for these instructions
2862   ///    so that we don't reprocess them in every update.
2863   ///    Note however that instructions in this set may cause UB.
2864 
2865 protected:
2866   /// A set of all live instructions _known_ to cause UB.
2867   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2868 
2869 private:
2870   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2871   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2872 
2873   // Should be called on updates in which if we're processing an instruction
2874   // \p I that depends on a value \p V, one of the following has to happen:
2875   // - If the value is assumed, then stop.
2876   // - If the value is known but undef, then consider it UB.
2877   // - Otherwise, do specific processing with the simplified value.
2878   // We return None in the first 2 cases to signify that an appropriate
2879   // action was taken and the caller should stop.
2880   // Otherwise, we return the simplified value that the caller should
2881   // use for specific processing.
2882   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2883                                          Instruction *I) {
2884     bool UsedAssumedInformation = false;
2885     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2886         IRPosition::value(*V), *this, UsedAssumedInformation);
2887     if (!UsedAssumedInformation) {
2888       // Don't depend on assumed values.
2889       if (!SimplifiedV.hasValue()) {
2890         // If it is known (which we tested above) but it doesn't have a value,
2891         // then we can assume `undef` and hence the instruction is UB.
2892         KnownUBInsts.insert(I);
2893         return llvm::None;
2894       }
2895       if (!SimplifiedV.getValue())
2896         return nullptr;
2897       V = *SimplifiedV;
2898     }
2899     if (isa<UndefValue>(V)) {
2900       KnownUBInsts.insert(I);
2901       return llvm::None;
2902     }
2903     return V;
2904   }
2905 };
2906 
2907 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2908   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2909       : AAUndefinedBehaviorImpl(IRP, A) {}
2910 
2911   /// See AbstractAttribute::trackStatistics()
2912   void trackStatistics() const override {
2913     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2914                "Number of instructions known to have UB");
2915     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2916         KnownUBInsts.size();
2917   }
2918 };
2919 } // namespace
2920 
2921 /// ------------------------ Will-Return Attributes ----------------------------
2922 
2923 namespace {
2924 // Helper function that checks whether a function has any cycle which we don't
2925 // know if it is bounded or not.
2926 // Loops with maximum trip count are considered bounded, any other cycle not.
2927 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2928   ScalarEvolution *SE =
2929       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2930   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2931   // If either SCEV or LoopInfo is not available for the function then we assume
2932   // any cycle to be unbounded cycle.
2933   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2934   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2935   if (!SE || !LI) {
2936     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2937       if (SCCI.hasCycle())
2938         return true;
2939     return false;
2940   }
2941 
2942   // If there's irreducible control, the function may contain non-loop cycles.
2943   if (mayContainIrreducibleControl(F, LI))
2944     return true;
2945 
2946   // Any loop that does not have a max trip count is considered unbounded cycle.
2947   for (auto *L : LI->getLoopsInPreorder()) {
2948     if (!SE->getSmallConstantMaxTripCount(L))
2949       return true;
2950   }
2951   return false;
2952 }
2953 
2954 struct AAWillReturnImpl : public AAWillReturn {
2955   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2956       : AAWillReturn(IRP, A) {}
2957 
2958   /// See AbstractAttribute::initialize(...).
2959   void initialize(Attributor &A) override {
2960     AAWillReturn::initialize(A);
2961 
2962     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2963       indicateOptimisticFixpoint();
2964       return;
2965     }
2966   }
2967 
2968   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2969   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2970     // Check for `mustprogress` in the scope and the associated function which
2971     // might be different if this is a call site.
2972     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2973         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2974       return false;
2975 
2976     bool IsKnown;
2977     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2978       return IsKnown || !KnownOnly;
2979     return false;
2980   }
2981 
2982   /// See AbstractAttribute::updateImpl(...).
2983   ChangeStatus updateImpl(Attributor &A) override {
2984     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2985       return ChangeStatus::UNCHANGED;
2986 
2987     auto CheckForWillReturn = [&](Instruction &I) {
2988       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2989       const auto &WillReturnAA =
2990           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2991       if (WillReturnAA.isKnownWillReturn())
2992         return true;
2993       if (!WillReturnAA.isAssumedWillReturn())
2994         return false;
2995       const auto &NoRecurseAA =
2996           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2997       return NoRecurseAA.isAssumedNoRecurse();
2998     };
2999 
3000     bool UsedAssumedInformation = false;
3001     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3002                                            UsedAssumedInformation))
3003       return indicatePessimisticFixpoint();
3004 
3005     return ChangeStatus::UNCHANGED;
3006   }
3007 
3008   /// See AbstractAttribute::getAsStr()
3009   const std::string getAsStr() const override {
3010     return getAssumed() ? "willreturn" : "may-noreturn";
3011   }
3012 };
3013 
3014 struct AAWillReturnFunction final : AAWillReturnImpl {
3015   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3016       : AAWillReturnImpl(IRP, A) {}
3017 
3018   /// See AbstractAttribute::initialize(...).
3019   void initialize(Attributor &A) override {
3020     AAWillReturnImpl::initialize(A);
3021 
3022     Function *F = getAnchorScope();
3023     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3024       indicatePessimisticFixpoint();
3025   }
3026 
3027   /// See AbstractAttribute::trackStatistics()
3028   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3029 };
3030 
3031 /// WillReturn attribute deduction for a call sites.
3032 struct AAWillReturnCallSite final : AAWillReturnImpl {
3033   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3034       : AAWillReturnImpl(IRP, A) {}
3035 
3036   /// See AbstractAttribute::initialize(...).
3037   void initialize(Attributor &A) override {
3038     AAWillReturnImpl::initialize(A);
3039     Function *F = getAssociatedFunction();
3040     if (!F || !A.isFunctionIPOAmendable(*F))
3041       indicatePessimisticFixpoint();
3042   }
3043 
3044   /// See AbstractAttribute::updateImpl(...).
3045   ChangeStatus updateImpl(Attributor &A) override {
3046     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3047       return ChangeStatus::UNCHANGED;
3048 
3049     // TODO: Once we have call site specific value information we can provide
3050     //       call site specific liveness information and then it makes
3051     //       sense to specialize attributes for call sites arguments instead of
3052     //       redirecting requests to the callee argument.
3053     Function *F = getAssociatedFunction();
3054     const IRPosition &FnPos = IRPosition::function(*F);
3055     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3056     return clampStateAndIndicateChange(getState(), FnAA.getState());
3057   }
3058 
3059   /// See AbstractAttribute::trackStatistics()
3060   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3061 };
3062 } // namespace
3063 
3064 /// -------------------AAReachability Attribute--------------------------
3065 
3066 namespace {
3067 struct AAReachabilityImpl : AAReachability {
3068   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3069       : AAReachability(IRP, A) {}
3070 
3071   const std::string getAsStr() const override {
3072     // TODO: Return the number of reachable queries.
3073     return "reachable";
3074   }
3075 
3076   /// See AbstractAttribute::updateImpl(...).
3077   ChangeStatus updateImpl(Attributor &A) override {
3078     return ChangeStatus::UNCHANGED;
3079   }
3080 };
3081 
3082 struct AAReachabilityFunction final : public AAReachabilityImpl {
3083   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3084       : AAReachabilityImpl(IRP, A) {}
3085 
3086   /// See AbstractAttribute::trackStatistics()
3087   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3088 };
3089 } // namespace
3090 
3091 /// ------------------------ NoAlias Argument Attribute ------------------------
3092 
3093 namespace {
3094 struct AANoAliasImpl : AANoAlias {
3095   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3096     assert(getAssociatedType()->isPointerTy() &&
3097            "Noalias is a pointer attribute");
3098   }
3099 
3100   const std::string getAsStr() const override {
3101     return getAssumed() ? "noalias" : "may-alias";
3102   }
3103 };
3104 
3105 /// NoAlias attribute for a floating value.
3106 struct AANoAliasFloating final : AANoAliasImpl {
3107   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3108       : AANoAliasImpl(IRP, A) {}
3109 
3110   /// See AbstractAttribute::initialize(...).
3111   void initialize(Attributor &A) override {
3112     AANoAliasImpl::initialize(A);
3113     Value *Val = &getAssociatedValue();
3114     do {
3115       CastInst *CI = dyn_cast<CastInst>(Val);
3116       if (!CI)
3117         break;
3118       Value *Base = CI->getOperand(0);
3119       if (!Base->hasOneUse())
3120         break;
3121       Val = Base;
3122     } while (true);
3123 
3124     if (!Val->getType()->isPointerTy()) {
3125       indicatePessimisticFixpoint();
3126       return;
3127     }
3128 
3129     if (isa<AllocaInst>(Val))
3130       indicateOptimisticFixpoint();
3131     else if (isa<ConstantPointerNull>(Val) &&
3132              !NullPointerIsDefined(getAnchorScope(),
3133                                    Val->getType()->getPointerAddressSpace()))
3134       indicateOptimisticFixpoint();
3135     else if (Val != &getAssociatedValue()) {
3136       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3137           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3138       if (ValNoAliasAA.isKnownNoAlias())
3139         indicateOptimisticFixpoint();
3140     }
3141   }
3142 
3143   /// See AbstractAttribute::updateImpl(...).
3144   ChangeStatus updateImpl(Attributor &A) override {
3145     // TODO: Implement this.
3146     return indicatePessimisticFixpoint();
3147   }
3148 
3149   /// See AbstractAttribute::trackStatistics()
3150   void trackStatistics() const override {
3151     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3152   }
3153 };
3154 
3155 /// NoAlias attribute for an argument.
3156 struct AANoAliasArgument final
3157     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3158   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3159   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3160 
3161   /// See AbstractAttribute::initialize(...).
3162   void initialize(Attributor &A) override {
3163     Base::initialize(A);
3164     // See callsite argument attribute and callee argument attribute.
3165     if (hasAttr({Attribute::ByVal}))
3166       indicateOptimisticFixpoint();
3167   }
3168 
3169   /// See AbstractAttribute::update(...).
3170   ChangeStatus updateImpl(Attributor &A) override {
3171     // We have to make sure no-alias on the argument does not break
3172     // synchronization when this is a callback argument, see also [1] below.
3173     // If synchronization cannot be affected, we delegate to the base updateImpl
3174     // function, otherwise we give up for now.
3175 
3176     // If the function is no-sync, no-alias cannot break synchronization.
3177     const auto &NoSyncAA =
3178         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3179                              DepClassTy::OPTIONAL);
3180     if (NoSyncAA.isAssumedNoSync())
3181       return Base::updateImpl(A);
3182 
3183     // If the argument is read-only, no-alias cannot break synchronization.
3184     bool IsKnown;
3185     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3186       return Base::updateImpl(A);
3187 
3188     // If the argument is never passed through callbacks, no-alias cannot break
3189     // synchronization.
3190     bool UsedAssumedInformation = false;
3191     if (A.checkForAllCallSites(
3192             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3193             true, UsedAssumedInformation))
3194       return Base::updateImpl(A);
3195 
3196     // TODO: add no-alias but make sure it doesn't break synchronization by
3197     // introducing fake uses. See:
3198     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3199     //     International Workshop on OpenMP 2018,
3200     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3201 
3202     return indicatePessimisticFixpoint();
3203   }
3204 
3205   /// See AbstractAttribute::trackStatistics()
3206   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3207 };
3208 
3209 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3210   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3211       : AANoAliasImpl(IRP, A) {}
3212 
3213   /// See AbstractAttribute::initialize(...).
3214   void initialize(Attributor &A) override {
3215     // See callsite argument attribute and callee argument attribute.
3216     const auto &CB = cast<CallBase>(getAnchorValue());
3217     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3218       indicateOptimisticFixpoint();
3219     Value &Val = getAssociatedValue();
3220     if (isa<ConstantPointerNull>(Val) &&
3221         !NullPointerIsDefined(getAnchorScope(),
3222                               Val.getType()->getPointerAddressSpace()))
3223       indicateOptimisticFixpoint();
3224   }
3225 
3226   /// Determine if the underlying value may alias with the call site argument
3227   /// \p OtherArgNo of \p ICS (= the underlying call site).
3228   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3229                             const AAMemoryBehavior &MemBehaviorAA,
3230                             const CallBase &CB, unsigned OtherArgNo) {
3231     // We do not need to worry about aliasing with the underlying IRP.
3232     if (this->getCalleeArgNo() == (int)OtherArgNo)
3233       return false;
3234 
3235     // If it is not a pointer or pointer vector we do not alias.
3236     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3237     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3238       return false;
3239 
3240     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3241         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3242 
3243     // If the argument is readnone, there is no read-write aliasing.
3244     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3245       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3246       return false;
3247     }
3248 
3249     // If the argument is readonly and the underlying value is readonly, there
3250     // is no read-write aliasing.
3251     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3252     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3253       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3254       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3255       return false;
3256     }
3257 
3258     // We have to utilize actual alias analysis queries so we need the object.
3259     if (!AAR)
3260       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3261 
3262     // Try to rule it out at the call site.
3263     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3264     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3265                          "callsite arguments: "
3266                       << getAssociatedValue() << " " << *ArgOp << " => "
3267                       << (IsAliasing ? "" : "no-") << "alias \n");
3268 
3269     return IsAliasing;
3270   }
3271 
3272   bool
3273   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3274                                          const AAMemoryBehavior &MemBehaviorAA,
3275                                          const AANoAlias &NoAliasAA) {
3276     // We can deduce "noalias" if the following conditions hold.
3277     // (i)   Associated value is assumed to be noalias in the definition.
3278     // (ii)  Associated value is assumed to be no-capture in all the uses
3279     //       possibly executed before this callsite.
3280     // (iii) There is no other pointer argument which could alias with the
3281     //       value.
3282 
3283     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3284     if (!AssociatedValueIsNoAliasAtDef) {
3285       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3286                         << " is not no-alias at the definition\n");
3287       return false;
3288     }
3289 
3290     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3291 
3292     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3293     const Function *ScopeFn = VIRP.getAnchorScope();
3294     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3295     // Check whether the value is captured in the scope using AANoCapture.
3296     //      Look at CFG and check only uses possibly executed before this
3297     //      callsite.
3298     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3299       Instruction *UserI = cast<Instruction>(U.getUser());
3300 
3301       // If UserI is the curr instruction and there is a single potential use of
3302       // the value in UserI we allow the use.
3303       // TODO: We should inspect the operands and allow those that cannot alias
3304       //       with the value.
3305       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3306         return true;
3307 
3308       if (ScopeFn) {
3309         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3310           if (CB->isArgOperand(&U)) {
3311 
3312             unsigned ArgNo = CB->getArgOperandNo(&U);
3313 
3314             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3315                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3316                 DepClassTy::OPTIONAL);
3317 
3318             if (NoCaptureAA.isAssumedNoCapture())
3319               return true;
3320           }
3321         }
3322 
3323         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3324           return true;
3325       }
3326 
3327       // For cases which can potentially have more users
3328       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3329           isa<SelectInst>(U)) {
3330         Follow = true;
3331         return true;
3332       }
3333 
3334       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3335       return false;
3336     };
3337 
3338     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3339       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3340         LLVM_DEBUG(
3341             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3342                    << " cannot be noalias as it is potentially captured\n");
3343         return false;
3344       }
3345     }
3346     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3347 
3348     // Check there is no other pointer argument which could alias with the
3349     // value passed at this call site.
3350     // TODO: AbstractCallSite
3351     const auto &CB = cast<CallBase>(getAnchorValue());
3352     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3353       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3354         return false;
3355 
3356     return true;
3357   }
3358 
3359   /// See AbstractAttribute::updateImpl(...).
3360   ChangeStatus updateImpl(Attributor &A) override {
3361     // If the argument is readnone we are done as there are no accesses via the
3362     // argument.
3363     auto &MemBehaviorAA =
3364         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3365     if (MemBehaviorAA.isAssumedReadNone()) {
3366       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3367       return ChangeStatus::UNCHANGED;
3368     }
3369 
3370     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3371     const auto &NoAliasAA =
3372         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3373 
3374     AAResults *AAR = nullptr;
3375     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3376                                                NoAliasAA)) {
3377       LLVM_DEBUG(
3378           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3379       return ChangeStatus::UNCHANGED;
3380     }
3381 
3382     return indicatePessimisticFixpoint();
3383   }
3384 
3385   /// See AbstractAttribute::trackStatistics()
3386   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3387 };
3388 
3389 /// NoAlias attribute for function return value.
3390 struct AANoAliasReturned final : AANoAliasImpl {
3391   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3392       : AANoAliasImpl(IRP, A) {}
3393 
3394   /// See AbstractAttribute::initialize(...).
3395   void initialize(Attributor &A) override {
3396     AANoAliasImpl::initialize(A);
3397     Function *F = getAssociatedFunction();
3398     if (!F || F->isDeclaration())
3399       indicatePessimisticFixpoint();
3400   }
3401 
3402   /// See AbstractAttribute::updateImpl(...).
3403   virtual ChangeStatus updateImpl(Attributor &A) override {
3404 
3405     auto CheckReturnValue = [&](Value &RV) -> bool {
3406       if (Constant *C = dyn_cast<Constant>(&RV))
3407         if (C->isNullValue() || isa<UndefValue>(C))
3408           return true;
3409 
3410       /// For now, we can only deduce noalias if we have call sites.
3411       /// FIXME: add more support.
3412       if (!isa<CallBase>(&RV))
3413         return false;
3414 
3415       const IRPosition &RVPos = IRPosition::value(RV);
3416       const auto &NoAliasAA =
3417           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3418       if (!NoAliasAA.isAssumedNoAlias())
3419         return false;
3420 
3421       const auto &NoCaptureAA =
3422           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3423       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3424     };
3425 
3426     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3427       return indicatePessimisticFixpoint();
3428 
3429     return ChangeStatus::UNCHANGED;
3430   }
3431 
3432   /// See AbstractAttribute::trackStatistics()
3433   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3434 };
3435 
3436 /// NoAlias attribute deduction for a call site return value.
3437 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3438   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3439       : AANoAliasImpl(IRP, A) {}
3440 
3441   /// See AbstractAttribute::initialize(...).
3442   void initialize(Attributor &A) override {
3443     AANoAliasImpl::initialize(A);
3444     Function *F = getAssociatedFunction();
3445     if (!F || F->isDeclaration())
3446       indicatePessimisticFixpoint();
3447   }
3448 
3449   /// See AbstractAttribute::updateImpl(...).
3450   ChangeStatus updateImpl(Attributor &A) override {
3451     // TODO: Once we have call site specific value information we can provide
3452     //       call site specific liveness information and then it makes
3453     //       sense to specialize attributes for call sites arguments instead of
3454     //       redirecting requests to the callee argument.
3455     Function *F = getAssociatedFunction();
3456     const IRPosition &FnPos = IRPosition::returned(*F);
3457     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3458     return clampStateAndIndicateChange(getState(), FnAA.getState());
3459   }
3460 
3461   /// See AbstractAttribute::trackStatistics()
3462   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3463 };
3464 } // namespace
3465 
3466 /// -------------------AAIsDead Function Attribute-----------------------
3467 
3468 namespace {
3469 struct AAIsDeadValueImpl : public AAIsDead {
3470   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3471 
3472   /// See AbstractAttribute::initialize(...).
3473   void initialize(Attributor &A) override {
3474     if (auto *Scope = getAnchorScope())
3475       if (!A.isRunOn(*Scope))
3476         indicatePessimisticFixpoint();
3477   }
3478 
3479   /// See AAIsDead::isAssumedDead().
3480   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3481 
3482   /// See AAIsDead::isKnownDead().
3483   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3484 
3485   /// See AAIsDead::isAssumedDead(BasicBlock *).
3486   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3487 
3488   /// See AAIsDead::isKnownDead(BasicBlock *).
3489   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3490 
3491   /// See AAIsDead::isAssumedDead(Instruction *I).
3492   bool isAssumedDead(const Instruction *I) const override {
3493     return I == getCtxI() && isAssumedDead();
3494   }
3495 
3496   /// See AAIsDead::isKnownDead(Instruction *I).
3497   bool isKnownDead(const Instruction *I) const override {
3498     return isAssumedDead(I) && isKnownDead();
3499   }
3500 
3501   /// See AbstractAttribute::getAsStr().
3502   virtual const std::string getAsStr() const override {
3503     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3504   }
3505 
3506   /// Check if all uses are assumed dead.
3507   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3508     // Callers might not check the type, void has no uses.
3509     if (V.getType()->isVoidTy() || V.use_empty())
3510       return true;
3511 
3512     // If we replace a value with a constant there are no uses left afterwards.
3513     if (!isa<Constant>(V)) {
3514       if (auto *I = dyn_cast<Instruction>(&V))
3515         if (!A.isRunOn(*I->getFunction()))
3516           return false;
3517       bool UsedAssumedInformation = false;
3518       Optional<Constant *> C =
3519           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3520       if (!C.hasValue() || *C)
3521         return true;
3522     }
3523 
3524     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3525     // Explicitly set the dependence class to required because we want a long
3526     // chain of N dependent instructions to be considered live as soon as one is
3527     // without going through N update cycles. This is not required for
3528     // correctness.
3529     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3530                              DepClassTy::REQUIRED,
3531                              /* IgnoreDroppableUses */ false);
3532   }
3533 
3534   /// Determine if \p I is assumed to be side-effect free.
3535   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3536     if (!I || wouldInstructionBeTriviallyDead(I))
3537       return true;
3538 
3539     auto *CB = dyn_cast<CallBase>(I);
3540     if (!CB || isa<IntrinsicInst>(CB))
3541       return false;
3542 
3543     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3544     const auto &NoUnwindAA =
3545         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3546     if (!NoUnwindAA.isAssumedNoUnwind())
3547       return false;
3548     if (!NoUnwindAA.isKnownNoUnwind())
3549       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3550 
3551     bool IsKnown;
3552     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3553   }
3554 };
3555 
3556 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3557   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3558       : AAIsDeadValueImpl(IRP, A) {}
3559 
3560   /// See AbstractAttribute::initialize(...).
3561   void initialize(Attributor &A) override {
3562     AAIsDeadValueImpl::initialize(A);
3563 
3564     if (isa<UndefValue>(getAssociatedValue())) {
3565       indicatePessimisticFixpoint();
3566       return;
3567     }
3568 
3569     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3570     if (!isAssumedSideEffectFree(A, I)) {
3571       if (!isa_and_nonnull<StoreInst>(I))
3572         indicatePessimisticFixpoint();
3573       else
3574         removeAssumedBits(HAS_NO_EFFECT);
3575     }
3576   }
3577 
3578   bool isDeadStore(Attributor &A, StoreInst &SI) {
3579     // Lang ref now states volatile store is not UB/dead, let's skip them.
3580     if (SI.isVolatile())
3581       return false;
3582 
3583     bool UsedAssumedInformation = false;
3584     SmallSetVector<Value *, 4> PotentialCopies;
3585     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3586                                              UsedAssumedInformation))
3587       return false;
3588     return llvm::all_of(PotentialCopies, [&](Value *V) {
3589       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3590                              UsedAssumedInformation);
3591     });
3592   }
3593 
3594   /// See AbstractAttribute::getAsStr().
3595   const std::string getAsStr() const override {
3596     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3597     if (isa_and_nonnull<StoreInst>(I))
3598       if (isValidState())
3599         return "assumed-dead-store";
3600     return AAIsDeadValueImpl::getAsStr();
3601   }
3602 
3603   /// See AbstractAttribute::updateImpl(...).
3604   ChangeStatus updateImpl(Attributor &A) override {
3605     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3606     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3607       if (!isDeadStore(A, *SI))
3608         return indicatePessimisticFixpoint();
3609     } else {
3610       if (!isAssumedSideEffectFree(A, I))
3611         return indicatePessimisticFixpoint();
3612       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3613         return indicatePessimisticFixpoint();
3614     }
3615     return ChangeStatus::UNCHANGED;
3616   }
3617 
3618   bool isRemovableStore() const override {
3619     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3620   }
3621 
3622   /// See AbstractAttribute::manifest(...).
3623   ChangeStatus manifest(Attributor &A) override {
3624     Value &V = getAssociatedValue();
3625     if (auto *I = dyn_cast<Instruction>(&V)) {
3626       // If we get here we basically know the users are all dead. We check if
3627       // isAssumedSideEffectFree returns true here again because it might not be
3628       // the case and only the users are dead but the instruction (=call) is
3629       // still needed.
3630       if (isa<StoreInst>(I) ||
3631           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3632         A.deleteAfterManifest(*I);
3633         return ChangeStatus::CHANGED;
3634       }
3635     }
3636     return ChangeStatus::UNCHANGED;
3637   }
3638 
3639   /// See AbstractAttribute::trackStatistics()
3640   void trackStatistics() const override {
3641     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3642   }
3643 };
3644 
3645 struct AAIsDeadArgument : public AAIsDeadFloating {
3646   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3647       : AAIsDeadFloating(IRP, A) {}
3648 
3649   /// See AbstractAttribute::initialize(...).
3650   void initialize(Attributor &A) override {
3651     AAIsDeadFloating::initialize(A);
3652     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3653       indicatePessimisticFixpoint();
3654   }
3655 
3656   /// See AbstractAttribute::manifest(...).
3657   ChangeStatus manifest(Attributor &A) override {
3658     Argument &Arg = *getAssociatedArgument();
3659     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3660       if (A.registerFunctionSignatureRewrite(
3661               Arg, /* ReplacementTypes */ {},
3662               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3663               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3664         return ChangeStatus::CHANGED;
3665       }
3666     return ChangeStatus::UNCHANGED;
3667   }
3668 
3669   /// See AbstractAttribute::trackStatistics()
3670   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3671 };
3672 
3673 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3674   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3675       : AAIsDeadValueImpl(IRP, A) {}
3676 
3677   /// See AbstractAttribute::initialize(...).
3678   void initialize(Attributor &A) override {
3679     AAIsDeadValueImpl::initialize(A);
3680     if (isa<UndefValue>(getAssociatedValue()))
3681       indicatePessimisticFixpoint();
3682   }
3683 
3684   /// See AbstractAttribute::updateImpl(...).
3685   ChangeStatus updateImpl(Attributor &A) override {
3686     // TODO: Once we have call site specific value information we can provide
3687     //       call site specific liveness information and then it makes
3688     //       sense to specialize attributes for call sites arguments instead of
3689     //       redirecting requests to the callee argument.
3690     Argument *Arg = getAssociatedArgument();
3691     if (!Arg)
3692       return indicatePessimisticFixpoint();
3693     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3694     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3695     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3696   }
3697 
3698   /// See AbstractAttribute::manifest(...).
3699   ChangeStatus manifest(Attributor &A) override {
3700     CallBase &CB = cast<CallBase>(getAnchorValue());
3701     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3702     assert(!isa<UndefValue>(U.get()) &&
3703            "Expected undef values to be filtered out!");
3704     UndefValue &UV = *UndefValue::get(U->getType());
3705     if (A.changeUseAfterManifest(U, UV))
3706       return ChangeStatus::CHANGED;
3707     return ChangeStatus::UNCHANGED;
3708   }
3709 
3710   /// See AbstractAttribute::trackStatistics()
3711   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3712 };
3713 
3714 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3715   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3716       : AAIsDeadFloating(IRP, A) {}
3717 
3718   /// See AAIsDead::isAssumedDead().
3719   bool isAssumedDead() const override {
3720     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3721   }
3722 
3723   /// See AbstractAttribute::initialize(...).
3724   void initialize(Attributor &A) override {
3725     AAIsDeadFloating::initialize(A);
3726     if (isa<UndefValue>(getAssociatedValue())) {
3727       indicatePessimisticFixpoint();
3728       return;
3729     }
3730 
3731     // We track this separately as a secondary state.
3732     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3733   }
3734 
3735   /// See AbstractAttribute::updateImpl(...).
3736   ChangeStatus updateImpl(Attributor &A) override {
3737     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3738     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3739       IsAssumedSideEffectFree = false;
3740       Changed = ChangeStatus::CHANGED;
3741     }
3742     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3743       return indicatePessimisticFixpoint();
3744     return Changed;
3745   }
3746 
3747   /// See AbstractAttribute::trackStatistics()
3748   void trackStatistics() const override {
3749     if (IsAssumedSideEffectFree)
3750       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3751     else
3752       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3753   }
3754 
3755   /// See AbstractAttribute::getAsStr().
3756   const std::string getAsStr() const override {
3757     return isAssumedDead()
3758                ? "assumed-dead"
3759                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3760   }
3761 
3762 private:
3763   bool IsAssumedSideEffectFree = true;
3764 };
3765 
3766 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3767   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3768       : AAIsDeadValueImpl(IRP, A) {}
3769 
3770   /// See AbstractAttribute::updateImpl(...).
3771   ChangeStatus updateImpl(Attributor &A) override {
3772 
3773     bool UsedAssumedInformation = false;
3774     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3775                               {Instruction::Ret}, UsedAssumedInformation);
3776 
3777     auto PredForCallSite = [&](AbstractCallSite ACS) {
3778       if (ACS.isCallbackCall() || !ACS.getInstruction())
3779         return false;
3780       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3781     };
3782 
3783     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3784                                 UsedAssumedInformation))
3785       return indicatePessimisticFixpoint();
3786 
3787     return ChangeStatus::UNCHANGED;
3788   }
3789 
3790   /// See AbstractAttribute::manifest(...).
3791   ChangeStatus manifest(Attributor &A) override {
3792     // TODO: Rewrite the signature to return void?
3793     bool AnyChange = false;
3794     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3795     auto RetInstPred = [&](Instruction &I) {
3796       ReturnInst &RI = cast<ReturnInst>(I);
3797       if (!isa<UndefValue>(RI.getReturnValue()))
3798         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3799       return true;
3800     };
3801     bool UsedAssumedInformation = false;
3802     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3803                               UsedAssumedInformation);
3804     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3805   }
3806 
3807   /// See AbstractAttribute::trackStatistics()
3808   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3809 };
3810 
3811 struct AAIsDeadFunction : public AAIsDead {
3812   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3813 
3814   /// See AbstractAttribute::initialize(...).
3815   void initialize(Attributor &A) override {
3816     Function *F = getAnchorScope();
3817     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3818       indicatePessimisticFixpoint();
3819       return;
3820     }
3821     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3822     assumeLive(A, F->getEntryBlock());
3823   }
3824 
3825   /// See AbstractAttribute::getAsStr().
3826   const std::string getAsStr() const override {
3827     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3828            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3829            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3830            std::to_string(KnownDeadEnds.size()) + "]";
3831   }
3832 
3833   /// See AbstractAttribute::manifest(...).
3834   ChangeStatus manifest(Attributor &A) override {
3835     assert(getState().isValidState() &&
3836            "Attempted to manifest an invalid state!");
3837 
3838     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3839     Function &F = *getAnchorScope();
3840 
3841     if (AssumedLiveBlocks.empty()) {
3842       A.deleteAfterManifest(F);
3843       return ChangeStatus::CHANGED;
3844     }
3845 
3846     // Flag to determine if we can change an invoke to a call assuming the
3847     // callee is nounwind. This is not possible if the personality of the
3848     // function allows to catch asynchronous exceptions.
3849     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3850 
3851     KnownDeadEnds.set_union(ToBeExploredFrom);
3852     for (const Instruction *DeadEndI : KnownDeadEnds) {
3853       auto *CB = dyn_cast<CallBase>(DeadEndI);
3854       if (!CB)
3855         continue;
3856       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3857           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3858       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3859       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3860         continue;
3861 
3862       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3863         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3864       else
3865         A.changeToUnreachableAfterManifest(
3866             const_cast<Instruction *>(DeadEndI->getNextNode()));
3867       HasChanged = ChangeStatus::CHANGED;
3868     }
3869 
3870     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3871     for (BasicBlock &BB : F)
3872       if (!AssumedLiveBlocks.count(&BB)) {
3873         A.deleteAfterManifest(BB);
3874         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3875         HasChanged = ChangeStatus::CHANGED;
3876       }
3877 
3878     return HasChanged;
3879   }
3880 
3881   /// See AbstractAttribute::updateImpl(...).
3882   ChangeStatus updateImpl(Attributor &A) override;
3883 
3884   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3885     assert(From->getParent() == getAnchorScope() &&
3886            To->getParent() == getAnchorScope() &&
3887            "Used AAIsDead of the wrong function");
3888     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3889   }
3890 
3891   /// See AbstractAttribute::trackStatistics()
3892   void trackStatistics() const override {}
3893 
3894   /// Returns true if the function is assumed dead.
3895   bool isAssumedDead() const override { return false; }
3896 
3897   /// See AAIsDead::isKnownDead().
3898   bool isKnownDead() const override { return false; }
3899 
3900   /// See AAIsDead::isAssumedDead(BasicBlock *).
3901   bool isAssumedDead(const BasicBlock *BB) const override {
3902     assert(BB->getParent() == getAnchorScope() &&
3903            "BB must be in the same anchor scope function.");
3904 
3905     if (!getAssumed())
3906       return false;
3907     return !AssumedLiveBlocks.count(BB);
3908   }
3909 
3910   /// See AAIsDead::isKnownDead(BasicBlock *).
3911   bool isKnownDead(const BasicBlock *BB) const override {
3912     return getKnown() && isAssumedDead(BB);
3913   }
3914 
3915   /// See AAIsDead::isAssumed(Instruction *I).
3916   bool isAssumedDead(const Instruction *I) const override {
3917     assert(I->getParent()->getParent() == getAnchorScope() &&
3918            "Instruction must be in the same anchor scope function.");
3919 
3920     if (!getAssumed())
3921       return false;
3922 
3923     // If it is not in AssumedLiveBlocks then it for sure dead.
3924     // Otherwise, it can still be after noreturn call in a live block.
3925     if (!AssumedLiveBlocks.count(I->getParent()))
3926       return true;
3927 
3928     // If it is not after a liveness barrier it is live.
3929     const Instruction *PrevI = I->getPrevNode();
3930     while (PrevI) {
3931       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3932         return true;
3933       PrevI = PrevI->getPrevNode();
3934     }
3935     return false;
3936   }
3937 
3938   /// See AAIsDead::isKnownDead(Instruction *I).
3939   bool isKnownDead(const Instruction *I) const override {
3940     return getKnown() && isAssumedDead(I);
3941   }
3942 
3943   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3944   /// that internal function called from \p BB should now be looked at.
3945   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3946     if (!AssumedLiveBlocks.insert(&BB).second)
3947       return false;
3948 
3949     // We assume that all of BB is (probably) live now and if there are calls to
3950     // internal functions we will assume that those are now live as well. This
3951     // is a performance optimization for blocks with calls to a lot of internal
3952     // functions. It can however cause dead functions to be treated as live.
3953     for (const Instruction &I : BB)
3954       if (const auto *CB = dyn_cast<CallBase>(&I))
3955         if (const Function *F = CB->getCalledFunction())
3956           if (F->hasLocalLinkage())
3957             A.markLiveInternalFunction(*F);
3958     return true;
3959   }
3960 
3961   /// Collection of instructions that need to be explored again, e.g., we
3962   /// did assume they do not transfer control to (one of their) successors.
3963   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3964 
3965   /// Collection of instructions that are known to not transfer control.
3966   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3967 
3968   /// Collection of all assumed live edges
3969   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3970 
3971   /// Collection of all assumed live BasicBlocks.
3972   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3973 };
3974 
3975 static bool
3976 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3977                         AbstractAttribute &AA,
3978                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3979   const IRPosition &IPos = IRPosition::callsite_function(CB);
3980 
3981   const auto &NoReturnAA =
3982       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3983   if (NoReturnAA.isAssumedNoReturn())
3984     return !NoReturnAA.isKnownNoReturn();
3985   if (CB.isTerminator())
3986     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3987   else
3988     AliveSuccessors.push_back(CB.getNextNode());
3989   return false;
3990 }
3991 
3992 static bool
3993 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3994                         AbstractAttribute &AA,
3995                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3996   bool UsedAssumedInformation =
3997       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3998 
3999   // First, determine if we can change an invoke to a call assuming the
4000   // callee is nounwind. This is not possible if the personality of the
4001   // function allows to catch asynchronous exceptions.
4002   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4003     AliveSuccessors.push_back(&II.getUnwindDest()->front());
4004   } else {
4005     const IRPosition &IPos = IRPosition::callsite_function(II);
4006     const auto &AANoUnw =
4007         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4008     if (AANoUnw.isAssumedNoUnwind()) {
4009       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4010     } else {
4011       AliveSuccessors.push_back(&II.getUnwindDest()->front());
4012     }
4013   }
4014   return UsedAssumedInformation;
4015 }
4016 
4017 static bool
4018 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4019                         AbstractAttribute &AA,
4020                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4021   bool UsedAssumedInformation = false;
4022   if (BI.getNumSuccessors() == 1) {
4023     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4024   } else {
4025     Optional<Constant *> C =
4026         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4027     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4028       // No value yet, assume both edges are dead.
4029     } else if (isa_and_nonnull<ConstantInt>(*C)) {
4030       const BasicBlock *SuccBB =
4031           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4032       AliveSuccessors.push_back(&SuccBB->front());
4033     } else {
4034       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4035       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4036       UsedAssumedInformation = false;
4037     }
4038   }
4039   return UsedAssumedInformation;
4040 }
4041 
4042 static bool
4043 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4044                         AbstractAttribute &AA,
4045                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4046   bool UsedAssumedInformation = false;
4047   Optional<Constant *> C =
4048       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4049   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4050     // No value yet, assume all edges are dead.
4051   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
4052     for (auto &CaseIt : SI.cases()) {
4053       if (CaseIt.getCaseValue() == C.getValue()) {
4054         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4055         return UsedAssumedInformation;
4056       }
4057     }
4058     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4059     return UsedAssumedInformation;
4060   } else {
4061     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4062       AliveSuccessors.push_back(&SuccBB->front());
4063   }
4064   return UsedAssumedInformation;
4065 }
4066 
4067 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4068   ChangeStatus Change = ChangeStatus::UNCHANGED;
4069 
4070   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4071                     << getAnchorScope()->size() << "] BBs and "
4072                     << ToBeExploredFrom.size() << " exploration points and "
4073                     << KnownDeadEnds.size() << " known dead ends\n");
4074 
4075   // Copy and clear the list of instructions we need to explore from. It is
4076   // refilled with instructions the next update has to look at.
4077   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4078                                                ToBeExploredFrom.end());
4079   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4080 
4081   SmallVector<const Instruction *, 8> AliveSuccessors;
4082   while (!Worklist.empty()) {
4083     const Instruction *I = Worklist.pop_back_val();
4084     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4085 
4086     // Fast forward for uninteresting instructions. We could look for UB here
4087     // though.
4088     while (!I->isTerminator() && !isa<CallBase>(I))
4089       I = I->getNextNode();
4090 
4091     AliveSuccessors.clear();
4092 
4093     bool UsedAssumedInformation = false;
4094     switch (I->getOpcode()) {
4095     // TODO: look for (assumed) UB to backwards propagate "deadness".
4096     default:
4097       assert(I->isTerminator() &&
4098              "Expected non-terminators to be handled already!");
4099       for (const BasicBlock *SuccBB : successors(I->getParent()))
4100         AliveSuccessors.push_back(&SuccBB->front());
4101       break;
4102     case Instruction::Call:
4103       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4104                                                        *this, AliveSuccessors);
4105       break;
4106     case Instruction::Invoke:
4107       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4108                                                        *this, AliveSuccessors);
4109       break;
4110     case Instruction::Br:
4111       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4112                                                        *this, AliveSuccessors);
4113       break;
4114     case Instruction::Switch:
4115       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4116                                                        *this, AliveSuccessors);
4117       break;
4118     }
4119 
4120     if (UsedAssumedInformation) {
4121       NewToBeExploredFrom.insert(I);
4122     } else if (AliveSuccessors.empty() ||
4123                (I->isTerminator() &&
4124                 AliveSuccessors.size() < I->getNumSuccessors())) {
4125       if (KnownDeadEnds.insert(I))
4126         Change = ChangeStatus::CHANGED;
4127     }
4128 
4129     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4130                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4131                       << UsedAssumedInformation << "\n");
4132 
4133     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4134       if (!I->isTerminator()) {
4135         assert(AliveSuccessors.size() == 1 &&
4136                "Non-terminator expected to have a single successor!");
4137         Worklist.push_back(AliveSuccessor);
4138       } else {
4139         // record the assumed live edge
4140         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4141         if (AssumedLiveEdges.insert(Edge).second)
4142           Change = ChangeStatus::CHANGED;
4143         if (assumeLive(A, *AliveSuccessor->getParent()))
4144           Worklist.push_back(AliveSuccessor);
4145       }
4146     }
4147   }
4148 
4149   // Check if the content of ToBeExploredFrom changed, ignore the order.
4150   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4151       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4152         return !ToBeExploredFrom.count(I);
4153       })) {
4154     Change = ChangeStatus::CHANGED;
4155     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4156   }
4157 
4158   // If we know everything is live there is no need to query for liveness.
4159   // Instead, indicating a pessimistic fixpoint will cause the state to be
4160   // "invalid" and all queries to be answered conservatively without lookups.
4161   // To be in this state we have to (1) finished the exploration and (3) not
4162   // discovered any non-trivial dead end and (2) not ruled unreachable code
4163   // dead.
4164   if (ToBeExploredFrom.empty() &&
4165       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4166       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4167         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4168       }))
4169     return indicatePessimisticFixpoint();
4170   return Change;
4171 }
4172 
4173 /// Liveness information for a call sites.
4174 struct AAIsDeadCallSite final : AAIsDeadFunction {
4175   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4176       : AAIsDeadFunction(IRP, A) {}
4177 
4178   /// See AbstractAttribute::initialize(...).
4179   void initialize(Attributor &A) override {
4180     // TODO: Once we have call site specific value information we can provide
4181     //       call site specific liveness information and then it makes
4182     //       sense to specialize attributes for call sites instead of
4183     //       redirecting requests to the callee.
4184     llvm_unreachable("Abstract attributes for liveness are not "
4185                      "supported for call sites yet!");
4186   }
4187 
4188   /// See AbstractAttribute::updateImpl(...).
4189   ChangeStatus updateImpl(Attributor &A) override {
4190     return indicatePessimisticFixpoint();
4191   }
4192 
4193   /// See AbstractAttribute::trackStatistics()
4194   void trackStatistics() const override {}
4195 };
4196 } // namespace
4197 
4198 /// -------------------- Dereferenceable Argument Attribute --------------------
4199 
4200 namespace {
4201 struct AADereferenceableImpl : AADereferenceable {
4202   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4203       : AADereferenceable(IRP, A) {}
4204   using StateType = DerefState;
4205 
4206   /// See AbstractAttribute::initialize(...).
4207   void initialize(Attributor &A) override {
4208     SmallVector<Attribute, 4> Attrs;
4209     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4210              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4211     for (const Attribute &Attr : Attrs)
4212       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4213 
4214     const IRPosition &IRP = this->getIRPosition();
4215     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4216 
4217     bool CanBeNull, CanBeFreed;
4218     takeKnownDerefBytesMaximum(
4219         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4220             A.getDataLayout(), CanBeNull, CanBeFreed));
4221 
4222     bool IsFnInterface = IRP.isFnInterfaceKind();
4223     Function *FnScope = IRP.getAnchorScope();
4224     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4225       indicatePessimisticFixpoint();
4226       return;
4227     }
4228 
4229     if (Instruction *CtxI = getCtxI())
4230       followUsesInMBEC(*this, A, getState(), *CtxI);
4231   }
4232 
4233   /// See AbstractAttribute::getState()
4234   /// {
4235   StateType &getState() override { return *this; }
4236   const StateType &getState() const override { return *this; }
4237   /// }
4238 
4239   /// Helper function for collecting accessed bytes in must-be-executed-context
4240   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4241                               DerefState &State) {
4242     const Value *UseV = U->get();
4243     if (!UseV->getType()->isPointerTy())
4244       return;
4245 
4246     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4247     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4248       return;
4249 
4250     int64_t Offset;
4251     const Value *Base = GetPointerBaseWithConstantOffset(
4252         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4253     if (Base && Base == &getAssociatedValue())
4254       State.addAccessedBytes(Offset, Loc->Size.getValue());
4255   }
4256 
4257   /// See followUsesInMBEC
4258   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4259                        AADereferenceable::StateType &State) {
4260     bool IsNonNull = false;
4261     bool TrackUse = false;
4262     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4263         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4264     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4265                       << " for instruction " << *I << "\n");
4266 
4267     addAccessedBytesForUse(A, U, I, State);
4268     State.takeKnownDerefBytesMaximum(DerefBytes);
4269     return TrackUse;
4270   }
4271 
4272   /// See AbstractAttribute::manifest(...).
4273   ChangeStatus manifest(Attributor &A) override {
4274     ChangeStatus Change = AADereferenceable::manifest(A);
4275     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4276       removeAttrs({Attribute::DereferenceableOrNull});
4277       return ChangeStatus::CHANGED;
4278     }
4279     return Change;
4280   }
4281 
4282   void getDeducedAttributes(LLVMContext &Ctx,
4283                             SmallVectorImpl<Attribute> &Attrs) const override {
4284     // TODO: Add *_globally support
4285     if (isAssumedNonNull())
4286       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4287           Ctx, getAssumedDereferenceableBytes()));
4288     else
4289       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4290           Ctx, getAssumedDereferenceableBytes()));
4291   }
4292 
4293   /// See AbstractAttribute::getAsStr().
4294   const std::string getAsStr() const override {
4295     if (!getAssumedDereferenceableBytes())
4296       return "unknown-dereferenceable";
4297     return std::string("dereferenceable") +
4298            (isAssumedNonNull() ? "" : "_or_null") +
4299            (isAssumedGlobal() ? "_globally" : "") + "<" +
4300            std::to_string(getKnownDereferenceableBytes()) + "-" +
4301            std::to_string(getAssumedDereferenceableBytes()) + ">";
4302   }
4303 };
4304 
4305 /// Dereferenceable attribute for a floating value.
4306 struct AADereferenceableFloating : AADereferenceableImpl {
4307   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4308       : AADereferenceableImpl(IRP, A) {}
4309 
4310   /// See AbstractAttribute::updateImpl(...).
4311   ChangeStatus updateImpl(Attributor &A) override {
4312     const DataLayout &DL = A.getDataLayout();
4313 
4314     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4315                             bool Stripped) -> bool {
4316       unsigned IdxWidth =
4317           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4318       APInt Offset(IdxWidth, 0);
4319       const Value *Base = stripAndAccumulateOffsets(
4320           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4321           /* AllowNonInbounds */ true);
4322 
4323       const auto &AA = A.getAAFor<AADereferenceable>(
4324           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4325       int64_t DerefBytes = 0;
4326       if (!Stripped && this == &AA) {
4327         // Use IR information if we did not strip anything.
4328         // TODO: track globally.
4329         bool CanBeNull, CanBeFreed;
4330         DerefBytes =
4331             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4332         T.GlobalState.indicatePessimisticFixpoint();
4333       } else {
4334         const DerefState &DS = AA.getState();
4335         DerefBytes = DS.DerefBytesState.getAssumed();
4336         T.GlobalState &= DS.GlobalState;
4337       }
4338 
4339       // For now we do not try to "increase" dereferenceability due to negative
4340       // indices as we first have to come up with code to deal with loops and
4341       // for overflows of the dereferenceable bytes.
4342       int64_t OffsetSExt = Offset.getSExtValue();
4343       if (OffsetSExt < 0)
4344         OffsetSExt = 0;
4345 
4346       T.takeAssumedDerefBytesMinimum(
4347           std::max(int64_t(0), DerefBytes - OffsetSExt));
4348 
4349       if (this == &AA) {
4350         if (!Stripped) {
4351           // If nothing was stripped IR information is all we got.
4352           T.takeKnownDerefBytesMaximum(
4353               std::max(int64_t(0), DerefBytes - OffsetSExt));
4354           T.indicatePessimisticFixpoint();
4355         } else if (OffsetSExt > 0) {
4356           // If something was stripped but there is circular reasoning we look
4357           // for the offset. If it is positive we basically decrease the
4358           // dereferenceable bytes in a circluar loop now, which will simply
4359           // drive them down to the known value in a very slow way which we
4360           // can accelerate.
4361           T.indicatePessimisticFixpoint();
4362         }
4363       }
4364 
4365       return T.isValidState();
4366     };
4367 
4368     DerefState T;
4369     bool UsedAssumedInformation = false;
4370     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4371                                            VisitValueCB, getCtxI(),
4372                                            UsedAssumedInformation))
4373       return indicatePessimisticFixpoint();
4374 
4375     return clampStateAndIndicateChange(getState(), T);
4376   }
4377 
4378   /// See AbstractAttribute::trackStatistics()
4379   void trackStatistics() const override {
4380     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4381   }
4382 };
4383 
4384 /// Dereferenceable attribute for a return value.
4385 struct AADereferenceableReturned final
4386     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4387   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4388       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4389             IRP, A) {}
4390 
4391   /// See AbstractAttribute::trackStatistics()
4392   void trackStatistics() const override {
4393     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4394   }
4395 };
4396 
4397 /// Dereferenceable attribute for an argument
4398 struct AADereferenceableArgument final
4399     : AAArgumentFromCallSiteArguments<AADereferenceable,
4400                                       AADereferenceableImpl> {
4401   using Base =
4402       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4403   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4404       : Base(IRP, A) {}
4405 
4406   /// See AbstractAttribute::trackStatistics()
4407   void trackStatistics() const override {
4408     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4409   }
4410 };
4411 
4412 /// Dereferenceable attribute for a call site argument.
4413 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4414   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4415       : AADereferenceableFloating(IRP, A) {}
4416 
4417   /// See AbstractAttribute::trackStatistics()
4418   void trackStatistics() const override {
4419     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4420   }
4421 };
4422 
4423 /// Dereferenceable attribute deduction for a call site return value.
4424 struct AADereferenceableCallSiteReturned final
4425     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4426   using Base =
4427       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4428   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4429       : Base(IRP, A) {}
4430 
4431   /// See AbstractAttribute::trackStatistics()
4432   void trackStatistics() const override {
4433     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4434   }
4435 };
4436 } // namespace
4437 
4438 // ------------------------ Align Argument Attribute ------------------------
4439 
4440 namespace {
4441 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4442                                     Value &AssociatedValue, const Use *U,
4443                                     const Instruction *I, bool &TrackUse) {
4444   // We need to follow common pointer manipulation uses to the accesses they
4445   // feed into.
4446   if (isa<CastInst>(I)) {
4447     // Follow all but ptr2int casts.
4448     TrackUse = !isa<PtrToIntInst>(I);
4449     return 0;
4450   }
4451   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4452     if (GEP->hasAllConstantIndices())
4453       TrackUse = true;
4454     return 0;
4455   }
4456 
4457   MaybeAlign MA;
4458   if (const auto *CB = dyn_cast<CallBase>(I)) {
4459     if (CB->isBundleOperand(U) || CB->isCallee(U))
4460       return 0;
4461 
4462     unsigned ArgNo = CB->getArgOperandNo(U);
4463     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4464     // As long as we only use known information there is no need to track
4465     // dependences here.
4466     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4467     MA = MaybeAlign(AlignAA.getKnownAlign());
4468   }
4469 
4470   const DataLayout &DL = A.getDataLayout();
4471   const Value *UseV = U->get();
4472   if (auto *SI = dyn_cast<StoreInst>(I)) {
4473     if (SI->getPointerOperand() == UseV)
4474       MA = SI->getAlign();
4475   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4476     if (LI->getPointerOperand() == UseV)
4477       MA = LI->getAlign();
4478   }
4479 
4480   if (!MA || *MA <= QueryingAA.getKnownAlign())
4481     return 0;
4482 
4483   unsigned Alignment = MA->value();
4484   int64_t Offset;
4485 
4486   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4487     if (Base == &AssociatedValue) {
4488       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4489       // So we can say that the maximum power of two which is a divisor of
4490       // gcd(Offset, Alignment) is an alignment.
4491 
4492       uint32_t gcd =
4493           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4494       Alignment = llvm::PowerOf2Floor(gcd);
4495     }
4496   }
4497 
4498   return Alignment;
4499 }
4500 
4501 struct AAAlignImpl : AAAlign {
4502   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4503 
4504   /// See AbstractAttribute::initialize(...).
4505   void initialize(Attributor &A) override {
4506     SmallVector<Attribute, 4> Attrs;
4507     getAttrs({Attribute::Alignment}, Attrs);
4508     for (const Attribute &Attr : Attrs)
4509       takeKnownMaximum(Attr.getValueAsInt());
4510 
4511     Value &V = getAssociatedValue();
4512     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4513 
4514     if (getIRPosition().isFnInterfaceKind() &&
4515         (!getAnchorScope() ||
4516          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4517       indicatePessimisticFixpoint();
4518       return;
4519     }
4520 
4521     if (Instruction *CtxI = getCtxI())
4522       followUsesInMBEC(*this, A, getState(), *CtxI);
4523   }
4524 
4525   /// See AbstractAttribute::manifest(...).
4526   ChangeStatus manifest(Attributor &A) override {
4527     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4528 
4529     // Check for users that allow alignment annotations.
4530     Value &AssociatedValue = getAssociatedValue();
4531     for (const Use &U : AssociatedValue.uses()) {
4532       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4533         if (SI->getPointerOperand() == &AssociatedValue)
4534           if (SI->getAlignment() < getAssumedAlign()) {
4535             STATS_DECLTRACK(AAAlign, Store,
4536                             "Number of times alignment added to a store");
4537             SI->setAlignment(Align(getAssumedAlign()));
4538             LoadStoreChanged = ChangeStatus::CHANGED;
4539           }
4540       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4541         if (LI->getPointerOperand() == &AssociatedValue)
4542           if (LI->getAlignment() < getAssumedAlign()) {
4543             LI->setAlignment(Align(getAssumedAlign()));
4544             STATS_DECLTRACK(AAAlign, Load,
4545                             "Number of times alignment added to a load");
4546             LoadStoreChanged = ChangeStatus::CHANGED;
4547           }
4548       }
4549     }
4550 
4551     ChangeStatus Changed = AAAlign::manifest(A);
4552 
4553     Align InheritAlign =
4554         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4555     if (InheritAlign >= getAssumedAlign())
4556       return LoadStoreChanged;
4557     return Changed | LoadStoreChanged;
4558   }
4559 
4560   // TODO: Provide a helper to determine the implied ABI alignment and check in
4561   //       the existing manifest method and a new one for AAAlignImpl that value
4562   //       to avoid making the alignment explicit if it did not improve.
4563 
4564   /// See AbstractAttribute::getDeducedAttributes
4565   virtual void
4566   getDeducedAttributes(LLVMContext &Ctx,
4567                        SmallVectorImpl<Attribute> &Attrs) const override {
4568     if (getAssumedAlign() > 1)
4569       Attrs.emplace_back(
4570           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4571   }
4572 
4573   /// See followUsesInMBEC
4574   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4575                        AAAlign::StateType &State) {
4576     bool TrackUse = false;
4577 
4578     unsigned int KnownAlign =
4579         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4580     State.takeKnownMaximum(KnownAlign);
4581 
4582     return TrackUse;
4583   }
4584 
4585   /// See AbstractAttribute::getAsStr().
4586   const std::string getAsStr() const override {
4587     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4588                                 "-" + std::to_string(getAssumedAlign()) + ">")
4589                              : "unknown-align";
4590   }
4591 };
4592 
4593 /// Align attribute for a floating value.
4594 struct AAAlignFloating : AAAlignImpl {
4595   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4596 
4597   /// See AbstractAttribute::updateImpl(...).
4598   ChangeStatus updateImpl(Attributor &A) override {
4599     const DataLayout &DL = A.getDataLayout();
4600 
4601     auto VisitValueCB = [&](Value &V, const Instruction *,
4602                             AAAlign::StateType &T, bool Stripped) -> bool {
4603       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4604         return true;
4605       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4606                                            DepClassTy::REQUIRED);
4607       if (!Stripped && this == &AA) {
4608         int64_t Offset;
4609         unsigned Alignment = 1;
4610         if (const Value *Base =
4611                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4612           // TODO: Use AAAlign for the base too.
4613           Align PA = Base->getPointerAlignment(DL);
4614           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4615           // So we can say that the maximum power of two which is a divisor of
4616           // gcd(Offset, Alignment) is an alignment.
4617 
4618           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4619                                                uint32_t(PA.value()));
4620           Alignment = llvm::PowerOf2Floor(gcd);
4621         } else {
4622           Alignment = V.getPointerAlignment(DL).value();
4623         }
4624         // Use only IR information if we did not strip anything.
4625         T.takeKnownMaximum(Alignment);
4626         T.indicatePessimisticFixpoint();
4627       } else {
4628         // Use abstract attribute information.
4629         const AAAlign::StateType &DS = AA.getState();
4630         T ^= DS;
4631       }
4632       return T.isValidState();
4633     };
4634 
4635     StateType T;
4636     bool UsedAssumedInformation = false;
4637     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4638                                           VisitValueCB, getCtxI(),
4639                                           UsedAssumedInformation))
4640       return indicatePessimisticFixpoint();
4641 
4642     // TODO: If we know we visited all incoming values, thus no are assumed
4643     // dead, we can take the known information from the state T.
4644     return clampStateAndIndicateChange(getState(), T);
4645   }
4646 
4647   /// See AbstractAttribute::trackStatistics()
4648   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4649 };
4650 
4651 /// Align attribute for function return value.
4652 struct AAAlignReturned final
4653     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4654   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4655   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4656 
4657   /// See AbstractAttribute::initialize(...).
4658   void initialize(Attributor &A) override {
4659     Base::initialize(A);
4660     Function *F = getAssociatedFunction();
4661     if (!F || F->isDeclaration())
4662       indicatePessimisticFixpoint();
4663   }
4664 
4665   /// See AbstractAttribute::trackStatistics()
4666   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4667 };
4668 
4669 /// Align attribute for function argument.
4670 struct AAAlignArgument final
4671     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4672   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4673   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4674 
4675   /// See AbstractAttribute::manifest(...).
4676   ChangeStatus manifest(Attributor &A) override {
4677     // If the associated argument is involved in a must-tail call we give up
4678     // because we would need to keep the argument alignments of caller and
4679     // callee in-sync. Just does not seem worth the trouble right now.
4680     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4681       return ChangeStatus::UNCHANGED;
4682     return Base::manifest(A);
4683   }
4684 
4685   /// See AbstractAttribute::trackStatistics()
4686   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4687 };
4688 
4689 struct AAAlignCallSiteArgument final : AAAlignFloating {
4690   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4691       : AAAlignFloating(IRP, A) {}
4692 
4693   /// See AbstractAttribute::manifest(...).
4694   ChangeStatus manifest(Attributor &A) override {
4695     // If the associated argument is involved in a must-tail call we give up
4696     // because we would need to keep the argument alignments of caller and
4697     // callee in-sync. Just does not seem worth the trouble right now.
4698     if (Argument *Arg = getAssociatedArgument())
4699       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4700         return ChangeStatus::UNCHANGED;
4701     ChangeStatus Changed = AAAlignImpl::manifest(A);
4702     Align InheritAlign =
4703         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4704     if (InheritAlign >= getAssumedAlign())
4705       Changed = ChangeStatus::UNCHANGED;
4706     return Changed;
4707   }
4708 
4709   /// See AbstractAttribute::updateImpl(Attributor &A).
4710   ChangeStatus updateImpl(Attributor &A) override {
4711     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4712     if (Argument *Arg = getAssociatedArgument()) {
4713       // We only take known information from the argument
4714       // so we do not need to track a dependence.
4715       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4716           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4717       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4718     }
4719     return Changed;
4720   }
4721 
4722   /// See AbstractAttribute::trackStatistics()
4723   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4724 };
4725 
4726 /// Align attribute deduction for a call site return value.
4727 struct AAAlignCallSiteReturned final
4728     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4729   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4730   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4731       : Base(IRP, A) {}
4732 
4733   /// See AbstractAttribute::initialize(...).
4734   void initialize(Attributor &A) override {
4735     Base::initialize(A);
4736     Function *F = getAssociatedFunction();
4737     if (!F || F->isDeclaration())
4738       indicatePessimisticFixpoint();
4739   }
4740 
4741   /// See AbstractAttribute::trackStatistics()
4742   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4743 };
4744 } // namespace
4745 
4746 /// ------------------ Function No-Return Attribute ----------------------------
4747 namespace {
4748 struct AANoReturnImpl : public AANoReturn {
4749   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4750 
4751   /// See AbstractAttribute::initialize(...).
4752   void initialize(Attributor &A) override {
4753     AANoReturn::initialize(A);
4754     Function *F = getAssociatedFunction();
4755     if (!F || F->isDeclaration())
4756       indicatePessimisticFixpoint();
4757   }
4758 
4759   /// See AbstractAttribute::getAsStr().
4760   const std::string getAsStr() const override {
4761     return getAssumed() ? "noreturn" : "may-return";
4762   }
4763 
4764   /// See AbstractAttribute::updateImpl(Attributor &A).
4765   virtual ChangeStatus updateImpl(Attributor &A) override {
4766     auto CheckForNoReturn = [](Instruction &) { return false; };
4767     bool UsedAssumedInformation = false;
4768     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4769                                    {(unsigned)Instruction::Ret},
4770                                    UsedAssumedInformation))
4771       return indicatePessimisticFixpoint();
4772     return ChangeStatus::UNCHANGED;
4773   }
4774 };
4775 
4776 struct AANoReturnFunction final : AANoReturnImpl {
4777   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4778       : AANoReturnImpl(IRP, A) {}
4779 
4780   /// See AbstractAttribute::trackStatistics()
4781   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4782 };
4783 
4784 /// NoReturn attribute deduction for a call sites.
4785 struct AANoReturnCallSite final : AANoReturnImpl {
4786   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4787       : AANoReturnImpl(IRP, A) {}
4788 
4789   /// See AbstractAttribute::initialize(...).
4790   void initialize(Attributor &A) override {
4791     AANoReturnImpl::initialize(A);
4792     if (Function *F = getAssociatedFunction()) {
4793       const IRPosition &FnPos = IRPosition::function(*F);
4794       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4795       if (!FnAA.isAssumedNoReturn())
4796         indicatePessimisticFixpoint();
4797     }
4798   }
4799 
4800   /// See AbstractAttribute::updateImpl(...).
4801   ChangeStatus updateImpl(Attributor &A) override {
4802     // TODO: Once we have call site specific value information we can provide
4803     //       call site specific liveness information and then it makes
4804     //       sense to specialize attributes for call sites arguments instead of
4805     //       redirecting requests to the callee argument.
4806     Function *F = getAssociatedFunction();
4807     const IRPosition &FnPos = IRPosition::function(*F);
4808     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4809     return clampStateAndIndicateChange(getState(), FnAA.getState());
4810   }
4811 
4812   /// See AbstractAttribute::trackStatistics()
4813   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4814 };
4815 } // namespace
4816 
4817 /// ----------------------- Instance Info ---------------------------------
4818 
4819 namespace {
4820 /// A class to hold the state of for no-capture attributes.
4821 struct AAInstanceInfoImpl : public AAInstanceInfo {
4822   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4823       : AAInstanceInfo(IRP, A) {}
4824 
4825   /// See AbstractAttribute::initialize(...).
4826   void initialize(Attributor &A) override {
4827     Value &V = getAssociatedValue();
4828     if (auto *C = dyn_cast<Constant>(&V)) {
4829       if (C->isThreadDependent())
4830         indicatePessimisticFixpoint();
4831       else
4832         indicateOptimisticFixpoint();
4833       return;
4834     }
4835     if (auto *CB = dyn_cast<CallBase>(&V))
4836       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4837           !CB->mayReadFromMemory()) {
4838         indicateOptimisticFixpoint();
4839         return;
4840       }
4841   }
4842 
4843   /// See AbstractAttribute::updateImpl(...).
4844   ChangeStatus updateImpl(Attributor &A) override {
4845     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4846 
4847     Value &V = getAssociatedValue();
4848     const Function *Scope = nullptr;
4849     if (auto *I = dyn_cast<Instruction>(&V))
4850       Scope = I->getFunction();
4851     if (auto *A = dyn_cast<Argument>(&V)) {
4852       Scope = A->getParent();
4853       if (!Scope->hasLocalLinkage())
4854         return Changed;
4855     }
4856     if (!Scope)
4857       return indicateOptimisticFixpoint();
4858 
4859     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4860         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4861     if (NoRecurseAA.isAssumedNoRecurse())
4862       return Changed;
4863 
4864     auto UsePred = [&](const Use &U, bool &Follow) {
4865       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4866       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4867           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4868         Follow = true;
4869         return true;
4870       }
4871       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4872           (isa<StoreInst>(UserI) &&
4873            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4874         return true;
4875       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4876         // This check is not guaranteeing uniqueness but for now that we cannot
4877         // end up with two versions of \p U thinking it was one.
4878         if (!CB->getCalledFunction() ||
4879             !CB->getCalledFunction()->hasLocalLinkage())
4880           return true;
4881         if (!CB->isArgOperand(&U))
4882           return false;
4883         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4884             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4885             DepClassTy::OPTIONAL);
4886         if (ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4887           return true;
4888       }
4889       return false;
4890     };
4891 
4892     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4893       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4894         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4895         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4896           return true;
4897         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4898             *SI->getFunction());
4899         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4900           return true;
4901       }
4902       return false;
4903     };
4904 
4905     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4906                            DepClassTy::OPTIONAL,
4907                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4908       return indicatePessimisticFixpoint();
4909 
4910     return Changed;
4911   }
4912 
4913   /// See AbstractState::getAsStr().
4914   const std::string getAsStr() const override {
4915     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4916   }
4917 
4918   /// See AbstractAttribute::trackStatistics()
4919   void trackStatistics() const override {}
4920 };
4921 
4922 /// InstanceInfo attribute for floating values.
4923 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4924   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4925       : AAInstanceInfoImpl(IRP, A) {}
4926 };
4927 
4928 /// NoCapture attribute for function arguments.
4929 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4930   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4931       : AAInstanceInfoFloating(IRP, A) {}
4932 };
4933 
4934 /// InstanceInfo attribute for call site arguments.
4935 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4936   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4937       : AAInstanceInfoImpl(IRP, A) {}
4938 
4939   /// See AbstractAttribute::updateImpl(...).
4940   ChangeStatus updateImpl(Attributor &A) override {
4941     // TODO: Once we have call site specific value information we can provide
4942     //       call site specific liveness information and then it makes
4943     //       sense to specialize attributes for call sites arguments instead of
4944     //       redirecting requests to the callee argument.
4945     Argument *Arg = getAssociatedArgument();
4946     if (!Arg)
4947       return indicatePessimisticFixpoint();
4948     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4949     auto &ArgAA =
4950         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4951     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4952   }
4953 };
4954 
4955 /// InstanceInfo attribute for function return value.
4956 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4957   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4958       : AAInstanceInfoImpl(IRP, A) {
4959     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4960   }
4961 
4962   /// See AbstractAttribute::initialize(...).
4963   void initialize(Attributor &A) override {
4964     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4965   }
4966 
4967   /// See AbstractAttribute::updateImpl(...).
4968   ChangeStatus updateImpl(Attributor &A) override {
4969     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4970   }
4971 };
4972 
4973 /// InstanceInfo attribute deduction for a call site return value.
4974 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4975   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4976       : AAInstanceInfoFloating(IRP, A) {}
4977 };
4978 } // namespace
4979 
4980 /// ----------------------- Variable Capturing ---------------------------------
4981 
4982 namespace {
4983 /// A class to hold the state of for no-capture attributes.
4984 struct AANoCaptureImpl : public AANoCapture {
4985   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4986 
4987   /// See AbstractAttribute::initialize(...).
4988   void initialize(Attributor &A) override {
4989     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4990       indicateOptimisticFixpoint();
4991       return;
4992     }
4993     Function *AnchorScope = getAnchorScope();
4994     if (isFnInterfaceKind() &&
4995         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4996       indicatePessimisticFixpoint();
4997       return;
4998     }
4999 
5000     // You cannot "capture" null in the default address space.
5001     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5002         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5003       indicateOptimisticFixpoint();
5004       return;
5005     }
5006 
5007     const Function *F =
5008         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5009 
5010     // Check what state the associated function can actually capture.
5011     if (F)
5012       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5013     else
5014       indicatePessimisticFixpoint();
5015   }
5016 
5017   /// See AbstractAttribute::updateImpl(...).
5018   ChangeStatus updateImpl(Attributor &A) override;
5019 
5020   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5021   virtual void
5022   getDeducedAttributes(LLVMContext &Ctx,
5023                        SmallVectorImpl<Attribute> &Attrs) const override {
5024     if (!isAssumedNoCaptureMaybeReturned())
5025       return;
5026 
5027     if (isArgumentPosition()) {
5028       if (isAssumedNoCapture())
5029         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5030       else if (ManifestInternal)
5031         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5032     }
5033   }
5034 
5035   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5036   /// depending on the ability of the function associated with \p IRP to capture
5037   /// state in memory and through "returning/throwing", respectively.
5038   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5039                                                    const Function &F,
5040                                                    BitIntegerState &State) {
5041     // TODO: Once we have memory behavior attributes we should use them here.
5042 
5043     // If we know we cannot communicate or write to memory, we do not care about
5044     // ptr2int anymore.
5045     if (F.onlyReadsMemory() && F.doesNotThrow() &&
5046         F.getReturnType()->isVoidTy()) {
5047       State.addKnownBits(NO_CAPTURE);
5048       return;
5049     }
5050 
5051     // A function cannot capture state in memory if it only reads memory, it can
5052     // however return/throw state and the state might be influenced by the
5053     // pointer value, e.g., loading from a returned pointer might reveal a bit.
5054     if (F.onlyReadsMemory())
5055       State.addKnownBits(NOT_CAPTURED_IN_MEM);
5056 
5057     // A function cannot communicate state back if it does not through
5058     // exceptions and doesn not return values.
5059     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5060       State.addKnownBits(NOT_CAPTURED_IN_RET);
5061 
5062     // Check existing "returned" attributes.
5063     int ArgNo = IRP.getCalleeArgNo();
5064     if (F.doesNotThrow() && ArgNo >= 0) {
5065       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5066         if (F.hasParamAttribute(u, Attribute::Returned)) {
5067           if (u == unsigned(ArgNo))
5068             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5069           else if (F.onlyReadsMemory())
5070             State.addKnownBits(NO_CAPTURE);
5071           else
5072             State.addKnownBits(NOT_CAPTURED_IN_RET);
5073           break;
5074         }
5075     }
5076   }
5077 
5078   /// See AbstractState::getAsStr().
5079   const std::string getAsStr() const override {
5080     if (isKnownNoCapture())
5081       return "known not-captured";
5082     if (isAssumedNoCapture())
5083       return "assumed not-captured";
5084     if (isKnownNoCaptureMaybeReturned())
5085       return "known not-captured-maybe-returned";
5086     if (isAssumedNoCaptureMaybeReturned())
5087       return "assumed not-captured-maybe-returned";
5088     return "assumed-captured";
5089   }
5090 
5091   /// Check the use \p U and update \p State accordingly. Return true if we
5092   /// should continue to update the state.
5093   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5094                 bool &Follow) {
5095     Instruction *UInst = cast<Instruction>(U.getUser());
5096     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5097                       << *UInst << "\n");
5098 
5099     // Deal with ptr2int by following uses.
5100     if (isa<PtrToIntInst>(UInst)) {
5101       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5102       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5103                           /* Return */ true);
5104     }
5105 
5106     // For stores we already checked if we can follow them, if they make it
5107     // here we give up.
5108     if (isa<StoreInst>(UInst))
5109       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5110                           /* Return */ false);
5111 
5112     // Explicitly catch return instructions.
5113     if (isa<ReturnInst>(UInst)) {
5114       if (UInst->getFunction() == getAnchorScope())
5115         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5116                             /* Return */ true);
5117       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5118                           /* Return */ true);
5119     }
5120 
5121     // For now we only use special logic for call sites. However, the tracker
5122     // itself knows about a lot of other non-capturing cases already.
5123     auto *CB = dyn_cast<CallBase>(UInst);
5124     if (!CB || !CB->isArgOperand(&U))
5125       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5126                           /* Return */ true);
5127 
5128     unsigned ArgNo = CB->getArgOperandNo(&U);
5129     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5130     // If we have a abstract no-capture attribute for the argument we can use
5131     // it to justify a non-capture attribute here. This allows recursion!
5132     auto &ArgNoCaptureAA =
5133         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5134     if (ArgNoCaptureAA.isAssumedNoCapture())
5135       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5136                           /* Return */ false);
5137     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5138       Follow = true;
5139       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5140                           /* Return */ false);
5141     }
5142 
5143     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5144     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5145                         /* Return */ true);
5146   }
5147 
5148   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5149   /// \p CapturedInRet, then return true if we should continue updating the
5150   /// state.
5151   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5152                            bool CapturedInInt, bool CapturedInRet) {
5153     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5154                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5155     if (CapturedInMem)
5156       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5157     if (CapturedInInt)
5158       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5159     if (CapturedInRet)
5160       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5161     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5162   }
5163 };
5164 
5165 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5166   const IRPosition &IRP = getIRPosition();
5167   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5168                                   : &IRP.getAssociatedValue();
5169   if (!V)
5170     return indicatePessimisticFixpoint();
5171 
5172   const Function *F =
5173       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5174   assert(F && "Expected a function!");
5175   const IRPosition &FnPos = IRPosition::function(*F);
5176 
5177   AANoCapture::StateType T;
5178 
5179   // Readonly means we cannot capture through memory.
5180   bool IsKnown;
5181   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5182     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5183     if (IsKnown)
5184       addKnownBits(NOT_CAPTURED_IN_MEM);
5185   }
5186 
5187   // Make sure all returned values are different than the underlying value.
5188   // TODO: we could do this in a more sophisticated way inside
5189   //       AAReturnedValues, e.g., track all values that escape through returns
5190   //       directly somehow.
5191   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5192     bool SeenConstant = false;
5193     for (auto &It : RVAA.returned_values()) {
5194       if (isa<Constant>(It.first)) {
5195         if (SeenConstant)
5196           return false;
5197         SeenConstant = true;
5198       } else if (!isa<Argument>(It.first) ||
5199                  It.first == getAssociatedArgument())
5200         return false;
5201     }
5202     return true;
5203   };
5204 
5205   const auto &NoUnwindAA =
5206       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5207   if (NoUnwindAA.isAssumedNoUnwind()) {
5208     bool IsVoidTy = F->getReturnType()->isVoidTy();
5209     const AAReturnedValues *RVAA =
5210         IsVoidTy ? nullptr
5211                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5212 
5213                                                  DepClassTy::OPTIONAL);
5214     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5215       T.addKnownBits(NOT_CAPTURED_IN_RET);
5216       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5217         return ChangeStatus::UNCHANGED;
5218       if (NoUnwindAA.isKnownNoUnwind() &&
5219           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5220         addKnownBits(NOT_CAPTURED_IN_RET);
5221         if (isKnown(NOT_CAPTURED_IN_MEM))
5222           return indicateOptimisticFixpoint();
5223       }
5224     }
5225   }
5226 
5227   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5228     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5229         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5230     return DerefAA.getAssumedDereferenceableBytes();
5231   };
5232 
5233   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5234     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5235     case UseCaptureKind::NO_CAPTURE:
5236       return true;
5237     case UseCaptureKind::MAY_CAPTURE:
5238       return checkUse(A, T, U, Follow);
5239     case UseCaptureKind::PASSTHROUGH:
5240       Follow = true;
5241       return true;
5242     }
5243     llvm_unreachable("Unexpected use capture kind!");
5244   };
5245 
5246   if (!A.checkForAllUses(UseCheck, *this, *V))
5247     return indicatePessimisticFixpoint();
5248 
5249   AANoCapture::StateType &S = getState();
5250   auto Assumed = S.getAssumed();
5251   S.intersectAssumedBits(T.getAssumed());
5252   if (!isAssumedNoCaptureMaybeReturned())
5253     return indicatePessimisticFixpoint();
5254   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5255                                    : ChangeStatus::CHANGED;
5256 }
5257 
5258 /// NoCapture attribute for function arguments.
5259 struct AANoCaptureArgument final : AANoCaptureImpl {
5260   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5261       : AANoCaptureImpl(IRP, A) {}
5262 
5263   /// See AbstractAttribute::trackStatistics()
5264   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5265 };
5266 
5267 /// NoCapture attribute for call site arguments.
5268 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5269   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5270       : AANoCaptureImpl(IRP, A) {}
5271 
5272   /// See AbstractAttribute::initialize(...).
5273   void initialize(Attributor &A) override {
5274     if (Argument *Arg = getAssociatedArgument())
5275       if (Arg->hasByValAttr())
5276         indicateOptimisticFixpoint();
5277     AANoCaptureImpl::initialize(A);
5278   }
5279 
5280   /// See AbstractAttribute::updateImpl(...).
5281   ChangeStatus updateImpl(Attributor &A) override {
5282     // TODO: Once we have call site specific value information we can provide
5283     //       call site specific liveness information and then it makes
5284     //       sense to specialize attributes for call sites arguments instead of
5285     //       redirecting requests to the callee argument.
5286     Argument *Arg = getAssociatedArgument();
5287     if (!Arg)
5288       return indicatePessimisticFixpoint();
5289     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5290     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5291     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5292   }
5293 
5294   /// See AbstractAttribute::trackStatistics()
5295   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5296 };
5297 
5298 /// NoCapture attribute for floating values.
5299 struct AANoCaptureFloating final : AANoCaptureImpl {
5300   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5301       : AANoCaptureImpl(IRP, A) {}
5302 
5303   /// See AbstractAttribute::trackStatistics()
5304   void trackStatistics() const override {
5305     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5306   }
5307 };
5308 
5309 /// NoCapture attribute for function return value.
5310 struct AANoCaptureReturned final : AANoCaptureImpl {
5311   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5312       : AANoCaptureImpl(IRP, A) {
5313     llvm_unreachable("NoCapture is not applicable to function returns!");
5314   }
5315 
5316   /// See AbstractAttribute::initialize(...).
5317   void initialize(Attributor &A) override {
5318     llvm_unreachable("NoCapture is not applicable to function returns!");
5319   }
5320 
5321   /// See AbstractAttribute::updateImpl(...).
5322   ChangeStatus updateImpl(Attributor &A) override {
5323     llvm_unreachable("NoCapture is not applicable to function returns!");
5324   }
5325 
5326   /// See AbstractAttribute::trackStatistics()
5327   void trackStatistics() const override {}
5328 };
5329 
5330 /// NoCapture attribute deduction for a call site return value.
5331 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5332   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5333       : AANoCaptureImpl(IRP, A) {}
5334 
5335   /// See AbstractAttribute::initialize(...).
5336   void initialize(Attributor &A) override {
5337     const Function *F = getAnchorScope();
5338     // Check what state the associated function can actually capture.
5339     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5340   }
5341 
5342   /// See AbstractAttribute::trackStatistics()
5343   void trackStatistics() const override {
5344     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5345   }
5346 };
5347 } // namespace
5348 
5349 /// ------------------ Value Simplify Attribute ----------------------------
5350 
5351 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5352   // FIXME: Add a typecast support.
5353   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5354       SimplifiedAssociatedValue, Other, Ty);
5355   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5356     return false;
5357 
5358   LLVM_DEBUG({
5359     if (SimplifiedAssociatedValue.hasValue())
5360       dbgs() << "[ValueSimplify] is assumed to be "
5361              << **SimplifiedAssociatedValue << "\n";
5362     else
5363       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5364   });
5365   return true;
5366 }
5367 
5368 namespace {
5369 struct AAValueSimplifyImpl : AAValueSimplify {
5370   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5371       : AAValueSimplify(IRP, A) {}
5372 
5373   /// See AbstractAttribute::initialize(...).
5374   void initialize(Attributor &A) override {
5375     if (getAssociatedValue().getType()->isVoidTy())
5376       indicatePessimisticFixpoint();
5377     if (A.hasSimplificationCallback(getIRPosition()))
5378       indicatePessimisticFixpoint();
5379   }
5380 
5381   /// See AbstractAttribute::getAsStr().
5382   const std::string getAsStr() const override {
5383     LLVM_DEBUG({
5384       errs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5385       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5386         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5387     });
5388     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5389                           : "not-simple";
5390   }
5391 
5392   /// See AbstractAttribute::trackStatistics()
5393   void trackStatistics() const override {}
5394 
5395   /// See AAValueSimplify::getAssumedSimplifiedValue()
5396   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5397     return SimplifiedAssociatedValue;
5398   }
5399 
5400   /// Ensure the return value is \p V with type \p Ty, if not possible return
5401   /// nullptr. If \p Check is true we will only verify such an operation would
5402   /// suceed and return a non-nullptr value if that is the case. No IR is
5403   /// generated or modified.
5404   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5405                            bool Check) {
5406     if (auto *TypedV = AA::getWithType(V, Ty))
5407       return TypedV;
5408     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5409       return Check ? &V
5410                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5411                                                                       "", CtxI);
5412     return nullptr;
5413   }
5414 
5415   /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble.
5416   /// If \p Check is true we will only verify such an operation would suceed and
5417   /// return a non-nullptr value if that is the case. No IR is generated or
5418   /// modified.
5419   static Value *reproduceInst(Attributor &A,
5420                               const AbstractAttribute &QueryingAA,
5421                               Instruction &I, Type &Ty, Instruction *CtxI,
5422                               bool Check, ValueToValueMapTy &VMap) {
5423     assert(CtxI && "Cannot reproduce an instruction without context!");
5424     if (Check && (I.mayReadFromMemory() ||
5425                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5426                                                 /* TLI */ nullptr)))
5427       return nullptr;
5428     for (Value *Op : I.operands()) {
5429       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5430       if (!NewOp) {
5431         assert(Check && "Manifest of new value unexpectedly failed!");
5432         return nullptr;
5433       }
5434       if (!Check)
5435         VMap[Op] = NewOp;
5436     }
5437     if (Check)
5438       return &I;
5439 
5440     Instruction *CloneI = I.clone();
5441     VMap[&I] = CloneI;
5442     CloneI->insertBefore(CtxI);
5443     RemapInstruction(CloneI, VMap);
5444     return CloneI;
5445   }
5446 
5447   /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble.
5448   /// If \p Check is true we will only verify such an operation would suceed and
5449   /// return a non-nullptr value if that is the case. No IR is generated or
5450   /// modified.
5451   static Value *reproduceValue(Attributor &A,
5452                                const AbstractAttribute &QueryingAA, Value &V,
5453                                Type &Ty, Instruction *CtxI, bool Check,
5454                                ValueToValueMapTy &VMap) {
5455     if (const auto &NewV = VMap.lookup(&V))
5456       return NewV;
5457     bool UsedAssumedInformation = false;
5458     Optional<Value *> SimpleV =
5459         A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
5460     if (!SimpleV.hasValue())
5461       return PoisonValue::get(&Ty);
5462     Value *EffectiveV = &V;
5463     if (SimpleV.getValue())
5464       EffectiveV = SimpleV.getValue();
5465     if (auto *C = dyn_cast<Constant>(EffectiveV))
5466       if (!C->canTrap())
5467         return C;
5468     if (CtxI && AA::isValidAtPosition(*EffectiveV, *CtxI, A.getInfoCache()))
5469       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5470     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5471       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5472         return ensureType(A, *NewV, Ty, CtxI, Check);
5473     return nullptr;
5474   }
5475 
5476   /// Return a value we can use as replacement for the associated one, or
5477   /// nullptr if we don't have one that makes sense.
5478   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5479     Value *NewV = SimplifiedAssociatedValue.hasValue()
5480                       ? SimplifiedAssociatedValue.getValue()
5481                       : UndefValue::get(getAssociatedType());
5482     if (NewV && NewV != &getAssociatedValue()) {
5483       ValueToValueMapTy VMap;
5484       // First verify we can reprduce the value with the required type at the
5485       // context location before we actually start modifying the IR.
5486       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5487                          /* CheckOnly */ true, VMap))
5488         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5489                               /* CheckOnly */ false, VMap);
5490     }
5491     return nullptr;
5492   }
5493 
5494   /// Helper function for querying AAValueSimplify and updating candicate.
5495   /// \param IRP The value position we are trying to unify with SimplifiedValue
5496   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5497                       const IRPosition &IRP, bool Simplify = true) {
5498     bool UsedAssumedInformation = false;
5499     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5500     if (Simplify)
5501       QueryingValueSimplified =
5502           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5503     return unionAssumed(QueryingValueSimplified);
5504   }
5505 
5506   /// Returns a candidate is found or not
5507   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5508     if (!getAssociatedValue().getType()->isIntegerTy())
5509       return false;
5510 
5511     // This will also pass the call base context.
5512     const auto &AA =
5513         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5514 
5515     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5516 
5517     if (!COpt.hasValue()) {
5518       SimplifiedAssociatedValue = llvm::None;
5519       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5520       return true;
5521     }
5522     if (auto *C = COpt.getValue()) {
5523       SimplifiedAssociatedValue = C;
5524       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5525       return true;
5526     }
5527     return false;
5528   }
5529 
5530   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5531     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5532       return true;
5533     if (askSimplifiedValueFor<AAPotentialValues>(A))
5534       return true;
5535     return false;
5536   }
5537 
5538   /// See AbstractAttribute::manifest(...).
5539   ChangeStatus manifest(Attributor &A) override {
5540     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5541     for (auto &U : getAssociatedValue().uses()) {
5542       // Check if we need to adjust the insertion point to make sure the IR is
5543       // valid.
5544       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5545       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5546         IP = PHI->getIncomingBlock(U)->getTerminator();
5547       if (auto *NewV = manifestReplacementValue(A, IP)) {
5548         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5549                           << " -> " << *NewV << " :: " << *this << "\n");
5550         if (A.changeUseAfterManifest(U, *NewV))
5551           Changed = ChangeStatus::CHANGED;
5552       }
5553     }
5554 
5555     return Changed | AAValueSimplify::manifest(A);
5556   }
5557 
5558   /// See AbstractState::indicatePessimisticFixpoint(...).
5559   ChangeStatus indicatePessimisticFixpoint() override {
5560     SimplifiedAssociatedValue = &getAssociatedValue();
5561     return AAValueSimplify::indicatePessimisticFixpoint();
5562   }
5563 };
5564 
5565 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5566   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5567       : AAValueSimplifyImpl(IRP, A) {}
5568 
5569   void initialize(Attributor &A) override {
5570     AAValueSimplifyImpl::initialize(A);
5571     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5572       indicatePessimisticFixpoint();
5573     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5574                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5575                 /* IgnoreSubsumingPositions */ true))
5576       indicatePessimisticFixpoint();
5577   }
5578 
5579   /// See AbstractAttribute::updateImpl(...).
5580   ChangeStatus updateImpl(Attributor &A) override {
5581     // Byval is only replacable if it is readonly otherwise we would write into
5582     // the replaced value and not the copy that byval creates implicitly.
5583     Argument *Arg = getAssociatedArgument();
5584     if (Arg->hasByValAttr()) {
5585       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5586       //       there is no race by not copying a constant byval.
5587       bool IsKnown;
5588       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5589         return indicatePessimisticFixpoint();
5590     }
5591 
5592     auto Before = SimplifiedAssociatedValue;
5593 
5594     auto PredForCallSite = [&](AbstractCallSite ACS) {
5595       const IRPosition &ACSArgPos =
5596           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5597       // Check if a coresponding argument was found or if it is on not
5598       // associated (which can happen for callback calls).
5599       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5600         return false;
5601 
5602       // Simplify the argument operand explicitly and check if the result is
5603       // valid in the current scope. This avoids refering to simplified values
5604       // in other functions, e.g., we don't want to say a an argument in a
5605       // static function is actually an argument in a different function.
5606       bool UsedAssumedInformation = false;
5607       Optional<Constant *> SimpleArgOp =
5608           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5609       if (!SimpleArgOp.hasValue())
5610         return true;
5611       if (!SimpleArgOp.getValue())
5612         return false;
5613       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5614         return false;
5615       return unionAssumed(*SimpleArgOp);
5616     };
5617 
5618     // Generate a answer specific to a call site context.
5619     bool Success;
5620     bool UsedAssumedInformation = false;
5621     if (hasCallBaseContext() &&
5622         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5623       Success = PredForCallSite(
5624           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5625     else
5626       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5627                                        UsedAssumedInformation);
5628 
5629     if (!Success)
5630       if (!askSimplifiedValueForOtherAAs(A))
5631         return indicatePessimisticFixpoint();
5632 
5633     // If a candicate was found in this update, return CHANGED.
5634     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5635                                                : ChangeStatus ::CHANGED;
5636   }
5637 
5638   /// See AbstractAttribute::trackStatistics()
5639   void trackStatistics() const override {
5640     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5641   }
5642 };
5643 
5644 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5645   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5646       : AAValueSimplifyImpl(IRP, A) {}
5647 
5648   /// See AAValueSimplify::getAssumedSimplifiedValue()
5649   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5650     if (!isValidState())
5651       return nullptr;
5652     return SimplifiedAssociatedValue;
5653   }
5654 
5655   /// See AbstractAttribute::updateImpl(...).
5656   ChangeStatus updateImpl(Attributor &A) override {
5657     auto Before = SimplifiedAssociatedValue;
5658 
5659     auto ReturnInstCB = [&](Instruction &I) {
5660       auto &RI = cast<ReturnInst>(I);
5661       return checkAndUpdate(
5662           A, *this,
5663           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5664     };
5665 
5666     bool UsedAssumedInformation = false;
5667     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5668                                    UsedAssumedInformation))
5669       if (!askSimplifiedValueForOtherAAs(A))
5670         return indicatePessimisticFixpoint();
5671 
5672     // If a candicate was found in this update, return CHANGED.
5673     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5674                                                : ChangeStatus ::CHANGED;
5675   }
5676 
5677   ChangeStatus manifest(Attributor &A) override {
5678     // We queried AAValueSimplify for the returned values so they will be
5679     // replaced if a simplified form was found. Nothing to do here.
5680     return ChangeStatus::UNCHANGED;
5681   }
5682 
5683   /// See AbstractAttribute::trackStatistics()
5684   void trackStatistics() const override {
5685     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5686   }
5687 };
5688 
5689 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5690   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5691       : AAValueSimplifyImpl(IRP, A) {}
5692 
5693   /// See AbstractAttribute::initialize(...).
5694   void initialize(Attributor &A) override {
5695     AAValueSimplifyImpl::initialize(A);
5696     Value &V = getAnchorValue();
5697 
5698     // TODO: add other stuffs
5699     if (isa<Constant>(V))
5700       indicatePessimisticFixpoint();
5701   }
5702 
5703   /// Check if \p Cmp is a comparison we can simplify.
5704   ///
5705   /// We handle multiple cases, one in which at least one operand is an
5706   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5707   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5708   /// will be updated.
5709   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5710     auto Union = [&](Value &V) {
5711       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5712           SimplifiedAssociatedValue, &V, V.getType());
5713       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5714     };
5715 
5716     Value *LHS = Cmp.getOperand(0);
5717     Value *RHS = Cmp.getOperand(1);
5718 
5719     // Simplify the operands first.
5720     bool UsedAssumedInformation = false;
5721     const auto &SimplifiedLHS =
5722         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5723                                *this, UsedAssumedInformation);
5724     if (!SimplifiedLHS.hasValue())
5725       return true;
5726     if (!SimplifiedLHS.getValue())
5727       return false;
5728     LHS = *SimplifiedLHS;
5729 
5730     const auto &SimplifiedRHS =
5731         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5732                                *this, UsedAssumedInformation);
5733     if (!SimplifiedRHS.hasValue())
5734       return true;
5735     if (!SimplifiedRHS.getValue())
5736       return false;
5737     RHS = *SimplifiedRHS;
5738 
5739     LLVMContext &Ctx = Cmp.getContext();
5740     // Handle the trivial case first in which we don't even need to think about
5741     // null or non-null.
5742     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5743       Constant *NewVal =
5744           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5745       if (!Union(*NewVal))
5746         return false;
5747       if (!UsedAssumedInformation)
5748         indicateOptimisticFixpoint();
5749       return true;
5750     }
5751 
5752     // From now on we only handle equalities (==, !=).
5753     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5754     if (!ICmp || !ICmp->isEquality())
5755       return false;
5756 
5757     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5758     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5759     if (!LHSIsNull && !RHSIsNull)
5760       return false;
5761 
5762     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5763     // non-nullptr operand and if we assume it's non-null we can conclude the
5764     // result of the comparison.
5765     assert((LHSIsNull || RHSIsNull) &&
5766            "Expected nullptr versus non-nullptr comparison at this point");
5767 
5768     // The index is the operand that we assume is not null.
5769     unsigned PtrIdx = LHSIsNull;
5770     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5771         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5772         DepClassTy::REQUIRED);
5773     if (!PtrNonNullAA.isAssumedNonNull())
5774       return false;
5775     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5776 
5777     // The new value depends on the predicate, true for != and false for ==.
5778     Constant *NewVal = ConstantInt::get(
5779         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5780     if (!Union(*NewVal))
5781       return false;
5782 
5783     if (!UsedAssumedInformation)
5784       indicateOptimisticFixpoint();
5785 
5786     return true;
5787   }
5788 
5789   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5790   /// simplify any operand of the instruction \p I. Return true if successful,
5791   /// in that case SimplifiedAssociatedValue will be updated.
5792   bool handleGenericInst(Attributor &A, Instruction &I) {
5793     bool SomeSimplified = false;
5794     bool UsedAssumedInformation = false;
5795 
5796     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5797     int Idx = 0;
5798     for (Value *Op : I.operands()) {
5799       const auto &SimplifiedOp =
5800           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5801                                  *this, UsedAssumedInformation);
5802       // If we are not sure about any operand we are not sure about the entire
5803       // instruction, we'll wait.
5804       if (!SimplifiedOp.hasValue())
5805         return true;
5806 
5807       if (SimplifiedOp.getValue())
5808         NewOps[Idx] = SimplifiedOp.getValue();
5809       else
5810         NewOps[Idx] = Op;
5811 
5812       SomeSimplified |= (NewOps[Idx] != Op);
5813       ++Idx;
5814     }
5815 
5816     // We won't bother with the InstSimplify interface if we didn't simplify any
5817     // operand ourselves.
5818     if (!SomeSimplified)
5819       return false;
5820 
5821     InformationCache &InfoCache = A.getInfoCache();
5822     Function *F = I.getFunction();
5823     const auto *DT =
5824         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5825     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5826     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5827     OptimizationRemarkEmitter *ORE = nullptr;
5828 
5829     const DataLayout &DL = I.getModule()->getDataLayout();
5830     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5831     if (Value *SimplifiedI =
5832             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5833       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5834           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5835       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5836     }
5837     return false;
5838   }
5839 
5840   /// See AbstractAttribute::updateImpl(...).
5841   ChangeStatus updateImpl(Attributor &A) override {
5842     auto Before = SimplifiedAssociatedValue;
5843 
5844     // Do not simplify loads that are only used in llvm.assume if we cannot also
5845     // remove all stores that may feed into the load. The reason is that the
5846     // assume is probably worth something as long as the stores are around.
5847     if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
5848       InformationCache &InfoCache = A.getInfoCache();
5849       if (InfoCache.isOnlyUsedByAssume(*LI)) {
5850         SmallSetVector<Value *, 4> PotentialCopies;
5851         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
5852         bool UsedAssumedInformation = false;
5853         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
5854                                            PotentialValueOrigins, *this,
5855                                            UsedAssumedInformation,
5856                                            /* OnlyExact */ true)) {
5857           if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
5858                 if (!I)
5859                   return true;
5860                 if (auto *SI = dyn_cast<StoreInst>(I))
5861                   return A.isAssumedDead(SI->getOperandUse(0), this,
5862                                          /* LivenessAA */ nullptr,
5863                                          UsedAssumedInformation,
5864                                          /* CheckBBLivenessOnly */ false);
5865                 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
5866                                        UsedAssumedInformation,
5867                                        /* CheckBBLivenessOnly */ false);
5868               }))
5869             return indicatePessimisticFixpoint();
5870         }
5871       }
5872     }
5873 
5874     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5875                             bool Stripped) -> bool {
5876       auto &AA = A.getAAFor<AAValueSimplify>(
5877           *this, IRPosition::value(V, getCallBaseContext()),
5878           DepClassTy::REQUIRED);
5879       if (!Stripped && this == &AA) {
5880 
5881         if (auto *I = dyn_cast<Instruction>(&V)) {
5882           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5883             if (handleCmp(A, *Cmp))
5884               return true;
5885           if (handleGenericInst(A, *I))
5886             return true;
5887         }
5888         // TODO: Look the instruction and check recursively.
5889 
5890         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5891                           << "\n");
5892         return false;
5893       }
5894       return checkAndUpdate(A, *this,
5895                             IRPosition::value(V, getCallBaseContext()));
5896     };
5897 
5898     bool Dummy = false;
5899     bool UsedAssumedInformation = false;
5900     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5901                                      VisitValueCB, getCtxI(),
5902                                      UsedAssumedInformation,
5903                                      /* UseValueSimplify */ false))
5904       if (!askSimplifiedValueForOtherAAs(A))
5905         return indicatePessimisticFixpoint();
5906 
5907     // If a candicate was found in this update, return CHANGED.
5908     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5909                                                : ChangeStatus ::CHANGED;
5910   }
5911 
5912   /// See AbstractAttribute::trackStatistics()
5913   void trackStatistics() const override {
5914     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5915   }
5916 };
5917 
5918 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5919   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5920       : AAValueSimplifyImpl(IRP, A) {}
5921 
5922   /// See AbstractAttribute::initialize(...).
5923   void initialize(Attributor &A) override {
5924     SimplifiedAssociatedValue = nullptr;
5925     indicateOptimisticFixpoint();
5926   }
5927   /// See AbstractAttribute::initialize(...).
5928   ChangeStatus updateImpl(Attributor &A) override {
5929     llvm_unreachable(
5930         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5931   }
5932   /// See AbstractAttribute::trackStatistics()
5933   void trackStatistics() const override {
5934     STATS_DECLTRACK_FN_ATTR(value_simplify)
5935   }
5936 };
5937 
5938 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5939   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5940       : AAValueSimplifyFunction(IRP, A) {}
5941   /// See AbstractAttribute::trackStatistics()
5942   void trackStatistics() const override {
5943     STATS_DECLTRACK_CS_ATTR(value_simplify)
5944   }
5945 };
5946 
5947 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5948   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5949       : AAValueSimplifyImpl(IRP, A) {}
5950 
5951   void initialize(Attributor &A) override {
5952     AAValueSimplifyImpl::initialize(A);
5953     Function *Fn = getAssociatedFunction();
5954     if (!Fn) {
5955       indicatePessimisticFixpoint();
5956       return;
5957     }
5958     for (Argument &Arg : Fn->args()) {
5959       if (Arg.hasReturnedAttr()) {
5960         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5961                                                  Arg.getArgNo());
5962         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5963             checkAndUpdate(A, *this, IRP))
5964           indicateOptimisticFixpoint();
5965         else
5966           indicatePessimisticFixpoint();
5967         return;
5968       }
5969     }
5970   }
5971 
5972   /// See AbstractAttribute::updateImpl(...).
5973   ChangeStatus updateImpl(Attributor &A) override {
5974     auto Before = SimplifiedAssociatedValue;
5975     auto &RetAA = A.getAAFor<AAReturnedValues>(
5976         *this, IRPosition::function(*getAssociatedFunction()),
5977         DepClassTy::REQUIRED);
5978     auto PredForReturned =
5979         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5980           bool UsedAssumedInformation = false;
5981           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5982               &RetVal, *cast<CallBase>(getCtxI()), *this,
5983               UsedAssumedInformation);
5984           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5985               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5986           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5987         };
5988     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5989       if (!askSimplifiedValueForOtherAAs(A))
5990         return indicatePessimisticFixpoint();
5991     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5992                                                : ChangeStatus ::CHANGED;
5993   }
5994 
5995   void trackStatistics() const override {
5996     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5997   }
5998 };
5999 
6000 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6001   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6002       : AAValueSimplifyFloating(IRP, A) {}
6003 
6004   /// See AbstractAttribute::manifest(...).
6005   ChangeStatus manifest(Attributor &A) override {
6006     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6007 
6008     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6009       Use &U = cast<CallBase>(&getAnchorValue())
6010                    ->getArgOperandUse(getCallSiteArgNo());
6011       if (A.changeUseAfterManifest(U, *NewV))
6012         Changed = ChangeStatus::CHANGED;
6013     }
6014 
6015     return Changed | AAValueSimplify::manifest(A);
6016   }
6017 
6018   void trackStatistics() const override {
6019     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6020   }
6021 };
6022 } // namespace
6023 
6024 /// ----------------------- Heap-To-Stack Conversion ---------------------------
6025 namespace {
6026 struct AAHeapToStackFunction final : public AAHeapToStack {
6027 
6028   struct AllocationInfo {
6029     /// The call that allocates the memory.
6030     CallBase *const CB;
6031 
6032     /// The library function id for the allocation.
6033     LibFunc LibraryFunctionId = NotLibFunc;
6034 
6035     /// The status wrt. a rewrite.
6036     enum {
6037       STACK_DUE_TO_USE,
6038       STACK_DUE_TO_FREE,
6039       INVALID,
6040     } Status = STACK_DUE_TO_USE;
6041 
6042     /// Flag to indicate if we encountered a use that might free this allocation
6043     /// but which is not in the deallocation infos.
6044     bool HasPotentiallyFreeingUnknownUses = false;
6045 
6046     /// The set of free calls that use this allocation.
6047     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6048   };
6049 
6050   struct DeallocationInfo {
6051     /// The call that deallocates the memory.
6052     CallBase *const CB;
6053 
6054     /// Flag to indicate if we don't know all objects this deallocation might
6055     /// free.
6056     bool MightFreeUnknownObjects = false;
6057 
6058     /// The set of allocation calls that are potentially freed.
6059     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6060   };
6061 
6062   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6063       : AAHeapToStack(IRP, A) {}
6064 
6065   ~AAHeapToStackFunction() {
6066     // Ensure we call the destructor so we release any memory allocated in the
6067     // sets.
6068     for (auto &It : AllocationInfos)
6069       It.second->~AllocationInfo();
6070     for (auto &It : DeallocationInfos)
6071       It.second->~DeallocationInfo();
6072   }
6073 
6074   void initialize(Attributor &A) override {
6075     AAHeapToStack::initialize(A);
6076 
6077     const Function *F = getAnchorScope();
6078     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6079 
6080     auto AllocationIdentifierCB = [&](Instruction &I) {
6081       CallBase *CB = dyn_cast<CallBase>(&I);
6082       if (!CB)
6083         return true;
6084       if (isFreeCall(CB, TLI)) {
6085         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
6086         return true;
6087       }
6088       // To do heap to stack, we need to know that the allocation itself is
6089       // removable once uses are rewritten, and that we can initialize the
6090       // alloca to the same pattern as the original allocation result.
6091       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
6092         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6093         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6094           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6095           AllocationInfos[CB] = AI;
6096           if (TLI)
6097             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6098         }
6099       }
6100       return true;
6101     };
6102 
6103     bool UsedAssumedInformation = false;
6104     bool Success = A.checkForAllCallLikeInstructions(
6105         AllocationIdentifierCB, *this, UsedAssumedInformation,
6106         /* CheckBBLivenessOnly */ false,
6107         /* CheckPotentiallyDead */ true);
6108     (void)Success;
6109     assert(Success && "Did not expect the call base visit callback to fail!");
6110 
6111     Attributor::SimplifictionCallbackTy SCB =
6112         [](const IRPosition &, const AbstractAttribute *,
6113            bool &) -> Optional<Value *> { return nullptr; };
6114     for (const auto &It : AllocationInfos)
6115       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6116                                        SCB);
6117     for (const auto &It : DeallocationInfos)
6118       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6119                                        SCB);
6120   }
6121 
6122   const std::string getAsStr() const override {
6123     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6124     for (const auto &It : AllocationInfos) {
6125       if (It.second->Status == AllocationInfo::INVALID)
6126         ++NumInvalidMallocs;
6127       else
6128         ++NumH2SMallocs;
6129     }
6130     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6131            std::to_string(NumInvalidMallocs);
6132   }
6133 
6134   /// See AbstractAttribute::trackStatistics().
6135   void trackStatistics() const override {
6136     STATS_DECL(
6137         MallocCalls, Function,
6138         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6139     for (auto &It : AllocationInfos)
6140       if (It.second->Status != AllocationInfo::INVALID)
6141         ++BUILD_STAT_NAME(MallocCalls, Function);
6142   }
6143 
6144   bool isAssumedHeapToStack(const CallBase &CB) const override {
6145     if (isValidState())
6146       if (AllocationInfo *AI =
6147               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6148         return AI->Status != AllocationInfo::INVALID;
6149     return false;
6150   }
6151 
6152   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6153     if (!isValidState())
6154       return false;
6155 
6156     for (auto &It : AllocationInfos) {
6157       AllocationInfo &AI = *It.second;
6158       if (AI.Status == AllocationInfo::INVALID)
6159         continue;
6160 
6161       if (AI.PotentialFreeCalls.count(&CB))
6162         return true;
6163     }
6164 
6165     return false;
6166   }
6167 
6168   ChangeStatus manifest(Attributor &A) override {
6169     assert(getState().isValidState() &&
6170            "Attempted to manifest an invalid state!");
6171 
6172     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6173     Function *F = getAnchorScope();
6174     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6175 
6176     for (auto &It : AllocationInfos) {
6177       AllocationInfo &AI = *It.second;
6178       if (AI.Status == AllocationInfo::INVALID)
6179         continue;
6180 
6181       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6182         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6183         A.deleteAfterManifest(*FreeCall);
6184         HasChanged = ChangeStatus::CHANGED;
6185       }
6186 
6187       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6188                         << "\n");
6189 
6190       auto Remark = [&](OptimizationRemark OR) {
6191         LibFunc IsAllocShared;
6192         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6193           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6194             return OR << "Moving globalized variable to the stack.";
6195         return OR << "Moving memory allocation from the heap to the stack.";
6196       };
6197       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6198         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6199       else
6200         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6201 
6202       const DataLayout &DL = A.getInfoCache().getDL();
6203       Value *Size;
6204       Optional<APInt> SizeAPI = getSize(A, *this, AI);
6205       if (SizeAPI.hasValue()) {
6206         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6207       } else {
6208         LLVMContext &Ctx = AI.CB->getContext();
6209         ObjectSizeOpts Opts;
6210         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6211         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6212         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6213                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6214         Size = SizeOffsetPair.first;
6215       }
6216 
6217       Align Alignment(1);
6218       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6219         Alignment = max(Alignment, RetAlign);
6220       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6221         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6222         assert(AlignmentAPI.hasValue() &&
6223                "Expected an alignment during manifest!");
6224         Alignment =
6225             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
6226       }
6227 
6228       // TODO: Hoist the alloca towards the function entry.
6229       unsigned AS = DL.getAllocaAddrSpace();
6230       Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
6231                                            Size, Alignment, "", AI.CB);
6232 
6233       if (Alloca->getType() != AI.CB->getType())
6234         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6235             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6236 
6237       auto *I8Ty = Type::getInt8Ty(F->getContext());
6238       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6239       assert(InitVal &&
6240              "Must be able to materialize initial memory state of allocation");
6241 
6242       A.changeValueAfterManifest(*AI.CB, *Alloca);
6243 
6244       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6245         auto *NBB = II->getNormalDest();
6246         BranchInst::Create(NBB, AI.CB->getParent());
6247         A.deleteAfterManifest(*AI.CB);
6248       } else {
6249         A.deleteAfterManifest(*AI.CB);
6250       }
6251 
6252       // Initialize the alloca with the same value as used by the allocation
6253       // function.  We can skip undef as the initial value of an alloc is
6254       // undef, and the memset would simply end up being DSEd.
6255       if (!isa<UndefValue>(InitVal)) {
6256         IRBuilder<> Builder(Alloca->getNextNode());
6257         // TODO: Use alignment above if align!=1
6258         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6259       }
6260       HasChanged = ChangeStatus::CHANGED;
6261     }
6262 
6263     return HasChanged;
6264   }
6265 
6266   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6267                            Value &V) {
6268     bool UsedAssumedInformation = false;
6269     Optional<Constant *> SimpleV =
6270         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6271     if (!SimpleV.hasValue())
6272       return APInt(64, 0);
6273     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6274       return CI->getValue();
6275     return llvm::None;
6276   }
6277 
6278   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6279                           AllocationInfo &AI) {
6280     auto Mapper = [&](const Value *V) -> const Value * {
6281       bool UsedAssumedInformation = false;
6282       if (Optional<Constant *> SimpleV =
6283               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6284         if (*SimpleV)
6285           return *SimpleV;
6286       return V;
6287     };
6288 
6289     const Function *F = getAnchorScope();
6290     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6291     return getAllocSize(AI.CB, TLI, Mapper);
6292   }
6293 
6294   /// Collection of all malloc-like calls in a function with associated
6295   /// information.
6296   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6297 
6298   /// Collection of all free-like calls in a function with associated
6299   /// information.
6300   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6301 
6302   ChangeStatus updateImpl(Attributor &A) override;
6303 };
6304 
6305 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6306   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6307   const Function *F = getAnchorScope();
6308   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6309 
6310   const auto &LivenessAA =
6311       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6312 
6313   MustBeExecutedContextExplorer &Explorer =
6314       A.getInfoCache().getMustBeExecutedContextExplorer();
6315 
6316   bool StackIsAccessibleByOtherThreads =
6317       A.getInfoCache().stackIsAccessibleByOtherThreads();
6318 
6319   // Flag to ensure we update our deallocation information at most once per
6320   // updateImpl call and only if we use the free check reasoning.
6321   bool HasUpdatedFrees = false;
6322 
6323   auto UpdateFrees = [&]() {
6324     HasUpdatedFrees = true;
6325 
6326     for (auto &It : DeallocationInfos) {
6327       DeallocationInfo &DI = *It.second;
6328       // For now we cannot use deallocations that have unknown inputs, skip
6329       // them.
6330       if (DI.MightFreeUnknownObjects)
6331         continue;
6332 
6333       // No need to analyze dead calls, ignore them instead.
6334       bool UsedAssumedInformation = false;
6335       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6336                           /* CheckBBLivenessOnly */ true))
6337         continue;
6338 
6339       // Use the optimistic version to get the freed objects, ignoring dead
6340       // branches etc.
6341       SmallVector<Value *, 8> Objects;
6342       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6343                                            *this, DI.CB,
6344                                            UsedAssumedInformation)) {
6345         LLVM_DEBUG(
6346             dbgs()
6347             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6348         DI.MightFreeUnknownObjects = true;
6349         continue;
6350       }
6351 
6352       // Check each object explicitly.
6353       for (auto *Obj : Objects) {
6354         // Free of null and undef can be ignored as no-ops (or UB in the latter
6355         // case).
6356         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6357           continue;
6358 
6359         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6360         if (!ObjCB) {
6361           LLVM_DEBUG(dbgs()
6362                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6363           DI.MightFreeUnknownObjects = true;
6364           continue;
6365         }
6366 
6367         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6368         if (!AI) {
6369           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6370                             << "\n");
6371           DI.MightFreeUnknownObjects = true;
6372           continue;
6373         }
6374 
6375         DI.PotentialAllocationCalls.insert(ObjCB);
6376       }
6377     }
6378   };
6379 
6380   auto FreeCheck = [&](AllocationInfo &AI) {
6381     // If the stack is not accessible by other threads, the "must-free" logic
6382     // doesn't apply as the pointer could be shared and needs to be places in
6383     // "shareable" memory.
6384     if (!StackIsAccessibleByOtherThreads) {
6385       auto &NoSyncAA =
6386           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6387       if (!NoSyncAA.isAssumedNoSync()) {
6388         LLVM_DEBUG(
6389             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6390                       "other threads and function is not nosync:\n");
6391         return false;
6392       }
6393     }
6394     if (!HasUpdatedFrees)
6395       UpdateFrees();
6396 
6397     // TODO: Allow multi exit functions that have different free calls.
6398     if (AI.PotentialFreeCalls.size() != 1) {
6399       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6400                         << AI.PotentialFreeCalls.size() << "\n");
6401       return false;
6402     }
6403     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6404     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6405     if (!DI) {
6406       LLVM_DEBUG(
6407           dbgs() << "[H2S] unique free call was not known as deallocation call "
6408                  << *UniqueFree << "\n");
6409       return false;
6410     }
6411     if (DI->MightFreeUnknownObjects) {
6412       LLVM_DEBUG(
6413           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6414       return false;
6415     }
6416     if (DI->PotentialAllocationCalls.size() > 1) {
6417       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6418                         << DI->PotentialAllocationCalls.size()
6419                         << " different allocations\n");
6420       return false;
6421     }
6422     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6423       LLVM_DEBUG(
6424           dbgs()
6425           << "[H2S] unique free call not known to free this allocation but "
6426           << **DI->PotentialAllocationCalls.begin() << "\n");
6427       return false;
6428     }
6429     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6430     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6431       LLVM_DEBUG(
6432           dbgs()
6433           << "[H2S] unique free call might not be executed with the allocation "
6434           << *UniqueFree << "\n");
6435       return false;
6436     }
6437     return true;
6438   };
6439 
6440   auto UsesCheck = [&](AllocationInfo &AI) {
6441     bool ValidUsesOnly = true;
6442 
6443     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6444       Instruction *UserI = cast<Instruction>(U.getUser());
6445       if (isa<LoadInst>(UserI))
6446         return true;
6447       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6448         if (SI->getValueOperand() == U.get()) {
6449           LLVM_DEBUG(dbgs()
6450                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6451           ValidUsesOnly = false;
6452         } else {
6453           // A store into the malloc'ed memory is fine.
6454         }
6455         return true;
6456       }
6457       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6458         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6459           return true;
6460         if (DeallocationInfos.count(CB)) {
6461           AI.PotentialFreeCalls.insert(CB);
6462           return true;
6463         }
6464 
6465         unsigned ArgNo = CB->getArgOperandNo(&U);
6466 
6467         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6468             *this, IRPosition::callsite_argument(*CB, ArgNo),
6469             DepClassTy::OPTIONAL);
6470 
6471         // If a call site argument use is nofree, we are fine.
6472         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6473             *this, IRPosition::callsite_argument(*CB, ArgNo),
6474             DepClassTy::OPTIONAL);
6475 
6476         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6477         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6478         if (MaybeCaptured ||
6479             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6480              MaybeFreed)) {
6481           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6482 
6483           // Emit a missed remark if this is missed OpenMP globalization.
6484           auto Remark = [&](OptimizationRemarkMissed ORM) {
6485             return ORM
6486                    << "Could not move globalized variable to the stack. "
6487                       "Variable is potentially captured in call. Mark "
6488                       "parameter as `__attribute__((noescape))` to override.";
6489           };
6490 
6491           if (ValidUsesOnly &&
6492               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6493             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6494 
6495           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6496           ValidUsesOnly = false;
6497         }
6498         return true;
6499       }
6500 
6501       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6502           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6503         Follow = true;
6504         return true;
6505       }
6506       // Unknown user for which we can not track uses further (in a way that
6507       // makes sense).
6508       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6509       ValidUsesOnly = false;
6510       return true;
6511     };
6512     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6513       return false;
6514     return ValidUsesOnly;
6515   };
6516 
6517   // The actual update starts here. We look at all allocations and depending on
6518   // their status perform the appropriate check(s).
6519   for (auto &It : AllocationInfos) {
6520     AllocationInfo &AI = *It.second;
6521     if (AI.Status == AllocationInfo::INVALID)
6522       continue;
6523 
6524     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6525       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6526       if (!APAlign) {
6527         // Can't generate an alloca which respects the required alignment
6528         // on the allocation.
6529         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6530                           << "\n");
6531         AI.Status = AllocationInfo::INVALID;
6532         Changed = ChangeStatus::CHANGED;
6533         continue;
6534       } else {
6535         if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6536             !APAlign->isPowerOf2()) {
6537           LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6538                             << "\n");
6539           AI.Status = AllocationInfo::INVALID;
6540           Changed = ChangeStatus::CHANGED;
6541           continue;
6542         }
6543       }
6544     }
6545 
6546     if (MaxHeapToStackSize != -1) {
6547       Optional<APInt> Size = getSize(A, *this, AI);
6548       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6549         LLVM_DEBUG({
6550           if (!Size.hasValue())
6551             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6552           else
6553             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6554                    << MaxHeapToStackSize << "\n";
6555         });
6556 
6557         AI.Status = AllocationInfo::INVALID;
6558         Changed = ChangeStatus::CHANGED;
6559         continue;
6560       }
6561     }
6562 
6563     switch (AI.Status) {
6564     case AllocationInfo::STACK_DUE_TO_USE:
6565       if (UsesCheck(AI))
6566         continue;
6567       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6568       LLVM_FALLTHROUGH;
6569     case AllocationInfo::STACK_DUE_TO_FREE:
6570       if (FreeCheck(AI))
6571         continue;
6572       AI.Status = AllocationInfo::INVALID;
6573       Changed = ChangeStatus::CHANGED;
6574       continue;
6575     case AllocationInfo::INVALID:
6576       llvm_unreachable("Invalid allocations should never reach this point!");
6577     };
6578   }
6579 
6580   return Changed;
6581 }
6582 } // namespace
6583 
6584 /// ----------------------- Privatizable Pointers ------------------------------
6585 namespace {
6586 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6587   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6588       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6589 
6590   ChangeStatus indicatePessimisticFixpoint() override {
6591     AAPrivatizablePtr::indicatePessimisticFixpoint();
6592     PrivatizableType = nullptr;
6593     return ChangeStatus::CHANGED;
6594   }
6595 
6596   /// Identify the type we can chose for a private copy of the underlying
6597   /// argument. None means it is not clear yet, nullptr means there is none.
6598   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6599 
6600   /// Return a privatizable type that encloses both T0 and T1.
6601   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6602   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6603     if (!T0.hasValue())
6604       return T1;
6605     if (!T1.hasValue())
6606       return T0;
6607     if (T0 == T1)
6608       return T0;
6609     return nullptr;
6610   }
6611 
6612   Optional<Type *> getPrivatizableType() const override {
6613     return PrivatizableType;
6614   }
6615 
6616   const std::string getAsStr() const override {
6617     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6618   }
6619 
6620 protected:
6621   Optional<Type *> PrivatizableType;
6622 };
6623 
6624 // TODO: Do this for call site arguments (probably also other values) as well.
6625 
6626 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6627   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6628       : AAPrivatizablePtrImpl(IRP, A) {}
6629 
6630   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6631   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6632     // If this is a byval argument and we know all the call sites (so we can
6633     // rewrite them), there is no need to check them explicitly.
6634     bool UsedAssumedInformation = false;
6635     SmallVector<Attribute, 1> Attrs;
6636     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6637     if (!Attrs.empty() &&
6638         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6639                                true, UsedAssumedInformation))
6640       return Attrs[0].getValueAsType();
6641 
6642     Optional<Type *> Ty;
6643     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6644 
6645     // Make sure the associated call site argument has the same type at all call
6646     // sites and it is an allocation we know is safe to privatize, for now that
6647     // means we only allow alloca instructions.
6648     // TODO: We can additionally analyze the accesses in the callee to  create
6649     //       the type from that information instead. That is a little more
6650     //       involved and will be done in a follow up patch.
6651     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6652       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6653       // Check if a coresponding argument was found or if it is one not
6654       // associated (which can happen for callback calls).
6655       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6656         return false;
6657 
6658       // Check that all call sites agree on a type.
6659       auto &PrivCSArgAA =
6660           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6661       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6662 
6663       LLVM_DEBUG({
6664         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6665         if (CSTy.hasValue() && CSTy.getValue())
6666           CSTy.getValue()->print(dbgs());
6667         else if (CSTy.hasValue())
6668           dbgs() << "<nullptr>";
6669         else
6670           dbgs() << "<none>";
6671       });
6672 
6673       Ty = combineTypes(Ty, CSTy);
6674 
6675       LLVM_DEBUG({
6676         dbgs() << " : New Type: ";
6677         if (Ty.hasValue() && Ty.getValue())
6678           Ty.getValue()->print(dbgs());
6679         else if (Ty.hasValue())
6680           dbgs() << "<nullptr>";
6681         else
6682           dbgs() << "<none>";
6683         dbgs() << "\n";
6684       });
6685 
6686       return !Ty.hasValue() || Ty.getValue();
6687     };
6688 
6689     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6690                                 UsedAssumedInformation))
6691       return nullptr;
6692     return Ty;
6693   }
6694 
6695   /// See AbstractAttribute::updateImpl(...).
6696   ChangeStatus updateImpl(Attributor &A) override {
6697     PrivatizableType = identifyPrivatizableType(A);
6698     if (!PrivatizableType.hasValue())
6699       return ChangeStatus::UNCHANGED;
6700     if (!PrivatizableType.getValue())
6701       return indicatePessimisticFixpoint();
6702 
6703     // The dependence is optional so we don't give up once we give up on the
6704     // alignment.
6705     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6706                         DepClassTy::OPTIONAL);
6707 
6708     // Avoid arguments with padding for now.
6709     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6710         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6711                                                 A.getInfoCache().getDL())) {
6712       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6713       return indicatePessimisticFixpoint();
6714     }
6715 
6716     // Collect the types that will replace the privatizable type in the function
6717     // signature.
6718     SmallVector<Type *, 16> ReplacementTypes;
6719     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6720 
6721     // Verify callee and caller agree on how the promoted argument would be
6722     // passed.
6723     Function &Fn = *getIRPosition().getAnchorScope();
6724     const auto *TTI =
6725         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6726     if (!TTI) {
6727       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6728                         << Fn.getName() << "\n");
6729       return indicatePessimisticFixpoint();
6730     }
6731 
6732     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6733       CallBase *CB = ACS.getInstruction();
6734       return TTI->areTypesABICompatible(
6735           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6736     };
6737     bool UsedAssumedInformation = false;
6738     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6739                                 UsedAssumedInformation)) {
6740       LLVM_DEBUG(
6741           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6742                  << Fn.getName() << "\n");
6743       return indicatePessimisticFixpoint();
6744     }
6745 
6746     // Register a rewrite of the argument.
6747     Argument *Arg = getAssociatedArgument();
6748     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6749       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6750       return indicatePessimisticFixpoint();
6751     }
6752 
6753     unsigned ArgNo = Arg->getArgNo();
6754 
6755     // Helper to check if for the given call site the associated argument is
6756     // passed to a callback where the privatization would be different.
6757     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6758       SmallVector<const Use *, 4> CallbackUses;
6759       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6760       for (const Use *U : CallbackUses) {
6761         AbstractCallSite CBACS(U);
6762         assert(CBACS && CBACS.isCallbackCall());
6763         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6764           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6765 
6766           LLVM_DEBUG({
6767             dbgs()
6768                 << "[AAPrivatizablePtr] Argument " << *Arg
6769                 << "check if can be privatized in the context of its parent ("
6770                 << Arg->getParent()->getName()
6771                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6772                    "callback ("
6773                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6774                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6775                 << CBACS.getCallArgOperand(CBArg) << " vs "
6776                 << CB.getArgOperand(ArgNo) << "\n"
6777                 << "[AAPrivatizablePtr] " << CBArg << " : "
6778                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6779           });
6780 
6781           if (CBArgNo != int(ArgNo))
6782             continue;
6783           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6784               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6785           if (CBArgPrivAA.isValidState()) {
6786             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6787             if (!CBArgPrivTy.hasValue())
6788               continue;
6789             if (CBArgPrivTy.getValue() == PrivatizableType)
6790               continue;
6791           }
6792 
6793           LLVM_DEBUG({
6794             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6795                    << " cannot be privatized in the context of its parent ("
6796                    << Arg->getParent()->getName()
6797                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6798                       "callback ("
6799                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6800                    << ").\n[AAPrivatizablePtr] for which the argument "
6801                       "privatization is not compatible.\n";
6802           });
6803           return false;
6804         }
6805       }
6806       return true;
6807     };
6808 
6809     // Helper to check if for the given call site the associated argument is
6810     // passed to a direct call where the privatization would be different.
6811     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6812       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6813       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6814       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6815              "Expected a direct call operand for callback call operand");
6816 
6817       LLVM_DEBUG({
6818         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6819                << " check if be privatized in the context of its parent ("
6820                << Arg->getParent()->getName()
6821                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6822                   "direct call of ("
6823                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6824                << ").\n";
6825       });
6826 
6827       Function *DCCallee = DC->getCalledFunction();
6828       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6829         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6830             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6831             DepClassTy::REQUIRED);
6832         if (DCArgPrivAA.isValidState()) {
6833           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6834           if (!DCArgPrivTy.hasValue())
6835             return true;
6836           if (DCArgPrivTy.getValue() == PrivatizableType)
6837             return true;
6838         }
6839       }
6840 
6841       LLVM_DEBUG({
6842         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6843                << " cannot be privatized in the context of its parent ("
6844                << Arg->getParent()->getName()
6845                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6846                   "direct call of ("
6847                << ACS.getInstruction()->getCalledFunction()->getName()
6848                << ").\n[AAPrivatizablePtr] for which the argument "
6849                   "privatization is not compatible.\n";
6850       });
6851       return false;
6852     };
6853 
6854     // Helper to check if the associated argument is used at the given abstract
6855     // call site in a way that is incompatible with the privatization assumed
6856     // here.
6857     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6858       if (ACS.isDirectCall())
6859         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6860       if (ACS.isCallbackCall())
6861         return IsCompatiblePrivArgOfDirectCS(ACS);
6862       return false;
6863     };
6864 
6865     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6866                                 UsedAssumedInformation))
6867       return indicatePessimisticFixpoint();
6868 
6869     return ChangeStatus::UNCHANGED;
6870   }
6871 
6872   /// Given a type to private \p PrivType, collect the constituates (which are
6873   /// used) in \p ReplacementTypes.
6874   static void
6875   identifyReplacementTypes(Type *PrivType,
6876                            SmallVectorImpl<Type *> &ReplacementTypes) {
6877     // TODO: For now we expand the privatization type to the fullest which can
6878     //       lead to dead arguments that need to be removed later.
6879     assert(PrivType && "Expected privatizable type!");
6880 
6881     // Traverse the type, extract constituate types on the outermost level.
6882     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6883       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6884         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6885     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6886       ReplacementTypes.append(PrivArrayType->getNumElements(),
6887                               PrivArrayType->getElementType());
6888     } else {
6889       ReplacementTypes.push_back(PrivType);
6890     }
6891   }
6892 
6893   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6894   /// The values needed are taken from the arguments of \p F starting at
6895   /// position \p ArgNo.
6896   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6897                                    unsigned ArgNo, Instruction &IP) {
6898     assert(PrivType && "Expected privatizable type!");
6899 
6900     IRBuilder<NoFolder> IRB(&IP);
6901     const DataLayout &DL = F.getParent()->getDataLayout();
6902 
6903     // Traverse the type, build GEPs and stores.
6904     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6905       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6906       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6907         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6908         Value *Ptr =
6909             constructPointer(PointeeTy, PrivType, &Base,
6910                              PrivStructLayout->getElementOffset(u), IRB, DL);
6911         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6912       }
6913     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6914       Type *PointeeTy = PrivArrayType->getElementType();
6915       Type *PointeePtrTy = PointeeTy->getPointerTo();
6916       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6917       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6918         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6919                                       u * PointeeTySize, IRB, DL);
6920         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6921       }
6922     } else {
6923       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6924     }
6925   }
6926 
6927   /// Extract values from \p Base according to the type \p PrivType at the
6928   /// call position \p ACS. The values are appended to \p ReplacementValues.
6929   void createReplacementValues(Align Alignment, Type *PrivType,
6930                                AbstractCallSite ACS, Value *Base,
6931                                SmallVectorImpl<Value *> &ReplacementValues) {
6932     assert(Base && "Expected base value!");
6933     assert(PrivType && "Expected privatizable type!");
6934     Instruction *IP = ACS.getInstruction();
6935 
6936     IRBuilder<NoFolder> IRB(IP);
6937     const DataLayout &DL = IP->getModule()->getDataLayout();
6938 
6939     Type *PrivPtrType = PrivType->getPointerTo();
6940     if (Base->getType() != PrivPtrType)
6941       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6942           Base, PrivPtrType, "", ACS.getInstruction());
6943 
6944     // Traverse the type, build GEPs and loads.
6945     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6946       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6947       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6948         Type *PointeeTy = PrivStructType->getElementType(u);
6949         Value *Ptr =
6950             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6951                              PrivStructLayout->getElementOffset(u), IRB, DL);
6952         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6953         L->setAlignment(Alignment);
6954         ReplacementValues.push_back(L);
6955       }
6956     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6957       Type *PointeeTy = PrivArrayType->getElementType();
6958       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6959       Type *PointeePtrTy = PointeeTy->getPointerTo();
6960       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6961         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6962                                       u * PointeeTySize, IRB, DL);
6963         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6964         L->setAlignment(Alignment);
6965         ReplacementValues.push_back(L);
6966       }
6967     } else {
6968       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6969       L->setAlignment(Alignment);
6970       ReplacementValues.push_back(L);
6971     }
6972   }
6973 
6974   /// See AbstractAttribute::manifest(...)
6975   ChangeStatus manifest(Attributor &A) override {
6976     if (!PrivatizableType.hasValue())
6977       return ChangeStatus::UNCHANGED;
6978     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6979 
6980     // Collect all tail calls in the function as we cannot allow new allocas to
6981     // escape into tail recursion.
6982     // TODO: Be smarter about new allocas escaping into tail calls.
6983     SmallVector<CallInst *, 16> TailCalls;
6984     bool UsedAssumedInformation = false;
6985     if (!A.checkForAllInstructions(
6986             [&](Instruction &I) {
6987               CallInst &CI = cast<CallInst>(I);
6988               if (CI.isTailCall())
6989                 TailCalls.push_back(&CI);
6990               return true;
6991             },
6992             *this, {Instruction::Call}, UsedAssumedInformation))
6993       return ChangeStatus::UNCHANGED;
6994 
6995     Argument *Arg = getAssociatedArgument();
6996     // Query AAAlign attribute for alignment of associated argument to
6997     // determine the best alignment of loads.
6998     const auto &AlignAA =
6999         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7000 
7001     // Callback to repair the associated function. A new alloca is placed at the
7002     // beginning and initialized with the values passed through arguments. The
7003     // new alloca replaces the use of the old pointer argument.
7004     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7005         [=](const Attributor::ArgumentReplacementInfo &ARI,
7006             Function &ReplacementFn, Function::arg_iterator ArgIt) {
7007           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7008           Instruction *IP = &*EntryBB.getFirstInsertionPt();
7009           const DataLayout &DL = IP->getModule()->getDataLayout();
7010           unsigned AS = DL.getAllocaAddrSpace();
7011           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
7012                                            Arg->getName() + ".priv", IP);
7013           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
7014                                ArgIt->getArgNo(), *IP);
7015 
7016           if (AI->getType() != Arg->getType())
7017             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7018                 AI, Arg->getType(), "", IP);
7019           Arg->replaceAllUsesWith(AI);
7020 
7021           for (CallInst *CI : TailCalls)
7022             CI->setTailCall(false);
7023         };
7024 
7025     // Callback to repair a call site of the associated function. The elements
7026     // of the privatizable type are loaded prior to the call and passed to the
7027     // new function version.
7028     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7029         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
7030                       AbstractCallSite ACS,
7031                       SmallVectorImpl<Value *> &NewArgOperands) {
7032           // When no alignment is specified for the load instruction,
7033           // natural alignment is assumed.
7034           createReplacementValues(
7035               assumeAligned(AlignAA.getAssumedAlign()),
7036               PrivatizableType.getValue(), ACS,
7037               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7038               NewArgOperands);
7039         };
7040 
7041     // Collect the types that will replace the privatizable type in the function
7042     // signature.
7043     SmallVector<Type *, 16> ReplacementTypes;
7044     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
7045 
7046     // Register a rewrite of the argument.
7047     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7048                                            std::move(FnRepairCB),
7049                                            std::move(ACSRepairCB)))
7050       return ChangeStatus::CHANGED;
7051     return ChangeStatus::UNCHANGED;
7052   }
7053 
7054   /// See AbstractAttribute::trackStatistics()
7055   void trackStatistics() const override {
7056     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7057   }
7058 };
7059 
7060 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7061   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7062       : AAPrivatizablePtrImpl(IRP, A) {}
7063 
7064   /// See AbstractAttribute::initialize(...).
7065   virtual void initialize(Attributor &A) override {
7066     // TODO: We can privatize more than arguments.
7067     indicatePessimisticFixpoint();
7068   }
7069 
7070   ChangeStatus updateImpl(Attributor &A) override {
7071     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7072                      "updateImpl will not be called");
7073   }
7074 
7075   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7076   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
7077     Value *Obj = getUnderlyingObject(&getAssociatedValue());
7078     if (!Obj) {
7079       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7080       return nullptr;
7081     }
7082 
7083     if (auto *AI = dyn_cast<AllocaInst>(Obj))
7084       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7085         if (CI->isOne())
7086           return AI->getAllocatedType();
7087     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7088       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7089           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7090       if (PrivArgAA.isAssumedPrivatizablePtr())
7091         return PrivArgAA.getPrivatizableType();
7092     }
7093 
7094     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7095                          "alloca nor privatizable argument: "
7096                       << *Obj << "!\n");
7097     return nullptr;
7098   }
7099 
7100   /// See AbstractAttribute::trackStatistics()
7101   void trackStatistics() const override {
7102     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7103   }
7104 };
7105 
7106 struct AAPrivatizablePtrCallSiteArgument final
7107     : public AAPrivatizablePtrFloating {
7108   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7109       : AAPrivatizablePtrFloating(IRP, A) {}
7110 
7111   /// See AbstractAttribute::initialize(...).
7112   void initialize(Attributor &A) override {
7113     if (getIRPosition().hasAttr(Attribute::ByVal))
7114       indicateOptimisticFixpoint();
7115   }
7116 
7117   /// See AbstractAttribute::updateImpl(...).
7118   ChangeStatus updateImpl(Attributor &A) override {
7119     PrivatizableType = identifyPrivatizableType(A);
7120     if (!PrivatizableType.hasValue())
7121       return ChangeStatus::UNCHANGED;
7122     if (!PrivatizableType.getValue())
7123       return indicatePessimisticFixpoint();
7124 
7125     const IRPosition &IRP = getIRPosition();
7126     auto &NoCaptureAA =
7127         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
7128     if (!NoCaptureAA.isAssumedNoCapture()) {
7129       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7130       return indicatePessimisticFixpoint();
7131     }
7132 
7133     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
7134     if (!NoAliasAA.isAssumedNoAlias()) {
7135       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7136       return indicatePessimisticFixpoint();
7137     }
7138 
7139     bool IsKnown;
7140     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7141       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7142       return indicatePessimisticFixpoint();
7143     }
7144 
7145     return ChangeStatus::UNCHANGED;
7146   }
7147 
7148   /// See AbstractAttribute::trackStatistics()
7149   void trackStatistics() const override {
7150     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7151   }
7152 };
7153 
7154 struct AAPrivatizablePtrCallSiteReturned final
7155     : public AAPrivatizablePtrFloating {
7156   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7157       : AAPrivatizablePtrFloating(IRP, A) {}
7158 
7159   /// See AbstractAttribute::initialize(...).
7160   void initialize(Attributor &A) override {
7161     // TODO: We can privatize more than arguments.
7162     indicatePessimisticFixpoint();
7163   }
7164 
7165   /// See AbstractAttribute::trackStatistics()
7166   void trackStatistics() const override {
7167     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7168   }
7169 };
7170 
7171 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7172   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7173       : AAPrivatizablePtrFloating(IRP, A) {}
7174 
7175   /// See AbstractAttribute::initialize(...).
7176   void initialize(Attributor &A) override {
7177     // TODO: We can privatize more than arguments.
7178     indicatePessimisticFixpoint();
7179   }
7180 
7181   /// See AbstractAttribute::trackStatistics()
7182   void trackStatistics() const override {
7183     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7184   }
7185 };
7186 } // namespace
7187 
7188 /// -------------------- Memory Behavior Attributes ----------------------------
7189 /// Includes read-none, read-only, and write-only.
7190 /// ----------------------------------------------------------------------------
7191 namespace {
7192 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7193   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7194       : AAMemoryBehavior(IRP, A) {}
7195 
7196   /// See AbstractAttribute::initialize(...).
7197   void initialize(Attributor &A) override {
7198     intersectAssumedBits(BEST_STATE);
7199     getKnownStateFromValue(getIRPosition(), getState());
7200     AAMemoryBehavior::initialize(A);
7201   }
7202 
7203   /// Return the memory behavior information encoded in the IR for \p IRP.
7204   static void getKnownStateFromValue(const IRPosition &IRP,
7205                                      BitIntegerState &State,
7206                                      bool IgnoreSubsumingPositions = false) {
7207     SmallVector<Attribute, 2> Attrs;
7208     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7209     for (const Attribute &Attr : Attrs) {
7210       switch (Attr.getKindAsEnum()) {
7211       case Attribute::ReadNone:
7212         State.addKnownBits(NO_ACCESSES);
7213         break;
7214       case Attribute::ReadOnly:
7215         State.addKnownBits(NO_WRITES);
7216         break;
7217       case Attribute::WriteOnly:
7218         State.addKnownBits(NO_READS);
7219         break;
7220       default:
7221         llvm_unreachable("Unexpected attribute!");
7222       }
7223     }
7224 
7225     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7226       if (!I->mayReadFromMemory())
7227         State.addKnownBits(NO_READS);
7228       if (!I->mayWriteToMemory())
7229         State.addKnownBits(NO_WRITES);
7230     }
7231   }
7232 
7233   /// See AbstractAttribute::getDeducedAttributes(...).
7234   void getDeducedAttributes(LLVMContext &Ctx,
7235                             SmallVectorImpl<Attribute> &Attrs) const override {
7236     assert(Attrs.size() == 0);
7237     if (isAssumedReadNone())
7238       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7239     else if (isAssumedReadOnly())
7240       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7241     else if (isAssumedWriteOnly())
7242       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7243     assert(Attrs.size() <= 1);
7244   }
7245 
7246   /// See AbstractAttribute::manifest(...).
7247   ChangeStatus manifest(Attributor &A) override {
7248     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7249       return ChangeStatus::UNCHANGED;
7250 
7251     const IRPosition &IRP = getIRPosition();
7252 
7253     // Check if we would improve the existing attributes first.
7254     SmallVector<Attribute, 4> DeducedAttrs;
7255     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7256     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7257           return IRP.hasAttr(Attr.getKindAsEnum(),
7258                              /* IgnoreSubsumingPositions */ true);
7259         }))
7260       return ChangeStatus::UNCHANGED;
7261 
7262     // Clear existing attributes.
7263     IRP.removeAttrs(AttrKinds);
7264 
7265     // Use the generic manifest method.
7266     return IRAttribute::manifest(A);
7267   }
7268 
7269   /// See AbstractState::getAsStr().
7270   const std::string getAsStr() const override {
7271     if (isAssumedReadNone())
7272       return "readnone";
7273     if (isAssumedReadOnly())
7274       return "readonly";
7275     if (isAssumedWriteOnly())
7276       return "writeonly";
7277     return "may-read/write";
7278   }
7279 
7280   /// The set of IR attributes AAMemoryBehavior deals with.
7281   static const Attribute::AttrKind AttrKinds[3];
7282 };
7283 
7284 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7285     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7286 
7287 /// Memory behavior attribute for a floating value.
7288 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7289   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7290       : AAMemoryBehaviorImpl(IRP, A) {}
7291 
7292   /// See AbstractAttribute::updateImpl(...).
7293   ChangeStatus updateImpl(Attributor &A) override;
7294 
7295   /// See AbstractAttribute::trackStatistics()
7296   void trackStatistics() const override {
7297     if (isAssumedReadNone())
7298       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7299     else if (isAssumedReadOnly())
7300       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7301     else if (isAssumedWriteOnly())
7302       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7303   }
7304 
7305 private:
7306   /// Return true if users of \p UserI might access the underlying
7307   /// variable/location described by \p U and should therefore be analyzed.
7308   bool followUsersOfUseIn(Attributor &A, const Use &U,
7309                           const Instruction *UserI);
7310 
7311   /// Update the state according to the effect of use \p U in \p UserI.
7312   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7313 };
7314 
7315 /// Memory behavior attribute for function argument.
7316 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7317   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7318       : AAMemoryBehaviorFloating(IRP, A) {}
7319 
7320   /// See AbstractAttribute::initialize(...).
7321   void initialize(Attributor &A) override {
7322     intersectAssumedBits(BEST_STATE);
7323     const IRPosition &IRP = getIRPosition();
7324     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7325     // can query it when we use has/getAttr. That would allow us to reuse the
7326     // initialize of the base class here.
7327     bool HasByVal =
7328         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7329     getKnownStateFromValue(IRP, getState(),
7330                            /* IgnoreSubsumingPositions */ HasByVal);
7331 
7332     // Initialize the use vector with all direct uses of the associated value.
7333     Argument *Arg = getAssociatedArgument();
7334     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7335       indicatePessimisticFixpoint();
7336   }
7337 
7338   ChangeStatus manifest(Attributor &A) override {
7339     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7340     if (!getAssociatedValue().getType()->isPointerTy())
7341       return ChangeStatus::UNCHANGED;
7342 
7343     // TODO: From readattrs.ll: "inalloca parameters are always
7344     //                           considered written"
7345     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7346       removeKnownBits(NO_WRITES);
7347       removeAssumedBits(NO_WRITES);
7348     }
7349     return AAMemoryBehaviorFloating::manifest(A);
7350   }
7351 
7352   /// See AbstractAttribute::trackStatistics()
7353   void trackStatistics() const override {
7354     if (isAssumedReadNone())
7355       STATS_DECLTRACK_ARG_ATTR(readnone)
7356     else if (isAssumedReadOnly())
7357       STATS_DECLTRACK_ARG_ATTR(readonly)
7358     else if (isAssumedWriteOnly())
7359       STATS_DECLTRACK_ARG_ATTR(writeonly)
7360   }
7361 };
7362 
7363 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7364   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7365       : AAMemoryBehaviorArgument(IRP, A) {}
7366 
7367   /// See AbstractAttribute::initialize(...).
7368   void initialize(Attributor &A) override {
7369     // If we don't have an associated attribute this is either a variadic call
7370     // or an indirect call, either way, nothing to do here.
7371     Argument *Arg = getAssociatedArgument();
7372     if (!Arg) {
7373       indicatePessimisticFixpoint();
7374       return;
7375     }
7376     if (Arg->hasByValAttr()) {
7377       addKnownBits(NO_WRITES);
7378       removeKnownBits(NO_READS);
7379       removeAssumedBits(NO_READS);
7380     }
7381     AAMemoryBehaviorArgument::initialize(A);
7382     if (getAssociatedFunction()->isDeclaration())
7383       indicatePessimisticFixpoint();
7384   }
7385 
7386   /// See AbstractAttribute::updateImpl(...).
7387   ChangeStatus updateImpl(Attributor &A) override {
7388     // TODO: Once we have call site specific value information we can provide
7389     //       call site specific liveness liveness information and then it makes
7390     //       sense to specialize attributes for call sites arguments instead of
7391     //       redirecting requests to the callee argument.
7392     Argument *Arg = getAssociatedArgument();
7393     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7394     auto &ArgAA =
7395         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7396     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7397   }
7398 
7399   /// See AbstractAttribute::trackStatistics()
7400   void trackStatistics() const override {
7401     if (isAssumedReadNone())
7402       STATS_DECLTRACK_CSARG_ATTR(readnone)
7403     else if (isAssumedReadOnly())
7404       STATS_DECLTRACK_CSARG_ATTR(readonly)
7405     else if (isAssumedWriteOnly())
7406       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7407   }
7408 };
7409 
7410 /// Memory behavior attribute for a call site return position.
7411 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7412   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7413       : AAMemoryBehaviorFloating(IRP, A) {}
7414 
7415   /// See AbstractAttribute::initialize(...).
7416   void initialize(Attributor &A) override {
7417     AAMemoryBehaviorImpl::initialize(A);
7418     Function *F = getAssociatedFunction();
7419     if (!F || F->isDeclaration())
7420       indicatePessimisticFixpoint();
7421   }
7422 
7423   /// See AbstractAttribute::manifest(...).
7424   ChangeStatus manifest(Attributor &A) override {
7425     // We do not annotate returned values.
7426     return ChangeStatus::UNCHANGED;
7427   }
7428 
7429   /// See AbstractAttribute::trackStatistics()
7430   void trackStatistics() const override {}
7431 };
7432 
7433 /// An AA to represent the memory behavior function attributes.
7434 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7435   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7436       : AAMemoryBehaviorImpl(IRP, A) {}
7437 
7438   /// See AbstractAttribute::updateImpl(Attributor &A).
7439   virtual ChangeStatus updateImpl(Attributor &A) override;
7440 
7441   /// See AbstractAttribute::manifest(...).
7442   ChangeStatus manifest(Attributor &A) override {
7443     Function &F = cast<Function>(getAnchorValue());
7444     if (isAssumedReadNone()) {
7445       F.removeFnAttr(Attribute::ArgMemOnly);
7446       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7447       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7448     }
7449     return AAMemoryBehaviorImpl::manifest(A);
7450   }
7451 
7452   /// See AbstractAttribute::trackStatistics()
7453   void trackStatistics() const override {
7454     if (isAssumedReadNone())
7455       STATS_DECLTRACK_FN_ATTR(readnone)
7456     else if (isAssumedReadOnly())
7457       STATS_DECLTRACK_FN_ATTR(readonly)
7458     else if (isAssumedWriteOnly())
7459       STATS_DECLTRACK_FN_ATTR(writeonly)
7460   }
7461 };
7462 
7463 /// AAMemoryBehavior attribute for call sites.
7464 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7465   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7466       : AAMemoryBehaviorImpl(IRP, A) {}
7467 
7468   /// See AbstractAttribute::initialize(...).
7469   void initialize(Attributor &A) override {
7470     AAMemoryBehaviorImpl::initialize(A);
7471     Function *F = getAssociatedFunction();
7472     if (!F || F->isDeclaration())
7473       indicatePessimisticFixpoint();
7474   }
7475 
7476   /// See AbstractAttribute::updateImpl(...).
7477   ChangeStatus updateImpl(Attributor &A) override {
7478     // TODO: Once we have call site specific value information we can provide
7479     //       call site specific liveness liveness information and then it makes
7480     //       sense to specialize attributes for call sites arguments instead of
7481     //       redirecting requests to the callee argument.
7482     Function *F = getAssociatedFunction();
7483     const IRPosition &FnPos = IRPosition::function(*F);
7484     auto &FnAA =
7485         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7486     return clampStateAndIndicateChange(getState(), FnAA.getState());
7487   }
7488 
7489   /// See AbstractAttribute::trackStatistics()
7490   void trackStatistics() const override {
7491     if (isAssumedReadNone())
7492       STATS_DECLTRACK_CS_ATTR(readnone)
7493     else if (isAssumedReadOnly())
7494       STATS_DECLTRACK_CS_ATTR(readonly)
7495     else if (isAssumedWriteOnly())
7496       STATS_DECLTRACK_CS_ATTR(writeonly)
7497   }
7498 };
7499 
7500 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7501 
7502   // The current assumed state used to determine a change.
7503   auto AssumedState = getAssumed();
7504 
7505   auto CheckRWInst = [&](Instruction &I) {
7506     // If the instruction has an own memory behavior state, use it to restrict
7507     // the local state. No further analysis is required as the other memory
7508     // state is as optimistic as it gets.
7509     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7510       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7511           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7512       intersectAssumedBits(MemBehaviorAA.getAssumed());
7513       return !isAtFixpoint();
7514     }
7515 
7516     // Remove access kind modifiers if necessary.
7517     if (I.mayReadFromMemory())
7518       removeAssumedBits(NO_READS);
7519     if (I.mayWriteToMemory())
7520       removeAssumedBits(NO_WRITES);
7521     return !isAtFixpoint();
7522   };
7523 
7524   bool UsedAssumedInformation = false;
7525   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7526                                           UsedAssumedInformation))
7527     return indicatePessimisticFixpoint();
7528 
7529   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7530                                         : ChangeStatus::UNCHANGED;
7531 }
7532 
7533 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7534 
7535   const IRPosition &IRP = getIRPosition();
7536   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7537   AAMemoryBehavior::StateType &S = getState();
7538 
7539   // First, check the function scope. We take the known information and we avoid
7540   // work if the assumed information implies the current assumed information for
7541   // this attribute. This is a valid for all but byval arguments.
7542   Argument *Arg = IRP.getAssociatedArgument();
7543   AAMemoryBehavior::base_t FnMemAssumedState =
7544       AAMemoryBehavior::StateType::getWorstState();
7545   if (!Arg || !Arg->hasByValAttr()) {
7546     const auto &FnMemAA =
7547         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7548     FnMemAssumedState = FnMemAA.getAssumed();
7549     S.addKnownBits(FnMemAA.getKnown());
7550     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7551       return ChangeStatus::UNCHANGED;
7552   }
7553 
7554   // The current assumed state used to determine a change.
7555   auto AssumedState = S.getAssumed();
7556 
7557   // Make sure the value is not captured (except through "return"), if
7558   // it is, any information derived would be irrelevant anyway as we cannot
7559   // check the potential aliases introduced by the capture. However, no need
7560   // to fall back to anythign less optimistic than the function state.
7561   const auto &ArgNoCaptureAA =
7562       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7563   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7564     S.intersectAssumedBits(FnMemAssumedState);
7565     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7566                                           : ChangeStatus::UNCHANGED;
7567   }
7568 
7569   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7570   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7571     Instruction *UserI = cast<Instruction>(U.getUser());
7572     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7573                       << " \n");
7574 
7575     // Droppable users, e.g., llvm::assume does not actually perform any action.
7576     if (UserI->isDroppable())
7577       return true;
7578 
7579     // Check if the users of UserI should also be visited.
7580     Follow = followUsersOfUseIn(A, U, UserI);
7581 
7582     // If UserI might touch memory we analyze the use in detail.
7583     if (UserI->mayReadOrWriteMemory())
7584       analyzeUseIn(A, U, UserI);
7585 
7586     return !isAtFixpoint();
7587   };
7588 
7589   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7590     return indicatePessimisticFixpoint();
7591 
7592   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7593                                         : ChangeStatus::UNCHANGED;
7594 }
7595 
7596 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7597                                                   const Instruction *UserI) {
7598   // The loaded value is unrelated to the pointer argument, no need to
7599   // follow the users of the load.
7600   if (isa<LoadInst>(UserI))
7601     return false;
7602 
7603   // By default we follow all uses assuming UserI might leak information on U,
7604   // we have special handling for call sites operands though.
7605   const auto *CB = dyn_cast<CallBase>(UserI);
7606   if (!CB || !CB->isArgOperand(&U))
7607     return true;
7608 
7609   // If the use is a call argument known not to be captured, the users of
7610   // the call do not need to be visited because they have to be unrelated to
7611   // the input. Note that this check is not trivial even though we disallow
7612   // general capturing of the underlying argument. The reason is that the
7613   // call might the argument "through return", which we allow and for which we
7614   // need to check call users.
7615   if (U.get()->getType()->isPointerTy()) {
7616     unsigned ArgNo = CB->getArgOperandNo(&U);
7617     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7618         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7619     return !ArgNoCaptureAA.isAssumedNoCapture();
7620   }
7621 
7622   return true;
7623 }
7624 
7625 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7626                                             const Instruction *UserI) {
7627   assert(UserI->mayReadOrWriteMemory());
7628 
7629   switch (UserI->getOpcode()) {
7630   default:
7631     // TODO: Handle all atomics and other side-effect operations we know of.
7632     break;
7633   case Instruction::Load:
7634     // Loads cause the NO_READS property to disappear.
7635     removeAssumedBits(NO_READS);
7636     return;
7637 
7638   case Instruction::Store:
7639     // Stores cause the NO_WRITES property to disappear if the use is the
7640     // pointer operand. Note that while capturing was taken care of somewhere
7641     // else we need to deal with stores of the value that is not looked through.
7642     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7643       removeAssumedBits(NO_WRITES);
7644     else
7645       indicatePessimisticFixpoint();
7646     return;
7647 
7648   case Instruction::Call:
7649   case Instruction::CallBr:
7650   case Instruction::Invoke: {
7651     // For call sites we look at the argument memory behavior attribute (this
7652     // could be recursive!) in order to restrict our own state.
7653     const auto *CB = cast<CallBase>(UserI);
7654 
7655     // Give up on operand bundles.
7656     if (CB->isBundleOperand(&U)) {
7657       indicatePessimisticFixpoint();
7658       return;
7659     }
7660 
7661     // Calling a function does read the function pointer, maybe write it if the
7662     // function is self-modifying.
7663     if (CB->isCallee(&U)) {
7664       removeAssumedBits(NO_READS);
7665       break;
7666     }
7667 
7668     // Adjust the possible access behavior based on the information on the
7669     // argument.
7670     IRPosition Pos;
7671     if (U.get()->getType()->isPointerTy())
7672       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7673     else
7674       Pos = IRPosition::callsite_function(*CB);
7675     const auto &MemBehaviorAA =
7676         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7677     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7678     // and at least "known".
7679     intersectAssumedBits(MemBehaviorAA.getAssumed());
7680     return;
7681   }
7682   };
7683 
7684   // Generally, look at the "may-properties" and adjust the assumed state if we
7685   // did not trigger special handling before.
7686   if (UserI->mayReadFromMemory())
7687     removeAssumedBits(NO_READS);
7688   if (UserI->mayWriteToMemory())
7689     removeAssumedBits(NO_WRITES);
7690 }
7691 } // namespace
7692 
7693 /// -------------------- Memory Locations Attributes ---------------------------
7694 /// Includes read-none, argmemonly, inaccessiblememonly,
7695 /// inaccessiblememorargmemonly
7696 /// ----------------------------------------------------------------------------
7697 
7698 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7699     AAMemoryLocation::MemoryLocationsKind MLK) {
7700   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7701     return "all memory";
7702   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7703     return "no memory";
7704   std::string S = "memory:";
7705   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7706     S += "stack,";
7707   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7708     S += "constant,";
7709   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7710     S += "internal global,";
7711   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7712     S += "external global,";
7713   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7714     S += "argument,";
7715   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7716     S += "inaccessible,";
7717   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7718     S += "malloced,";
7719   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7720     S += "unknown,";
7721   S.pop_back();
7722   return S;
7723 }
7724 
7725 namespace {
7726 struct AAMemoryLocationImpl : public AAMemoryLocation {
7727 
7728   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7729       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7730     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7731       AccessKind2Accesses[u] = nullptr;
7732   }
7733 
7734   ~AAMemoryLocationImpl() {
7735     // The AccessSets are allocated via a BumpPtrAllocator, we call
7736     // the destructor manually.
7737     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7738       if (AccessKind2Accesses[u])
7739         AccessKind2Accesses[u]->~AccessSet();
7740   }
7741 
7742   /// See AbstractAttribute::initialize(...).
7743   void initialize(Attributor &A) override {
7744     intersectAssumedBits(BEST_STATE);
7745     getKnownStateFromValue(A, getIRPosition(), getState());
7746     AAMemoryLocation::initialize(A);
7747   }
7748 
7749   /// Return the memory behavior information encoded in the IR for \p IRP.
7750   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7751                                      BitIntegerState &State,
7752                                      bool IgnoreSubsumingPositions = false) {
7753     // For internal functions we ignore `argmemonly` and
7754     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7755     // constant propagation. It is unclear if this is the best way but it is
7756     // unlikely this will cause real performance problems. If we are deriving
7757     // attributes for the anchor function we even remove the attribute in
7758     // addition to ignoring it.
7759     bool UseArgMemOnly = true;
7760     Function *AnchorFn = IRP.getAnchorScope();
7761     if (AnchorFn && A.isRunOn(*AnchorFn))
7762       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7763 
7764     SmallVector<Attribute, 2> Attrs;
7765     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7766     for (const Attribute &Attr : Attrs) {
7767       switch (Attr.getKindAsEnum()) {
7768       case Attribute::ReadNone:
7769         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7770         break;
7771       case Attribute::InaccessibleMemOnly:
7772         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7773         break;
7774       case Attribute::ArgMemOnly:
7775         if (UseArgMemOnly)
7776           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7777         else
7778           IRP.removeAttrs({Attribute::ArgMemOnly});
7779         break;
7780       case Attribute::InaccessibleMemOrArgMemOnly:
7781         if (UseArgMemOnly)
7782           State.addKnownBits(inverseLocation(
7783               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7784         else
7785           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7786         break;
7787       default:
7788         llvm_unreachable("Unexpected attribute!");
7789       }
7790     }
7791   }
7792 
7793   /// See AbstractAttribute::getDeducedAttributes(...).
7794   void getDeducedAttributes(LLVMContext &Ctx,
7795                             SmallVectorImpl<Attribute> &Attrs) const override {
7796     assert(Attrs.size() == 0);
7797     if (isAssumedReadNone()) {
7798       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7799     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7800       if (isAssumedInaccessibleMemOnly())
7801         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7802       else if (isAssumedArgMemOnly())
7803         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7804       else if (isAssumedInaccessibleOrArgMemOnly())
7805         Attrs.push_back(
7806             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7807     }
7808     assert(Attrs.size() <= 1);
7809   }
7810 
7811   /// See AbstractAttribute::manifest(...).
7812   ChangeStatus manifest(Attributor &A) override {
7813     const IRPosition &IRP = getIRPosition();
7814 
7815     // Check if we would improve the existing attributes first.
7816     SmallVector<Attribute, 4> DeducedAttrs;
7817     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7818     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7819           return IRP.hasAttr(Attr.getKindAsEnum(),
7820                              /* IgnoreSubsumingPositions */ true);
7821         }))
7822       return ChangeStatus::UNCHANGED;
7823 
7824     // Clear existing attributes.
7825     IRP.removeAttrs(AttrKinds);
7826     if (isAssumedReadNone())
7827       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7828 
7829     // Use the generic manifest method.
7830     return IRAttribute::manifest(A);
7831   }
7832 
7833   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7834   bool checkForAllAccessesToMemoryKind(
7835       function_ref<bool(const Instruction *, const Value *, AccessKind,
7836                         MemoryLocationsKind)>
7837           Pred,
7838       MemoryLocationsKind RequestedMLK) const override {
7839     if (!isValidState())
7840       return false;
7841 
7842     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7843     if (AssumedMLK == NO_LOCATIONS)
7844       return true;
7845 
7846     unsigned Idx = 0;
7847     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7848          CurMLK *= 2, ++Idx) {
7849       if (CurMLK & RequestedMLK)
7850         continue;
7851 
7852       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7853         for (const AccessInfo &AI : *Accesses)
7854           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7855             return false;
7856     }
7857 
7858     return true;
7859   }
7860 
7861   ChangeStatus indicatePessimisticFixpoint() override {
7862     // If we give up and indicate a pessimistic fixpoint this instruction will
7863     // become an access for all potential access kinds:
7864     // TODO: Add pointers for argmemonly and globals to improve the results of
7865     //       checkForAllAccessesToMemoryKind.
7866     bool Changed = false;
7867     MemoryLocationsKind KnownMLK = getKnown();
7868     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7869     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7870       if (!(CurMLK & KnownMLK))
7871         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7872                                   getAccessKindFromInst(I));
7873     return AAMemoryLocation::indicatePessimisticFixpoint();
7874   }
7875 
7876 protected:
7877   /// Helper struct to tie together an instruction that has a read or write
7878   /// effect with the pointer it accesses (if any).
7879   struct AccessInfo {
7880 
7881     /// The instruction that caused the access.
7882     const Instruction *I;
7883 
7884     /// The base pointer that is accessed, or null if unknown.
7885     const Value *Ptr;
7886 
7887     /// The kind of access (read/write/read+write).
7888     AccessKind Kind;
7889 
7890     bool operator==(const AccessInfo &RHS) const {
7891       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7892     }
7893     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7894       if (LHS.I != RHS.I)
7895         return LHS.I < RHS.I;
7896       if (LHS.Ptr != RHS.Ptr)
7897         return LHS.Ptr < RHS.Ptr;
7898       if (LHS.Kind != RHS.Kind)
7899         return LHS.Kind < RHS.Kind;
7900       return false;
7901     }
7902   };
7903 
7904   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7905   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7906   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7907   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7908 
7909   /// Categorize the pointer arguments of CB that might access memory in
7910   /// AccessedLoc and update the state and access map accordingly.
7911   void
7912   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7913                                      AAMemoryLocation::StateType &AccessedLocs,
7914                                      bool &Changed);
7915 
7916   /// Return the kind(s) of location that may be accessed by \p V.
7917   AAMemoryLocation::MemoryLocationsKind
7918   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7919 
7920   /// Return the access kind as determined by \p I.
7921   AccessKind getAccessKindFromInst(const Instruction *I) {
7922     AccessKind AK = READ_WRITE;
7923     if (I) {
7924       AK = I->mayReadFromMemory() ? READ : NONE;
7925       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7926     }
7927     return AK;
7928   }
7929 
7930   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7931   /// an access of kind \p AK to a \p MLK memory location with the access
7932   /// pointer \p Ptr.
7933   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7934                                  MemoryLocationsKind MLK, const Instruction *I,
7935                                  const Value *Ptr, bool &Changed,
7936                                  AccessKind AK = READ_WRITE) {
7937 
7938     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7939     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7940     if (!Accesses)
7941       Accesses = new (Allocator) AccessSet();
7942     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7943     State.removeAssumedBits(MLK);
7944   }
7945 
7946   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7947   /// arguments, and update the state and access map accordingly.
7948   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7949                           AAMemoryLocation::StateType &State, bool &Changed);
7950 
7951   /// Used to allocate access sets.
7952   BumpPtrAllocator &Allocator;
7953 
7954   /// The set of IR attributes AAMemoryLocation deals with.
7955   static const Attribute::AttrKind AttrKinds[4];
7956 };
7957 
7958 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7959     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7960     Attribute::InaccessibleMemOrArgMemOnly};
7961 
7962 void AAMemoryLocationImpl::categorizePtrValue(
7963     Attributor &A, const Instruction &I, const Value &Ptr,
7964     AAMemoryLocation::StateType &State, bool &Changed) {
7965   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7966                     << Ptr << " ["
7967                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7968 
7969   SmallVector<Value *, 8> Objects;
7970   bool UsedAssumedInformation = false;
7971   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7972                                        UsedAssumedInformation,
7973                                        /* Intraprocedural */ true)) {
7974     LLVM_DEBUG(
7975         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7976     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7977                               getAccessKindFromInst(&I));
7978     return;
7979   }
7980 
7981   for (Value *Obj : Objects) {
7982     // TODO: recognize the TBAA used for constant accesses.
7983     MemoryLocationsKind MLK = NO_LOCATIONS;
7984     if (isa<UndefValue>(Obj))
7985       continue;
7986     if (isa<Argument>(Obj)) {
7987       // TODO: For now we do not treat byval arguments as local copies performed
7988       // on the call edge, though, we should. To make that happen we need to
7989       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7990       // would also allow us to mark functions only accessing byval arguments as
7991       // readnone again, atguably their acceses have no effect outside of the
7992       // function, like accesses to allocas.
7993       MLK = NO_ARGUMENT_MEM;
7994     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7995       // Reading constant memory is not treated as a read "effect" by the
7996       // function attr pass so we won't neither. Constants defined by TBAA are
7997       // similar. (We know we do not write it because it is constant.)
7998       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7999         if (GVar->isConstant())
8000           continue;
8001 
8002       if (GV->hasLocalLinkage())
8003         MLK = NO_GLOBAL_INTERNAL_MEM;
8004       else
8005         MLK = NO_GLOBAL_EXTERNAL_MEM;
8006     } else if (isa<ConstantPointerNull>(Obj) &&
8007                !NullPointerIsDefined(getAssociatedFunction(),
8008                                      Ptr.getType()->getPointerAddressSpace())) {
8009       continue;
8010     } else if (isa<AllocaInst>(Obj)) {
8011       MLK = NO_LOCAL_MEM;
8012     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
8013       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
8014           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
8015       if (NoAliasAA.isAssumedNoAlias())
8016         MLK = NO_MALLOCED_MEM;
8017       else
8018         MLK = NO_UNKOWN_MEM;
8019     } else {
8020       MLK = NO_UNKOWN_MEM;
8021     }
8022 
8023     assert(MLK != NO_LOCATIONS && "No location specified!");
8024     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8025                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
8026                       << "\n");
8027     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
8028                               getAccessKindFromInst(&I));
8029   }
8030 
8031   LLVM_DEBUG(
8032       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8033              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8034 }
8035 
8036 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8037     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8038     bool &Changed) {
8039   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8040 
8041     // Skip non-pointer arguments.
8042     const Value *ArgOp = CB.getArgOperand(ArgNo);
8043     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8044       continue;
8045 
8046     // Skip readnone arguments.
8047     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8048     const auto &ArgOpMemLocationAA =
8049         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8050 
8051     if (ArgOpMemLocationAA.isAssumedReadNone())
8052       continue;
8053 
8054     // Categorize potentially accessed pointer arguments as if there was an
8055     // access instruction with them as pointer.
8056     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8057   }
8058 }
8059 
8060 AAMemoryLocation::MemoryLocationsKind
8061 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8062                                                   bool &Changed) {
8063   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8064                     << I << "\n");
8065 
8066   AAMemoryLocation::StateType AccessedLocs;
8067   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8068 
8069   if (auto *CB = dyn_cast<CallBase>(&I)) {
8070 
8071     // First check if we assume any memory is access is visible.
8072     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8073         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8074     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8075                       << " [" << CBMemLocationAA << "]\n");
8076 
8077     if (CBMemLocationAA.isAssumedReadNone())
8078       return NO_LOCATIONS;
8079 
8080     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
8081       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8082                                 Changed, getAccessKindFromInst(&I));
8083       return AccessedLocs.getAssumed();
8084     }
8085 
8086     uint32_t CBAssumedNotAccessedLocs =
8087         CBMemLocationAA.getAssumedNotAccessedLocation();
8088 
8089     // Set the argmemonly and global bit as we handle them separately below.
8090     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8091         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8092 
8093     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8094       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8095         continue;
8096       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8097                                 getAccessKindFromInst(&I));
8098     }
8099 
8100     // Now handle global memory if it might be accessed. This is slightly tricky
8101     // as NO_GLOBAL_MEM has multiple bits set.
8102     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8103     if (HasGlobalAccesses) {
8104       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8105                             AccessKind Kind, MemoryLocationsKind MLK) {
8106         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8107                                   getAccessKindFromInst(&I));
8108         return true;
8109       };
8110       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8111               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8112         return AccessedLocs.getWorstState();
8113     }
8114 
8115     LLVM_DEBUG(
8116         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8117                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8118 
8119     // Now handle argument memory if it might be accessed.
8120     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8121     if (HasArgAccesses)
8122       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8123 
8124     LLVM_DEBUG(
8125         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8126                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8127 
8128     return AccessedLocs.getAssumed();
8129   }
8130 
8131   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8132     LLVM_DEBUG(
8133         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8134                << I << " [" << *Ptr << "]\n");
8135     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8136     return AccessedLocs.getAssumed();
8137   }
8138 
8139   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8140                     << I << "\n");
8141   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8142                             getAccessKindFromInst(&I));
8143   return AccessedLocs.getAssumed();
8144 }
8145 
8146 /// An AA to represent the memory behavior function attributes.
8147 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8148   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8149       : AAMemoryLocationImpl(IRP, A) {}
8150 
8151   /// See AbstractAttribute::updateImpl(Attributor &A).
8152   virtual ChangeStatus updateImpl(Attributor &A) override {
8153 
8154     const auto &MemBehaviorAA =
8155         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8156     if (MemBehaviorAA.isAssumedReadNone()) {
8157       if (MemBehaviorAA.isKnownReadNone())
8158         return indicateOptimisticFixpoint();
8159       assert(isAssumedReadNone() &&
8160              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8161       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8162       return ChangeStatus::UNCHANGED;
8163     }
8164 
8165     // The current assumed state used to determine a change.
8166     auto AssumedState = getAssumed();
8167     bool Changed = false;
8168 
8169     auto CheckRWInst = [&](Instruction &I) {
8170       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8171       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8172                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8173       removeAssumedBits(inverseLocation(MLK, false, false));
8174       // Stop once only the valid bit set in the *not assumed location*, thus
8175       // once we don't actually exclude any memory locations in the state.
8176       return getAssumedNotAccessedLocation() != VALID_STATE;
8177     };
8178 
8179     bool UsedAssumedInformation = false;
8180     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8181                                             UsedAssumedInformation))
8182       return indicatePessimisticFixpoint();
8183 
8184     Changed |= AssumedState != getAssumed();
8185     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8186   }
8187 
8188   /// See AbstractAttribute::trackStatistics()
8189   void trackStatistics() const override {
8190     if (isAssumedReadNone())
8191       STATS_DECLTRACK_FN_ATTR(readnone)
8192     else if (isAssumedArgMemOnly())
8193       STATS_DECLTRACK_FN_ATTR(argmemonly)
8194     else if (isAssumedInaccessibleMemOnly())
8195       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8196     else if (isAssumedInaccessibleOrArgMemOnly())
8197       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8198   }
8199 };
8200 
8201 /// AAMemoryLocation attribute for call sites.
8202 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8203   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8204       : AAMemoryLocationImpl(IRP, A) {}
8205 
8206   /// See AbstractAttribute::initialize(...).
8207   void initialize(Attributor &A) override {
8208     AAMemoryLocationImpl::initialize(A);
8209     Function *F = getAssociatedFunction();
8210     if (!F || F->isDeclaration())
8211       indicatePessimisticFixpoint();
8212   }
8213 
8214   /// See AbstractAttribute::updateImpl(...).
8215   ChangeStatus updateImpl(Attributor &A) override {
8216     // TODO: Once we have call site specific value information we can provide
8217     //       call site specific liveness liveness information and then it makes
8218     //       sense to specialize attributes for call sites arguments instead of
8219     //       redirecting requests to the callee argument.
8220     Function *F = getAssociatedFunction();
8221     const IRPosition &FnPos = IRPosition::function(*F);
8222     auto &FnAA =
8223         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8224     bool Changed = false;
8225     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8226                           AccessKind Kind, MemoryLocationsKind MLK) {
8227       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8228                                 getAccessKindFromInst(I));
8229       return true;
8230     };
8231     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8232       return indicatePessimisticFixpoint();
8233     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8234   }
8235 
8236   /// See AbstractAttribute::trackStatistics()
8237   void trackStatistics() const override {
8238     if (isAssumedReadNone())
8239       STATS_DECLTRACK_CS_ATTR(readnone)
8240   }
8241 };
8242 } // namespace
8243 
8244 /// ------------------ Value Constant Range Attribute -------------------------
8245 
8246 namespace {
8247 struct AAValueConstantRangeImpl : AAValueConstantRange {
8248   using StateType = IntegerRangeState;
8249   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8250       : AAValueConstantRange(IRP, A) {}
8251 
8252   /// See AbstractAttribute::initialize(..).
8253   void initialize(Attributor &A) override {
8254     if (A.hasSimplificationCallback(getIRPosition())) {
8255       indicatePessimisticFixpoint();
8256       return;
8257     }
8258 
8259     // Intersect a range given by SCEV.
8260     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8261 
8262     // Intersect a range given by LVI.
8263     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8264   }
8265 
8266   /// See AbstractAttribute::getAsStr().
8267   const std::string getAsStr() const override {
8268     std::string Str;
8269     llvm::raw_string_ostream OS(Str);
8270     OS << "range(" << getBitWidth() << ")<";
8271     getKnown().print(OS);
8272     OS << " / ";
8273     getAssumed().print(OS);
8274     OS << ">";
8275     return OS.str();
8276   }
8277 
8278   /// Helper function to get a SCEV expr for the associated value at program
8279   /// point \p I.
8280   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8281     if (!getAnchorScope())
8282       return nullptr;
8283 
8284     ScalarEvolution *SE =
8285         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8286             *getAnchorScope());
8287 
8288     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8289         *getAnchorScope());
8290 
8291     if (!SE || !LI)
8292       return nullptr;
8293 
8294     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8295     if (!I)
8296       return S;
8297 
8298     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8299   }
8300 
8301   /// Helper function to get a range from SCEV for the associated value at
8302   /// program point \p I.
8303   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8304                                          const Instruction *I = nullptr) const {
8305     if (!getAnchorScope())
8306       return getWorstState(getBitWidth());
8307 
8308     ScalarEvolution *SE =
8309         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8310             *getAnchorScope());
8311 
8312     const SCEV *S = getSCEV(A, I);
8313     if (!SE || !S)
8314       return getWorstState(getBitWidth());
8315 
8316     return SE->getUnsignedRange(S);
8317   }
8318 
8319   /// Helper function to get a range from LVI for the associated value at
8320   /// program point \p I.
8321   ConstantRange
8322   getConstantRangeFromLVI(Attributor &A,
8323                           const Instruction *CtxI = nullptr) const {
8324     if (!getAnchorScope())
8325       return getWorstState(getBitWidth());
8326 
8327     LazyValueInfo *LVI =
8328         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8329             *getAnchorScope());
8330 
8331     if (!LVI || !CtxI)
8332       return getWorstState(getBitWidth());
8333     return LVI->getConstantRange(&getAssociatedValue(),
8334                                  const_cast<Instruction *>(CtxI));
8335   }
8336 
8337   /// Return true if \p CtxI is valid for querying outside analyses.
8338   /// This basically makes sure we do not ask intra-procedural analysis
8339   /// about a context in the wrong function or a context that violates
8340   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8341   /// if the original context of this AA is OK or should be considered invalid.
8342   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8343                                                const Instruction *CtxI,
8344                                                bool AllowAACtxI) const {
8345     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8346       return false;
8347 
8348     // Our context might be in a different function, neither intra-procedural
8349     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8350     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8351       return false;
8352 
8353     // If the context is not dominated by the value there are paths to the
8354     // context that do not define the value. This cannot be handled by
8355     // LazyValueInfo so we need to bail.
8356     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8357       InformationCache &InfoCache = A.getInfoCache();
8358       const DominatorTree *DT =
8359           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8360               *I->getFunction());
8361       return DT && DT->dominates(I, CtxI);
8362     }
8363 
8364     return true;
8365   }
8366 
8367   /// See AAValueConstantRange::getKnownConstantRange(..).
8368   ConstantRange
8369   getKnownConstantRange(Attributor &A,
8370                         const Instruction *CtxI = nullptr) const override {
8371     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8372                                                  /* AllowAACtxI */ false))
8373       return getKnown();
8374 
8375     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8376     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8377     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8378   }
8379 
8380   /// See AAValueConstantRange::getAssumedConstantRange(..).
8381   ConstantRange
8382   getAssumedConstantRange(Attributor &A,
8383                           const Instruction *CtxI = nullptr) const override {
8384     // TODO: Make SCEV use Attributor assumption.
8385     //       We may be able to bound a variable range via assumptions in
8386     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8387     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8388     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8389                                                  /* AllowAACtxI */ false))
8390       return getAssumed();
8391 
8392     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8393     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8394     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8395   }
8396 
8397   /// Helper function to create MDNode for range metadata.
8398   static MDNode *
8399   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8400                             const ConstantRange &AssumedConstantRange) {
8401     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8402                                   Ty, AssumedConstantRange.getLower())),
8403                               ConstantAsMetadata::get(ConstantInt::get(
8404                                   Ty, AssumedConstantRange.getUpper()))};
8405     return MDNode::get(Ctx, LowAndHigh);
8406   }
8407 
8408   /// Return true if \p Assumed is included in \p KnownRanges.
8409   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8410 
8411     if (Assumed.isFullSet())
8412       return false;
8413 
8414     if (!KnownRanges)
8415       return true;
8416 
8417     // If multiple ranges are annotated in IR, we give up to annotate assumed
8418     // range for now.
8419 
8420     // TODO:  If there exists a known range which containts assumed range, we
8421     // can say assumed range is better.
8422     if (KnownRanges->getNumOperands() > 2)
8423       return false;
8424 
8425     ConstantInt *Lower =
8426         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8427     ConstantInt *Upper =
8428         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8429 
8430     ConstantRange Known(Lower->getValue(), Upper->getValue());
8431     return Known.contains(Assumed) && Known != Assumed;
8432   }
8433 
8434   /// Helper function to set range metadata.
8435   static bool
8436   setRangeMetadataIfisBetterRange(Instruction *I,
8437                                   const ConstantRange &AssumedConstantRange) {
8438     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8439     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8440       if (!AssumedConstantRange.isEmptySet()) {
8441         I->setMetadata(LLVMContext::MD_range,
8442                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8443                                                  AssumedConstantRange));
8444         return true;
8445       }
8446     }
8447     return false;
8448   }
8449 
8450   /// See AbstractAttribute::manifest()
8451   ChangeStatus manifest(Attributor &A) override {
8452     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8453     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8454     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8455 
8456     auto &V = getAssociatedValue();
8457     if (!AssumedConstantRange.isEmptySet() &&
8458         !AssumedConstantRange.isSingleElement()) {
8459       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8460         assert(I == getCtxI() && "Should not annotate an instruction which is "
8461                                  "not the context instruction");
8462         if (isa<CallInst>(I) || isa<LoadInst>(I))
8463           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8464             Changed = ChangeStatus::CHANGED;
8465       }
8466     }
8467 
8468     return Changed;
8469   }
8470 };
8471 
8472 struct AAValueConstantRangeArgument final
8473     : AAArgumentFromCallSiteArguments<
8474           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8475           true /* BridgeCallBaseContext */> {
8476   using Base = AAArgumentFromCallSiteArguments<
8477       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8478       true /* BridgeCallBaseContext */>;
8479   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8480       : Base(IRP, A) {}
8481 
8482   /// See AbstractAttribute::initialize(..).
8483   void initialize(Attributor &A) override {
8484     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8485       indicatePessimisticFixpoint();
8486     } else {
8487       Base::initialize(A);
8488     }
8489   }
8490 
8491   /// See AbstractAttribute::trackStatistics()
8492   void trackStatistics() const override {
8493     STATS_DECLTRACK_ARG_ATTR(value_range)
8494   }
8495 };
8496 
8497 struct AAValueConstantRangeReturned
8498     : AAReturnedFromReturnedValues<AAValueConstantRange,
8499                                    AAValueConstantRangeImpl,
8500                                    AAValueConstantRangeImpl::StateType,
8501                                    /* PropogateCallBaseContext */ true> {
8502   using Base =
8503       AAReturnedFromReturnedValues<AAValueConstantRange,
8504                                    AAValueConstantRangeImpl,
8505                                    AAValueConstantRangeImpl::StateType,
8506                                    /* PropogateCallBaseContext */ true>;
8507   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8508       : Base(IRP, A) {}
8509 
8510   /// See AbstractAttribute::initialize(...).
8511   void initialize(Attributor &A) override {}
8512 
8513   /// See AbstractAttribute::trackStatistics()
8514   void trackStatistics() const override {
8515     STATS_DECLTRACK_FNRET_ATTR(value_range)
8516   }
8517 };
8518 
8519 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8520   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8521       : AAValueConstantRangeImpl(IRP, A) {}
8522 
8523   /// See AbstractAttribute::initialize(...).
8524   void initialize(Attributor &A) override {
8525     AAValueConstantRangeImpl::initialize(A);
8526     if (isAtFixpoint())
8527       return;
8528 
8529     Value &V = getAssociatedValue();
8530 
8531     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8532       unionAssumed(ConstantRange(C->getValue()));
8533       indicateOptimisticFixpoint();
8534       return;
8535     }
8536 
8537     if (isa<UndefValue>(&V)) {
8538       // Collapse the undef state to 0.
8539       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8540       indicateOptimisticFixpoint();
8541       return;
8542     }
8543 
8544     if (isa<CallBase>(&V))
8545       return;
8546 
8547     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8548       return;
8549 
8550     // If it is a load instruction with range metadata, use it.
8551     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8552       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8553         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8554         return;
8555       }
8556 
8557     // We can work with PHI and select instruction as we traverse their operands
8558     // during update.
8559     if (isa<SelectInst>(V) || isa<PHINode>(V))
8560       return;
8561 
8562     // Otherwise we give up.
8563     indicatePessimisticFixpoint();
8564 
8565     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8566                       << getAssociatedValue() << "\n");
8567   }
8568 
8569   bool calculateBinaryOperator(
8570       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8571       const Instruction *CtxI,
8572       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8573     Value *LHS = BinOp->getOperand(0);
8574     Value *RHS = BinOp->getOperand(1);
8575 
8576     // Simplify the operands first.
8577     bool UsedAssumedInformation = false;
8578     const auto &SimplifiedLHS =
8579         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8580                                *this, UsedAssumedInformation);
8581     if (!SimplifiedLHS.hasValue())
8582       return true;
8583     if (!SimplifiedLHS.getValue())
8584       return false;
8585     LHS = *SimplifiedLHS;
8586 
8587     const auto &SimplifiedRHS =
8588         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8589                                *this, UsedAssumedInformation);
8590     if (!SimplifiedRHS.hasValue())
8591       return true;
8592     if (!SimplifiedRHS.getValue())
8593       return false;
8594     RHS = *SimplifiedRHS;
8595 
8596     // TODO: Allow non integers as well.
8597     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8598       return false;
8599 
8600     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8601         *this, IRPosition::value(*LHS, getCallBaseContext()),
8602         DepClassTy::REQUIRED);
8603     QuerriedAAs.push_back(&LHSAA);
8604     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8605 
8606     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8607         *this, IRPosition::value(*RHS, getCallBaseContext()),
8608         DepClassTy::REQUIRED);
8609     QuerriedAAs.push_back(&RHSAA);
8610     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8611 
8612     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8613 
8614     T.unionAssumed(AssumedRange);
8615 
8616     // TODO: Track a known state too.
8617 
8618     return T.isValidState();
8619   }
8620 
8621   bool calculateCastInst(
8622       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8623       const Instruction *CtxI,
8624       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8625     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8626     // TODO: Allow non integers as well.
8627     Value *OpV = CastI->getOperand(0);
8628 
8629     // Simplify the operand first.
8630     bool UsedAssumedInformation = false;
8631     const auto &SimplifiedOpV =
8632         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8633                                *this, UsedAssumedInformation);
8634     if (!SimplifiedOpV.hasValue())
8635       return true;
8636     if (!SimplifiedOpV.getValue())
8637       return false;
8638     OpV = *SimplifiedOpV;
8639 
8640     if (!OpV->getType()->isIntegerTy())
8641       return false;
8642 
8643     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8644         *this, IRPosition::value(*OpV, getCallBaseContext()),
8645         DepClassTy::REQUIRED);
8646     QuerriedAAs.push_back(&OpAA);
8647     T.unionAssumed(
8648         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8649     return T.isValidState();
8650   }
8651 
8652   bool
8653   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8654                    const Instruction *CtxI,
8655                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8656     Value *LHS = CmpI->getOperand(0);
8657     Value *RHS = CmpI->getOperand(1);
8658 
8659     // Simplify the operands first.
8660     bool UsedAssumedInformation = false;
8661     const auto &SimplifiedLHS =
8662         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8663                                *this, UsedAssumedInformation);
8664     if (!SimplifiedLHS.hasValue())
8665       return true;
8666     if (!SimplifiedLHS.getValue())
8667       return false;
8668     LHS = *SimplifiedLHS;
8669 
8670     const auto &SimplifiedRHS =
8671         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8672                                *this, UsedAssumedInformation);
8673     if (!SimplifiedRHS.hasValue())
8674       return true;
8675     if (!SimplifiedRHS.getValue())
8676       return false;
8677     RHS = *SimplifiedRHS;
8678 
8679     // TODO: Allow non integers as well.
8680     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8681       return false;
8682 
8683     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8684         *this, IRPosition::value(*LHS, getCallBaseContext()),
8685         DepClassTy::REQUIRED);
8686     QuerriedAAs.push_back(&LHSAA);
8687     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8688         *this, IRPosition::value(*RHS, getCallBaseContext()),
8689         DepClassTy::REQUIRED);
8690     QuerriedAAs.push_back(&RHSAA);
8691     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8692     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8693 
8694     // If one of them is empty set, we can't decide.
8695     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8696       return true;
8697 
8698     bool MustTrue = false, MustFalse = false;
8699 
8700     auto AllowedRegion =
8701         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8702 
8703     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8704       MustFalse = true;
8705 
8706     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8707       MustTrue = true;
8708 
8709     assert((!MustTrue || !MustFalse) &&
8710            "Either MustTrue or MustFalse should be false!");
8711 
8712     if (MustTrue)
8713       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8714     else if (MustFalse)
8715       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8716     else
8717       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8718 
8719     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8720                       << " " << RHSAA << "\n");
8721 
8722     // TODO: Track a known state too.
8723     return T.isValidState();
8724   }
8725 
8726   /// See AbstractAttribute::updateImpl(...).
8727   ChangeStatus updateImpl(Attributor &A) override {
8728     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8729                             IntegerRangeState &T, bool Stripped) -> bool {
8730       Instruction *I = dyn_cast<Instruction>(&V);
8731       if (!I || isa<CallBase>(I)) {
8732 
8733         // Simplify the operand first.
8734         bool UsedAssumedInformation = false;
8735         const auto &SimplifiedOpV =
8736             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8737                                    *this, UsedAssumedInformation);
8738         if (!SimplifiedOpV.hasValue())
8739           return true;
8740         if (!SimplifiedOpV.getValue())
8741           return false;
8742         Value *VPtr = *SimplifiedOpV;
8743 
8744         // If the value is not instruction, we query AA to Attributor.
8745         const auto &AA = A.getAAFor<AAValueConstantRange>(
8746             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8747             DepClassTy::REQUIRED);
8748 
8749         // Clamp operator is not used to utilize a program point CtxI.
8750         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8751 
8752         return T.isValidState();
8753       }
8754 
8755       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8756       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8757         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8758           return false;
8759       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8760         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8761           return false;
8762       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8763         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8764           return false;
8765       } else {
8766         // Give up with other instructions.
8767         // TODO: Add other instructions
8768 
8769         T.indicatePessimisticFixpoint();
8770         return false;
8771       }
8772 
8773       // Catch circular reasoning in a pessimistic way for now.
8774       // TODO: Check how the range evolves and if we stripped anything, see also
8775       //       AADereferenceable or AAAlign for similar situations.
8776       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8777         if (QueriedAA != this)
8778           continue;
8779         // If we are in a stady state we do not need to worry.
8780         if (T.getAssumed() == getState().getAssumed())
8781           continue;
8782         T.indicatePessimisticFixpoint();
8783       }
8784 
8785       return T.isValidState();
8786     };
8787 
8788     IntegerRangeState T(getBitWidth());
8789 
8790     bool UsedAssumedInformation = false;
8791     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8792                                                   VisitValueCB, getCtxI(),
8793                                                   UsedAssumedInformation,
8794                                                   /* UseValueSimplify */ false))
8795       return indicatePessimisticFixpoint();
8796 
8797     // Ensure that long def-use chains can't cause circular reasoning either by
8798     // introducing a cutoff below.
8799     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8800       return ChangeStatus::UNCHANGED;
8801     if (++NumChanges > MaxNumChanges) {
8802       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8803                         << " but only " << MaxNumChanges
8804                         << " are allowed to avoid cyclic reasoning.");
8805       return indicatePessimisticFixpoint();
8806     }
8807     return ChangeStatus::CHANGED;
8808   }
8809 
8810   /// See AbstractAttribute::trackStatistics()
8811   void trackStatistics() const override {
8812     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8813   }
8814 
8815   /// Tracker to bail after too many widening steps of the constant range.
8816   int NumChanges = 0;
8817 
8818   /// Upper bound for the number of allowed changes (=widening steps) for the
8819   /// constant range before we give up.
8820   static constexpr int MaxNumChanges = 5;
8821 };
8822 
8823 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8824   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8825       : AAValueConstantRangeImpl(IRP, A) {}
8826 
8827   /// See AbstractAttribute::initialize(...).
8828   ChangeStatus updateImpl(Attributor &A) override {
8829     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8830                      "not be called");
8831   }
8832 
8833   /// See AbstractAttribute::trackStatistics()
8834   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8835 };
8836 
8837 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8838   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8839       : AAValueConstantRangeFunction(IRP, A) {}
8840 
8841   /// See AbstractAttribute::trackStatistics()
8842   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8843 };
8844 
8845 struct AAValueConstantRangeCallSiteReturned
8846     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8847                                      AAValueConstantRangeImpl,
8848                                      AAValueConstantRangeImpl::StateType,
8849                                      /* IntroduceCallBaseContext */ true> {
8850   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8851       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8852                                        AAValueConstantRangeImpl,
8853                                        AAValueConstantRangeImpl::StateType,
8854                                        /* IntroduceCallBaseContext */ true>(IRP,
8855                                                                             A) {
8856   }
8857 
8858   /// See AbstractAttribute::initialize(...).
8859   void initialize(Attributor &A) override {
8860     // If it is a load instruction with range metadata, use the metadata.
8861     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8862       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8863         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8864 
8865     AAValueConstantRangeImpl::initialize(A);
8866   }
8867 
8868   /// See AbstractAttribute::trackStatistics()
8869   void trackStatistics() const override {
8870     STATS_DECLTRACK_CSRET_ATTR(value_range)
8871   }
8872 };
8873 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8874   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8875       : AAValueConstantRangeFloating(IRP, A) {}
8876 
8877   /// See AbstractAttribute::manifest()
8878   ChangeStatus manifest(Attributor &A) override {
8879     return ChangeStatus::UNCHANGED;
8880   }
8881 
8882   /// See AbstractAttribute::trackStatistics()
8883   void trackStatistics() const override {
8884     STATS_DECLTRACK_CSARG_ATTR(value_range)
8885   }
8886 };
8887 } // namespace
8888 
8889 /// ------------------ Potential Values Attribute -------------------------
8890 
8891 namespace {
8892 struct AAPotentialValuesImpl : AAPotentialValues {
8893   using StateType = PotentialConstantIntValuesState;
8894 
8895   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8896       : AAPotentialValues(IRP, A) {}
8897 
8898   /// See AbstractAttribute::initialize(..).
8899   void initialize(Attributor &A) override {
8900     if (A.hasSimplificationCallback(getIRPosition()))
8901       indicatePessimisticFixpoint();
8902     else
8903       AAPotentialValues::initialize(A);
8904   }
8905 
8906   /// See AbstractAttribute::getAsStr().
8907   const std::string getAsStr() const override {
8908     std::string Str;
8909     llvm::raw_string_ostream OS(Str);
8910     OS << getState();
8911     return OS.str();
8912   }
8913 
8914   /// See AbstractAttribute::updateImpl(...).
8915   ChangeStatus updateImpl(Attributor &A) override {
8916     return indicatePessimisticFixpoint();
8917   }
8918 };
8919 
8920 struct AAPotentialValuesArgument final
8921     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8922                                       PotentialConstantIntValuesState> {
8923   using Base =
8924       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8925                                       PotentialConstantIntValuesState>;
8926   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8927       : Base(IRP, A) {}
8928 
8929   /// See AbstractAttribute::initialize(..).
8930   void initialize(Attributor &A) override {
8931     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8932       indicatePessimisticFixpoint();
8933     } else {
8934       Base::initialize(A);
8935     }
8936   }
8937 
8938   /// See AbstractAttribute::trackStatistics()
8939   void trackStatistics() const override {
8940     STATS_DECLTRACK_ARG_ATTR(potential_values)
8941   }
8942 };
8943 
8944 struct AAPotentialValuesReturned
8945     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8946   using Base =
8947       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8948   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8949       : Base(IRP, A) {}
8950 
8951   /// See AbstractAttribute::trackStatistics()
8952   void trackStatistics() const override {
8953     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8954   }
8955 };
8956 
8957 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8958   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8959       : AAPotentialValuesImpl(IRP, A) {}
8960 
8961   /// See AbstractAttribute::initialize(..).
8962   void initialize(Attributor &A) override {
8963     AAPotentialValuesImpl::initialize(A);
8964     if (isAtFixpoint())
8965       return;
8966 
8967     Value &V = getAssociatedValue();
8968 
8969     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8970       unionAssumed(C->getValue());
8971       indicateOptimisticFixpoint();
8972       return;
8973     }
8974 
8975     if (isa<UndefValue>(&V)) {
8976       unionAssumedWithUndef();
8977       indicateOptimisticFixpoint();
8978       return;
8979     }
8980 
8981     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8982       return;
8983 
8984     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8985       return;
8986 
8987     indicatePessimisticFixpoint();
8988 
8989     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8990                       << getAssociatedValue() << "\n");
8991   }
8992 
8993   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8994                                 const APInt &RHS) {
8995     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8996   }
8997 
8998   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8999                                  uint32_t ResultBitWidth) {
9000     Instruction::CastOps CastOp = CI->getOpcode();
9001     switch (CastOp) {
9002     default:
9003       llvm_unreachable("unsupported or not integer cast");
9004     case Instruction::Trunc:
9005       return Src.trunc(ResultBitWidth);
9006     case Instruction::SExt:
9007       return Src.sext(ResultBitWidth);
9008     case Instruction::ZExt:
9009       return Src.zext(ResultBitWidth);
9010     case Instruction::BitCast:
9011       return Src;
9012     }
9013   }
9014 
9015   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9016                                        const APInt &LHS, const APInt &RHS,
9017                                        bool &SkipOperation, bool &Unsupported) {
9018     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9019     // Unsupported is set to true when the binary operator is not supported.
9020     // SkipOperation is set to true when UB occur with the given operand pair
9021     // (LHS, RHS).
9022     // TODO: we should look at nsw and nuw keywords to handle operations
9023     //       that create poison or undef value.
9024     switch (BinOpcode) {
9025     default:
9026       Unsupported = true;
9027       return LHS;
9028     case Instruction::Add:
9029       return LHS + RHS;
9030     case Instruction::Sub:
9031       return LHS - RHS;
9032     case Instruction::Mul:
9033       return LHS * RHS;
9034     case Instruction::UDiv:
9035       if (RHS.isZero()) {
9036         SkipOperation = true;
9037         return LHS;
9038       }
9039       return LHS.udiv(RHS);
9040     case Instruction::SDiv:
9041       if (RHS.isZero()) {
9042         SkipOperation = true;
9043         return LHS;
9044       }
9045       return LHS.sdiv(RHS);
9046     case Instruction::URem:
9047       if (RHS.isZero()) {
9048         SkipOperation = true;
9049         return LHS;
9050       }
9051       return LHS.urem(RHS);
9052     case Instruction::SRem:
9053       if (RHS.isZero()) {
9054         SkipOperation = true;
9055         return LHS;
9056       }
9057       return LHS.srem(RHS);
9058     case Instruction::Shl:
9059       return LHS.shl(RHS);
9060     case Instruction::LShr:
9061       return LHS.lshr(RHS);
9062     case Instruction::AShr:
9063       return LHS.ashr(RHS);
9064     case Instruction::And:
9065       return LHS & RHS;
9066     case Instruction::Or:
9067       return LHS | RHS;
9068     case Instruction::Xor:
9069       return LHS ^ RHS;
9070     }
9071   }
9072 
9073   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9074                                            const APInt &LHS, const APInt &RHS) {
9075     bool SkipOperation = false;
9076     bool Unsupported = false;
9077     APInt Result =
9078         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9079     if (Unsupported)
9080       return false;
9081     // If SkipOperation is true, we can ignore this operand pair (L, R).
9082     if (!SkipOperation)
9083       unionAssumed(Result);
9084     return isValidState();
9085   }
9086 
9087   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9088     auto AssumedBefore = getAssumed();
9089     Value *LHS = ICI->getOperand(0);
9090     Value *RHS = ICI->getOperand(1);
9091 
9092     // Simplify the operands first.
9093     bool UsedAssumedInformation = false;
9094     const auto &SimplifiedLHS =
9095         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9096                                *this, UsedAssumedInformation);
9097     if (!SimplifiedLHS.hasValue())
9098       return ChangeStatus::UNCHANGED;
9099     if (!SimplifiedLHS.getValue())
9100       return indicatePessimisticFixpoint();
9101     LHS = *SimplifiedLHS;
9102 
9103     const auto &SimplifiedRHS =
9104         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9105                                *this, UsedAssumedInformation);
9106     if (!SimplifiedRHS.hasValue())
9107       return ChangeStatus::UNCHANGED;
9108     if (!SimplifiedRHS.getValue())
9109       return indicatePessimisticFixpoint();
9110     RHS = *SimplifiedRHS;
9111 
9112     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9113       return indicatePessimisticFixpoint();
9114 
9115     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9116                                                 DepClassTy::REQUIRED);
9117     if (!LHSAA.isValidState())
9118       return indicatePessimisticFixpoint();
9119 
9120     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9121                                                 DepClassTy::REQUIRED);
9122     if (!RHSAA.isValidState())
9123       return indicatePessimisticFixpoint();
9124 
9125     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9126     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9127 
9128     // TODO: make use of undef flag to limit potential values aggressively.
9129     bool MaybeTrue = false, MaybeFalse = false;
9130     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9131     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9132       // The result of any comparison between undefs can be soundly replaced
9133       // with undef.
9134       unionAssumedWithUndef();
9135     } else if (LHSAA.undefIsContained()) {
9136       for (const APInt &R : RHSAAPVS) {
9137         bool CmpResult = calculateICmpInst(ICI, Zero, R);
9138         MaybeTrue |= CmpResult;
9139         MaybeFalse |= !CmpResult;
9140         if (MaybeTrue & MaybeFalse)
9141           return indicatePessimisticFixpoint();
9142       }
9143     } else if (RHSAA.undefIsContained()) {
9144       for (const APInt &L : LHSAAPVS) {
9145         bool CmpResult = calculateICmpInst(ICI, L, Zero);
9146         MaybeTrue |= CmpResult;
9147         MaybeFalse |= !CmpResult;
9148         if (MaybeTrue & MaybeFalse)
9149           return indicatePessimisticFixpoint();
9150       }
9151     } else {
9152       for (const APInt &L : LHSAAPVS) {
9153         for (const APInt &R : RHSAAPVS) {
9154           bool CmpResult = calculateICmpInst(ICI, L, R);
9155           MaybeTrue |= CmpResult;
9156           MaybeFalse |= !CmpResult;
9157           if (MaybeTrue & MaybeFalse)
9158             return indicatePessimisticFixpoint();
9159         }
9160       }
9161     }
9162     if (MaybeTrue)
9163       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9164     if (MaybeFalse)
9165       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9166     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9167                                          : ChangeStatus::CHANGED;
9168   }
9169 
9170   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9171     auto AssumedBefore = getAssumed();
9172     Value *LHS = SI->getTrueValue();
9173     Value *RHS = SI->getFalseValue();
9174 
9175     // Simplify the operands first.
9176     bool UsedAssumedInformation = false;
9177     const auto &SimplifiedLHS =
9178         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9179                                *this, UsedAssumedInformation);
9180     if (!SimplifiedLHS.hasValue())
9181       return ChangeStatus::UNCHANGED;
9182     if (!SimplifiedLHS.getValue())
9183       return indicatePessimisticFixpoint();
9184     LHS = *SimplifiedLHS;
9185 
9186     const auto &SimplifiedRHS =
9187         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9188                                *this, UsedAssumedInformation);
9189     if (!SimplifiedRHS.hasValue())
9190       return ChangeStatus::UNCHANGED;
9191     if (!SimplifiedRHS.getValue())
9192       return indicatePessimisticFixpoint();
9193     RHS = *SimplifiedRHS;
9194 
9195     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9196       return indicatePessimisticFixpoint();
9197 
9198     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9199                                                   UsedAssumedInformation);
9200 
9201     // Check if we only need one operand.
9202     bool OnlyLeft = false, OnlyRight = false;
9203     if (C.hasValue() && *C && (*C)->isOneValue())
9204       OnlyLeft = true;
9205     else if (C.hasValue() && *C && (*C)->isZeroValue())
9206       OnlyRight = true;
9207 
9208     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
9209     if (!OnlyRight) {
9210       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9211                                              DepClassTy::REQUIRED);
9212       if (!LHSAA->isValidState())
9213         return indicatePessimisticFixpoint();
9214     }
9215     if (!OnlyLeft) {
9216       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9217                                              DepClassTy::REQUIRED);
9218       if (!RHSAA->isValidState())
9219         return indicatePessimisticFixpoint();
9220     }
9221 
9222     if (!LHSAA || !RHSAA) {
9223       // select (true/false), lhs, rhs
9224       auto *OpAA = LHSAA ? LHSAA : RHSAA;
9225 
9226       if (OpAA->undefIsContained())
9227         unionAssumedWithUndef();
9228       else
9229         unionAssumed(*OpAA);
9230 
9231     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9232       // select i1 *, undef , undef => undef
9233       unionAssumedWithUndef();
9234     } else {
9235       unionAssumed(*LHSAA);
9236       unionAssumed(*RHSAA);
9237     }
9238     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9239                                          : ChangeStatus::CHANGED;
9240   }
9241 
9242   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9243     auto AssumedBefore = getAssumed();
9244     if (!CI->isIntegerCast())
9245       return indicatePessimisticFixpoint();
9246     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9247     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9248     Value *Src = CI->getOperand(0);
9249 
9250     // Simplify the operand first.
9251     bool UsedAssumedInformation = false;
9252     const auto &SimplifiedSrc =
9253         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9254                                *this, UsedAssumedInformation);
9255     if (!SimplifiedSrc.hasValue())
9256       return ChangeStatus::UNCHANGED;
9257     if (!SimplifiedSrc.getValue())
9258       return indicatePessimisticFixpoint();
9259     Src = *SimplifiedSrc;
9260 
9261     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
9262                                                 DepClassTy::REQUIRED);
9263     if (!SrcAA.isValidState())
9264       return indicatePessimisticFixpoint();
9265     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
9266     if (SrcAA.undefIsContained())
9267       unionAssumedWithUndef();
9268     else {
9269       for (const APInt &S : SrcAAPVS) {
9270         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9271         unionAssumed(T);
9272       }
9273     }
9274     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9275                                          : ChangeStatus::CHANGED;
9276   }
9277 
9278   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9279     auto AssumedBefore = getAssumed();
9280     Value *LHS = BinOp->getOperand(0);
9281     Value *RHS = BinOp->getOperand(1);
9282 
9283     // Simplify the operands first.
9284     bool UsedAssumedInformation = false;
9285     const auto &SimplifiedLHS =
9286         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9287                                *this, UsedAssumedInformation);
9288     if (!SimplifiedLHS.hasValue())
9289       return ChangeStatus::UNCHANGED;
9290     if (!SimplifiedLHS.getValue())
9291       return indicatePessimisticFixpoint();
9292     LHS = *SimplifiedLHS;
9293 
9294     const auto &SimplifiedRHS =
9295         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9296                                *this, UsedAssumedInformation);
9297     if (!SimplifiedRHS.hasValue())
9298       return ChangeStatus::UNCHANGED;
9299     if (!SimplifiedRHS.getValue())
9300       return indicatePessimisticFixpoint();
9301     RHS = *SimplifiedRHS;
9302 
9303     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9304       return indicatePessimisticFixpoint();
9305 
9306     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9307                                                 DepClassTy::REQUIRED);
9308     if (!LHSAA.isValidState())
9309       return indicatePessimisticFixpoint();
9310 
9311     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9312                                                 DepClassTy::REQUIRED);
9313     if (!RHSAA.isValidState())
9314       return indicatePessimisticFixpoint();
9315 
9316     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9317     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9318     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9319 
9320     // TODO: make use of undef flag to limit potential values aggressively.
9321     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9322       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9323         return indicatePessimisticFixpoint();
9324     } else if (LHSAA.undefIsContained()) {
9325       for (const APInt &R : RHSAAPVS) {
9326         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9327           return indicatePessimisticFixpoint();
9328       }
9329     } else if (RHSAA.undefIsContained()) {
9330       for (const APInt &L : LHSAAPVS) {
9331         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9332           return indicatePessimisticFixpoint();
9333       }
9334     } else {
9335       for (const APInt &L : LHSAAPVS) {
9336         for (const APInt &R : RHSAAPVS) {
9337           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9338             return indicatePessimisticFixpoint();
9339         }
9340       }
9341     }
9342     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9343                                          : ChangeStatus::CHANGED;
9344   }
9345 
9346   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9347     auto AssumedBefore = getAssumed();
9348     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9349       Value *IncomingValue = PHI->getIncomingValue(u);
9350 
9351       // Simplify the operand first.
9352       bool UsedAssumedInformation = false;
9353       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9354           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9355           UsedAssumedInformation);
9356       if (!SimplifiedIncomingValue.hasValue())
9357         continue;
9358       if (!SimplifiedIncomingValue.getValue())
9359         return indicatePessimisticFixpoint();
9360       IncomingValue = *SimplifiedIncomingValue;
9361 
9362       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9363           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9364       if (!PotentialValuesAA.isValidState())
9365         return indicatePessimisticFixpoint();
9366       if (PotentialValuesAA.undefIsContained())
9367         unionAssumedWithUndef();
9368       else
9369         unionAssumed(PotentialValuesAA.getAssumed());
9370     }
9371     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9372                                          : ChangeStatus::CHANGED;
9373   }
9374 
9375   /// See AbstractAttribute::updateImpl(...).
9376   ChangeStatus updateImpl(Attributor &A) override {
9377     Value &V = getAssociatedValue();
9378     Instruction *I = dyn_cast<Instruction>(&V);
9379 
9380     if (auto *ICI = dyn_cast<ICmpInst>(I))
9381       return updateWithICmpInst(A, ICI);
9382 
9383     if (auto *SI = dyn_cast<SelectInst>(I))
9384       return updateWithSelectInst(A, SI);
9385 
9386     if (auto *CI = dyn_cast<CastInst>(I))
9387       return updateWithCastInst(A, CI);
9388 
9389     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9390       return updateWithBinaryOperator(A, BinOp);
9391 
9392     if (auto *PHI = dyn_cast<PHINode>(I))
9393       return updateWithPHINode(A, PHI);
9394 
9395     return indicatePessimisticFixpoint();
9396   }
9397 
9398   /// See AbstractAttribute::trackStatistics()
9399   void trackStatistics() const override {
9400     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9401   }
9402 };
9403 
9404 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9405   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9406       : AAPotentialValuesImpl(IRP, A) {}
9407 
9408   /// See AbstractAttribute::initialize(...).
9409   ChangeStatus updateImpl(Attributor &A) override {
9410     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9411                      "not be called");
9412   }
9413 
9414   /// See AbstractAttribute::trackStatistics()
9415   void trackStatistics() const override {
9416     STATS_DECLTRACK_FN_ATTR(potential_values)
9417   }
9418 };
9419 
9420 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9421   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9422       : AAPotentialValuesFunction(IRP, A) {}
9423 
9424   /// See AbstractAttribute::trackStatistics()
9425   void trackStatistics() const override {
9426     STATS_DECLTRACK_CS_ATTR(potential_values)
9427   }
9428 };
9429 
9430 struct AAPotentialValuesCallSiteReturned
9431     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9432   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9433       : AACallSiteReturnedFromReturned<AAPotentialValues,
9434                                        AAPotentialValuesImpl>(IRP, A) {}
9435 
9436   /// See AbstractAttribute::trackStatistics()
9437   void trackStatistics() const override {
9438     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9439   }
9440 };
9441 
9442 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9443   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9444       : AAPotentialValuesFloating(IRP, A) {}
9445 
9446   /// See AbstractAttribute::initialize(..).
9447   void initialize(Attributor &A) override {
9448     AAPotentialValuesImpl::initialize(A);
9449     if (isAtFixpoint())
9450       return;
9451 
9452     Value &V = getAssociatedValue();
9453 
9454     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9455       unionAssumed(C->getValue());
9456       indicateOptimisticFixpoint();
9457       return;
9458     }
9459 
9460     if (isa<UndefValue>(&V)) {
9461       unionAssumedWithUndef();
9462       indicateOptimisticFixpoint();
9463       return;
9464     }
9465   }
9466 
9467   /// See AbstractAttribute::updateImpl(...).
9468   ChangeStatus updateImpl(Attributor &A) override {
9469     Value &V = getAssociatedValue();
9470     auto AssumedBefore = getAssumed();
9471     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9472                                              DepClassTy::REQUIRED);
9473     const auto &S = AA.getAssumed();
9474     unionAssumed(S);
9475     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9476                                          : ChangeStatus::CHANGED;
9477   }
9478 
9479   /// See AbstractAttribute::trackStatistics()
9480   void trackStatistics() const override {
9481     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9482   }
9483 };
9484 
9485 /// ------------------------ NoUndef Attribute ---------------------------------
9486 struct AANoUndefImpl : AANoUndef {
9487   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9488 
9489   /// See AbstractAttribute::initialize(...).
9490   void initialize(Attributor &A) override {
9491     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9492       indicateOptimisticFixpoint();
9493       return;
9494     }
9495     Value &V = getAssociatedValue();
9496     if (isa<UndefValue>(V))
9497       indicatePessimisticFixpoint();
9498     else if (isa<FreezeInst>(V))
9499       indicateOptimisticFixpoint();
9500     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9501              isGuaranteedNotToBeUndefOrPoison(&V))
9502       indicateOptimisticFixpoint();
9503     else
9504       AANoUndef::initialize(A);
9505   }
9506 
9507   /// See followUsesInMBEC
9508   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9509                        AANoUndef::StateType &State) {
9510     const Value *UseV = U->get();
9511     const DominatorTree *DT = nullptr;
9512     AssumptionCache *AC = nullptr;
9513     InformationCache &InfoCache = A.getInfoCache();
9514     if (Function *F = getAnchorScope()) {
9515       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9516       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9517     }
9518     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9519     bool TrackUse = false;
9520     // Track use for instructions which must produce undef or poison bits when
9521     // at least one operand contains such bits.
9522     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9523       TrackUse = true;
9524     return TrackUse;
9525   }
9526 
9527   /// See AbstractAttribute::getAsStr().
9528   const std::string getAsStr() const override {
9529     return getAssumed() ? "noundef" : "may-undef-or-poison";
9530   }
9531 
9532   ChangeStatus manifest(Attributor &A) override {
9533     // We don't manifest noundef attribute for dead positions because the
9534     // associated values with dead positions would be replaced with undef
9535     // values.
9536     bool UsedAssumedInformation = false;
9537     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9538                         UsedAssumedInformation))
9539       return ChangeStatus::UNCHANGED;
9540     // A position whose simplified value does not have any value is
9541     // considered to be dead. We don't manifest noundef in such positions for
9542     // the same reason above.
9543     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9544              .hasValue())
9545       return ChangeStatus::UNCHANGED;
9546     return AANoUndef::manifest(A);
9547   }
9548 };
9549 
9550 struct AANoUndefFloating : public AANoUndefImpl {
9551   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9552       : AANoUndefImpl(IRP, A) {}
9553 
9554   /// See AbstractAttribute::initialize(...).
9555   void initialize(Attributor &A) override {
9556     AANoUndefImpl::initialize(A);
9557     if (!getState().isAtFixpoint())
9558       if (Instruction *CtxI = getCtxI())
9559         followUsesInMBEC(*this, A, getState(), *CtxI);
9560   }
9561 
9562   /// See AbstractAttribute::updateImpl(...).
9563   ChangeStatus updateImpl(Attributor &A) override {
9564     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9565                             AANoUndef::StateType &T, bool Stripped) -> bool {
9566       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9567                                              DepClassTy::REQUIRED);
9568       if (!Stripped && this == &AA) {
9569         T.indicatePessimisticFixpoint();
9570       } else {
9571         const AANoUndef::StateType &S =
9572             static_cast<const AANoUndef::StateType &>(AA.getState());
9573         T ^= S;
9574       }
9575       return T.isValidState();
9576     };
9577 
9578     StateType T;
9579     bool UsedAssumedInformation = false;
9580     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9581                                           VisitValueCB, getCtxI(),
9582                                           UsedAssumedInformation))
9583       return indicatePessimisticFixpoint();
9584 
9585     return clampStateAndIndicateChange(getState(), T);
9586   }
9587 
9588   /// See AbstractAttribute::trackStatistics()
9589   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9590 };
9591 
9592 struct AANoUndefReturned final
9593     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9594   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9595       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9596 
9597   /// See AbstractAttribute::trackStatistics()
9598   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9599 };
9600 
9601 struct AANoUndefArgument final
9602     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9603   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9604       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9605 
9606   /// See AbstractAttribute::trackStatistics()
9607   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9608 };
9609 
9610 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9611   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9612       : AANoUndefFloating(IRP, A) {}
9613 
9614   /// See AbstractAttribute::trackStatistics()
9615   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9616 };
9617 
9618 struct AANoUndefCallSiteReturned final
9619     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9620   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9621       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9622 
9623   /// See AbstractAttribute::trackStatistics()
9624   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9625 };
9626 
9627 struct AACallEdgesImpl : public AACallEdges {
9628   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9629 
9630   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9631     return CalledFunctions;
9632   }
9633 
9634   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9635 
9636   virtual bool hasNonAsmUnknownCallee() const override {
9637     return HasUnknownCalleeNonAsm;
9638   }
9639 
9640   const std::string getAsStr() const override {
9641     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9642            std::to_string(CalledFunctions.size()) + "]";
9643   }
9644 
9645   void trackStatistics() const override {}
9646 
9647 protected:
9648   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9649     if (CalledFunctions.insert(Fn)) {
9650       Change = ChangeStatus::CHANGED;
9651       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9652                         << "\n");
9653     }
9654   }
9655 
9656   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9657     if (!HasUnknownCallee)
9658       Change = ChangeStatus::CHANGED;
9659     if (NonAsm && !HasUnknownCalleeNonAsm)
9660       Change = ChangeStatus::CHANGED;
9661     HasUnknownCalleeNonAsm |= NonAsm;
9662     HasUnknownCallee = true;
9663   }
9664 
9665 private:
9666   /// Optimistic set of functions that might be called by this position.
9667   SetVector<Function *> CalledFunctions;
9668 
9669   /// Is there any call with a unknown callee.
9670   bool HasUnknownCallee = false;
9671 
9672   /// Is there any call with a unknown callee, excluding any inline asm.
9673   bool HasUnknownCalleeNonAsm = false;
9674 };
9675 
9676 struct AACallEdgesCallSite : public AACallEdgesImpl {
9677   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9678       : AACallEdgesImpl(IRP, A) {}
9679   /// See AbstractAttribute::updateImpl(...).
9680   ChangeStatus updateImpl(Attributor &A) override {
9681     ChangeStatus Change = ChangeStatus::UNCHANGED;
9682 
9683     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9684                           bool Stripped) -> bool {
9685       if (Function *Fn = dyn_cast<Function>(&V)) {
9686         addCalledFunction(Fn, Change);
9687       } else {
9688         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9689         setHasUnknownCallee(true, Change);
9690       }
9691 
9692       // Explore all values.
9693       return true;
9694     };
9695 
9696     // Process any value that we might call.
9697     auto ProcessCalledOperand = [&](Value *V) {
9698       bool DummyValue = false;
9699       bool UsedAssumedInformation = false;
9700       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9701                                        DummyValue, VisitValue, nullptr,
9702                                        UsedAssumedInformation, false)) {
9703         // If we haven't gone through all values, assume that there are unknown
9704         // callees.
9705         setHasUnknownCallee(true, Change);
9706       }
9707     };
9708 
9709     CallBase *CB = cast<CallBase>(getCtxI());
9710 
9711     if (CB->isInlineAsm()) {
9712       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9713           !hasAssumption(*CB, "ompx_no_call_asm"))
9714         setHasUnknownCallee(false, Change);
9715       return Change;
9716     }
9717 
9718     // Process callee metadata if available.
9719     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9720       for (auto &Op : MD->operands()) {
9721         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9722         if (Callee)
9723           addCalledFunction(Callee, Change);
9724       }
9725       return Change;
9726     }
9727 
9728     // The most simple case.
9729     ProcessCalledOperand(CB->getCalledOperand());
9730 
9731     // Process callback functions.
9732     SmallVector<const Use *, 4u> CallbackUses;
9733     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9734     for (const Use *U : CallbackUses)
9735       ProcessCalledOperand(U->get());
9736 
9737     return Change;
9738   }
9739 };
9740 
9741 struct AACallEdgesFunction : public AACallEdgesImpl {
9742   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9743       : AACallEdgesImpl(IRP, A) {}
9744 
9745   /// See AbstractAttribute::updateImpl(...).
9746   ChangeStatus updateImpl(Attributor &A) override {
9747     ChangeStatus Change = ChangeStatus::UNCHANGED;
9748 
9749     auto ProcessCallInst = [&](Instruction &Inst) {
9750       CallBase &CB = cast<CallBase>(Inst);
9751 
9752       auto &CBEdges = A.getAAFor<AACallEdges>(
9753           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9754       if (CBEdges.hasNonAsmUnknownCallee())
9755         setHasUnknownCallee(true, Change);
9756       if (CBEdges.hasUnknownCallee())
9757         setHasUnknownCallee(false, Change);
9758 
9759       for (Function *F : CBEdges.getOptimisticEdges())
9760         addCalledFunction(F, Change);
9761 
9762       return true;
9763     };
9764 
9765     // Visit all callable instructions.
9766     bool UsedAssumedInformation = false;
9767     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9768                                            UsedAssumedInformation,
9769                                            /* CheckBBLivenessOnly */ true)) {
9770       // If we haven't looked at all call like instructions, assume that there
9771       // are unknown callees.
9772       setHasUnknownCallee(true, Change);
9773     }
9774 
9775     return Change;
9776   }
9777 };
9778 
9779 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9780 private:
9781   struct QuerySet {
9782     void markReachable(const Function &Fn) {
9783       Reachable.insert(&Fn);
9784       Unreachable.erase(&Fn);
9785     }
9786 
9787     /// If there is no information about the function None is returned.
9788     Optional<bool> isCachedReachable(const Function &Fn) {
9789       // Assume that we can reach the function.
9790       // TODO: Be more specific with the unknown callee.
9791       if (CanReachUnknownCallee)
9792         return true;
9793 
9794       if (Reachable.count(&Fn))
9795         return true;
9796 
9797       if (Unreachable.count(&Fn))
9798         return false;
9799 
9800       return llvm::None;
9801     }
9802 
9803     /// Set of functions that we know for sure is reachable.
9804     DenseSet<const Function *> Reachable;
9805 
9806     /// Set of functions that are unreachable, but might become reachable.
9807     DenseSet<const Function *> Unreachable;
9808 
9809     /// If we can reach a function with a call to a unknown function we assume
9810     /// that we can reach any function.
9811     bool CanReachUnknownCallee = false;
9812   };
9813 
9814   struct QueryResolver : public QuerySet {
9815     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9816                         ArrayRef<const AACallEdges *> AAEdgesList) {
9817       ChangeStatus Change = ChangeStatus::UNCHANGED;
9818 
9819       for (auto *AAEdges : AAEdgesList) {
9820         if (AAEdges->hasUnknownCallee()) {
9821           if (!CanReachUnknownCallee)
9822             Change = ChangeStatus::CHANGED;
9823           CanReachUnknownCallee = true;
9824           return Change;
9825         }
9826       }
9827 
9828       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9829         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9830           Change = ChangeStatus::CHANGED;
9831           markReachable(*Fn);
9832         }
9833       }
9834       return Change;
9835     }
9836 
9837     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9838                      ArrayRef<const AACallEdges *> AAEdgesList,
9839                      const Function &Fn) {
9840       Optional<bool> Cached = isCachedReachable(Fn);
9841       if (Cached.hasValue())
9842         return Cached.getValue();
9843 
9844       // The query was not cached, thus it is new. We need to request an update
9845       // explicitly to make sure this the information is properly run to a
9846       // fixpoint.
9847       A.registerForUpdate(AA);
9848 
9849       // We need to assume that this function can't reach Fn to prevent
9850       // an infinite loop if this function is recursive.
9851       Unreachable.insert(&Fn);
9852 
9853       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9854       if (Result)
9855         markReachable(Fn);
9856       return Result;
9857     }
9858 
9859     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9860                           ArrayRef<const AACallEdges *> AAEdgesList,
9861                           const Function &Fn) const {
9862 
9863       // Handle the most trivial case first.
9864       for (auto *AAEdges : AAEdgesList) {
9865         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9866 
9867         if (Edges.count(const_cast<Function *>(&Fn)))
9868           return true;
9869       }
9870 
9871       SmallVector<const AAFunctionReachability *, 8> Deps;
9872       for (auto &AAEdges : AAEdgesList) {
9873         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9874 
9875         for (Function *Edge : Edges) {
9876           // Functions that do not call back into the module can be ignored.
9877           if (Edge->hasFnAttribute(Attribute::NoCallback))
9878             continue;
9879 
9880           // We don't need a dependency if the result is reachable.
9881           const AAFunctionReachability &EdgeReachability =
9882               A.getAAFor<AAFunctionReachability>(
9883                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9884           Deps.push_back(&EdgeReachability);
9885 
9886           if (EdgeReachability.canReach(A, Fn))
9887             return true;
9888         }
9889       }
9890 
9891       // The result is false for now, set dependencies and leave.
9892       for (auto *Dep : Deps)
9893         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9894 
9895       return false;
9896     }
9897   };
9898 
9899   /// Get call edges that can be reached by this instruction.
9900   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9901                              const Instruction &Inst,
9902                              SmallVector<const AACallEdges *> &Result) const {
9903     // Determine call like instructions that we can reach from the inst.
9904     auto CheckCallBase = [&](Instruction &CBInst) {
9905       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9906         return true;
9907 
9908       auto &CB = cast<CallBase>(CBInst);
9909       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9910           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9911 
9912       Result.push_back(&AAEdges);
9913       return true;
9914     };
9915 
9916     bool UsedAssumedInformation = false;
9917     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9918                                              UsedAssumedInformation,
9919                                              /* CheckBBLivenessOnly */ true);
9920   }
9921 
9922 public:
9923   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9924       : AAFunctionReachability(IRP, A) {}
9925 
9926   bool canReach(Attributor &A, const Function &Fn) const override {
9927     if (!isValidState())
9928       return true;
9929 
9930     const AACallEdges &AAEdges =
9931         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9932 
9933     // Attributor returns attributes as const, so this function has to be
9934     // const for users of this attribute to use it without having to do
9935     // a const_cast.
9936     // This is a hack for us to be able to cache queries.
9937     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9938     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9939                                                           {&AAEdges}, Fn);
9940 
9941     return Result;
9942   }
9943 
9944   /// Can \p CB reach \p Fn
9945   bool canReach(Attributor &A, CallBase &CB,
9946                 const Function &Fn) const override {
9947     if (!isValidState())
9948       return true;
9949 
9950     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9951         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9952 
9953     // Attributor returns attributes as const, so this function has to be
9954     // const for users of this attribute to use it without having to do
9955     // a const_cast.
9956     // This is a hack for us to be able to cache queries.
9957     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9958     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9959 
9960     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9961 
9962     return Result;
9963   }
9964 
9965   bool instructionCanReach(Attributor &A, const Instruction &Inst,
9966                            const Function &Fn,
9967                            bool UseBackwards) const override {
9968     if (!isValidState())
9969       return true;
9970 
9971     if (UseBackwards)
9972       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
9973 
9974     const auto &Reachability = A.getAAFor<AAReachability>(
9975         *this, IRPosition::function(*getAssociatedFunction()),
9976         DepClassTy::REQUIRED);
9977 
9978     SmallVector<const AACallEdges *> CallEdges;
9979     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
9980     // Attributor returns attributes as const, so this function has to be
9981     // const for users of this attribute to use it without having to do
9982     // a const_cast.
9983     // This is a hack for us to be able to cache queries.
9984     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9985     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
9986     if (!AllKnown)
9987       InstQSet.CanReachUnknownCallee = true;
9988 
9989     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
9990   }
9991 
9992   /// See AbstractAttribute::updateImpl(...).
9993   ChangeStatus updateImpl(Attributor &A) override {
9994     const AACallEdges &AAEdges =
9995         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9996     ChangeStatus Change = ChangeStatus::UNCHANGED;
9997 
9998     Change |= WholeFunction.update(A, *this, {&AAEdges});
9999 
10000     for (auto &CBPair : CBQueries) {
10001       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10002           *this, IRPosition::callsite_function(*CBPair.first),
10003           DepClassTy::REQUIRED);
10004 
10005       Change |= CBPair.second.update(A, *this, {&AAEdges});
10006     }
10007 
10008     // Update the Instruction queries.
10009     if (!InstQueries.empty()) {
10010       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
10011           *this, IRPosition::function(*getAssociatedFunction()),
10012           DepClassTy::REQUIRED);
10013 
10014       // Check for local callbases first.
10015       for (auto &InstPair : InstQueries) {
10016         SmallVector<const AACallEdges *> CallEdges;
10017         bool AllKnown =
10018             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
10019         // Update will return change if we this effects any queries.
10020         if (!AllKnown)
10021           InstPair.second.CanReachUnknownCallee = true;
10022         Change |= InstPair.second.update(A, *this, CallEdges);
10023       }
10024     }
10025 
10026     return Change;
10027   }
10028 
10029   const std::string getAsStr() const override {
10030     size_t QueryCount =
10031         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
10032 
10033     return "FunctionReachability [" +
10034            std::to_string(WholeFunction.Reachable.size()) + "," +
10035            std::to_string(QueryCount) + "]";
10036   }
10037 
10038   void trackStatistics() const override {}
10039 
10040 private:
10041   bool canReachUnknownCallee() const override {
10042     return WholeFunction.CanReachUnknownCallee;
10043   }
10044 
10045   /// Used to answer if a the whole function can reacha a specific function.
10046   QueryResolver WholeFunction;
10047 
10048   /// Used to answer if a call base inside this function can reach a specific
10049   /// function.
10050   MapVector<const CallBase *, QueryResolver> CBQueries;
10051 
10052   /// This is for instruction queries than scan "forward".
10053   MapVector<const Instruction *, QueryResolver> InstQueries;
10054 };
10055 } // namespace
10056 
10057 /// ---------------------- Assumption Propagation ------------------------------
10058 namespace {
10059 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10060   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10061                        const DenseSet<StringRef> &Known)
10062       : AAAssumptionInfo(IRP, A, Known) {}
10063 
10064   bool hasAssumption(const StringRef Assumption) const override {
10065     return isValidState() && setContains(Assumption);
10066   }
10067 
10068   /// See AbstractAttribute::getAsStr()
10069   const std::string getAsStr() const override {
10070     const SetContents &Known = getKnown();
10071     const SetContents &Assumed = getAssumed();
10072 
10073     const std::string KnownStr =
10074         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10075     const std::string AssumedStr =
10076         (Assumed.isUniversal())
10077             ? "Universal"
10078             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10079 
10080     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10081   }
10082 };
10083 
10084 /// Propagates assumption information from parent functions to all of their
10085 /// successors. An assumption can be propagated if the containing function
10086 /// dominates the called function.
10087 ///
10088 /// We start with a "known" set of assumptions already valid for the associated
10089 /// function and an "assumed" set that initially contains all possible
10090 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10091 /// contents as concrete values are known. The concrete values are seeded by the
10092 /// first nodes that are either entries into the call graph, or contains no
10093 /// assumptions. Each node is updated as the intersection of the assumed state
10094 /// with all of its predecessors.
10095 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10096   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10097       : AAAssumptionInfoImpl(IRP, A,
10098                              getAssumptions(*IRP.getAssociatedFunction())) {}
10099 
10100   /// See AbstractAttribute::manifest(...).
10101   ChangeStatus manifest(Attributor &A) override {
10102     const auto &Assumptions = getKnown();
10103 
10104     // Don't manifest a universal set if it somehow made it here.
10105     if (Assumptions.isUniversal())
10106       return ChangeStatus::UNCHANGED;
10107 
10108     Function *AssociatedFunction = getAssociatedFunction();
10109 
10110     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10111 
10112     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10113   }
10114 
10115   /// See AbstractAttribute::updateImpl(...).
10116   ChangeStatus updateImpl(Attributor &A) override {
10117     bool Changed = false;
10118 
10119     auto CallSitePred = [&](AbstractCallSite ACS) {
10120       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10121           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10122           DepClassTy::REQUIRED);
10123       // Get the set of assumptions shared by all of this function's callers.
10124       Changed |= getIntersection(AssumptionAA.getAssumed());
10125       return !getAssumed().empty() || !getKnown().empty();
10126     };
10127 
10128     bool UsedAssumedInformation = false;
10129     // Get the intersection of all assumptions held by this node's predecessors.
10130     // If we don't know all the call sites then this is either an entry into the
10131     // call graph or an empty node. This node is known to only contain its own
10132     // assumptions and can be propagated to its successors.
10133     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10134                                 UsedAssumedInformation))
10135       return indicatePessimisticFixpoint();
10136 
10137     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10138   }
10139 
10140   void trackStatistics() const override {}
10141 };
10142 
10143 /// Assumption Info defined for call sites.
10144 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10145 
10146   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10147       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10148 
10149   /// See AbstractAttribute::initialize(...).
10150   void initialize(Attributor &A) override {
10151     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10152     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10153   }
10154 
10155   /// See AbstractAttribute::manifest(...).
10156   ChangeStatus manifest(Attributor &A) override {
10157     // Don't manifest a universal set if it somehow made it here.
10158     if (getKnown().isUniversal())
10159       return ChangeStatus::UNCHANGED;
10160 
10161     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10162     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10163 
10164     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10165   }
10166 
10167   /// See AbstractAttribute::updateImpl(...).
10168   ChangeStatus updateImpl(Attributor &A) override {
10169     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10170     auto &AssumptionAA =
10171         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10172     bool Changed = getIntersection(AssumptionAA.getAssumed());
10173     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10174   }
10175 
10176   /// See AbstractAttribute::trackStatistics()
10177   void trackStatistics() const override {}
10178 
10179 private:
10180   /// Helper to initialized the known set as all the assumptions this call and
10181   /// the callee contain.
10182   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10183     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10184     auto Assumptions = getAssumptions(CB);
10185     if (Function *F = IRP.getAssociatedFunction())
10186       set_union(Assumptions, getAssumptions(*F));
10187     if (Function *F = IRP.getAssociatedFunction())
10188       set_union(Assumptions, getAssumptions(*F));
10189     return Assumptions;
10190   }
10191 };
10192 } // namespace
10193 
10194 AACallGraphNode *AACallEdgeIterator::operator*() const {
10195   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10196       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10197 }
10198 
10199 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10200 
10201 const char AAReturnedValues::ID = 0;
10202 const char AANoUnwind::ID = 0;
10203 const char AANoSync::ID = 0;
10204 const char AANoFree::ID = 0;
10205 const char AANonNull::ID = 0;
10206 const char AANoRecurse::ID = 0;
10207 const char AAWillReturn::ID = 0;
10208 const char AAUndefinedBehavior::ID = 0;
10209 const char AANoAlias::ID = 0;
10210 const char AAReachability::ID = 0;
10211 const char AANoReturn::ID = 0;
10212 const char AAIsDead::ID = 0;
10213 const char AADereferenceable::ID = 0;
10214 const char AAAlign::ID = 0;
10215 const char AAInstanceInfo::ID = 0;
10216 const char AANoCapture::ID = 0;
10217 const char AAValueSimplify::ID = 0;
10218 const char AAHeapToStack::ID = 0;
10219 const char AAPrivatizablePtr::ID = 0;
10220 const char AAMemoryBehavior::ID = 0;
10221 const char AAMemoryLocation::ID = 0;
10222 const char AAValueConstantRange::ID = 0;
10223 const char AAPotentialValues::ID = 0;
10224 const char AANoUndef::ID = 0;
10225 const char AACallEdges::ID = 0;
10226 const char AAFunctionReachability::ID = 0;
10227 const char AAPointerInfo::ID = 0;
10228 const char AAAssumptionInfo::ID = 0;
10229 
10230 // Macro magic to create the static generator function for attributes that
10231 // follow the naming scheme.
10232 
10233 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10234   case IRPosition::PK:                                                         \
10235     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10236 
10237 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10238   case IRPosition::PK:                                                         \
10239     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10240     ++NumAAs;                                                                  \
10241     break;
10242 
10243 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10244   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10245     CLASS *AA = nullptr;                                                       \
10246     switch (IRP.getPositionKind()) {                                           \
10247       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10248       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10249       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10250       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10251       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10252       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10253       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10254       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10255     }                                                                          \
10256     return *AA;                                                                \
10257   }
10258 
10259 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10260   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10261     CLASS *AA = nullptr;                                                       \
10262     switch (IRP.getPositionKind()) {                                           \
10263       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10264       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10265       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10266       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10267       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10268       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10269       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10270       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10271     }                                                                          \
10272     return *AA;                                                                \
10273   }
10274 
10275 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10276   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10277     CLASS *AA = nullptr;                                                       \
10278     switch (IRP.getPositionKind()) {                                           \
10279       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10280       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10281       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10282       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10283       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10284       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10285       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10286       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10287     }                                                                          \
10288     return *AA;                                                                \
10289   }
10290 
10291 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10292   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10293     CLASS *AA = nullptr;                                                       \
10294     switch (IRP.getPositionKind()) {                                           \
10295       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10296       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10297       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10298       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10299       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10300       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10301       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10302       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10303     }                                                                          \
10304     return *AA;                                                                \
10305   }
10306 
10307 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10308   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10309     CLASS *AA = nullptr;                                                       \
10310     switch (IRP.getPositionKind()) {                                           \
10311       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10312       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10313       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10314       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10315       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10316       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10317       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10318       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10319     }                                                                          \
10320     return *AA;                                                                \
10321   }
10322 
10323 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10324 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10325 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10326 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10327 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10328 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10329 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10330 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10331 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10332 
10333 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10334 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10335 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10336 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10337 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10338 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10339 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10340 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10341 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
10342 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10343 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10344 
10345 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10346 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10347 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10348 
10349 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10350 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10351 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10352 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10353 
10354 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10355 
10356 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10357 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10358 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10359 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10360 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10361 #undef SWITCH_PK_CREATE
10362 #undef SWITCH_PK_INV
10363