1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetOperations.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/LazyValueInfo.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
32 #include "llvm/Analysis/ScalarEvolution.h"
33 #include "llvm/Analysis/TargetTransformInfo.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Assumptions.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/GlobalValue.h"
40 #include "llvm/IR/IRBuilder.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/NoFolder.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/IR/ValueHandle.h"
47 #include "llvm/Support/Alignment.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/GraphWriter.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
55 #include "llvm/Transforms/Utils/Local.h"
56 #include "llvm/Transforms/Utils/ValueMapper.h"
57 #include <cassert>
58 
59 using namespace llvm;
60 
61 #define DEBUG_TYPE "attributor"
62 
63 static cl::opt<bool> ManifestInternal(
64     "attributor-manifest-internal", cl::Hidden,
65     cl::desc("Manifest Attributor internal string attributes."),
66     cl::init(false));
67 
68 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
69                                        cl::Hidden);
70 
71 template <>
72 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
73 
74 static cl::opt<unsigned, true> MaxPotentialValues(
75     "attributor-max-potential-values", cl::Hidden,
76     cl::desc("Maximum number of potential values to be "
77              "tracked for each position."),
78     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
79     cl::init(7));
80 
81 static cl::opt<unsigned> MaxInterferingAccesses(
82     "attributor-max-interfering-accesses", cl::Hidden,
83     cl::desc("Maximum number of interfering accesses to "
84              "check before assuming all might interfere."),
85     cl::init(6));
86 
87 STATISTIC(NumAAs, "Number of abstract attributes created");
88 
89 // Some helper macros to deal with statistics tracking.
90 //
91 // Usage:
92 // For simple IR attribute tracking overload trackStatistics in the abstract
93 // attribute and choose the right STATS_DECLTRACK_********* macro,
94 // e.g.,:
95 //  void trackStatistics() const override {
96 //    STATS_DECLTRACK_ARG_ATTR(returned)
97 //  }
98 // If there is a single "increment" side one can use the macro
99 // STATS_DECLTRACK with a custom message. If there are multiple increment
100 // sides, STATS_DECL and STATS_TRACK can also be used separately.
101 //
102 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
103   ("Number of " #TYPE " marked '" #NAME "'")
104 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
105 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
106 #define STATS_DECL(NAME, TYPE, MSG)                                            \
107   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
108 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
109 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
110   {                                                                            \
111     STATS_DECL(NAME, TYPE, MSG)                                                \
112     STATS_TRACK(NAME, TYPE)                                                    \
113   }
114 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
115   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
116 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
117   STATS_DECLTRACK(NAME, CSArguments,                                           \
118                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
119 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
120   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
121 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
122   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
123 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
124   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
125                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
126 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
127   STATS_DECLTRACK(NAME, CSReturn,                                              \
128                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
129 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
130   STATS_DECLTRACK(NAME, Floating,                                              \
131                   ("Number of floating values known to be '" #NAME "'"))
132 
133 // Specialization of the operator<< for abstract attributes subclasses. This
134 // disambiguates situations where multiple operators are applicable.
135 namespace llvm {
136 #define PIPE_OPERATOR(CLASS)                                                   \
137   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
138     return OS << static_cast<const AbstractAttribute &>(AA);                   \
139   }
140 
141 PIPE_OPERATOR(AAIsDead)
142 PIPE_OPERATOR(AANoUnwind)
143 PIPE_OPERATOR(AANoSync)
144 PIPE_OPERATOR(AANoRecurse)
145 PIPE_OPERATOR(AAWillReturn)
146 PIPE_OPERATOR(AANoReturn)
147 PIPE_OPERATOR(AAReturnedValues)
148 PIPE_OPERATOR(AANonNull)
149 PIPE_OPERATOR(AANoAlias)
150 PIPE_OPERATOR(AADereferenceable)
151 PIPE_OPERATOR(AAAlign)
152 PIPE_OPERATOR(AAInstanceInfo)
153 PIPE_OPERATOR(AANoCapture)
154 PIPE_OPERATOR(AAValueSimplify)
155 PIPE_OPERATOR(AANoFree)
156 PIPE_OPERATOR(AAHeapToStack)
157 PIPE_OPERATOR(AAReachability)
158 PIPE_OPERATOR(AAMemoryBehavior)
159 PIPE_OPERATOR(AAMemoryLocation)
160 PIPE_OPERATOR(AAValueConstantRange)
161 PIPE_OPERATOR(AAPrivatizablePtr)
162 PIPE_OPERATOR(AAUndefinedBehavior)
163 PIPE_OPERATOR(AAPotentialConstantValues)
164 PIPE_OPERATOR(AANoUndef)
165 PIPE_OPERATOR(AACallEdges)
166 PIPE_OPERATOR(AAFunctionReachability)
167 PIPE_OPERATOR(AAPointerInfo)
168 PIPE_OPERATOR(AAAssumptionInfo)
169 
170 #undef PIPE_OPERATOR
171 
172 template <>
173 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
174                                                      const DerefState &R) {
175   ChangeStatus CS0 =
176       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
177   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
178   return CS0 | CS1;
179 }
180 
181 } // namespace llvm
182 
183 /// Get pointer operand of memory accessing instruction. If \p I is
184 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
185 /// is set to false and the instruction is volatile, return nullptr.
186 static const Value *getPointerOperand(const Instruction *I,
187                                       bool AllowVolatile) {
188   if (!AllowVolatile && I->isVolatile())
189     return nullptr;
190 
191   if (auto *LI = dyn_cast<LoadInst>(I)) {
192     return LI->getPointerOperand();
193   }
194 
195   if (auto *SI = dyn_cast<StoreInst>(I)) {
196     return SI->getPointerOperand();
197   }
198 
199   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
200     return CXI->getPointerOperand();
201   }
202 
203   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
204     return RMWI->getPointerOperand();
205   }
206 
207   return nullptr;
208 }
209 
210 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
211 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
212 /// getelement pointer instructions that traverse the natural type of \p Ptr if
213 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
214 /// through a cast to i8*.
215 ///
216 /// TODO: This could probably live somewhere more prominantly if it doesn't
217 ///       already exist.
218 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
219                                int64_t Offset, IRBuilder<NoFolder> &IRB,
220                                const DataLayout &DL) {
221   assert(Offset >= 0 && "Negative offset not supported yet!");
222   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
223                     << "-bytes as " << *ResTy << "\n");
224 
225   if (Offset) {
226     Type *Ty = PtrElemTy;
227     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
228     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
229 
230     SmallVector<Value *, 4> ValIndices;
231     std::string GEPName = Ptr->getName().str();
232     for (const APInt &Index : IntIndices) {
233       ValIndices.push_back(IRB.getInt(Index));
234       GEPName += "." + std::to_string(Index.getZExtValue());
235     }
236 
237     // Create a GEP for the indices collected above.
238     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
239 
240     // If an offset is left we use byte-wise adjustment.
241     if (IntOffset != 0) {
242       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
243       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
244                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
245     }
246   }
247 
248   // Ensure the result has the requested type.
249   Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
250                                                 Ptr->getName() + ".cast");
251 
252   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
253   return Ptr;
254 }
255 
256 /// Recursively visit all values that might become \p IRP at some point. This
257 /// will be done by looking through cast instructions, selects, phis, and calls
258 /// with the "returned" attribute. Once we cannot look through the value any
259 /// further, the callback \p VisitValueCB is invoked and passed the current
260 /// value, the \p State, and a flag to indicate if we stripped anything.
261 /// Stripped means that we unpacked the value associated with \p IRP at least
262 /// once. Note that the value used for the callback may still be the value
263 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
264 /// we will never visit more values than specified by \p MaxValues.
265 /// If \p VS does not contain the Interprocedural bit, only values valid in the
266 /// scope of \p CtxI will be visited and simplification into other scopes is
267 /// prevented.
268 template <typename StateTy>
269 static bool genericValueTraversal(
270     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
271     StateTy &State,
272     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
273         VisitValueCB,
274     const Instruction *CtxI, bool &UsedAssumedInformation,
275     bool UseValueSimplify = true, int MaxValues = 16,
276     function_ref<Value *(Value *)> StripCB = nullptr,
277     AA::ValueScope VS = AA::Interprocedural) {
278 
279   struct LivenessInfo {
280     const AAIsDead *LivenessAA = nullptr;
281     bool AnyDead = false;
282   };
283   SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
284   auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
285     LivenessInfo &LI = LivenessAAs[&F];
286     if (!LI.LivenessAA)
287       LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
288                                             DepClassTy::NONE);
289     return LI;
290   };
291 
292   Value *InitialV = &IRP.getAssociatedValue();
293   using Item = std::pair<Value *, const Instruction *>;
294   SmallSet<Item, 16> Visited;
295   SmallVector<Item, 16> Worklist;
296   Worklist.push_back({InitialV, CtxI});
297 
298   int Iteration = 0;
299   do {
300     Item I = Worklist.pop_back_val();
301     Value *V = I.first;
302     CtxI = I.second;
303     if (StripCB)
304       V = StripCB(V);
305 
306     // Check if we should process the current value. To prevent endless
307     // recursion keep a record of the values we followed!
308     if (!Visited.insert(I).second)
309       continue;
310 
311     // Make sure we limit the compile time for complex expressions.
312     if (Iteration++ >= MaxValues) {
313       LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
314                         << Iteration << "!\n");
315       return false;
316     }
317 
318     // Explicitly look through calls with a "returned" attribute if we do
319     // not have a pointer as stripPointerCasts only works on them.
320     Value *NewV = nullptr;
321     if (V->getType()->isPointerTy()) {
322       NewV = V->stripPointerCasts();
323     } else {
324       auto *CB = dyn_cast<CallBase>(V);
325       if (CB && CB->getCalledFunction()) {
326         for (Argument &Arg : CB->getCalledFunction()->args())
327           if (Arg.hasReturnedAttr()) {
328             NewV = CB->getArgOperand(Arg.getArgNo());
329             break;
330           }
331       }
332     }
333     if (NewV && NewV != V) {
334       Worklist.push_back({NewV, CtxI});
335       continue;
336     }
337 
338     // Look through select instructions, visit assumed potential values.
339     if (auto *SI = dyn_cast<SelectInst>(V)) {
340       Optional<Constant *> C = A.getAssumedConstant(
341           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
342       bool NoValueYet = !C;
343       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
344         continue;
345       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
346         if (CI->isZero())
347           Worklist.push_back({SI->getFalseValue(), CtxI});
348         else
349           Worklist.push_back({SI->getTrueValue(), CtxI});
350         continue;
351       }
352       // We could not simplify the condition, assume both values.(
353       Worklist.push_back({SI->getTrueValue(), CtxI});
354       Worklist.push_back({SI->getFalseValue(), CtxI});
355       continue;
356     }
357 
358     // Look through phi nodes, visit all live operands.
359     if (auto *PHI = dyn_cast<PHINode>(V)) {
360       LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
361       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
362         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
363         if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
364           LI.AnyDead = true;
365           UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
366           continue;
367         }
368         Worklist.push_back(
369             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
370       }
371       continue;
372     }
373 
374     if (auto *Arg = dyn_cast<Argument>(V)) {
375       if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
376         SmallVector<Item> CallSiteValues;
377         bool UsedAssumedInformation = false;
378         if (A.checkForAllCallSites(
379                 [&](AbstractCallSite ACS) {
380                   // Callbacks might not have a corresponding call site operand,
381                   // stick with the argument in that case.
382                   Value *CSOp = ACS.getCallArgOperand(*Arg);
383                   if (!CSOp)
384                     return false;
385                   CallSiteValues.push_back({CSOp, ACS.getInstruction()});
386                   return true;
387                 },
388                 *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
389           Worklist.append(CallSiteValues);
390           continue;
391         }
392       }
393     }
394 
395     if (UseValueSimplify && !isa<Constant>(V)) {
396       Optional<Value *> SimpleV =
397           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
398       if (!SimpleV)
399         continue;
400       Value *NewV = SimpleV.getValue();
401       if (NewV && NewV != V) {
402         if ((VS & AA::Interprocedural) || !CtxI ||
403             AA::isValidInScope(*NewV, CtxI->getFunction())) {
404           Worklist.push_back({NewV, CtxI});
405           continue;
406         }
407       }
408     }
409 
410     if (auto *LI = dyn_cast<LoadInst>(V)) {
411       bool UsedAssumedInformation = false;
412       // If we ask for the potentially loaded values from the initial pointer we
413       // will simply end up here again. The load is as far as we can make it.
414       if (LI->getPointerOperand() != InitialV) {
415         SmallSetVector<Value *, 4> PotentialCopies;
416         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
417         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
418                                            PotentialValueOrigins, QueryingAA,
419                                            UsedAssumedInformation,
420                                            /* OnlyExact */ true)) {
421           // Values have to be dynamically unique or we loose the fact that a
422           // single llvm::Value might represent two runtime values (e.g., stack
423           // locations in different recursive calls).
424           bool DynamicallyUnique =
425               llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
426                 return AA::isDynamicallyUnique(A, QueryingAA, *PC);
427               });
428           if (DynamicallyUnique &&
429               ((VS & AA::Interprocedural) || !CtxI ||
430                llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
431                  return AA::isValidInScope(*PC, CtxI->getFunction());
432                }))) {
433             for (auto *PotentialCopy : PotentialCopies)
434               Worklist.push_back({PotentialCopy, CtxI});
435             continue;
436           }
437         }
438       }
439     }
440 
441     // Once a leaf is reached we inform the user through the callback.
442     if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
443       LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
444                         << *V << "!\n");
445       return false;
446     }
447   } while (!Worklist.empty());
448 
449   // If we actually used liveness information so we have to record a dependence.
450   for (auto &It : LivenessAAs)
451     if (It.second.AnyDead)
452       A.recordDependence(*It.second.LivenessAA, QueryingAA,
453                          DepClassTy::OPTIONAL);
454 
455   // All values have been visited.
456   return true;
457 }
458 
459 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
460                                      SmallVectorImpl<Value *> &Objects,
461                                      const AbstractAttribute &QueryingAA,
462                                      const Instruction *CtxI,
463                                      bool &UsedAssumedInformation,
464                                      AA::ValueScope VS) {
465   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
466   SmallPtrSet<Value *, 8> SeenObjects;
467   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
468                                      SmallVectorImpl<Value *> &Objects,
469                                      bool) -> bool {
470     if (SeenObjects.insert(&Val).second)
471       Objects.push_back(&Val);
472     return true;
473   };
474   if (!genericValueTraversal<decltype(Objects)>(
475           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
476           UsedAssumedInformation, true, 32, StripCB, VS))
477     return false;
478   return true;
479 }
480 
481 static const Value *
482 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
483                           const Value *Val, const DataLayout &DL, APInt &Offset,
484                           bool GetMinOffset, bool AllowNonInbounds,
485                           bool UseAssumed = false) {
486 
487   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
488     const IRPosition &Pos = IRPosition::value(V);
489     // Only track dependence if we are going to use the assumed info.
490     const AAValueConstantRange &ValueConstantRangeAA =
491         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
492                                          UseAssumed ? DepClassTy::OPTIONAL
493                                                     : DepClassTy::NONE);
494     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
495                                      : ValueConstantRangeAA.getKnown();
496     if (Range.isFullSet())
497       return false;
498 
499     // We can only use the lower part of the range because the upper part can
500     // be higher than what the value can really be.
501     if (GetMinOffset)
502       ROffset = Range.getSignedMin();
503     else
504       ROffset = Range.getSignedMax();
505     return true;
506   };
507 
508   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
509                                                 /* AllowInvariant */ true,
510                                                 AttributorAnalysis);
511 }
512 
513 static const Value *
514 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
515                         const Value *Ptr, int64_t &BytesOffset,
516                         const DataLayout &DL, bool AllowNonInbounds = false) {
517   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
518   const Value *Base =
519       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
520                                 /* GetMinOffset */ true, AllowNonInbounds);
521 
522   BytesOffset = OffsetAPInt.getSExtValue();
523   return Base;
524 }
525 
526 /// Clamp the information known for all returned values of a function
527 /// (identified by \p QueryingAA) into \p S.
528 template <typename AAType, typename StateType = typename AAType::StateType>
529 static void clampReturnedValueStates(
530     Attributor &A, const AAType &QueryingAA, StateType &S,
531     const IRPosition::CallBaseContext *CBContext = nullptr) {
532   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
533                     << QueryingAA << " into " << S << "\n");
534 
535   assert((QueryingAA.getIRPosition().getPositionKind() ==
536               IRPosition::IRP_RETURNED ||
537           QueryingAA.getIRPosition().getPositionKind() ==
538               IRPosition::IRP_CALL_SITE_RETURNED) &&
539          "Can only clamp returned value states for a function returned or call "
540          "site returned position!");
541 
542   // Use an optional state as there might not be any return values and we want
543   // to join (IntegerState::operator&) the state of all there are.
544   Optional<StateType> T;
545 
546   // Callback for each possibly returned value.
547   auto CheckReturnValue = [&](Value &RV) -> bool {
548     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
549     const AAType &AA =
550         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
551     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
552                       << " @ " << RVPos << "\n");
553     const StateType &AAS = AA.getState();
554     if (!T)
555       T = StateType::getBestState(AAS);
556     *T &= AAS;
557     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
558                       << "\n");
559     return T->isValidState();
560   };
561 
562   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
563     S.indicatePessimisticFixpoint();
564   else if (T)
565     S ^= *T;
566 }
567 
568 namespace {
569 /// Helper class for generic deduction: return value -> returned position.
570 template <typename AAType, typename BaseType,
571           typename StateType = typename BaseType::StateType,
572           bool PropagateCallBaseContext = false>
573 struct AAReturnedFromReturnedValues : public BaseType {
574   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
575       : BaseType(IRP, A) {}
576 
577   /// See AbstractAttribute::updateImpl(...).
578   ChangeStatus updateImpl(Attributor &A) override {
579     StateType S(StateType::getBestState(this->getState()));
580     clampReturnedValueStates<AAType, StateType>(
581         A, *this, S,
582         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
583     // TODO: If we know we visited all returned values, thus no are assumed
584     // dead, we can take the known information from the state T.
585     return clampStateAndIndicateChange<StateType>(this->getState(), S);
586   }
587 };
588 
589 /// Clamp the information known at all call sites for a given argument
590 /// (identified by \p QueryingAA) into \p S.
591 template <typename AAType, typename StateType = typename AAType::StateType>
592 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
593                                         StateType &S) {
594   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
595                     << QueryingAA << " into " << S << "\n");
596 
597   assert(QueryingAA.getIRPosition().getPositionKind() ==
598              IRPosition::IRP_ARGUMENT &&
599          "Can only clamp call site argument states for an argument position!");
600 
601   // Use an optional state as there might not be any return values and we want
602   // to join (IntegerState::operator&) the state of all there are.
603   Optional<StateType> T;
604 
605   // The argument number which is also the call site argument number.
606   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
607 
608   auto CallSiteCheck = [&](AbstractCallSite ACS) {
609     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
610     // Check if a coresponding argument was found or if it is on not associated
611     // (which can happen for callback calls).
612     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
613       return false;
614 
615     const AAType &AA =
616         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
617     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
618                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
619     const StateType &AAS = AA.getState();
620     if (!T)
621       T = StateType::getBestState(AAS);
622     *T &= AAS;
623     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
624                       << "\n");
625     return T->isValidState();
626   };
627 
628   bool UsedAssumedInformation = false;
629   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
630                               UsedAssumedInformation))
631     S.indicatePessimisticFixpoint();
632   else if (T)
633     S ^= *T;
634 }
635 
636 /// This function is the bridge between argument position and the call base
637 /// context.
638 template <typename AAType, typename BaseType,
639           typename StateType = typename AAType::StateType>
640 bool getArgumentStateFromCallBaseContext(Attributor &A,
641                                          BaseType &QueryingAttribute,
642                                          IRPosition &Pos, StateType &State) {
643   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
644          "Expected an 'argument' position !");
645   const CallBase *CBContext = Pos.getCallBaseContext();
646   if (!CBContext)
647     return false;
648 
649   int ArgNo = Pos.getCallSiteArgNo();
650   assert(ArgNo >= 0 && "Invalid Arg No!");
651 
652   const auto &AA = A.getAAFor<AAType>(
653       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
654       DepClassTy::REQUIRED);
655   const StateType &CBArgumentState =
656       static_cast<const StateType &>(AA.getState());
657 
658   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
659                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
660                     << "\n");
661 
662   // NOTE: If we want to do call site grouping it should happen here.
663   State ^= CBArgumentState;
664   return true;
665 }
666 
667 /// Helper class for generic deduction: call site argument -> argument position.
668 template <typename AAType, typename BaseType,
669           typename StateType = typename AAType::StateType,
670           bool BridgeCallBaseContext = false>
671 struct AAArgumentFromCallSiteArguments : public BaseType {
672   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
673       : BaseType(IRP, A) {}
674 
675   /// See AbstractAttribute::updateImpl(...).
676   ChangeStatus updateImpl(Attributor &A) override {
677     StateType S = StateType::getBestState(this->getState());
678 
679     if (BridgeCallBaseContext) {
680       bool Success =
681           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
682               A, *this, this->getIRPosition(), S);
683       if (Success)
684         return clampStateAndIndicateChange<StateType>(this->getState(), S);
685     }
686     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
687 
688     // TODO: If we know we visited all incoming values, thus no are assumed
689     // dead, we can take the known information from the state T.
690     return clampStateAndIndicateChange<StateType>(this->getState(), S);
691   }
692 };
693 
694 /// Helper class for generic replication: function returned -> cs returned.
695 template <typename AAType, typename BaseType,
696           typename StateType = typename BaseType::StateType,
697           bool IntroduceCallBaseContext = false>
698 struct AACallSiteReturnedFromReturned : public BaseType {
699   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
700       : BaseType(IRP, A) {}
701 
702   /// See AbstractAttribute::updateImpl(...).
703   ChangeStatus updateImpl(Attributor &A) override {
704     assert(this->getIRPosition().getPositionKind() ==
705                IRPosition::IRP_CALL_SITE_RETURNED &&
706            "Can only wrap function returned positions for call site returned "
707            "positions!");
708     auto &S = this->getState();
709 
710     const Function *AssociatedFunction =
711         this->getIRPosition().getAssociatedFunction();
712     if (!AssociatedFunction)
713       return S.indicatePessimisticFixpoint();
714 
715     CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
716     if (IntroduceCallBaseContext)
717       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
718                         << CBContext << "\n");
719 
720     IRPosition FnPos = IRPosition::returned(
721         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
722     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
723     return clampStateAndIndicateChange(S, AA.getState());
724   }
725 };
726 
727 /// Helper function to accumulate uses.
728 template <class AAType, typename StateType = typename AAType::StateType>
729 static void followUsesInContext(AAType &AA, Attributor &A,
730                                 MustBeExecutedContextExplorer &Explorer,
731                                 const Instruction *CtxI,
732                                 SetVector<const Use *> &Uses,
733                                 StateType &State) {
734   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
735   for (unsigned u = 0; u < Uses.size(); ++u) {
736     const Use *U = Uses[u];
737     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
738       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
739       if (Found && AA.followUseInMBEC(A, U, UserI, State))
740         for (const Use &Us : UserI->uses())
741           Uses.insert(&Us);
742     }
743   }
744 }
745 
746 /// Use the must-be-executed-context around \p I to add information into \p S.
747 /// The AAType class is required to have `followUseInMBEC` method with the
748 /// following signature and behaviour:
749 ///
750 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
751 /// U - Underlying use.
752 /// I - The user of the \p U.
753 /// Returns true if the value should be tracked transitively.
754 ///
755 template <class AAType, typename StateType = typename AAType::StateType>
756 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
757                              Instruction &CtxI) {
758 
759   // Container for (transitive) uses of the associated value.
760   SetVector<const Use *> Uses;
761   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
762     Uses.insert(&U);
763 
764   MustBeExecutedContextExplorer &Explorer =
765       A.getInfoCache().getMustBeExecutedContextExplorer();
766 
767   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
768 
769   if (S.isAtFixpoint())
770     return;
771 
772   SmallVector<const BranchInst *, 4> BrInsts;
773   auto Pred = [&](const Instruction *I) {
774     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
775       if (Br->isConditional())
776         BrInsts.push_back(Br);
777     return true;
778   };
779 
780   // Here, accumulate conditional branch instructions in the context. We
781   // explore the child paths and collect the known states. The disjunction of
782   // those states can be merged to its own state. Let ParentState_i be a state
783   // to indicate the known information for an i-th branch instruction in the
784   // context. ChildStates are created for its successors respectively.
785   //
786   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
787   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
788   //      ...
789   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
790   //
791   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
792   //
793   // FIXME: Currently, recursive branches are not handled. For example, we
794   // can't deduce that ptr must be dereferenced in below function.
795   //
796   // void f(int a, int c, int *ptr) {
797   //    if(a)
798   //      if (b) {
799   //        *ptr = 0;
800   //      } else {
801   //        *ptr = 1;
802   //      }
803   //    else {
804   //      if (b) {
805   //        *ptr = 0;
806   //      } else {
807   //        *ptr = 1;
808   //      }
809   //    }
810   // }
811 
812   Explorer.checkForAllContext(&CtxI, Pred);
813   for (const BranchInst *Br : BrInsts) {
814     StateType ParentState;
815 
816     // The known state of the parent state is a conjunction of children's
817     // known states so it is initialized with a best state.
818     ParentState.indicateOptimisticFixpoint();
819 
820     for (const BasicBlock *BB : Br->successors()) {
821       StateType ChildState;
822 
823       size_t BeforeSize = Uses.size();
824       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
825 
826       // Erase uses which only appear in the child.
827       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
828         It = Uses.erase(It);
829 
830       ParentState &= ChildState;
831     }
832 
833     // Use only known state.
834     S += ParentState;
835   }
836 }
837 } // namespace
838 
839 /// ------------------------ PointerInfo ---------------------------------------
840 
841 namespace llvm {
842 namespace AA {
843 namespace PointerInfo {
844 
845 struct State;
846 
847 } // namespace PointerInfo
848 } // namespace AA
849 
850 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
851 template <>
852 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
853   using Access = AAPointerInfo::Access;
854   static inline Access getEmptyKey();
855   static inline Access getTombstoneKey();
856   static unsigned getHashValue(const Access &A);
857   static bool isEqual(const Access &LHS, const Access &RHS);
858 };
859 
860 /// Helper that allows OffsetAndSize as a key in a DenseMap.
861 template <>
862 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
863     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
864 
865 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
866 /// but the instruction
867 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
868   using Base = DenseMapInfo<Instruction *>;
869   using Access = AAPointerInfo::Access;
870   static inline Access getEmptyKey();
871   static inline Access getTombstoneKey();
872   static unsigned getHashValue(const Access &A);
873   static bool isEqual(const Access &LHS, const Access &RHS);
874 };
875 
876 } // namespace llvm
877 
878 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
879 struct AA::PointerInfo::State : public AbstractState {
880 
881   ~State() {
882     // We do not delete the Accesses objects but need to destroy them still.
883     for (auto &It : AccessBins)
884       It.second->~Accesses();
885   }
886 
887   /// Return the best possible representable state.
888   static State getBestState(const State &SIS) { return State(); }
889 
890   /// Return the worst possible representable state.
891   static State getWorstState(const State &SIS) {
892     State R;
893     R.indicatePessimisticFixpoint();
894     return R;
895   }
896 
897   State() = default;
898   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
899     SIS.AccessBins.clear();
900   }
901 
902   const State &getAssumed() const { return *this; }
903 
904   /// See AbstractState::isValidState().
905   bool isValidState() const override { return BS.isValidState(); }
906 
907   /// See AbstractState::isAtFixpoint().
908   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
909 
910   /// See AbstractState::indicateOptimisticFixpoint().
911   ChangeStatus indicateOptimisticFixpoint() override {
912     BS.indicateOptimisticFixpoint();
913     return ChangeStatus::UNCHANGED;
914   }
915 
916   /// See AbstractState::indicatePessimisticFixpoint().
917   ChangeStatus indicatePessimisticFixpoint() override {
918     BS.indicatePessimisticFixpoint();
919     return ChangeStatus::CHANGED;
920   }
921 
922   State &operator=(const State &R) {
923     if (this == &R)
924       return *this;
925     BS = R.BS;
926     AccessBins = R.AccessBins;
927     return *this;
928   }
929 
930   State &operator=(State &&R) {
931     if (this == &R)
932       return *this;
933     std::swap(BS, R.BS);
934     std::swap(AccessBins, R.AccessBins);
935     return *this;
936   }
937 
938   bool operator==(const State &R) const {
939     if (BS != R.BS)
940       return false;
941     if (AccessBins.size() != R.AccessBins.size())
942       return false;
943     auto It = begin(), RIt = R.begin(), E = end();
944     while (It != E) {
945       if (It->getFirst() != RIt->getFirst())
946         return false;
947       auto &Accs = It->getSecond();
948       auto &RAccs = RIt->getSecond();
949       if (Accs->size() != RAccs->size())
950         return false;
951       for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
952         if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
953           return false;
954       ++It;
955       ++RIt;
956     }
957     return true;
958   }
959   bool operator!=(const State &R) const { return !(*this == R); }
960 
961   /// We store accesses in a set with the instruction as key.
962   struct Accesses {
963     SmallVector<AAPointerInfo::Access, 4> Accesses;
964     DenseMap<const Instruction *, unsigned> Map;
965 
966     unsigned size() const { return Accesses.size(); }
967 
968     using vec_iterator = decltype(Accesses)::iterator;
969     vec_iterator begin() { return Accesses.begin(); }
970     vec_iterator end() { return Accesses.end(); }
971 
972     using iterator = decltype(Map)::const_iterator;
973     iterator find(AAPointerInfo::Access &Acc) {
974       return Map.find(Acc.getRemoteInst());
975     }
976     iterator find_end() { return Map.end(); }
977 
978     AAPointerInfo::Access &get(iterator &It) {
979       return Accesses[It->getSecond()];
980     }
981 
982     void insert(AAPointerInfo::Access &Acc) {
983       Map[Acc.getRemoteInst()] = Accesses.size();
984       Accesses.push_back(Acc);
985     }
986   };
987 
988   /// We store all accesses in bins denoted by their offset and size.
989   using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
990 
991   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
992   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
993 
994 protected:
995   /// The bins with all the accesses for the associated pointer.
996   AccessBinsTy AccessBins;
997 
998   /// Add a new access to the state at offset \p Offset and with size \p Size.
999   /// The access is associated with \p I, writes \p Content (if anything), and
1000   /// is of kind \p Kind.
1001   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
1002   ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
1003                          Instruction &I, Optional<Value *> Content,
1004                          AAPointerInfo::AccessKind Kind, Type *Ty,
1005                          Instruction *RemoteI = nullptr,
1006                          Accesses *BinPtr = nullptr) {
1007     AAPointerInfo::OffsetAndSize Key{Offset, Size};
1008     Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
1009     if (!Bin)
1010       Bin = new (A.Allocator) Accesses;
1011     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1012     // Check if we have an access for this instruction in this bin, if not,
1013     // simply add it.
1014     auto It = Bin->find(Acc);
1015     if (It == Bin->find_end()) {
1016       Bin->insert(Acc);
1017       return ChangeStatus::CHANGED;
1018     }
1019     // If the existing access is the same as then new one, nothing changed.
1020     AAPointerInfo::Access &Current = Bin->get(It);
1021     AAPointerInfo::Access Before = Current;
1022     // The new one will be combined with the existing one.
1023     Current &= Acc;
1024     return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1025   }
1026 
1027   /// See AAPointerInfo::forallInterferingAccesses.
1028   bool forallInterferingAccesses(
1029       AAPointerInfo::OffsetAndSize OAS,
1030       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1031     if (!isValidState())
1032       return false;
1033 
1034     for (auto &It : AccessBins) {
1035       AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
1036       if (!OAS.mayOverlap(ItOAS))
1037         continue;
1038       bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1039       for (auto &Access : *It.getSecond())
1040         if (!CB(Access, IsExact))
1041           return false;
1042     }
1043     return true;
1044   }
1045 
1046   /// See AAPointerInfo::forallInterferingAccesses.
1047   bool forallInterferingAccesses(
1048       Instruction &I,
1049       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1050     if (!isValidState())
1051       return false;
1052 
1053     // First find the offset and size of I.
1054     AAPointerInfo::OffsetAndSize OAS(-1, -1);
1055     for (auto &It : AccessBins) {
1056       for (auto &Access : *It.getSecond()) {
1057         if (Access.getRemoteInst() == &I) {
1058           OAS = It.getFirst();
1059           break;
1060         }
1061       }
1062       if (OAS.getSize() != -1)
1063         break;
1064     }
1065     // No access for I was found, we are done.
1066     if (OAS.getSize() == -1)
1067       return true;
1068 
1069     // Now that we have an offset and size, find all overlapping ones and use
1070     // the callback on the accesses.
1071     return forallInterferingAccesses(OAS, CB);
1072   }
1073 
1074 private:
1075   /// State to track fixpoint and validity.
1076   BooleanState BS;
1077 };
1078 
1079 namespace {
1080 struct AAPointerInfoImpl
1081     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1082   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1083   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1084 
1085   /// See AbstractAttribute::initialize(...).
1086   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1087 
1088   /// See AbstractAttribute::getAsStr().
1089   const std::string getAsStr() const override {
1090     return std::string("PointerInfo ") +
1091            (isValidState() ? (std::string("#") +
1092                               std::to_string(AccessBins.size()) + " bins")
1093                            : "<invalid>");
1094   }
1095 
1096   /// See AbstractAttribute::manifest(...).
1097   ChangeStatus manifest(Attributor &A) override {
1098     return AAPointerInfo::manifest(A);
1099   }
1100 
1101   bool forallInterferingAccesses(
1102       OffsetAndSize OAS,
1103       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1104       const override {
1105     return State::forallInterferingAccesses(OAS, CB);
1106   }
1107   bool forallInterferingAccesses(
1108       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1109       function_ref<bool(const Access &, bool)> UserCB) const override {
1110     SmallPtrSet<const Access *, 8> DominatingWrites;
1111     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1112 
1113     Function &Scope = *I.getFunction();
1114     const auto &NoSyncAA = A.getAAFor<AANoSync>(
1115         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1116     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1117         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
1118     const bool NoSync = NoSyncAA.isAssumedNoSync();
1119 
1120     // Helper to determine if we need to consider threading, which we cannot
1121     // right now. However, if the function is (assumed) nosync or the thread
1122     // executing all instructions is the main thread only we can ignore
1123     // threading.
1124     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
1125       if (NoSync)
1126         return true;
1127       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
1128         return true;
1129       return false;
1130     };
1131 
1132     // Helper to determine if the access is executed by the same thread as the
1133     // load, for now it is sufficient to avoid any potential threading effects
1134     // as we cannot deal with them anyway.
1135     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
1136       return CanIgnoreThreading(*Acc.getLocalInst());
1137     };
1138 
1139     // TODO: Use inter-procedural reachability and dominance.
1140     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1141         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1142 
1143     const bool FindInterferingWrites = I.mayReadFromMemory();
1144     const bool FindInterferingReads = I.mayWriteToMemory();
1145     const bool UseDominanceReasoning = FindInterferingWrites;
1146     const bool CanUseCFGResoning = CanIgnoreThreading(I);
1147     InformationCache &InfoCache = A.getInfoCache();
1148     const DominatorTree *DT =
1149         NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1150             ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1151                   Scope)
1152             : nullptr;
1153 
1154     enum GPUAddressSpace : unsigned {
1155       Generic = 0,
1156       Global = 1,
1157       Shared = 3,
1158       Constant = 4,
1159       Local = 5,
1160     };
1161 
1162     // Helper to check if a value has "kernel lifetime", that is it will not
1163     // outlive a GPU kernel. This is true for shared, constant, and local
1164     // globals on AMD and NVIDIA GPUs.
1165     auto HasKernelLifetime = [&](Value *V, Module &M) {
1166       Triple T(M.getTargetTriple());
1167       if (!(T.isAMDGPU() || T.isNVPTX()))
1168         return false;
1169       switch (V->getType()->getPointerAddressSpace()) {
1170       case GPUAddressSpace::Shared:
1171       case GPUAddressSpace::Constant:
1172       case GPUAddressSpace::Local:
1173         return true;
1174       default:
1175         return false;
1176       };
1177     };
1178 
1179     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1180     // to determine if we should look at reachability from the callee. For
1181     // certain pointers we know the lifetime and we do not have to step into the
1182     // callee to determine reachability as the pointer would be dead in the
1183     // callee. See the conditional initialization below.
1184     std::function<bool(const Function &)> IsLiveInCalleeCB;
1185 
1186     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1187       // If the alloca containing function is not recursive the alloca
1188       // must be dead in the callee.
1189       const Function *AIFn = AI->getFunction();
1190       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1191           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1192       if (NoRecurseAA.isAssumedNoRecurse()) {
1193         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1194       }
1195     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1196       // If the global has kernel lifetime we can stop if we reach a kernel
1197       // as it is "dead" in the (unknown) callees.
1198       if (HasKernelLifetime(GV, *GV->getParent()))
1199         IsLiveInCalleeCB = [](const Function &Fn) {
1200           return !Fn.hasFnAttribute("kernel");
1201         };
1202     }
1203 
1204     auto AccessCB = [&](const Access &Acc, bool Exact) {
1205       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1206           (!FindInterferingReads || !Acc.isRead()))
1207         return true;
1208 
1209       // For now we only filter accesses based on CFG reasoning which does not
1210       // work yet if we have threading effects, or the access is complicated.
1211       if (CanUseCFGResoning) {
1212         if ((!Acc.isWrite() ||
1213              !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1214                                          IsLiveInCalleeCB)) &&
1215             (!Acc.isRead() ||
1216              !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1217                                          IsLiveInCalleeCB)))
1218           return true;
1219         if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1220             IsSameThreadAsLoad(Acc)) {
1221           if (DT->dominates(Acc.getLocalInst(), &I))
1222             DominatingWrites.insert(&Acc);
1223         }
1224       }
1225 
1226       InterferingAccesses.push_back({&Acc, Exact});
1227       return true;
1228     };
1229     if (!State::forallInterferingAccesses(I, AccessCB))
1230       return false;
1231 
1232     // If we cannot use CFG reasoning we only filter the non-write accesses
1233     // and are done here.
1234     if (!CanUseCFGResoning) {
1235       for (auto &It : InterferingAccesses)
1236         if (!UserCB(*It.first, It.second))
1237           return false;
1238       return true;
1239     }
1240 
1241     // Helper to determine if we can skip a specific write access. This is in
1242     // the worst case quadratic as we are looking for another write that will
1243     // hide the effect of this one.
1244     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1245       if (!IsSameThreadAsLoad(Acc))
1246         return false;
1247       if (!DominatingWrites.count(&Acc))
1248         return false;
1249       for (const Access *DomAcc : DominatingWrites) {
1250         assert(Acc.getLocalInst()->getFunction() ==
1251                    DomAcc->getLocalInst()->getFunction() &&
1252                "Expected dominating writes to be in the same function!");
1253 
1254         if (DomAcc != &Acc &&
1255             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1256           return true;
1257         }
1258       }
1259       return false;
1260     };
1261 
1262     // Run the user callback on all accesses we cannot skip and return if that
1263     // succeeded for all or not.
1264     unsigned NumInterferingAccesses = InterferingAccesses.size();
1265     for (auto &It : InterferingAccesses) {
1266       if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1267           !CanSkipAccess(*It.first, It.second)) {
1268         if (!UserCB(*It.first, It.second))
1269           return false;
1270       }
1271     }
1272     return true;
1273   }
1274 
1275   ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1276                                     int64_t Offset, CallBase &CB,
1277                                     bool FromCallee = false) {
1278     using namespace AA::PointerInfo;
1279     if (!OtherAA.getState().isValidState() || !isValidState())
1280       return indicatePessimisticFixpoint();
1281 
1282     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1283     bool IsByval =
1284         FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1285 
1286     // Combine the accesses bin by bin.
1287     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1288     for (auto &It : OtherAAImpl.getState()) {
1289       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1290       if (Offset != OffsetAndSize::Unknown)
1291         OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
1292       Accesses *Bin = AccessBins.lookup(OAS);
1293       for (const AAPointerInfo::Access &RAcc : *It.second) {
1294         if (IsByval && !RAcc.isRead())
1295           continue;
1296         bool UsedAssumedInformation = false;
1297         AccessKind AK = RAcc.getKind();
1298         Optional<Value *> Content = RAcc.getContent();
1299         if (FromCallee) {
1300           Content = A.translateArgumentToCallSiteContent(
1301               RAcc.getContent(), CB, *this, UsedAssumedInformation);
1302           AK = AccessKind(
1303               AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
1304         }
1305         Changed =
1306             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1307                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1308       }
1309     }
1310     return Changed;
1311   }
1312 
1313   /// Statistic tracking for all AAPointerInfo implementations.
1314   /// See AbstractAttribute::trackStatistics().
1315   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1316 };
1317 
1318 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1319   using AccessKind = AAPointerInfo::AccessKind;
1320   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1321       : AAPointerInfoImpl(IRP, A) {}
1322 
1323   /// See AbstractAttribute::initialize(...).
1324   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1325 
1326   /// Deal with an access and signal if it was handled successfully.
1327   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1328                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1329                     ChangeStatus &Changed, Type *Ty,
1330                     int64_t Size = OffsetAndSize::Unknown) {
1331     using namespace AA::PointerInfo;
1332     // No need to find a size if one is given or the offset is unknown.
1333     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1334         Ty) {
1335       const DataLayout &DL = A.getDataLayout();
1336       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1337       if (!AccessSize.isScalable())
1338         Size = AccessSize.getFixedSize();
1339     }
1340     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1341     return true;
1342   };
1343 
1344   /// Helper struct, will support ranges eventually.
1345   struct OffsetInfo {
1346     int64_t Offset = OffsetAndSize::Unknown;
1347 
1348     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1349   };
1350 
1351   /// See AbstractAttribute::updateImpl(...).
1352   ChangeStatus updateImpl(Attributor &A) override {
1353     using namespace AA::PointerInfo;
1354     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1355     Value &AssociatedValue = getAssociatedValue();
1356 
1357     const DataLayout &DL = A.getDataLayout();
1358     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1359     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1360 
1361     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1362                                      bool &Follow) {
1363       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1364       UsrOI = PtrOI;
1365       Follow = true;
1366       return true;
1367     };
1368 
1369     const auto *TLI = getAnchorScope()
1370                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1371                                 *getAnchorScope())
1372                           : nullptr;
1373     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1374       Value *CurPtr = U.get();
1375       User *Usr = U.getUser();
1376       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1377                         << *Usr << "\n");
1378       assert(OffsetInfoMap.count(CurPtr) &&
1379              "The current pointer offset should have been seeded!");
1380 
1381       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1382         if (CE->isCast())
1383           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1384         if (CE->isCompare())
1385           return true;
1386         if (!isa<GEPOperator>(CE)) {
1387           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1388                             << "\n");
1389           return false;
1390         }
1391       }
1392       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1393         // Note the order here, the Usr access might change the map, CurPtr is
1394         // already in it though.
1395         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1396         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1397         UsrOI = PtrOI;
1398 
1399         // TODO: Use range information.
1400         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1401             !GEP->hasAllConstantIndices()) {
1402           UsrOI.Offset = OffsetAndSize::Unknown;
1403           Follow = true;
1404           return true;
1405         }
1406 
1407         SmallVector<Value *, 8> Indices;
1408         for (Use &Idx : GEP->indices()) {
1409           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1410             Indices.push_back(CIdx);
1411             continue;
1412           }
1413 
1414           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1415                             << " : " << *Idx << "\n");
1416           return false;
1417         }
1418         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1419                                           GEP->getSourceElementType(), Indices);
1420         Follow = true;
1421         return true;
1422       }
1423       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1424         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1425 
1426       // For PHIs we need to take care of the recurrence explicitly as the value
1427       // might change while we iterate through a loop. For now, we give up if
1428       // the PHI is not invariant.
1429       if (isa<PHINode>(Usr)) {
1430         // Note the order here, the Usr access might change the map, CurPtr is
1431         // already in it though.
1432         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1433         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1434         // Check if the PHI is invariant (so far).
1435         if (UsrOI == PtrOI)
1436           return true;
1437 
1438         // Check if the PHI operand has already an unknown offset as we can't
1439         // improve on that anymore.
1440         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1441           UsrOI = PtrOI;
1442           Follow = true;
1443           return true;
1444         }
1445 
1446         // Check if the PHI operand is not dependent on the PHI itself.
1447         // TODO: This is not great as we look at the pointer type. However, it
1448         // is unclear where the Offset size comes from with typeless pointers.
1449         APInt Offset(
1450             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1451             0);
1452         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1453                                     DL, Offset, /* AllowNonInbounds */ true)) {
1454           if (Offset != PtrOI.Offset) {
1455             LLVM_DEBUG(dbgs()
1456                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1457                        << *CurPtr << " in " << *Usr << "\n");
1458             return false;
1459           }
1460           return HandlePassthroughUser(Usr, PtrOI, Follow);
1461         }
1462 
1463         // TODO: Approximate in case we know the direction of the recurrence.
1464         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1465                           << *CurPtr << " in " << *Usr << "\n");
1466         UsrOI = PtrOI;
1467         UsrOI.Offset = OffsetAndSize::Unknown;
1468         Follow = true;
1469         return true;
1470       }
1471 
1472       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1473         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1474                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1475                             Changed, LoadI->getType());
1476       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1477         if (StoreI->getValueOperand() == CurPtr) {
1478           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1479                             << *StoreI << "\n");
1480           return false;
1481         }
1482         bool UsedAssumedInformation = false;
1483         Optional<Value *> Content = A.getAssumedSimplified(
1484             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1485         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1486                             OffsetInfoMap[CurPtr].Offset, Changed,
1487                             StoreI->getValueOperand()->getType());
1488       }
1489       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1490         if (CB->isLifetimeStartOrEnd())
1491           return true;
1492         if (TLI && isFreeCall(CB, TLI))
1493           return true;
1494         if (CB->isArgOperand(&U)) {
1495           unsigned ArgNo = CB->getArgOperandNo(&U);
1496           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1497               *this, IRPosition::callsite_argument(*CB, ArgNo),
1498               DepClassTy::REQUIRED);
1499           Changed = translateAndAddState(A, CSArgPI,
1500                                          OffsetInfoMap[CurPtr].Offset, *CB) |
1501                     Changed;
1502           return true;
1503         }
1504         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1505                           << "\n");
1506         // TODO: Allow some call uses
1507         return false;
1508       }
1509 
1510       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1511       return false;
1512     };
1513     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1514       if (OffsetInfoMap.count(NewU))
1515         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1516       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1517       return true;
1518     };
1519     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1520                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1521                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
1522       return indicatePessimisticFixpoint();
1523 
1524     LLVM_DEBUG({
1525       dbgs() << "Accesses by bin after update:\n";
1526       for (auto &It : AccessBins) {
1527         dbgs() << "[" << It.first.getOffset() << "-"
1528                << It.first.getOffset() + It.first.getSize()
1529                << "] : " << It.getSecond()->size() << "\n";
1530         for (auto &Acc : *It.getSecond()) {
1531           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1532                  << "\n";
1533           if (Acc.getLocalInst() != Acc.getRemoteInst())
1534             dbgs() << "     -->                         "
1535                    << *Acc.getRemoteInst() << "\n";
1536           if (!Acc.isWrittenValueYetUndetermined()) {
1537             if (Acc.getWrittenValue())
1538               dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
1539             else
1540               dbgs() << "       - c: <unknown>\n";
1541           }
1542         }
1543       }
1544     });
1545 
1546     return Changed;
1547   }
1548 
1549   /// See AbstractAttribute::trackStatistics()
1550   void trackStatistics() const override {
1551     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1552   }
1553 };
1554 
1555 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1556   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1557       : AAPointerInfoImpl(IRP, A) {}
1558 
1559   /// See AbstractAttribute::updateImpl(...).
1560   ChangeStatus updateImpl(Attributor &A) override {
1561     return indicatePessimisticFixpoint();
1562   }
1563 
1564   /// See AbstractAttribute::trackStatistics()
1565   void trackStatistics() const override {
1566     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1567   }
1568 };
1569 
1570 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1571   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1572       : AAPointerInfoFloating(IRP, A) {}
1573 
1574   /// See AbstractAttribute::initialize(...).
1575   void initialize(Attributor &A) override {
1576     AAPointerInfoFloating::initialize(A);
1577     if (getAnchorScope()->isDeclaration())
1578       indicatePessimisticFixpoint();
1579   }
1580 
1581   /// See AbstractAttribute::trackStatistics()
1582   void trackStatistics() const override {
1583     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1584   }
1585 };
1586 
1587 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1588   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1589       : AAPointerInfoFloating(IRP, A) {}
1590 
1591   /// See AbstractAttribute::updateImpl(...).
1592   ChangeStatus updateImpl(Attributor &A) override {
1593     using namespace AA::PointerInfo;
1594     // We handle memory intrinsics explicitly, at least the first (=
1595     // destination) and second (=source) arguments as we know how they are
1596     // accessed.
1597     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1598       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1599       int64_t LengthVal = OffsetAndSize::Unknown;
1600       if (Length)
1601         LengthVal = Length->getSExtValue();
1602       Value &Ptr = getAssociatedValue();
1603       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1604       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1605       if (ArgNo == 0) {
1606         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1607                      nullptr, LengthVal);
1608       } else if (ArgNo == 1) {
1609         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1610                      nullptr, LengthVal);
1611       } else {
1612         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1613                           << *MI << "\n");
1614         return indicatePessimisticFixpoint();
1615       }
1616       return Changed;
1617     }
1618 
1619     // TODO: Once we have call site specific value information we can provide
1620     //       call site specific liveness information and then it makes
1621     //       sense to specialize attributes for call sites arguments instead of
1622     //       redirecting requests to the callee argument.
1623     Argument *Arg = getAssociatedArgument();
1624     if (!Arg)
1625       return indicatePessimisticFixpoint();
1626     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1627     auto &ArgAA =
1628         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1629     return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1630                                 /* FromCallee */ true);
1631   }
1632 
1633   /// See AbstractAttribute::trackStatistics()
1634   void trackStatistics() const override {
1635     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1636   }
1637 };
1638 
1639 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1640   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1641       : AAPointerInfoFloating(IRP, A) {}
1642 
1643   /// See AbstractAttribute::trackStatistics()
1644   void trackStatistics() const override {
1645     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1646   }
1647 };
1648 } // namespace
1649 
1650 /// -----------------------NoUnwind Function Attribute--------------------------
1651 
1652 namespace {
1653 struct AANoUnwindImpl : AANoUnwind {
1654   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1655 
1656   const std::string getAsStr() const override {
1657     return getAssumed() ? "nounwind" : "may-unwind";
1658   }
1659 
1660   /// See AbstractAttribute::updateImpl(...).
1661   ChangeStatus updateImpl(Attributor &A) override {
1662     auto Opcodes = {
1663         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1664         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1665         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1666 
1667     auto CheckForNoUnwind = [&](Instruction &I) {
1668       if (!I.mayThrow())
1669         return true;
1670 
1671       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1672         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1673             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1674         return NoUnwindAA.isAssumedNoUnwind();
1675       }
1676       return false;
1677     };
1678 
1679     bool UsedAssumedInformation = false;
1680     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1681                                    UsedAssumedInformation))
1682       return indicatePessimisticFixpoint();
1683 
1684     return ChangeStatus::UNCHANGED;
1685   }
1686 };
1687 
1688 struct AANoUnwindFunction final : public AANoUnwindImpl {
1689   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1690       : AANoUnwindImpl(IRP, A) {}
1691 
1692   /// See AbstractAttribute::trackStatistics()
1693   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1694 };
1695 
1696 /// NoUnwind attribute deduction for a call sites.
1697 struct AANoUnwindCallSite final : AANoUnwindImpl {
1698   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1699       : AANoUnwindImpl(IRP, A) {}
1700 
1701   /// See AbstractAttribute::initialize(...).
1702   void initialize(Attributor &A) override {
1703     AANoUnwindImpl::initialize(A);
1704     Function *F = getAssociatedFunction();
1705     if (!F || F->isDeclaration())
1706       indicatePessimisticFixpoint();
1707   }
1708 
1709   /// See AbstractAttribute::updateImpl(...).
1710   ChangeStatus updateImpl(Attributor &A) override {
1711     // TODO: Once we have call site specific value information we can provide
1712     //       call site specific liveness information and then it makes
1713     //       sense to specialize attributes for call sites arguments instead of
1714     //       redirecting requests to the callee argument.
1715     Function *F = getAssociatedFunction();
1716     const IRPosition &FnPos = IRPosition::function(*F);
1717     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1718     return clampStateAndIndicateChange(getState(), FnAA.getState());
1719   }
1720 
1721   /// See AbstractAttribute::trackStatistics()
1722   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1723 };
1724 } // namespace
1725 
1726 /// --------------------- Function Return Values -------------------------------
1727 
1728 namespace {
1729 /// "Attribute" that collects all potential returned values and the return
1730 /// instructions that they arise from.
1731 ///
1732 /// If there is a unique returned value R, the manifest method will:
1733 ///   - mark R with the "returned" attribute, if R is an argument.
1734 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1735 
1736   /// Mapping of values potentially returned by the associated function to the
1737   /// return instructions that might return them.
1738   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1739 
1740   /// State flags
1741   ///
1742   ///{
1743   bool IsFixed = false;
1744   bool IsValidState = true;
1745   ///}
1746 
1747 public:
1748   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1749       : AAReturnedValues(IRP, A) {}
1750 
1751   /// See AbstractAttribute::initialize(...).
1752   void initialize(Attributor &A) override {
1753     // Reset the state.
1754     IsFixed = false;
1755     IsValidState = true;
1756     ReturnedValues.clear();
1757 
1758     Function *F = getAssociatedFunction();
1759     if (!F || F->isDeclaration()) {
1760       indicatePessimisticFixpoint();
1761       return;
1762     }
1763     assert(!F->getReturnType()->isVoidTy() &&
1764            "Did not expect a void return type!");
1765 
1766     // The map from instruction opcodes to those instructions in the function.
1767     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1768 
1769     // Look through all arguments, if one is marked as returned we are done.
1770     for (Argument &Arg : F->args()) {
1771       if (Arg.hasReturnedAttr()) {
1772         auto &ReturnInstSet = ReturnedValues[&Arg];
1773         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1774           for (Instruction *RI : *Insts)
1775             ReturnInstSet.insert(cast<ReturnInst>(RI));
1776 
1777         indicateOptimisticFixpoint();
1778         return;
1779       }
1780     }
1781 
1782     if (!A.isFunctionIPOAmendable(*F))
1783       indicatePessimisticFixpoint();
1784   }
1785 
1786   /// See AbstractAttribute::manifest(...).
1787   ChangeStatus manifest(Attributor &A) override;
1788 
1789   /// See AbstractAttribute::getState(...).
1790   AbstractState &getState() override { return *this; }
1791 
1792   /// See AbstractAttribute::getState(...).
1793   const AbstractState &getState() const override { return *this; }
1794 
1795   /// See AbstractAttribute::updateImpl(Attributor &A).
1796   ChangeStatus updateImpl(Attributor &A) override;
1797 
1798   llvm::iterator_range<iterator> returned_values() override {
1799     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1800   }
1801 
1802   llvm::iterator_range<const_iterator> returned_values() const override {
1803     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1804   }
1805 
1806   /// Return the number of potential return values, -1 if unknown.
1807   size_t getNumReturnValues() const override {
1808     return isValidState() ? ReturnedValues.size() : -1;
1809   }
1810 
1811   /// Return an assumed unique return value if a single candidate is found. If
1812   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1813   /// Optional::NoneType.
1814   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1815 
1816   /// See AbstractState::checkForAllReturnedValues(...).
1817   bool checkForAllReturnedValuesAndReturnInsts(
1818       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1819       const override;
1820 
1821   /// Pretty print the attribute similar to the IR representation.
1822   const std::string getAsStr() const override;
1823 
1824   /// See AbstractState::isAtFixpoint().
1825   bool isAtFixpoint() const override { return IsFixed; }
1826 
1827   /// See AbstractState::isValidState().
1828   bool isValidState() const override { return IsValidState; }
1829 
1830   /// See AbstractState::indicateOptimisticFixpoint(...).
1831   ChangeStatus indicateOptimisticFixpoint() override {
1832     IsFixed = true;
1833     return ChangeStatus::UNCHANGED;
1834   }
1835 
1836   ChangeStatus indicatePessimisticFixpoint() override {
1837     IsFixed = true;
1838     IsValidState = false;
1839     return ChangeStatus::CHANGED;
1840   }
1841 };
1842 
1843 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1844   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1845 
1846   // Bookkeeping.
1847   assert(isValidState());
1848   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1849                   "Number of function with known return values");
1850 
1851   // Check if we have an assumed unique return value that we could manifest.
1852   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1853 
1854   if (!UniqueRV || !UniqueRV.getValue())
1855     return Changed;
1856 
1857   // Bookkeeping.
1858   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1859                   "Number of function with unique return");
1860   // If the assumed unique return value is an argument, annotate it.
1861   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1862     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1863             getAssociatedFunction()->getReturnType())) {
1864       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1865       Changed = IRAttribute::manifest(A);
1866     }
1867   }
1868   return Changed;
1869 }
1870 
1871 const std::string AAReturnedValuesImpl::getAsStr() const {
1872   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1873          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1874 }
1875 
1876 Optional<Value *>
1877 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1878   // If checkForAllReturnedValues provides a unique value, ignoring potential
1879   // undef values that can also be present, it is assumed to be the actual
1880   // return value and forwarded to the caller of this method. If there are
1881   // multiple, a nullptr is returned indicating there cannot be a unique
1882   // returned value.
1883   Optional<Value *> UniqueRV;
1884   Type *Ty = getAssociatedFunction()->getReturnType();
1885 
1886   auto Pred = [&](Value &RV) -> bool {
1887     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1888     return UniqueRV != Optional<Value *>(nullptr);
1889   };
1890 
1891   if (!A.checkForAllReturnedValues(Pred, *this))
1892     UniqueRV = nullptr;
1893 
1894   return UniqueRV;
1895 }
1896 
1897 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1898     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1899     const {
1900   if (!isValidState())
1901     return false;
1902 
1903   // Check all returned values but ignore call sites as long as we have not
1904   // encountered an overdefined one during an update.
1905   for (auto &It : ReturnedValues) {
1906     Value *RV = It.first;
1907     if (!Pred(*RV, It.second))
1908       return false;
1909   }
1910 
1911   return true;
1912 }
1913 
1914 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1915   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1916 
1917   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1918                            bool) -> bool {
1919     assert(AA::isValidInScope(V, Ret.getFunction()) &&
1920            "Assumed returned value should be valid in function scope!");
1921     if (ReturnedValues[&V].insert(&Ret))
1922       Changed = ChangeStatus::CHANGED;
1923     return true;
1924   };
1925 
1926   bool UsedAssumedInformation = false;
1927   auto ReturnInstCB = [&](Instruction &I) {
1928     ReturnInst &Ret = cast<ReturnInst>(I);
1929     return genericValueTraversal<ReturnInst>(
1930         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1931         &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1932         /* MaxValues */ 16,
1933         /* StripCB */ nullptr, AA::Intraprocedural);
1934   };
1935 
1936   // Discover returned values from all live returned instructions in the
1937   // associated function.
1938   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1939                                  UsedAssumedInformation))
1940     return indicatePessimisticFixpoint();
1941   return Changed;
1942 }
1943 
1944 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1945   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1946       : AAReturnedValuesImpl(IRP, A) {}
1947 
1948   /// See AbstractAttribute::trackStatistics()
1949   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1950 };
1951 
1952 /// Returned values information for a call sites.
1953 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1954   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1955       : AAReturnedValuesImpl(IRP, A) {}
1956 
1957   /// See AbstractAttribute::initialize(...).
1958   void initialize(Attributor &A) override {
1959     // TODO: Once we have call site specific value information we can provide
1960     //       call site specific liveness information and then it makes
1961     //       sense to specialize attributes for call sites instead of
1962     //       redirecting requests to the callee.
1963     llvm_unreachable("Abstract attributes for returned values are not "
1964                      "supported for call sites yet!");
1965   }
1966 
1967   /// See AbstractAttribute::updateImpl(...).
1968   ChangeStatus updateImpl(Attributor &A) override {
1969     return indicatePessimisticFixpoint();
1970   }
1971 
1972   /// See AbstractAttribute::trackStatistics()
1973   void trackStatistics() const override {}
1974 };
1975 } // namespace
1976 
1977 /// ------------------------ NoSync Function Attribute -------------------------
1978 
1979 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1980   if (!I->isAtomic())
1981     return false;
1982 
1983   if (auto *FI = dyn_cast<FenceInst>(I))
1984     // All legal orderings for fence are stronger than monotonic.
1985     return FI->getSyncScopeID() != SyncScope::SingleThread;
1986   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1987     // Unordered is not a legal ordering for cmpxchg.
1988     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1989             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1990   }
1991 
1992   AtomicOrdering Ordering;
1993   switch (I->getOpcode()) {
1994   case Instruction::AtomicRMW:
1995     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1996     break;
1997   case Instruction::Store:
1998     Ordering = cast<StoreInst>(I)->getOrdering();
1999     break;
2000   case Instruction::Load:
2001     Ordering = cast<LoadInst>(I)->getOrdering();
2002     break;
2003   default:
2004     llvm_unreachable(
2005         "New atomic operations need to be known in the attributor.");
2006   }
2007 
2008   return (Ordering != AtomicOrdering::Unordered &&
2009           Ordering != AtomicOrdering::Monotonic);
2010 }
2011 
2012 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2013 /// which would be nosync except that they have a volatile flag.  All other
2014 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2015 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2016   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2017     return !MI->isVolatile();
2018   return false;
2019 }
2020 
2021 namespace {
2022 struct AANoSyncImpl : AANoSync {
2023   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2024 
2025   const std::string getAsStr() const override {
2026     return getAssumed() ? "nosync" : "may-sync";
2027   }
2028 
2029   /// See AbstractAttribute::updateImpl(...).
2030   ChangeStatus updateImpl(Attributor &A) override;
2031 };
2032 
2033 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2034 
2035   auto CheckRWInstForNoSync = [&](Instruction &I) {
2036     return AA::isNoSyncInst(A, I, *this);
2037   };
2038 
2039   auto CheckForNoSync = [&](Instruction &I) {
2040     // At this point we handled all read/write effects and they are all
2041     // nosync, so they can be skipped.
2042     if (I.mayReadOrWriteMemory())
2043       return true;
2044 
2045     // non-convergent and readnone imply nosync.
2046     return !cast<CallBase>(I).isConvergent();
2047   };
2048 
2049   bool UsedAssumedInformation = false;
2050   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2051                                           UsedAssumedInformation) ||
2052       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2053                                          UsedAssumedInformation))
2054     return indicatePessimisticFixpoint();
2055 
2056   return ChangeStatus::UNCHANGED;
2057 }
2058 
2059 struct AANoSyncFunction final : public AANoSyncImpl {
2060   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2061       : AANoSyncImpl(IRP, A) {}
2062 
2063   /// See AbstractAttribute::trackStatistics()
2064   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2065 };
2066 
2067 /// NoSync attribute deduction for a call sites.
2068 struct AANoSyncCallSite final : AANoSyncImpl {
2069   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2070       : AANoSyncImpl(IRP, A) {}
2071 
2072   /// See AbstractAttribute::initialize(...).
2073   void initialize(Attributor &A) override {
2074     AANoSyncImpl::initialize(A);
2075     Function *F = getAssociatedFunction();
2076     if (!F || F->isDeclaration())
2077       indicatePessimisticFixpoint();
2078   }
2079 
2080   /// See AbstractAttribute::updateImpl(...).
2081   ChangeStatus updateImpl(Attributor &A) override {
2082     // TODO: Once we have call site specific value information we can provide
2083     //       call site specific liveness information and then it makes
2084     //       sense to specialize attributes for call sites arguments instead of
2085     //       redirecting requests to the callee argument.
2086     Function *F = getAssociatedFunction();
2087     const IRPosition &FnPos = IRPosition::function(*F);
2088     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2089     return clampStateAndIndicateChange(getState(), FnAA.getState());
2090   }
2091 
2092   /// See AbstractAttribute::trackStatistics()
2093   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2094 };
2095 } // namespace
2096 
2097 /// ------------------------ No-Free Attributes ----------------------------
2098 
2099 namespace {
2100 struct AANoFreeImpl : public AANoFree {
2101   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2102 
2103   /// See AbstractAttribute::updateImpl(...).
2104   ChangeStatus updateImpl(Attributor &A) override {
2105     auto CheckForNoFree = [&](Instruction &I) {
2106       const auto &CB = cast<CallBase>(I);
2107       if (CB.hasFnAttr(Attribute::NoFree))
2108         return true;
2109 
2110       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2111           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2112       return NoFreeAA.isAssumedNoFree();
2113     };
2114 
2115     bool UsedAssumedInformation = false;
2116     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2117                                            UsedAssumedInformation))
2118       return indicatePessimisticFixpoint();
2119     return ChangeStatus::UNCHANGED;
2120   }
2121 
2122   /// See AbstractAttribute::getAsStr().
2123   const std::string getAsStr() const override {
2124     return getAssumed() ? "nofree" : "may-free";
2125   }
2126 };
2127 
2128 struct AANoFreeFunction final : public AANoFreeImpl {
2129   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2130       : AANoFreeImpl(IRP, A) {}
2131 
2132   /// See AbstractAttribute::trackStatistics()
2133   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2134 };
2135 
2136 /// NoFree attribute deduction for a call sites.
2137 struct AANoFreeCallSite final : AANoFreeImpl {
2138   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2139       : AANoFreeImpl(IRP, A) {}
2140 
2141   /// See AbstractAttribute::initialize(...).
2142   void initialize(Attributor &A) override {
2143     AANoFreeImpl::initialize(A);
2144     Function *F = getAssociatedFunction();
2145     if (!F || F->isDeclaration())
2146       indicatePessimisticFixpoint();
2147   }
2148 
2149   /// See AbstractAttribute::updateImpl(...).
2150   ChangeStatus updateImpl(Attributor &A) override {
2151     // TODO: Once we have call site specific value information we can provide
2152     //       call site specific liveness information and then it makes
2153     //       sense to specialize attributes for call sites arguments instead of
2154     //       redirecting requests to the callee argument.
2155     Function *F = getAssociatedFunction();
2156     const IRPosition &FnPos = IRPosition::function(*F);
2157     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2158     return clampStateAndIndicateChange(getState(), FnAA.getState());
2159   }
2160 
2161   /// See AbstractAttribute::trackStatistics()
2162   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2163 };
2164 
2165 /// NoFree attribute for floating values.
2166 struct AANoFreeFloating : AANoFreeImpl {
2167   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2168       : AANoFreeImpl(IRP, A) {}
2169 
2170   /// See AbstractAttribute::trackStatistics()
2171   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2172 
2173   /// See Abstract Attribute::updateImpl(...).
2174   ChangeStatus updateImpl(Attributor &A) override {
2175     const IRPosition &IRP = getIRPosition();
2176 
2177     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2178         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2179     if (NoFreeAA.isAssumedNoFree())
2180       return ChangeStatus::UNCHANGED;
2181 
2182     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2183     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2184       Instruction *UserI = cast<Instruction>(U.getUser());
2185       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2186         if (CB->isBundleOperand(&U))
2187           return false;
2188         if (!CB->isArgOperand(&U))
2189           return true;
2190         unsigned ArgNo = CB->getArgOperandNo(&U);
2191 
2192         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2193             *this, IRPosition::callsite_argument(*CB, ArgNo),
2194             DepClassTy::REQUIRED);
2195         return NoFreeArg.isAssumedNoFree();
2196       }
2197 
2198       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2199           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2200         Follow = true;
2201         return true;
2202       }
2203       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2204           isa<ReturnInst>(UserI))
2205         return true;
2206 
2207       // Unknown user.
2208       return false;
2209     };
2210     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2211       return indicatePessimisticFixpoint();
2212 
2213     return ChangeStatus::UNCHANGED;
2214   }
2215 };
2216 
2217 /// NoFree attribute for a call site argument.
2218 struct AANoFreeArgument final : AANoFreeFloating {
2219   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2220       : AANoFreeFloating(IRP, A) {}
2221 
2222   /// See AbstractAttribute::trackStatistics()
2223   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2224 };
2225 
2226 /// NoFree attribute for call site arguments.
2227 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2228   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2229       : AANoFreeFloating(IRP, A) {}
2230 
2231   /// See AbstractAttribute::updateImpl(...).
2232   ChangeStatus updateImpl(Attributor &A) override {
2233     // TODO: Once we have call site specific value information we can provide
2234     //       call site specific liveness information and then it makes
2235     //       sense to specialize attributes for call sites arguments instead of
2236     //       redirecting requests to the callee argument.
2237     Argument *Arg = getAssociatedArgument();
2238     if (!Arg)
2239       return indicatePessimisticFixpoint();
2240     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2241     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2242     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2243   }
2244 
2245   /// See AbstractAttribute::trackStatistics()
2246   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2247 };
2248 
2249 /// NoFree attribute for function return value.
2250 struct AANoFreeReturned final : AANoFreeFloating {
2251   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2252       : AANoFreeFloating(IRP, A) {
2253     llvm_unreachable("NoFree is not applicable to function returns!");
2254   }
2255 
2256   /// See AbstractAttribute::initialize(...).
2257   void initialize(Attributor &A) override {
2258     llvm_unreachable("NoFree is not applicable to function returns!");
2259   }
2260 
2261   /// See AbstractAttribute::updateImpl(...).
2262   ChangeStatus updateImpl(Attributor &A) override {
2263     llvm_unreachable("NoFree is not applicable to function returns!");
2264   }
2265 
2266   /// See AbstractAttribute::trackStatistics()
2267   void trackStatistics() const override {}
2268 };
2269 
2270 /// NoFree attribute deduction for a call site return value.
2271 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2272   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2273       : AANoFreeFloating(IRP, A) {}
2274 
2275   ChangeStatus manifest(Attributor &A) override {
2276     return ChangeStatus::UNCHANGED;
2277   }
2278   /// See AbstractAttribute::trackStatistics()
2279   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2280 };
2281 } // namespace
2282 
2283 /// ------------------------ NonNull Argument Attribute ------------------------
2284 namespace {
2285 static int64_t getKnownNonNullAndDerefBytesForUse(
2286     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2287     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2288   TrackUse = false;
2289 
2290   const Value *UseV = U->get();
2291   if (!UseV->getType()->isPointerTy())
2292     return 0;
2293 
2294   // We need to follow common pointer manipulation uses to the accesses they
2295   // feed into. We can try to be smart to avoid looking through things we do not
2296   // like for now, e.g., non-inbounds GEPs.
2297   if (isa<CastInst>(I)) {
2298     TrackUse = true;
2299     return 0;
2300   }
2301 
2302   if (isa<GetElementPtrInst>(I)) {
2303     TrackUse = true;
2304     return 0;
2305   }
2306 
2307   Type *PtrTy = UseV->getType();
2308   const Function *F = I->getFunction();
2309   bool NullPointerIsDefined =
2310       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2311   const DataLayout &DL = A.getInfoCache().getDL();
2312   if (const auto *CB = dyn_cast<CallBase>(I)) {
2313     if (CB->isBundleOperand(U)) {
2314       if (RetainedKnowledge RK = getKnowledgeFromUse(
2315               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2316         IsNonNull |=
2317             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2318         return RK.ArgValue;
2319       }
2320       return 0;
2321     }
2322 
2323     if (CB->isCallee(U)) {
2324       IsNonNull |= !NullPointerIsDefined;
2325       return 0;
2326     }
2327 
2328     unsigned ArgNo = CB->getArgOperandNo(U);
2329     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2330     // As long as we only use known information there is no need to track
2331     // dependences here.
2332     auto &DerefAA =
2333         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2334     IsNonNull |= DerefAA.isKnownNonNull();
2335     return DerefAA.getKnownDereferenceableBytes();
2336   }
2337 
2338   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2339   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2340     return 0;
2341 
2342   int64_t Offset;
2343   const Value *Base =
2344       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2345   if (Base && Base == &AssociatedValue) {
2346     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2347     IsNonNull |= !NullPointerIsDefined;
2348     return std::max(int64_t(0), DerefBytes);
2349   }
2350 
2351   /// Corner case when an offset is 0.
2352   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2353                                           /*AllowNonInbounds*/ true);
2354   if (Base && Base == &AssociatedValue && Offset == 0) {
2355     int64_t DerefBytes = Loc->Size.getValue();
2356     IsNonNull |= !NullPointerIsDefined;
2357     return std::max(int64_t(0), DerefBytes);
2358   }
2359 
2360   return 0;
2361 }
2362 
2363 struct AANonNullImpl : AANonNull {
2364   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2365       : AANonNull(IRP, A),
2366         NullIsDefined(NullPointerIsDefined(
2367             getAnchorScope(),
2368             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2369 
2370   /// See AbstractAttribute::initialize(...).
2371   void initialize(Attributor &A) override {
2372     Value &V = *getAssociatedValue().stripPointerCasts();
2373     if (!NullIsDefined &&
2374         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2375                 /* IgnoreSubsumingPositions */ false, &A)) {
2376       indicateOptimisticFixpoint();
2377       return;
2378     }
2379 
2380     if (isa<ConstantPointerNull>(V)) {
2381       indicatePessimisticFixpoint();
2382       return;
2383     }
2384 
2385     AANonNull::initialize(A);
2386 
2387     bool CanBeNull, CanBeFreed;
2388     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2389                                          CanBeFreed)) {
2390       if (!CanBeNull) {
2391         indicateOptimisticFixpoint();
2392         return;
2393       }
2394     }
2395 
2396     if (isa<GlobalValue>(V)) {
2397       indicatePessimisticFixpoint();
2398       return;
2399     }
2400 
2401     if (Instruction *CtxI = getCtxI())
2402       followUsesInMBEC(*this, A, getState(), *CtxI);
2403   }
2404 
2405   /// See followUsesInMBEC
2406   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2407                        AANonNull::StateType &State) {
2408     bool IsNonNull = false;
2409     bool TrackUse = false;
2410     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2411                                        IsNonNull, TrackUse);
2412     State.setKnown(IsNonNull);
2413     return TrackUse;
2414   }
2415 
2416   /// See AbstractAttribute::getAsStr().
2417   const std::string getAsStr() const override {
2418     return getAssumed() ? "nonnull" : "may-null";
2419   }
2420 
2421   /// Flag to determine if the underlying value can be null and still allow
2422   /// valid accesses.
2423   const bool NullIsDefined;
2424 };
2425 
2426 /// NonNull attribute for a floating value.
2427 struct AANonNullFloating : public AANonNullImpl {
2428   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2429       : AANonNullImpl(IRP, A) {}
2430 
2431   /// See AbstractAttribute::updateImpl(...).
2432   ChangeStatus updateImpl(Attributor &A) override {
2433     const DataLayout &DL = A.getDataLayout();
2434 
2435     DominatorTree *DT = nullptr;
2436     AssumptionCache *AC = nullptr;
2437     InformationCache &InfoCache = A.getInfoCache();
2438     if (const Function *Fn = getAnchorScope()) {
2439       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2440       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2441     }
2442 
2443     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2444                             AANonNull::StateType &T, bool Stripped) -> bool {
2445       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2446                                              DepClassTy::REQUIRED);
2447       if (!Stripped && this == &AA) {
2448         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2449           T.indicatePessimisticFixpoint();
2450       } else {
2451         // Use abstract attribute information.
2452         const AANonNull::StateType &NS = AA.getState();
2453         T ^= NS;
2454       }
2455       return T.isValidState();
2456     };
2457 
2458     StateType T;
2459     bool UsedAssumedInformation = false;
2460     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2461                                           VisitValueCB, getCtxI(),
2462                                           UsedAssumedInformation))
2463       return indicatePessimisticFixpoint();
2464 
2465     return clampStateAndIndicateChange(getState(), T);
2466   }
2467 
2468   /// See AbstractAttribute::trackStatistics()
2469   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2470 };
2471 
2472 /// NonNull attribute for function return value.
2473 struct AANonNullReturned final
2474     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2475   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2476       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2477 
2478   /// See AbstractAttribute::getAsStr().
2479   const std::string getAsStr() const override {
2480     return getAssumed() ? "nonnull" : "may-null";
2481   }
2482 
2483   /// See AbstractAttribute::trackStatistics()
2484   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2485 };
2486 
2487 /// NonNull attribute for function argument.
2488 struct AANonNullArgument final
2489     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2490   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2491       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2492 
2493   /// See AbstractAttribute::trackStatistics()
2494   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2495 };
2496 
2497 struct AANonNullCallSiteArgument final : AANonNullFloating {
2498   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2499       : AANonNullFloating(IRP, A) {}
2500 
2501   /// See AbstractAttribute::trackStatistics()
2502   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2503 };
2504 
2505 /// NonNull attribute for a call site return position.
2506 struct AANonNullCallSiteReturned final
2507     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2508   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2509       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2510 
2511   /// See AbstractAttribute::trackStatistics()
2512   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2513 };
2514 } // namespace
2515 
2516 /// ------------------------ No-Recurse Attributes ----------------------------
2517 
2518 namespace {
2519 struct AANoRecurseImpl : public AANoRecurse {
2520   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2521 
2522   /// See AbstractAttribute::getAsStr()
2523   const std::string getAsStr() const override {
2524     return getAssumed() ? "norecurse" : "may-recurse";
2525   }
2526 };
2527 
2528 struct AANoRecurseFunction final : AANoRecurseImpl {
2529   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2530       : AANoRecurseImpl(IRP, A) {}
2531 
2532   /// See AbstractAttribute::updateImpl(...).
2533   ChangeStatus updateImpl(Attributor &A) override {
2534 
2535     // If all live call sites are known to be no-recurse, we are as well.
2536     auto CallSitePred = [&](AbstractCallSite ACS) {
2537       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2538           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2539           DepClassTy::NONE);
2540       return NoRecurseAA.isKnownNoRecurse();
2541     };
2542     bool UsedAssumedInformation = false;
2543     if (A.checkForAllCallSites(CallSitePred, *this, true,
2544                                UsedAssumedInformation)) {
2545       // If we know all call sites and all are known no-recurse, we are done.
2546       // If all known call sites, which might not be all that exist, are known
2547       // to be no-recurse, we are not done but we can continue to assume
2548       // no-recurse. If one of the call sites we have not visited will become
2549       // live, another update is triggered.
2550       if (!UsedAssumedInformation)
2551         indicateOptimisticFixpoint();
2552       return ChangeStatus::UNCHANGED;
2553     }
2554 
2555     const AAFunctionReachability &EdgeReachability =
2556         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2557                                            DepClassTy::REQUIRED);
2558     if (EdgeReachability.canReach(A, *getAnchorScope()))
2559       return indicatePessimisticFixpoint();
2560     return ChangeStatus::UNCHANGED;
2561   }
2562 
2563   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2564 };
2565 
2566 /// NoRecurse attribute deduction for a call sites.
2567 struct AANoRecurseCallSite final : AANoRecurseImpl {
2568   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2569       : AANoRecurseImpl(IRP, A) {}
2570 
2571   /// See AbstractAttribute::initialize(...).
2572   void initialize(Attributor &A) override {
2573     AANoRecurseImpl::initialize(A);
2574     Function *F = getAssociatedFunction();
2575     if (!F || F->isDeclaration())
2576       indicatePessimisticFixpoint();
2577   }
2578 
2579   /// See AbstractAttribute::updateImpl(...).
2580   ChangeStatus updateImpl(Attributor &A) override {
2581     // TODO: Once we have call site specific value information we can provide
2582     //       call site specific liveness information and then it makes
2583     //       sense to specialize attributes for call sites arguments instead of
2584     //       redirecting requests to the callee argument.
2585     Function *F = getAssociatedFunction();
2586     const IRPosition &FnPos = IRPosition::function(*F);
2587     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2588     return clampStateAndIndicateChange(getState(), FnAA.getState());
2589   }
2590 
2591   /// See AbstractAttribute::trackStatistics()
2592   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2593 };
2594 } // namespace
2595 
2596 /// -------------------- Undefined-Behavior Attributes ------------------------
2597 
2598 namespace {
2599 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2600   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2601       : AAUndefinedBehavior(IRP, A) {}
2602 
2603   /// See AbstractAttribute::updateImpl(...).
2604   // through a pointer (i.e. also branches etc.)
2605   ChangeStatus updateImpl(Attributor &A) override {
2606     const size_t UBPrevSize = KnownUBInsts.size();
2607     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2608 
2609     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2610       // Lang ref now states volatile store is not UB, let's skip them.
2611       if (I.isVolatile() && I.mayWriteToMemory())
2612         return true;
2613 
2614       // Skip instructions that are already saved.
2615       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2616         return true;
2617 
2618       // If we reach here, we know we have an instruction
2619       // that accesses memory through a pointer operand,
2620       // for which getPointerOperand() should give it to us.
2621       Value *PtrOp =
2622           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2623       assert(PtrOp &&
2624              "Expected pointer operand of memory accessing instruction");
2625 
2626       // Either we stopped and the appropriate action was taken,
2627       // or we got back a simplified value to continue.
2628       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2629       if (!SimplifiedPtrOp || !SimplifiedPtrOp.getValue())
2630         return true;
2631       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2632 
2633       // A memory access through a pointer is considered UB
2634       // only if the pointer has constant null value.
2635       // TODO: Expand it to not only check constant values.
2636       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2637         AssumedNoUBInsts.insert(&I);
2638         return true;
2639       }
2640       const Type *PtrTy = PtrOpVal->getType();
2641 
2642       // Because we only consider instructions inside functions,
2643       // assume that a parent function exists.
2644       const Function *F = I.getFunction();
2645 
2646       // A memory access using constant null pointer is only considered UB
2647       // if null pointer is _not_ defined for the target platform.
2648       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2649         AssumedNoUBInsts.insert(&I);
2650       else
2651         KnownUBInsts.insert(&I);
2652       return true;
2653     };
2654 
2655     auto InspectBrInstForUB = [&](Instruction &I) {
2656       // A conditional branch instruction is considered UB if it has `undef`
2657       // condition.
2658 
2659       // Skip instructions that are already saved.
2660       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2661         return true;
2662 
2663       // We know we have a branch instruction.
2664       auto *BrInst = cast<BranchInst>(&I);
2665 
2666       // Unconditional branches are never considered UB.
2667       if (BrInst->isUnconditional())
2668         return true;
2669 
2670       // Either we stopped and the appropriate action was taken,
2671       // or we got back a simplified value to continue.
2672       Optional<Value *> SimplifiedCond =
2673           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2674       if (!SimplifiedCond || !*SimplifiedCond)
2675         return true;
2676       AssumedNoUBInsts.insert(&I);
2677       return true;
2678     };
2679 
2680     auto InspectCallSiteForUB = [&](Instruction &I) {
2681       // Check whether a callsite always cause UB or not
2682 
2683       // Skip instructions that are already saved.
2684       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2685         return true;
2686 
2687       // Check nonnull and noundef argument attribute violation for each
2688       // callsite.
2689       CallBase &CB = cast<CallBase>(I);
2690       Function *Callee = CB.getCalledFunction();
2691       if (!Callee)
2692         return true;
2693       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2694         // If current argument is known to be simplified to null pointer and the
2695         // corresponding argument position is known to have nonnull attribute,
2696         // the argument is poison. Furthermore, if the argument is poison and
2697         // the position is known to have noundef attriubte, this callsite is
2698         // considered UB.
2699         if (idx >= Callee->arg_size())
2700           break;
2701         Value *ArgVal = CB.getArgOperand(idx);
2702         if (!ArgVal)
2703           continue;
2704         // Here, we handle three cases.
2705         //   (1) Not having a value means it is dead. (we can replace the value
2706         //       with undef)
2707         //   (2) Simplified to undef. The argument violate noundef attriubte.
2708         //   (3) Simplified to null pointer where known to be nonnull.
2709         //       The argument is a poison value and violate noundef attribute.
2710         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2711         auto &NoUndefAA =
2712             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2713         if (!NoUndefAA.isKnownNoUndef())
2714           continue;
2715         bool UsedAssumedInformation = false;
2716         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2717             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2718         if (UsedAssumedInformation)
2719           continue;
2720         if (SimplifiedVal && !SimplifiedVal.getValue())
2721           return true;
2722         if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.getValue())) {
2723           KnownUBInsts.insert(&I);
2724           continue;
2725         }
2726         if (!ArgVal->getType()->isPointerTy() ||
2727             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2728           continue;
2729         auto &NonNullAA =
2730             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2731         if (NonNullAA.isKnownNonNull())
2732           KnownUBInsts.insert(&I);
2733       }
2734       return true;
2735     };
2736 
2737     auto InspectReturnInstForUB = [&](Instruction &I) {
2738       auto &RI = cast<ReturnInst>(I);
2739       // Either we stopped and the appropriate action was taken,
2740       // or we got back a simplified return value to continue.
2741       Optional<Value *> SimplifiedRetValue =
2742           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2743       if (!SimplifiedRetValue || !*SimplifiedRetValue)
2744         return true;
2745 
2746       // Check if a return instruction always cause UB or not
2747       // Note: It is guaranteed that the returned position of the anchor
2748       //       scope has noundef attribute when this is called.
2749       //       We also ensure the return position is not "assumed dead"
2750       //       because the returned value was then potentially simplified to
2751       //       `undef` in AAReturnedValues without removing the `noundef`
2752       //       attribute yet.
2753 
2754       // When the returned position has noundef attriubte, UB occurs in the
2755       // following cases.
2756       //   (1) Returned value is known to be undef.
2757       //   (2) The value is known to be a null pointer and the returned
2758       //       position has nonnull attribute (because the returned value is
2759       //       poison).
2760       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2761         auto &NonNullAA = A.getAAFor<AANonNull>(
2762             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2763         if (NonNullAA.isKnownNonNull())
2764           KnownUBInsts.insert(&I);
2765       }
2766 
2767       return true;
2768     };
2769 
2770     bool UsedAssumedInformation = false;
2771     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2772                               {Instruction::Load, Instruction::Store,
2773                                Instruction::AtomicCmpXchg,
2774                                Instruction::AtomicRMW},
2775                               UsedAssumedInformation,
2776                               /* CheckBBLivenessOnly */ true);
2777     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2778                               UsedAssumedInformation,
2779                               /* CheckBBLivenessOnly */ true);
2780     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2781                                       UsedAssumedInformation);
2782 
2783     // If the returned position of the anchor scope has noundef attriubte, check
2784     // all returned instructions.
2785     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2786       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2787       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2788         auto &RetPosNoUndefAA =
2789             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2790         if (RetPosNoUndefAA.isKnownNoUndef())
2791           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2792                                     {Instruction::Ret}, UsedAssumedInformation,
2793                                     /* CheckBBLivenessOnly */ true);
2794       }
2795     }
2796 
2797     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2798         UBPrevSize != KnownUBInsts.size())
2799       return ChangeStatus::CHANGED;
2800     return ChangeStatus::UNCHANGED;
2801   }
2802 
2803   bool isKnownToCauseUB(Instruction *I) const override {
2804     return KnownUBInsts.count(I);
2805   }
2806 
2807   bool isAssumedToCauseUB(Instruction *I) const override {
2808     // In simple words, if an instruction is not in the assumed to _not_
2809     // cause UB, then it is assumed UB (that includes those
2810     // in the KnownUBInsts set). The rest is boilerplate
2811     // is to ensure that it is one of the instructions we test
2812     // for UB.
2813 
2814     switch (I->getOpcode()) {
2815     case Instruction::Load:
2816     case Instruction::Store:
2817     case Instruction::AtomicCmpXchg:
2818     case Instruction::AtomicRMW:
2819       return !AssumedNoUBInsts.count(I);
2820     case Instruction::Br: {
2821       auto *BrInst = cast<BranchInst>(I);
2822       if (BrInst->isUnconditional())
2823         return false;
2824       return !AssumedNoUBInsts.count(I);
2825     } break;
2826     default:
2827       return false;
2828     }
2829     return false;
2830   }
2831 
2832   ChangeStatus manifest(Attributor &A) override {
2833     if (KnownUBInsts.empty())
2834       return ChangeStatus::UNCHANGED;
2835     for (Instruction *I : KnownUBInsts)
2836       A.changeToUnreachableAfterManifest(I);
2837     return ChangeStatus::CHANGED;
2838   }
2839 
2840   /// See AbstractAttribute::getAsStr()
2841   const std::string getAsStr() const override {
2842     return getAssumed() ? "undefined-behavior" : "no-ub";
2843   }
2844 
2845   /// Note: The correctness of this analysis depends on the fact that the
2846   /// following 2 sets will stop changing after some point.
2847   /// "Change" here means that their size changes.
2848   /// The size of each set is monotonically increasing
2849   /// (we only add items to them) and it is upper bounded by the number of
2850   /// instructions in the processed function (we can never save more
2851   /// elements in either set than this number). Hence, at some point,
2852   /// they will stop increasing.
2853   /// Consequently, at some point, both sets will have stopped
2854   /// changing, effectively making the analysis reach a fixpoint.
2855 
2856   /// Note: These 2 sets are disjoint and an instruction can be considered
2857   /// one of 3 things:
2858   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2859   ///    the KnownUBInsts set.
2860   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2861   ///    has a reason to assume it).
2862   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2863   ///    could not find a reason to assume or prove that it can cause UB,
2864   ///    hence it assumes it doesn't. We have a set for these instructions
2865   ///    so that we don't reprocess them in every update.
2866   ///    Note however that instructions in this set may cause UB.
2867 
2868 protected:
2869   /// A set of all live instructions _known_ to cause UB.
2870   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2871 
2872 private:
2873   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2874   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2875 
2876   // Should be called on updates in which if we're processing an instruction
2877   // \p I that depends on a value \p V, one of the following has to happen:
2878   // - If the value is assumed, then stop.
2879   // - If the value is known but undef, then consider it UB.
2880   // - Otherwise, do specific processing with the simplified value.
2881   // We return None in the first 2 cases to signify that an appropriate
2882   // action was taken and the caller should stop.
2883   // Otherwise, we return the simplified value that the caller should
2884   // use for specific processing.
2885   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2886                                          Instruction *I) {
2887     bool UsedAssumedInformation = false;
2888     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2889         IRPosition::value(*V), *this, UsedAssumedInformation);
2890     if (!UsedAssumedInformation) {
2891       // Don't depend on assumed values.
2892       if (!SimplifiedV) {
2893         // If it is known (which we tested above) but it doesn't have a value,
2894         // then we can assume `undef` and hence the instruction is UB.
2895         KnownUBInsts.insert(I);
2896         return llvm::None;
2897       }
2898       if (!*SimplifiedV)
2899         return nullptr;
2900       V = *SimplifiedV;
2901     }
2902     if (isa<UndefValue>(V)) {
2903       KnownUBInsts.insert(I);
2904       return llvm::None;
2905     }
2906     return V;
2907   }
2908 };
2909 
2910 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2911   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2912       : AAUndefinedBehaviorImpl(IRP, A) {}
2913 
2914   /// See AbstractAttribute::trackStatistics()
2915   void trackStatistics() const override {
2916     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2917                "Number of instructions known to have UB");
2918     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2919         KnownUBInsts.size();
2920   }
2921 };
2922 } // namespace
2923 
2924 /// ------------------------ Will-Return Attributes ----------------------------
2925 
2926 namespace {
2927 // Helper function that checks whether a function has any cycle which we don't
2928 // know if it is bounded or not.
2929 // Loops with maximum trip count are considered bounded, any other cycle not.
2930 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2931   ScalarEvolution *SE =
2932       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2933   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2934   // If either SCEV or LoopInfo is not available for the function then we assume
2935   // any cycle to be unbounded cycle.
2936   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2937   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2938   if (!SE || !LI) {
2939     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2940       if (SCCI.hasCycle())
2941         return true;
2942     return false;
2943   }
2944 
2945   // If there's irreducible control, the function may contain non-loop cycles.
2946   if (mayContainIrreducibleControl(F, LI))
2947     return true;
2948 
2949   // Any loop that does not have a max trip count is considered unbounded cycle.
2950   for (auto *L : LI->getLoopsInPreorder()) {
2951     if (!SE->getSmallConstantMaxTripCount(L))
2952       return true;
2953   }
2954   return false;
2955 }
2956 
2957 struct AAWillReturnImpl : public AAWillReturn {
2958   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2959       : AAWillReturn(IRP, A) {}
2960 
2961   /// See AbstractAttribute::initialize(...).
2962   void initialize(Attributor &A) override {
2963     AAWillReturn::initialize(A);
2964 
2965     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2966       indicateOptimisticFixpoint();
2967       return;
2968     }
2969   }
2970 
2971   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2972   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2973     // Check for `mustprogress` in the scope and the associated function which
2974     // might be different if this is a call site.
2975     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2976         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2977       return false;
2978 
2979     bool IsKnown;
2980     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2981       return IsKnown || !KnownOnly;
2982     return false;
2983   }
2984 
2985   /// See AbstractAttribute::updateImpl(...).
2986   ChangeStatus updateImpl(Attributor &A) override {
2987     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2988       return ChangeStatus::UNCHANGED;
2989 
2990     auto CheckForWillReturn = [&](Instruction &I) {
2991       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2992       const auto &WillReturnAA =
2993           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2994       if (WillReturnAA.isKnownWillReturn())
2995         return true;
2996       if (!WillReturnAA.isAssumedWillReturn())
2997         return false;
2998       const auto &NoRecurseAA =
2999           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
3000       return NoRecurseAA.isAssumedNoRecurse();
3001     };
3002 
3003     bool UsedAssumedInformation = false;
3004     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3005                                            UsedAssumedInformation))
3006       return indicatePessimisticFixpoint();
3007 
3008     return ChangeStatus::UNCHANGED;
3009   }
3010 
3011   /// See AbstractAttribute::getAsStr()
3012   const std::string getAsStr() const override {
3013     return getAssumed() ? "willreturn" : "may-noreturn";
3014   }
3015 };
3016 
3017 struct AAWillReturnFunction final : AAWillReturnImpl {
3018   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3019       : AAWillReturnImpl(IRP, A) {}
3020 
3021   /// See AbstractAttribute::initialize(...).
3022   void initialize(Attributor &A) override {
3023     AAWillReturnImpl::initialize(A);
3024 
3025     Function *F = getAnchorScope();
3026     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3027       indicatePessimisticFixpoint();
3028   }
3029 
3030   /// See AbstractAttribute::trackStatistics()
3031   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3032 };
3033 
3034 /// WillReturn attribute deduction for a call sites.
3035 struct AAWillReturnCallSite final : AAWillReturnImpl {
3036   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3037       : AAWillReturnImpl(IRP, A) {}
3038 
3039   /// See AbstractAttribute::initialize(...).
3040   void initialize(Attributor &A) override {
3041     AAWillReturnImpl::initialize(A);
3042     Function *F = getAssociatedFunction();
3043     if (!F || !A.isFunctionIPOAmendable(*F))
3044       indicatePessimisticFixpoint();
3045   }
3046 
3047   /// See AbstractAttribute::updateImpl(...).
3048   ChangeStatus updateImpl(Attributor &A) override {
3049     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3050       return ChangeStatus::UNCHANGED;
3051 
3052     // TODO: Once we have call site specific value information we can provide
3053     //       call site specific liveness information and then it makes
3054     //       sense to specialize attributes for call sites arguments instead of
3055     //       redirecting requests to the callee argument.
3056     Function *F = getAssociatedFunction();
3057     const IRPosition &FnPos = IRPosition::function(*F);
3058     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3059     return clampStateAndIndicateChange(getState(), FnAA.getState());
3060   }
3061 
3062   /// See AbstractAttribute::trackStatistics()
3063   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3064 };
3065 } // namespace
3066 
3067 /// -------------------AAReachability Attribute--------------------------
3068 
3069 namespace {
3070 struct AAReachabilityImpl : AAReachability {
3071   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3072       : AAReachability(IRP, A) {}
3073 
3074   const std::string getAsStr() const override {
3075     // TODO: Return the number of reachable queries.
3076     return "reachable";
3077   }
3078 
3079   /// See AbstractAttribute::updateImpl(...).
3080   ChangeStatus updateImpl(Attributor &A) override {
3081     return ChangeStatus::UNCHANGED;
3082   }
3083 };
3084 
3085 struct AAReachabilityFunction final : public AAReachabilityImpl {
3086   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3087       : AAReachabilityImpl(IRP, A) {}
3088 
3089   /// See AbstractAttribute::trackStatistics()
3090   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3091 };
3092 } // namespace
3093 
3094 /// ------------------------ NoAlias Argument Attribute ------------------------
3095 
3096 namespace {
3097 struct AANoAliasImpl : AANoAlias {
3098   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3099     assert(getAssociatedType()->isPointerTy() &&
3100            "Noalias is a pointer attribute");
3101   }
3102 
3103   const std::string getAsStr() const override {
3104     return getAssumed() ? "noalias" : "may-alias";
3105   }
3106 };
3107 
3108 /// NoAlias attribute for a floating value.
3109 struct AANoAliasFloating final : AANoAliasImpl {
3110   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3111       : AANoAliasImpl(IRP, A) {}
3112 
3113   /// See AbstractAttribute::initialize(...).
3114   void initialize(Attributor &A) override {
3115     AANoAliasImpl::initialize(A);
3116     Value *Val = &getAssociatedValue();
3117     do {
3118       CastInst *CI = dyn_cast<CastInst>(Val);
3119       if (!CI)
3120         break;
3121       Value *Base = CI->getOperand(0);
3122       if (!Base->hasOneUse())
3123         break;
3124       Val = Base;
3125     } while (true);
3126 
3127     if (!Val->getType()->isPointerTy()) {
3128       indicatePessimisticFixpoint();
3129       return;
3130     }
3131 
3132     if (isa<AllocaInst>(Val))
3133       indicateOptimisticFixpoint();
3134     else if (isa<ConstantPointerNull>(Val) &&
3135              !NullPointerIsDefined(getAnchorScope(),
3136                                    Val->getType()->getPointerAddressSpace()))
3137       indicateOptimisticFixpoint();
3138     else if (Val != &getAssociatedValue()) {
3139       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3140           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3141       if (ValNoAliasAA.isKnownNoAlias())
3142         indicateOptimisticFixpoint();
3143     }
3144   }
3145 
3146   /// See AbstractAttribute::updateImpl(...).
3147   ChangeStatus updateImpl(Attributor &A) override {
3148     // TODO: Implement this.
3149     return indicatePessimisticFixpoint();
3150   }
3151 
3152   /// See AbstractAttribute::trackStatistics()
3153   void trackStatistics() const override {
3154     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3155   }
3156 };
3157 
3158 /// NoAlias attribute for an argument.
3159 struct AANoAliasArgument final
3160     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3161   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3162   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3163 
3164   /// See AbstractAttribute::initialize(...).
3165   void initialize(Attributor &A) override {
3166     Base::initialize(A);
3167     // See callsite argument attribute and callee argument attribute.
3168     if (hasAttr({Attribute::ByVal}))
3169       indicateOptimisticFixpoint();
3170   }
3171 
3172   /// See AbstractAttribute::update(...).
3173   ChangeStatus updateImpl(Attributor &A) override {
3174     // We have to make sure no-alias on the argument does not break
3175     // synchronization when this is a callback argument, see also [1] below.
3176     // If synchronization cannot be affected, we delegate to the base updateImpl
3177     // function, otherwise we give up for now.
3178 
3179     // If the function is no-sync, no-alias cannot break synchronization.
3180     const auto &NoSyncAA =
3181         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3182                              DepClassTy::OPTIONAL);
3183     if (NoSyncAA.isAssumedNoSync())
3184       return Base::updateImpl(A);
3185 
3186     // If the argument is read-only, no-alias cannot break synchronization.
3187     bool IsKnown;
3188     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3189       return Base::updateImpl(A);
3190 
3191     // If the argument is never passed through callbacks, no-alias cannot break
3192     // synchronization.
3193     bool UsedAssumedInformation = false;
3194     if (A.checkForAllCallSites(
3195             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3196             true, UsedAssumedInformation))
3197       return Base::updateImpl(A);
3198 
3199     // TODO: add no-alias but make sure it doesn't break synchronization by
3200     // introducing fake uses. See:
3201     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3202     //     International Workshop on OpenMP 2018,
3203     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3204 
3205     return indicatePessimisticFixpoint();
3206   }
3207 
3208   /// See AbstractAttribute::trackStatistics()
3209   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3210 };
3211 
3212 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3213   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3214       : AANoAliasImpl(IRP, A) {}
3215 
3216   /// See AbstractAttribute::initialize(...).
3217   void initialize(Attributor &A) override {
3218     // See callsite argument attribute and callee argument attribute.
3219     const auto &CB = cast<CallBase>(getAnchorValue());
3220     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3221       indicateOptimisticFixpoint();
3222     Value &Val = getAssociatedValue();
3223     if (isa<ConstantPointerNull>(Val) &&
3224         !NullPointerIsDefined(getAnchorScope(),
3225                               Val.getType()->getPointerAddressSpace()))
3226       indicateOptimisticFixpoint();
3227   }
3228 
3229   /// Determine if the underlying value may alias with the call site argument
3230   /// \p OtherArgNo of \p ICS (= the underlying call site).
3231   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3232                             const AAMemoryBehavior &MemBehaviorAA,
3233                             const CallBase &CB, unsigned OtherArgNo) {
3234     // We do not need to worry about aliasing with the underlying IRP.
3235     if (this->getCalleeArgNo() == (int)OtherArgNo)
3236       return false;
3237 
3238     // If it is not a pointer or pointer vector we do not alias.
3239     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3240     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3241       return false;
3242 
3243     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3244         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3245 
3246     // If the argument is readnone, there is no read-write aliasing.
3247     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3248       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3249       return false;
3250     }
3251 
3252     // If the argument is readonly and the underlying value is readonly, there
3253     // is no read-write aliasing.
3254     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3255     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3256       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3257       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3258       return false;
3259     }
3260 
3261     // We have to utilize actual alias analysis queries so we need the object.
3262     if (!AAR)
3263       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3264 
3265     // Try to rule it out at the call site.
3266     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3267     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3268                          "callsite arguments: "
3269                       << getAssociatedValue() << " " << *ArgOp << " => "
3270                       << (IsAliasing ? "" : "no-") << "alias \n");
3271 
3272     return IsAliasing;
3273   }
3274 
3275   bool
3276   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3277                                          const AAMemoryBehavior &MemBehaviorAA,
3278                                          const AANoAlias &NoAliasAA) {
3279     // We can deduce "noalias" if the following conditions hold.
3280     // (i)   Associated value is assumed to be noalias in the definition.
3281     // (ii)  Associated value is assumed to be no-capture in all the uses
3282     //       possibly executed before this callsite.
3283     // (iii) There is no other pointer argument which could alias with the
3284     //       value.
3285 
3286     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3287     if (!AssociatedValueIsNoAliasAtDef) {
3288       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3289                         << " is not no-alias at the definition\n");
3290       return false;
3291     }
3292 
3293     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3294       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3295           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3296       return DerefAA.getAssumedDereferenceableBytes();
3297     };
3298 
3299     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3300 
3301     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3302     const Function *ScopeFn = VIRP.getAnchorScope();
3303     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3304     // Check whether the value is captured in the scope using AANoCapture.
3305     // Look at CFG and check only uses possibly executed before this
3306     // callsite.
3307     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3308       Instruction *UserI = cast<Instruction>(U.getUser());
3309 
3310       // If UserI is the curr instruction and there is a single potential use of
3311       // the value in UserI we allow the use.
3312       // TODO: We should inspect the operands and allow those that cannot alias
3313       //       with the value.
3314       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3315         return true;
3316 
3317       if (ScopeFn) {
3318         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3319           if (CB->isArgOperand(&U)) {
3320 
3321             unsigned ArgNo = CB->getArgOperandNo(&U);
3322 
3323             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3324                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3325                 DepClassTy::OPTIONAL);
3326 
3327             if (NoCaptureAA.isAssumedNoCapture())
3328               return true;
3329           }
3330         }
3331 
3332         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3333           return true;
3334       }
3335 
3336       // TODO: We should track the capturing uses in AANoCapture but the problem
3337       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3338       //       a value in the module slice.
3339       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3340       case UseCaptureKind::NO_CAPTURE:
3341         return true;
3342       case UseCaptureKind::MAY_CAPTURE:
3343         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3344                           << "\n");
3345         return false;
3346       case UseCaptureKind::PASSTHROUGH:
3347         Follow = true;
3348         return true;
3349       }
3350       llvm_unreachable("unknown UseCaptureKind");
3351     };
3352 
3353     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3354       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3355         LLVM_DEBUG(
3356             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3357                    << " cannot be noalias as it is potentially captured\n");
3358         return false;
3359       }
3360     }
3361     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3362 
3363     // Check there is no other pointer argument which could alias with the
3364     // value passed at this call site.
3365     // TODO: AbstractCallSite
3366     const auto &CB = cast<CallBase>(getAnchorValue());
3367     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3368       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3369         return false;
3370 
3371     return true;
3372   }
3373 
3374   /// See AbstractAttribute::updateImpl(...).
3375   ChangeStatus updateImpl(Attributor &A) override {
3376     // If the argument is readnone we are done as there are no accesses via the
3377     // argument.
3378     auto &MemBehaviorAA =
3379         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3380     if (MemBehaviorAA.isAssumedReadNone()) {
3381       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3382       return ChangeStatus::UNCHANGED;
3383     }
3384 
3385     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3386     const auto &NoAliasAA =
3387         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3388 
3389     AAResults *AAR = nullptr;
3390     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3391                                                NoAliasAA)) {
3392       LLVM_DEBUG(
3393           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3394       return ChangeStatus::UNCHANGED;
3395     }
3396 
3397     return indicatePessimisticFixpoint();
3398   }
3399 
3400   /// See AbstractAttribute::trackStatistics()
3401   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3402 };
3403 
3404 /// NoAlias attribute for function return value.
3405 struct AANoAliasReturned final : AANoAliasImpl {
3406   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3407       : AANoAliasImpl(IRP, A) {}
3408 
3409   /// See AbstractAttribute::initialize(...).
3410   void initialize(Attributor &A) override {
3411     AANoAliasImpl::initialize(A);
3412     Function *F = getAssociatedFunction();
3413     if (!F || F->isDeclaration())
3414       indicatePessimisticFixpoint();
3415   }
3416 
3417   /// See AbstractAttribute::updateImpl(...).
3418   virtual ChangeStatus updateImpl(Attributor &A) override {
3419 
3420     auto CheckReturnValue = [&](Value &RV) -> bool {
3421       if (Constant *C = dyn_cast<Constant>(&RV))
3422         if (C->isNullValue() || isa<UndefValue>(C))
3423           return true;
3424 
3425       /// For now, we can only deduce noalias if we have call sites.
3426       /// FIXME: add more support.
3427       if (!isa<CallBase>(&RV))
3428         return false;
3429 
3430       const IRPosition &RVPos = IRPosition::value(RV);
3431       const auto &NoAliasAA =
3432           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3433       if (!NoAliasAA.isAssumedNoAlias())
3434         return false;
3435 
3436       const auto &NoCaptureAA =
3437           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3438       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3439     };
3440 
3441     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3442       return indicatePessimisticFixpoint();
3443 
3444     return ChangeStatus::UNCHANGED;
3445   }
3446 
3447   /// See AbstractAttribute::trackStatistics()
3448   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3449 };
3450 
3451 /// NoAlias attribute deduction for a call site return value.
3452 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3453   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3454       : AANoAliasImpl(IRP, A) {}
3455 
3456   /// See AbstractAttribute::initialize(...).
3457   void initialize(Attributor &A) override {
3458     AANoAliasImpl::initialize(A);
3459     Function *F = getAssociatedFunction();
3460     if (!F || F->isDeclaration())
3461       indicatePessimisticFixpoint();
3462   }
3463 
3464   /// See AbstractAttribute::updateImpl(...).
3465   ChangeStatus updateImpl(Attributor &A) override {
3466     // TODO: Once we have call site specific value information we can provide
3467     //       call site specific liveness information and then it makes
3468     //       sense to specialize attributes for call sites arguments instead of
3469     //       redirecting requests to the callee argument.
3470     Function *F = getAssociatedFunction();
3471     const IRPosition &FnPos = IRPosition::returned(*F);
3472     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3473     return clampStateAndIndicateChange(getState(), FnAA.getState());
3474   }
3475 
3476   /// See AbstractAttribute::trackStatistics()
3477   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3478 };
3479 } // namespace
3480 
3481 /// -------------------AAIsDead Function Attribute-----------------------
3482 
3483 namespace {
3484 struct AAIsDeadValueImpl : public AAIsDead {
3485   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3486 
3487   /// See AbstractAttribute::initialize(...).
3488   void initialize(Attributor &A) override {
3489     if (auto *Scope = getAnchorScope())
3490       if (!A.isRunOn(*Scope))
3491         indicatePessimisticFixpoint();
3492   }
3493 
3494   /// See AAIsDead::isAssumedDead().
3495   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3496 
3497   /// See AAIsDead::isKnownDead().
3498   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3499 
3500   /// See AAIsDead::isAssumedDead(BasicBlock *).
3501   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3502 
3503   /// See AAIsDead::isKnownDead(BasicBlock *).
3504   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3505 
3506   /// See AAIsDead::isAssumedDead(Instruction *I).
3507   bool isAssumedDead(const Instruction *I) const override {
3508     return I == getCtxI() && isAssumedDead();
3509   }
3510 
3511   /// See AAIsDead::isKnownDead(Instruction *I).
3512   bool isKnownDead(const Instruction *I) const override {
3513     return isAssumedDead(I) && isKnownDead();
3514   }
3515 
3516   /// See AbstractAttribute::getAsStr().
3517   virtual const std::string getAsStr() const override {
3518     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3519   }
3520 
3521   /// Check if all uses are assumed dead.
3522   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3523     // Callers might not check the type, void has no uses.
3524     if (V.getType()->isVoidTy() || V.use_empty())
3525       return true;
3526 
3527     // If we replace a value with a constant there are no uses left afterwards.
3528     if (!isa<Constant>(V)) {
3529       if (auto *I = dyn_cast<Instruction>(&V))
3530         if (!A.isRunOn(*I->getFunction()))
3531           return false;
3532       bool UsedAssumedInformation = false;
3533       Optional<Constant *> C =
3534           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3535       if (!C || *C)
3536         return true;
3537     }
3538 
3539     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3540     // Explicitly set the dependence class to required because we want a long
3541     // chain of N dependent instructions to be considered live as soon as one is
3542     // without going through N update cycles. This is not required for
3543     // correctness.
3544     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3545                              DepClassTy::REQUIRED,
3546                              /* IgnoreDroppableUses */ false);
3547   }
3548 
3549   /// Determine if \p I is assumed to be side-effect free.
3550   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3551     if (!I || wouldInstructionBeTriviallyDead(I))
3552       return true;
3553 
3554     auto *CB = dyn_cast<CallBase>(I);
3555     if (!CB || isa<IntrinsicInst>(CB))
3556       return false;
3557 
3558     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3559     const auto &NoUnwindAA =
3560         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3561     if (!NoUnwindAA.isAssumedNoUnwind())
3562       return false;
3563     if (!NoUnwindAA.isKnownNoUnwind())
3564       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3565 
3566     bool IsKnown;
3567     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3568   }
3569 };
3570 
3571 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3572   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3573       : AAIsDeadValueImpl(IRP, A) {}
3574 
3575   /// See AbstractAttribute::initialize(...).
3576   void initialize(Attributor &A) override {
3577     AAIsDeadValueImpl::initialize(A);
3578 
3579     if (isa<UndefValue>(getAssociatedValue())) {
3580       indicatePessimisticFixpoint();
3581       return;
3582     }
3583 
3584     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3585     if (!isAssumedSideEffectFree(A, I)) {
3586       if (!isa_and_nonnull<StoreInst>(I))
3587         indicatePessimisticFixpoint();
3588       else
3589         removeAssumedBits(HAS_NO_EFFECT);
3590     }
3591   }
3592 
3593   bool isDeadStore(Attributor &A, StoreInst &SI) {
3594     // Lang ref now states volatile store is not UB/dead, let's skip them.
3595     if (SI.isVolatile())
3596       return false;
3597 
3598     bool UsedAssumedInformation = false;
3599     SmallSetVector<Value *, 4> PotentialCopies;
3600     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3601                                              UsedAssumedInformation))
3602       return false;
3603     return llvm::all_of(PotentialCopies, [&](Value *V) {
3604       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3605                              UsedAssumedInformation);
3606     });
3607   }
3608 
3609   /// See AbstractAttribute::getAsStr().
3610   const std::string getAsStr() const override {
3611     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3612     if (isa_and_nonnull<StoreInst>(I))
3613       if (isValidState())
3614         return "assumed-dead-store";
3615     return AAIsDeadValueImpl::getAsStr();
3616   }
3617 
3618   /// See AbstractAttribute::updateImpl(...).
3619   ChangeStatus updateImpl(Attributor &A) override {
3620     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3621     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3622       if (!isDeadStore(A, *SI))
3623         return indicatePessimisticFixpoint();
3624     } else {
3625       if (!isAssumedSideEffectFree(A, I))
3626         return indicatePessimisticFixpoint();
3627       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3628         return indicatePessimisticFixpoint();
3629     }
3630     return ChangeStatus::UNCHANGED;
3631   }
3632 
3633   bool isRemovableStore() const override {
3634     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3635   }
3636 
3637   /// See AbstractAttribute::manifest(...).
3638   ChangeStatus manifest(Attributor &A) override {
3639     Value &V = getAssociatedValue();
3640     if (auto *I = dyn_cast<Instruction>(&V)) {
3641       // If we get here we basically know the users are all dead. We check if
3642       // isAssumedSideEffectFree returns true here again because it might not be
3643       // the case and only the users are dead but the instruction (=call) is
3644       // still needed.
3645       if (isa<StoreInst>(I) ||
3646           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3647         A.deleteAfterManifest(*I);
3648         return ChangeStatus::CHANGED;
3649       }
3650     }
3651     return ChangeStatus::UNCHANGED;
3652   }
3653 
3654   /// See AbstractAttribute::trackStatistics()
3655   void trackStatistics() const override {
3656     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3657   }
3658 };
3659 
3660 struct AAIsDeadArgument : public AAIsDeadFloating {
3661   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3662       : AAIsDeadFloating(IRP, A) {}
3663 
3664   /// See AbstractAttribute::initialize(...).
3665   void initialize(Attributor &A) override {
3666     AAIsDeadFloating::initialize(A);
3667     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3668       indicatePessimisticFixpoint();
3669   }
3670 
3671   /// See AbstractAttribute::manifest(...).
3672   ChangeStatus manifest(Attributor &A) override {
3673     Argument &Arg = *getAssociatedArgument();
3674     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3675       if (A.registerFunctionSignatureRewrite(
3676               Arg, /* ReplacementTypes */ {},
3677               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3678               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3679         return ChangeStatus::CHANGED;
3680       }
3681     return ChangeStatus::UNCHANGED;
3682   }
3683 
3684   /// See AbstractAttribute::trackStatistics()
3685   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3686 };
3687 
3688 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3689   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3690       : AAIsDeadValueImpl(IRP, A) {}
3691 
3692   /// See AbstractAttribute::initialize(...).
3693   void initialize(Attributor &A) override {
3694     AAIsDeadValueImpl::initialize(A);
3695     if (isa<UndefValue>(getAssociatedValue()))
3696       indicatePessimisticFixpoint();
3697   }
3698 
3699   /// See AbstractAttribute::updateImpl(...).
3700   ChangeStatus updateImpl(Attributor &A) override {
3701     // TODO: Once we have call site specific value information we can provide
3702     //       call site specific liveness information and then it makes
3703     //       sense to specialize attributes for call sites arguments instead of
3704     //       redirecting requests to the callee argument.
3705     Argument *Arg = getAssociatedArgument();
3706     if (!Arg)
3707       return indicatePessimisticFixpoint();
3708     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3709     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3710     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3711   }
3712 
3713   /// See AbstractAttribute::manifest(...).
3714   ChangeStatus manifest(Attributor &A) override {
3715     CallBase &CB = cast<CallBase>(getAnchorValue());
3716     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3717     assert(!isa<UndefValue>(U.get()) &&
3718            "Expected undef values to be filtered out!");
3719     UndefValue &UV = *UndefValue::get(U->getType());
3720     if (A.changeUseAfterManifest(U, UV))
3721       return ChangeStatus::CHANGED;
3722     return ChangeStatus::UNCHANGED;
3723   }
3724 
3725   /// See AbstractAttribute::trackStatistics()
3726   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3727 };
3728 
3729 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3730   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3731       : AAIsDeadFloating(IRP, A) {}
3732 
3733   /// See AAIsDead::isAssumedDead().
3734   bool isAssumedDead() const override {
3735     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3736   }
3737 
3738   /// See AbstractAttribute::initialize(...).
3739   void initialize(Attributor &A) override {
3740     AAIsDeadFloating::initialize(A);
3741     if (isa<UndefValue>(getAssociatedValue())) {
3742       indicatePessimisticFixpoint();
3743       return;
3744     }
3745 
3746     // We track this separately as a secondary state.
3747     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3748   }
3749 
3750   /// See AbstractAttribute::updateImpl(...).
3751   ChangeStatus updateImpl(Attributor &A) override {
3752     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3753     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3754       IsAssumedSideEffectFree = false;
3755       Changed = ChangeStatus::CHANGED;
3756     }
3757     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3758       return indicatePessimisticFixpoint();
3759     return Changed;
3760   }
3761 
3762   /// See AbstractAttribute::trackStatistics()
3763   void trackStatistics() const override {
3764     if (IsAssumedSideEffectFree)
3765       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3766     else
3767       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3768   }
3769 
3770   /// See AbstractAttribute::getAsStr().
3771   const std::string getAsStr() const override {
3772     return isAssumedDead()
3773                ? "assumed-dead"
3774                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3775   }
3776 
3777 private:
3778   bool IsAssumedSideEffectFree = true;
3779 };
3780 
3781 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3782   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3783       : AAIsDeadValueImpl(IRP, A) {}
3784 
3785   /// See AbstractAttribute::updateImpl(...).
3786   ChangeStatus updateImpl(Attributor &A) override {
3787 
3788     bool UsedAssumedInformation = false;
3789     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3790                               {Instruction::Ret}, UsedAssumedInformation);
3791 
3792     auto PredForCallSite = [&](AbstractCallSite ACS) {
3793       if (ACS.isCallbackCall() || !ACS.getInstruction())
3794         return false;
3795       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3796     };
3797 
3798     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3799                                 UsedAssumedInformation))
3800       return indicatePessimisticFixpoint();
3801 
3802     return ChangeStatus::UNCHANGED;
3803   }
3804 
3805   /// See AbstractAttribute::manifest(...).
3806   ChangeStatus manifest(Attributor &A) override {
3807     // TODO: Rewrite the signature to return void?
3808     bool AnyChange = false;
3809     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3810     auto RetInstPred = [&](Instruction &I) {
3811       ReturnInst &RI = cast<ReturnInst>(I);
3812       if (!isa<UndefValue>(RI.getReturnValue()))
3813         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3814       return true;
3815     };
3816     bool UsedAssumedInformation = false;
3817     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3818                               UsedAssumedInformation);
3819     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3820   }
3821 
3822   /// See AbstractAttribute::trackStatistics()
3823   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3824 };
3825 
3826 struct AAIsDeadFunction : public AAIsDead {
3827   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3828 
3829   /// See AbstractAttribute::initialize(...).
3830   void initialize(Attributor &A) override {
3831     Function *F = getAnchorScope();
3832     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3833       indicatePessimisticFixpoint();
3834       return;
3835     }
3836     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3837     assumeLive(A, F->getEntryBlock());
3838   }
3839 
3840   /// See AbstractAttribute::getAsStr().
3841   const std::string getAsStr() const override {
3842     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3843            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3844            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3845            std::to_string(KnownDeadEnds.size()) + "]";
3846   }
3847 
3848   /// See AbstractAttribute::manifest(...).
3849   ChangeStatus manifest(Attributor &A) override {
3850     assert(getState().isValidState() &&
3851            "Attempted to manifest an invalid state!");
3852 
3853     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3854     Function &F = *getAnchorScope();
3855 
3856     if (AssumedLiveBlocks.empty()) {
3857       A.deleteAfterManifest(F);
3858       return ChangeStatus::CHANGED;
3859     }
3860 
3861     // Flag to determine if we can change an invoke to a call assuming the
3862     // callee is nounwind. This is not possible if the personality of the
3863     // function allows to catch asynchronous exceptions.
3864     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3865 
3866     KnownDeadEnds.set_union(ToBeExploredFrom);
3867     for (const Instruction *DeadEndI : KnownDeadEnds) {
3868       auto *CB = dyn_cast<CallBase>(DeadEndI);
3869       if (!CB)
3870         continue;
3871       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3872           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3873       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3874       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3875         continue;
3876 
3877       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3878         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3879       else
3880         A.changeToUnreachableAfterManifest(
3881             const_cast<Instruction *>(DeadEndI->getNextNode()));
3882       HasChanged = ChangeStatus::CHANGED;
3883     }
3884 
3885     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3886     for (BasicBlock &BB : F)
3887       if (!AssumedLiveBlocks.count(&BB)) {
3888         A.deleteAfterManifest(BB);
3889         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3890         HasChanged = ChangeStatus::CHANGED;
3891       }
3892 
3893     return HasChanged;
3894   }
3895 
3896   /// See AbstractAttribute::updateImpl(...).
3897   ChangeStatus updateImpl(Attributor &A) override;
3898 
3899   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3900     assert(From->getParent() == getAnchorScope() &&
3901            To->getParent() == getAnchorScope() &&
3902            "Used AAIsDead of the wrong function");
3903     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3904   }
3905 
3906   /// See AbstractAttribute::trackStatistics()
3907   void trackStatistics() const override {}
3908 
3909   /// Returns true if the function is assumed dead.
3910   bool isAssumedDead() const override { return false; }
3911 
3912   /// See AAIsDead::isKnownDead().
3913   bool isKnownDead() const override { return false; }
3914 
3915   /// See AAIsDead::isAssumedDead(BasicBlock *).
3916   bool isAssumedDead(const BasicBlock *BB) const override {
3917     assert(BB->getParent() == getAnchorScope() &&
3918            "BB must be in the same anchor scope function.");
3919 
3920     if (!getAssumed())
3921       return false;
3922     return !AssumedLiveBlocks.count(BB);
3923   }
3924 
3925   /// See AAIsDead::isKnownDead(BasicBlock *).
3926   bool isKnownDead(const BasicBlock *BB) const override {
3927     return getKnown() && isAssumedDead(BB);
3928   }
3929 
3930   /// See AAIsDead::isAssumed(Instruction *I).
3931   bool isAssumedDead(const Instruction *I) const override {
3932     assert(I->getParent()->getParent() == getAnchorScope() &&
3933            "Instruction must be in the same anchor scope function.");
3934 
3935     if (!getAssumed())
3936       return false;
3937 
3938     // If it is not in AssumedLiveBlocks then it for sure dead.
3939     // Otherwise, it can still be after noreturn call in a live block.
3940     if (!AssumedLiveBlocks.count(I->getParent()))
3941       return true;
3942 
3943     // If it is not after a liveness barrier it is live.
3944     const Instruction *PrevI = I->getPrevNode();
3945     while (PrevI) {
3946       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3947         return true;
3948       PrevI = PrevI->getPrevNode();
3949     }
3950     return false;
3951   }
3952 
3953   /// See AAIsDead::isKnownDead(Instruction *I).
3954   bool isKnownDead(const Instruction *I) const override {
3955     return getKnown() && isAssumedDead(I);
3956   }
3957 
3958   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3959   /// that internal function called from \p BB should now be looked at.
3960   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3961     if (!AssumedLiveBlocks.insert(&BB).second)
3962       return false;
3963 
3964     // We assume that all of BB is (probably) live now and if there are calls to
3965     // internal functions we will assume that those are now live as well. This
3966     // is a performance optimization for blocks with calls to a lot of internal
3967     // functions. It can however cause dead functions to be treated as live.
3968     for (const Instruction &I : BB)
3969       if (const auto *CB = dyn_cast<CallBase>(&I))
3970         if (const Function *F = CB->getCalledFunction())
3971           if (F->hasLocalLinkage())
3972             A.markLiveInternalFunction(*F);
3973     return true;
3974   }
3975 
3976   /// Collection of instructions that need to be explored again, e.g., we
3977   /// did assume they do not transfer control to (one of their) successors.
3978   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3979 
3980   /// Collection of instructions that are known to not transfer control.
3981   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3982 
3983   /// Collection of all assumed live edges
3984   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3985 
3986   /// Collection of all assumed live BasicBlocks.
3987   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3988 };
3989 
3990 static bool
3991 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3992                         AbstractAttribute &AA,
3993                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3994   const IRPosition &IPos = IRPosition::callsite_function(CB);
3995 
3996   const auto &NoReturnAA =
3997       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3998   if (NoReturnAA.isAssumedNoReturn())
3999     return !NoReturnAA.isKnownNoReturn();
4000   if (CB.isTerminator())
4001     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4002   else
4003     AliveSuccessors.push_back(CB.getNextNode());
4004   return false;
4005 }
4006 
4007 static bool
4008 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4009                         AbstractAttribute &AA,
4010                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4011   bool UsedAssumedInformation =
4012       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4013 
4014   // First, determine if we can change an invoke to a call assuming the
4015   // callee is nounwind. This is not possible if the personality of the
4016   // function allows to catch asynchronous exceptions.
4017   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4018     AliveSuccessors.push_back(&II.getUnwindDest()->front());
4019   } else {
4020     const IRPosition &IPos = IRPosition::callsite_function(II);
4021     const auto &AANoUnw =
4022         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4023     if (AANoUnw.isAssumedNoUnwind()) {
4024       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4025     } else {
4026       AliveSuccessors.push_back(&II.getUnwindDest()->front());
4027     }
4028   }
4029   return UsedAssumedInformation;
4030 }
4031 
4032 static bool
4033 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4034                         AbstractAttribute &AA,
4035                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4036   bool UsedAssumedInformation = false;
4037   if (BI.getNumSuccessors() == 1) {
4038     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4039   } else {
4040     Optional<Constant *> C =
4041         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4042     if (!C || isa_and_nonnull<UndefValue>(*C)) {
4043       // No value yet, assume both edges are dead.
4044     } else if (isa_and_nonnull<ConstantInt>(*C)) {
4045       const BasicBlock *SuccBB =
4046           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4047       AliveSuccessors.push_back(&SuccBB->front());
4048     } else {
4049       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4050       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4051       UsedAssumedInformation = false;
4052     }
4053   }
4054   return UsedAssumedInformation;
4055 }
4056 
4057 static bool
4058 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4059                         AbstractAttribute &AA,
4060                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4061   bool UsedAssumedInformation = false;
4062   Optional<Constant *> C =
4063       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4064   if (!C || isa_and_nonnull<UndefValue>(C.getValue())) {
4065     // No value yet, assume all edges are dead.
4066   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
4067     for (auto &CaseIt : SI.cases()) {
4068       if (CaseIt.getCaseValue() == C.getValue()) {
4069         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4070         return UsedAssumedInformation;
4071       }
4072     }
4073     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4074     return UsedAssumedInformation;
4075   } else {
4076     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4077       AliveSuccessors.push_back(&SuccBB->front());
4078   }
4079   return UsedAssumedInformation;
4080 }
4081 
4082 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4083   ChangeStatus Change = ChangeStatus::UNCHANGED;
4084 
4085   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4086                     << getAnchorScope()->size() << "] BBs and "
4087                     << ToBeExploredFrom.size() << " exploration points and "
4088                     << KnownDeadEnds.size() << " known dead ends\n");
4089 
4090   // Copy and clear the list of instructions we need to explore from. It is
4091   // refilled with instructions the next update has to look at.
4092   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4093                                                ToBeExploredFrom.end());
4094   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4095 
4096   SmallVector<const Instruction *, 8> AliveSuccessors;
4097   while (!Worklist.empty()) {
4098     const Instruction *I = Worklist.pop_back_val();
4099     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4100 
4101     // Fast forward for uninteresting instructions. We could look for UB here
4102     // though.
4103     while (!I->isTerminator() && !isa<CallBase>(I))
4104       I = I->getNextNode();
4105 
4106     AliveSuccessors.clear();
4107 
4108     bool UsedAssumedInformation = false;
4109     switch (I->getOpcode()) {
4110     // TODO: look for (assumed) UB to backwards propagate "deadness".
4111     default:
4112       assert(I->isTerminator() &&
4113              "Expected non-terminators to be handled already!");
4114       for (const BasicBlock *SuccBB : successors(I->getParent()))
4115         AliveSuccessors.push_back(&SuccBB->front());
4116       break;
4117     case Instruction::Call:
4118       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4119                                                        *this, AliveSuccessors);
4120       break;
4121     case Instruction::Invoke:
4122       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4123                                                        *this, AliveSuccessors);
4124       break;
4125     case Instruction::Br:
4126       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4127                                                        *this, AliveSuccessors);
4128       break;
4129     case Instruction::Switch:
4130       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4131                                                        *this, AliveSuccessors);
4132       break;
4133     }
4134 
4135     if (UsedAssumedInformation) {
4136       NewToBeExploredFrom.insert(I);
4137     } else if (AliveSuccessors.empty() ||
4138                (I->isTerminator() &&
4139                 AliveSuccessors.size() < I->getNumSuccessors())) {
4140       if (KnownDeadEnds.insert(I))
4141         Change = ChangeStatus::CHANGED;
4142     }
4143 
4144     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4145                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4146                       << UsedAssumedInformation << "\n");
4147 
4148     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4149       if (!I->isTerminator()) {
4150         assert(AliveSuccessors.size() == 1 &&
4151                "Non-terminator expected to have a single successor!");
4152         Worklist.push_back(AliveSuccessor);
4153       } else {
4154         // record the assumed live edge
4155         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4156         if (AssumedLiveEdges.insert(Edge).second)
4157           Change = ChangeStatus::CHANGED;
4158         if (assumeLive(A, *AliveSuccessor->getParent()))
4159           Worklist.push_back(AliveSuccessor);
4160       }
4161     }
4162   }
4163 
4164   // Check if the content of ToBeExploredFrom changed, ignore the order.
4165   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4166       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4167         return !ToBeExploredFrom.count(I);
4168       })) {
4169     Change = ChangeStatus::CHANGED;
4170     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4171   }
4172 
4173   // If we know everything is live there is no need to query for liveness.
4174   // Instead, indicating a pessimistic fixpoint will cause the state to be
4175   // "invalid" and all queries to be answered conservatively without lookups.
4176   // To be in this state we have to (1) finished the exploration and (3) not
4177   // discovered any non-trivial dead end and (2) not ruled unreachable code
4178   // dead.
4179   if (ToBeExploredFrom.empty() &&
4180       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4181       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4182         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4183       }))
4184     return indicatePessimisticFixpoint();
4185   return Change;
4186 }
4187 
4188 /// Liveness information for a call sites.
4189 struct AAIsDeadCallSite final : AAIsDeadFunction {
4190   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4191       : AAIsDeadFunction(IRP, A) {}
4192 
4193   /// See AbstractAttribute::initialize(...).
4194   void initialize(Attributor &A) override {
4195     // TODO: Once we have call site specific value information we can provide
4196     //       call site specific liveness information and then it makes
4197     //       sense to specialize attributes for call sites instead of
4198     //       redirecting requests to the callee.
4199     llvm_unreachable("Abstract attributes for liveness are not "
4200                      "supported for call sites yet!");
4201   }
4202 
4203   /// See AbstractAttribute::updateImpl(...).
4204   ChangeStatus updateImpl(Attributor &A) override {
4205     return indicatePessimisticFixpoint();
4206   }
4207 
4208   /// See AbstractAttribute::trackStatistics()
4209   void trackStatistics() const override {}
4210 };
4211 } // namespace
4212 
4213 /// -------------------- Dereferenceable Argument Attribute --------------------
4214 
4215 namespace {
4216 struct AADereferenceableImpl : AADereferenceable {
4217   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4218       : AADereferenceable(IRP, A) {}
4219   using StateType = DerefState;
4220 
4221   /// See AbstractAttribute::initialize(...).
4222   void initialize(Attributor &A) override {
4223     Value &V = *getAssociatedValue().stripPointerCasts();
4224     SmallVector<Attribute, 4> Attrs;
4225     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4226              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4227     for (const Attribute &Attr : Attrs)
4228       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4229 
4230     const IRPosition &IRP = this->getIRPosition();
4231     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4232 
4233     bool CanBeNull, CanBeFreed;
4234     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4235         A.getDataLayout(), CanBeNull, CanBeFreed));
4236 
4237     bool IsFnInterface = IRP.isFnInterfaceKind();
4238     Function *FnScope = IRP.getAnchorScope();
4239     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4240       indicatePessimisticFixpoint();
4241       return;
4242     }
4243 
4244     if (Instruction *CtxI = getCtxI())
4245       followUsesInMBEC(*this, A, getState(), *CtxI);
4246   }
4247 
4248   /// See AbstractAttribute::getState()
4249   /// {
4250   StateType &getState() override { return *this; }
4251   const StateType &getState() const override { return *this; }
4252   /// }
4253 
4254   /// Helper function for collecting accessed bytes in must-be-executed-context
4255   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4256                               DerefState &State) {
4257     const Value *UseV = U->get();
4258     if (!UseV->getType()->isPointerTy())
4259       return;
4260 
4261     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4262     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4263       return;
4264 
4265     int64_t Offset;
4266     const Value *Base = GetPointerBaseWithConstantOffset(
4267         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4268     if (Base && Base == &getAssociatedValue())
4269       State.addAccessedBytes(Offset, Loc->Size.getValue());
4270   }
4271 
4272   /// See followUsesInMBEC
4273   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4274                        AADereferenceable::StateType &State) {
4275     bool IsNonNull = false;
4276     bool TrackUse = false;
4277     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4278         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4279     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4280                       << " for instruction " << *I << "\n");
4281 
4282     addAccessedBytesForUse(A, U, I, State);
4283     State.takeKnownDerefBytesMaximum(DerefBytes);
4284     return TrackUse;
4285   }
4286 
4287   /// See AbstractAttribute::manifest(...).
4288   ChangeStatus manifest(Attributor &A) override {
4289     ChangeStatus Change = AADereferenceable::manifest(A);
4290     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4291       removeAttrs({Attribute::DereferenceableOrNull});
4292       return ChangeStatus::CHANGED;
4293     }
4294     return Change;
4295   }
4296 
4297   void getDeducedAttributes(LLVMContext &Ctx,
4298                             SmallVectorImpl<Attribute> &Attrs) const override {
4299     // TODO: Add *_globally support
4300     if (isAssumedNonNull())
4301       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4302           Ctx, getAssumedDereferenceableBytes()));
4303     else
4304       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4305           Ctx, getAssumedDereferenceableBytes()));
4306   }
4307 
4308   /// See AbstractAttribute::getAsStr().
4309   const std::string getAsStr() const override {
4310     if (!getAssumedDereferenceableBytes())
4311       return "unknown-dereferenceable";
4312     return std::string("dereferenceable") +
4313            (isAssumedNonNull() ? "" : "_or_null") +
4314            (isAssumedGlobal() ? "_globally" : "") + "<" +
4315            std::to_string(getKnownDereferenceableBytes()) + "-" +
4316            std::to_string(getAssumedDereferenceableBytes()) + ">";
4317   }
4318 };
4319 
4320 /// Dereferenceable attribute for a floating value.
4321 struct AADereferenceableFloating : AADereferenceableImpl {
4322   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4323       : AADereferenceableImpl(IRP, A) {}
4324 
4325   /// See AbstractAttribute::updateImpl(...).
4326   ChangeStatus updateImpl(Attributor &A) override {
4327     const DataLayout &DL = A.getDataLayout();
4328 
4329     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4330                             bool Stripped) -> bool {
4331       unsigned IdxWidth =
4332           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4333       APInt Offset(IdxWidth, 0);
4334       const Value *Base = stripAndAccumulateOffsets(
4335           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4336           /* AllowNonInbounds */ true);
4337 
4338       const auto &AA = A.getAAFor<AADereferenceable>(
4339           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4340       int64_t DerefBytes = 0;
4341       if (!Stripped && this == &AA) {
4342         // Use IR information if we did not strip anything.
4343         // TODO: track globally.
4344         bool CanBeNull, CanBeFreed;
4345         DerefBytes =
4346             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4347         T.GlobalState.indicatePessimisticFixpoint();
4348       } else {
4349         const DerefState &DS = AA.getState();
4350         DerefBytes = DS.DerefBytesState.getAssumed();
4351         T.GlobalState &= DS.GlobalState;
4352       }
4353 
4354       // For now we do not try to "increase" dereferenceability due to negative
4355       // indices as we first have to come up with code to deal with loops and
4356       // for overflows of the dereferenceable bytes.
4357       int64_t OffsetSExt = Offset.getSExtValue();
4358       if (OffsetSExt < 0)
4359         OffsetSExt = 0;
4360 
4361       T.takeAssumedDerefBytesMinimum(
4362           std::max(int64_t(0), DerefBytes - OffsetSExt));
4363 
4364       if (this == &AA) {
4365         if (!Stripped) {
4366           // If nothing was stripped IR information is all we got.
4367           T.takeKnownDerefBytesMaximum(
4368               std::max(int64_t(0), DerefBytes - OffsetSExt));
4369           T.indicatePessimisticFixpoint();
4370         } else if (OffsetSExt > 0) {
4371           // If something was stripped but there is circular reasoning we look
4372           // for the offset. If it is positive we basically decrease the
4373           // dereferenceable bytes in a circluar loop now, which will simply
4374           // drive them down to the known value in a very slow way which we
4375           // can accelerate.
4376           T.indicatePessimisticFixpoint();
4377         }
4378       }
4379 
4380       return T.isValidState();
4381     };
4382 
4383     DerefState T;
4384     bool UsedAssumedInformation = false;
4385     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4386                                            VisitValueCB, getCtxI(),
4387                                            UsedAssumedInformation))
4388       return indicatePessimisticFixpoint();
4389 
4390     return clampStateAndIndicateChange(getState(), T);
4391   }
4392 
4393   /// See AbstractAttribute::trackStatistics()
4394   void trackStatistics() const override {
4395     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4396   }
4397 };
4398 
4399 /// Dereferenceable attribute for a return value.
4400 struct AADereferenceableReturned final
4401     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4402   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4403       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4404             IRP, A) {}
4405 
4406   /// See AbstractAttribute::trackStatistics()
4407   void trackStatistics() const override {
4408     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4409   }
4410 };
4411 
4412 /// Dereferenceable attribute for an argument
4413 struct AADereferenceableArgument final
4414     : AAArgumentFromCallSiteArguments<AADereferenceable,
4415                                       AADereferenceableImpl> {
4416   using Base =
4417       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4418   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4419       : Base(IRP, A) {}
4420 
4421   /// See AbstractAttribute::trackStatistics()
4422   void trackStatistics() const override {
4423     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4424   }
4425 };
4426 
4427 /// Dereferenceable attribute for a call site argument.
4428 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4429   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4430       : AADereferenceableFloating(IRP, A) {}
4431 
4432   /// See AbstractAttribute::trackStatistics()
4433   void trackStatistics() const override {
4434     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4435   }
4436 };
4437 
4438 /// Dereferenceable attribute deduction for a call site return value.
4439 struct AADereferenceableCallSiteReturned final
4440     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4441   using Base =
4442       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4443   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4444       : Base(IRP, A) {}
4445 
4446   /// See AbstractAttribute::trackStatistics()
4447   void trackStatistics() const override {
4448     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4449   }
4450 };
4451 } // namespace
4452 
4453 // ------------------------ Align Argument Attribute ------------------------
4454 
4455 namespace {
4456 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4457                                     Value &AssociatedValue, const Use *U,
4458                                     const Instruction *I, bool &TrackUse) {
4459   // We need to follow common pointer manipulation uses to the accesses they
4460   // feed into.
4461   if (isa<CastInst>(I)) {
4462     // Follow all but ptr2int casts.
4463     TrackUse = !isa<PtrToIntInst>(I);
4464     return 0;
4465   }
4466   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4467     if (GEP->hasAllConstantIndices())
4468       TrackUse = true;
4469     return 0;
4470   }
4471 
4472   MaybeAlign MA;
4473   if (const auto *CB = dyn_cast<CallBase>(I)) {
4474     if (CB->isBundleOperand(U) || CB->isCallee(U))
4475       return 0;
4476 
4477     unsigned ArgNo = CB->getArgOperandNo(U);
4478     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4479     // As long as we only use known information there is no need to track
4480     // dependences here.
4481     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4482     MA = MaybeAlign(AlignAA.getKnownAlign());
4483   }
4484 
4485   const DataLayout &DL = A.getDataLayout();
4486   const Value *UseV = U->get();
4487   if (auto *SI = dyn_cast<StoreInst>(I)) {
4488     if (SI->getPointerOperand() == UseV)
4489       MA = SI->getAlign();
4490   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4491     if (LI->getPointerOperand() == UseV)
4492       MA = LI->getAlign();
4493   }
4494 
4495   if (!MA || *MA <= QueryingAA.getKnownAlign())
4496     return 0;
4497 
4498   unsigned Alignment = MA->value();
4499   int64_t Offset;
4500 
4501   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4502     if (Base == &AssociatedValue) {
4503       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4504       // So we can say that the maximum power of two which is a divisor of
4505       // gcd(Offset, Alignment) is an alignment.
4506 
4507       uint32_t gcd =
4508           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4509       Alignment = llvm::PowerOf2Floor(gcd);
4510     }
4511   }
4512 
4513   return Alignment;
4514 }
4515 
4516 struct AAAlignImpl : AAAlign {
4517   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4518 
4519   /// See AbstractAttribute::initialize(...).
4520   void initialize(Attributor &A) override {
4521     SmallVector<Attribute, 4> Attrs;
4522     getAttrs({Attribute::Alignment}, Attrs);
4523     for (const Attribute &Attr : Attrs)
4524       takeKnownMaximum(Attr.getValueAsInt());
4525 
4526     Value &V = *getAssociatedValue().stripPointerCasts();
4527     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4528 
4529     if (getIRPosition().isFnInterfaceKind() &&
4530         (!getAnchorScope() ||
4531          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4532       indicatePessimisticFixpoint();
4533       return;
4534     }
4535 
4536     if (Instruction *CtxI = getCtxI())
4537       followUsesInMBEC(*this, A, getState(), *CtxI);
4538   }
4539 
4540   /// See AbstractAttribute::manifest(...).
4541   ChangeStatus manifest(Attributor &A) override {
4542     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4543 
4544     // Check for users that allow alignment annotations.
4545     Value &AssociatedValue = getAssociatedValue();
4546     for (const Use &U : AssociatedValue.uses()) {
4547       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4548         if (SI->getPointerOperand() == &AssociatedValue)
4549           if (SI->getAlign() < getAssumedAlign()) {
4550             STATS_DECLTRACK(AAAlign, Store,
4551                             "Number of times alignment added to a store");
4552             SI->setAlignment(getAssumedAlign());
4553             LoadStoreChanged = ChangeStatus::CHANGED;
4554           }
4555       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4556         if (LI->getPointerOperand() == &AssociatedValue)
4557           if (LI->getAlign() < getAssumedAlign()) {
4558             LI->setAlignment(getAssumedAlign());
4559             STATS_DECLTRACK(AAAlign, Load,
4560                             "Number of times alignment added to a load");
4561             LoadStoreChanged = ChangeStatus::CHANGED;
4562           }
4563       }
4564     }
4565 
4566     ChangeStatus Changed = AAAlign::manifest(A);
4567 
4568     Align InheritAlign =
4569         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4570     if (InheritAlign >= getAssumedAlign())
4571       return LoadStoreChanged;
4572     return Changed | LoadStoreChanged;
4573   }
4574 
4575   // TODO: Provide a helper to determine the implied ABI alignment and check in
4576   //       the existing manifest method and a new one for AAAlignImpl that value
4577   //       to avoid making the alignment explicit if it did not improve.
4578 
4579   /// See AbstractAttribute::getDeducedAttributes
4580   virtual void
4581   getDeducedAttributes(LLVMContext &Ctx,
4582                        SmallVectorImpl<Attribute> &Attrs) const override {
4583     if (getAssumedAlign() > 1)
4584       Attrs.emplace_back(
4585           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4586   }
4587 
4588   /// See followUsesInMBEC
4589   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4590                        AAAlign::StateType &State) {
4591     bool TrackUse = false;
4592 
4593     unsigned int KnownAlign =
4594         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4595     State.takeKnownMaximum(KnownAlign);
4596 
4597     return TrackUse;
4598   }
4599 
4600   /// See AbstractAttribute::getAsStr().
4601   const std::string getAsStr() const override {
4602     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4603            std::to_string(getAssumedAlign().value()) + ">";
4604   }
4605 };
4606 
4607 /// Align attribute for a floating value.
4608 struct AAAlignFloating : AAAlignImpl {
4609   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4610 
4611   /// See AbstractAttribute::updateImpl(...).
4612   ChangeStatus updateImpl(Attributor &A) override {
4613     const DataLayout &DL = A.getDataLayout();
4614 
4615     auto VisitValueCB = [&](Value &V, const Instruction *,
4616                             AAAlign::StateType &T, bool Stripped) -> bool {
4617       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4618         return true;
4619       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4620                                            DepClassTy::REQUIRED);
4621       if (!Stripped && this == &AA) {
4622         int64_t Offset;
4623         unsigned Alignment = 1;
4624         if (const Value *Base =
4625                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4626           // TODO: Use AAAlign for the base too.
4627           Align PA = Base->getPointerAlignment(DL);
4628           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4629           // So we can say that the maximum power of two which is a divisor of
4630           // gcd(Offset, Alignment) is an alignment.
4631 
4632           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4633                                                uint32_t(PA.value()));
4634           Alignment = llvm::PowerOf2Floor(gcd);
4635         } else {
4636           Alignment = V.getPointerAlignment(DL).value();
4637         }
4638         // Use only IR information if we did not strip anything.
4639         T.takeKnownMaximum(Alignment);
4640         T.indicatePessimisticFixpoint();
4641       } else {
4642         // Use abstract attribute information.
4643         const AAAlign::StateType &DS = AA.getState();
4644         T ^= DS;
4645       }
4646       return T.isValidState();
4647     };
4648 
4649     StateType T;
4650     bool UsedAssumedInformation = false;
4651     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4652                                           VisitValueCB, getCtxI(),
4653                                           UsedAssumedInformation))
4654       return indicatePessimisticFixpoint();
4655 
4656     // TODO: If we know we visited all incoming values, thus no are assumed
4657     // dead, we can take the known information from the state T.
4658     return clampStateAndIndicateChange(getState(), T);
4659   }
4660 
4661   /// See AbstractAttribute::trackStatistics()
4662   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4663 };
4664 
4665 /// Align attribute for function return value.
4666 struct AAAlignReturned final
4667     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4668   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4669   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4670 
4671   /// See AbstractAttribute::initialize(...).
4672   void initialize(Attributor &A) override {
4673     Base::initialize(A);
4674     Function *F = getAssociatedFunction();
4675     if (!F || F->isDeclaration())
4676       indicatePessimisticFixpoint();
4677   }
4678 
4679   /// See AbstractAttribute::trackStatistics()
4680   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4681 };
4682 
4683 /// Align attribute for function argument.
4684 struct AAAlignArgument final
4685     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4686   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4687   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4688 
4689   /// See AbstractAttribute::manifest(...).
4690   ChangeStatus manifest(Attributor &A) override {
4691     // If the associated argument is involved in a must-tail call we give up
4692     // because we would need to keep the argument alignments of caller and
4693     // callee in-sync. Just does not seem worth the trouble right now.
4694     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4695       return ChangeStatus::UNCHANGED;
4696     return Base::manifest(A);
4697   }
4698 
4699   /// See AbstractAttribute::trackStatistics()
4700   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4701 };
4702 
4703 struct AAAlignCallSiteArgument final : AAAlignFloating {
4704   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4705       : AAAlignFloating(IRP, A) {}
4706 
4707   /// See AbstractAttribute::manifest(...).
4708   ChangeStatus manifest(Attributor &A) override {
4709     // If the associated argument is involved in a must-tail call we give up
4710     // because we would need to keep the argument alignments of caller and
4711     // callee in-sync. Just does not seem worth the trouble right now.
4712     if (Argument *Arg = getAssociatedArgument())
4713       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4714         return ChangeStatus::UNCHANGED;
4715     ChangeStatus Changed = AAAlignImpl::manifest(A);
4716     Align InheritAlign =
4717         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4718     if (InheritAlign >= getAssumedAlign())
4719       Changed = ChangeStatus::UNCHANGED;
4720     return Changed;
4721   }
4722 
4723   /// See AbstractAttribute::updateImpl(Attributor &A).
4724   ChangeStatus updateImpl(Attributor &A) override {
4725     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4726     if (Argument *Arg = getAssociatedArgument()) {
4727       // We only take known information from the argument
4728       // so we do not need to track a dependence.
4729       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4730           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4731       takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4732     }
4733     return Changed;
4734   }
4735 
4736   /// See AbstractAttribute::trackStatistics()
4737   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4738 };
4739 
4740 /// Align attribute deduction for a call site return value.
4741 struct AAAlignCallSiteReturned final
4742     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4743   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4744   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4745       : Base(IRP, A) {}
4746 
4747   /// See AbstractAttribute::initialize(...).
4748   void initialize(Attributor &A) override {
4749     Base::initialize(A);
4750     Function *F = getAssociatedFunction();
4751     if (!F || F->isDeclaration())
4752       indicatePessimisticFixpoint();
4753   }
4754 
4755   /// See AbstractAttribute::trackStatistics()
4756   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4757 };
4758 } // namespace
4759 
4760 /// ------------------ Function No-Return Attribute ----------------------------
4761 namespace {
4762 struct AANoReturnImpl : public AANoReturn {
4763   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4764 
4765   /// See AbstractAttribute::initialize(...).
4766   void initialize(Attributor &A) override {
4767     AANoReturn::initialize(A);
4768     Function *F = getAssociatedFunction();
4769     if (!F || F->isDeclaration())
4770       indicatePessimisticFixpoint();
4771   }
4772 
4773   /// See AbstractAttribute::getAsStr().
4774   const std::string getAsStr() const override {
4775     return getAssumed() ? "noreturn" : "may-return";
4776   }
4777 
4778   /// See AbstractAttribute::updateImpl(Attributor &A).
4779   virtual ChangeStatus updateImpl(Attributor &A) override {
4780     auto CheckForNoReturn = [](Instruction &) { return false; };
4781     bool UsedAssumedInformation = false;
4782     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4783                                    {(unsigned)Instruction::Ret},
4784                                    UsedAssumedInformation))
4785       return indicatePessimisticFixpoint();
4786     return ChangeStatus::UNCHANGED;
4787   }
4788 };
4789 
4790 struct AANoReturnFunction final : AANoReturnImpl {
4791   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4792       : AANoReturnImpl(IRP, A) {}
4793 
4794   /// See AbstractAttribute::trackStatistics()
4795   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4796 };
4797 
4798 /// NoReturn attribute deduction for a call sites.
4799 struct AANoReturnCallSite final : AANoReturnImpl {
4800   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4801       : AANoReturnImpl(IRP, A) {}
4802 
4803   /// See AbstractAttribute::initialize(...).
4804   void initialize(Attributor &A) override {
4805     AANoReturnImpl::initialize(A);
4806     if (Function *F = getAssociatedFunction()) {
4807       const IRPosition &FnPos = IRPosition::function(*F);
4808       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4809       if (!FnAA.isAssumedNoReturn())
4810         indicatePessimisticFixpoint();
4811     }
4812   }
4813 
4814   /// See AbstractAttribute::updateImpl(...).
4815   ChangeStatus updateImpl(Attributor &A) override {
4816     // TODO: Once we have call site specific value information we can provide
4817     //       call site specific liveness information and then it makes
4818     //       sense to specialize attributes for call sites arguments instead of
4819     //       redirecting requests to the callee argument.
4820     Function *F = getAssociatedFunction();
4821     const IRPosition &FnPos = IRPosition::function(*F);
4822     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4823     return clampStateAndIndicateChange(getState(), FnAA.getState());
4824   }
4825 
4826   /// See AbstractAttribute::trackStatistics()
4827   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4828 };
4829 } // namespace
4830 
4831 /// ----------------------- Instance Info ---------------------------------
4832 
4833 namespace {
4834 /// A class to hold the state of for no-capture attributes.
4835 struct AAInstanceInfoImpl : public AAInstanceInfo {
4836   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4837       : AAInstanceInfo(IRP, A) {}
4838 
4839   /// See AbstractAttribute::initialize(...).
4840   void initialize(Attributor &A) override {
4841     Value &V = getAssociatedValue();
4842     if (auto *C = dyn_cast<Constant>(&V)) {
4843       if (C->isThreadDependent())
4844         indicatePessimisticFixpoint();
4845       else
4846         indicateOptimisticFixpoint();
4847       return;
4848     }
4849     if (auto *CB = dyn_cast<CallBase>(&V))
4850       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4851           !CB->mayReadFromMemory()) {
4852         indicateOptimisticFixpoint();
4853         return;
4854       }
4855   }
4856 
4857   /// See AbstractAttribute::updateImpl(...).
4858   ChangeStatus updateImpl(Attributor &A) override {
4859     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4860 
4861     Value &V = getAssociatedValue();
4862     const Function *Scope = nullptr;
4863     if (auto *I = dyn_cast<Instruction>(&V))
4864       Scope = I->getFunction();
4865     if (auto *A = dyn_cast<Argument>(&V)) {
4866       Scope = A->getParent();
4867       if (!Scope->hasLocalLinkage())
4868         return Changed;
4869     }
4870     if (!Scope)
4871       return indicateOptimisticFixpoint();
4872 
4873     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4874         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4875     if (NoRecurseAA.isAssumedNoRecurse())
4876       return Changed;
4877 
4878     auto UsePred = [&](const Use &U, bool &Follow) {
4879       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4880       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4881           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4882         Follow = true;
4883         return true;
4884       }
4885       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4886           (isa<StoreInst>(UserI) &&
4887            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4888         return true;
4889       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4890         // This check is not guaranteeing uniqueness but for now that we cannot
4891         // end up with two versions of \p U thinking it was one.
4892         if (!CB->getCalledFunction() ||
4893             !CB->getCalledFunction()->hasLocalLinkage())
4894           return true;
4895         if (!CB->isArgOperand(&U))
4896           return false;
4897         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4898             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4899             DepClassTy::OPTIONAL);
4900         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4901           return false;
4902         // If this call base might reach the scope again we might forward the
4903         // argument back here. This is very conservative.
4904         if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4905           return false;
4906         return true;
4907       }
4908       return false;
4909     };
4910 
4911     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4912       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4913         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4914         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4915           return true;
4916         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4917             *SI->getFunction());
4918         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4919           return true;
4920       }
4921       return false;
4922     };
4923 
4924     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4925                            DepClassTy::OPTIONAL,
4926                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4927       return indicatePessimisticFixpoint();
4928 
4929     return Changed;
4930   }
4931 
4932   /// See AbstractState::getAsStr().
4933   const std::string getAsStr() const override {
4934     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4935   }
4936 
4937   /// See AbstractAttribute::trackStatistics()
4938   void trackStatistics() const override {}
4939 };
4940 
4941 /// InstanceInfo attribute for floating values.
4942 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4943   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4944       : AAInstanceInfoImpl(IRP, A) {}
4945 };
4946 
4947 /// NoCapture attribute for function arguments.
4948 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4949   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4950       : AAInstanceInfoFloating(IRP, A) {}
4951 };
4952 
4953 /// InstanceInfo attribute for call site arguments.
4954 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4955   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4956       : AAInstanceInfoImpl(IRP, A) {}
4957 
4958   /// See AbstractAttribute::updateImpl(...).
4959   ChangeStatus updateImpl(Attributor &A) override {
4960     // TODO: Once we have call site specific value information we can provide
4961     //       call site specific liveness information and then it makes
4962     //       sense to specialize attributes for call sites arguments instead of
4963     //       redirecting requests to the callee argument.
4964     Argument *Arg = getAssociatedArgument();
4965     if (!Arg)
4966       return indicatePessimisticFixpoint();
4967     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4968     auto &ArgAA =
4969         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4970     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4971   }
4972 };
4973 
4974 /// InstanceInfo attribute for function return value.
4975 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4976   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4977       : AAInstanceInfoImpl(IRP, A) {
4978     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4979   }
4980 
4981   /// See AbstractAttribute::initialize(...).
4982   void initialize(Attributor &A) override {
4983     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4984   }
4985 
4986   /// See AbstractAttribute::updateImpl(...).
4987   ChangeStatus updateImpl(Attributor &A) override {
4988     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4989   }
4990 };
4991 
4992 /// InstanceInfo attribute deduction for a call site return value.
4993 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4994   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4995       : AAInstanceInfoFloating(IRP, A) {}
4996 };
4997 } // namespace
4998 
4999 /// ----------------------- Variable Capturing ---------------------------------
5000 
5001 namespace {
5002 /// A class to hold the state of for no-capture attributes.
5003 struct AANoCaptureImpl : public AANoCapture {
5004   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5005 
5006   /// See AbstractAttribute::initialize(...).
5007   void initialize(Attributor &A) override {
5008     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
5009       indicateOptimisticFixpoint();
5010       return;
5011     }
5012     Function *AnchorScope = getAnchorScope();
5013     if (isFnInterfaceKind() &&
5014         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
5015       indicatePessimisticFixpoint();
5016       return;
5017     }
5018 
5019     // You cannot "capture" null in the default address space.
5020     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5021         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5022       indicateOptimisticFixpoint();
5023       return;
5024     }
5025 
5026     const Function *F =
5027         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5028 
5029     // Check what state the associated function can actually capture.
5030     if (F)
5031       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5032     else
5033       indicatePessimisticFixpoint();
5034   }
5035 
5036   /// See AbstractAttribute::updateImpl(...).
5037   ChangeStatus updateImpl(Attributor &A) override;
5038 
5039   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5040   virtual void
5041   getDeducedAttributes(LLVMContext &Ctx,
5042                        SmallVectorImpl<Attribute> &Attrs) const override {
5043     if (!isAssumedNoCaptureMaybeReturned())
5044       return;
5045 
5046     if (isArgumentPosition()) {
5047       if (isAssumedNoCapture())
5048         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5049       else if (ManifestInternal)
5050         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5051     }
5052   }
5053 
5054   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5055   /// depending on the ability of the function associated with \p IRP to capture
5056   /// state in memory and through "returning/throwing", respectively.
5057   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5058                                                    const Function &F,
5059                                                    BitIntegerState &State) {
5060     // TODO: Once we have memory behavior attributes we should use them here.
5061 
5062     // If we know we cannot communicate or write to memory, we do not care about
5063     // ptr2int anymore.
5064     if (F.onlyReadsMemory() && F.doesNotThrow() &&
5065         F.getReturnType()->isVoidTy()) {
5066       State.addKnownBits(NO_CAPTURE);
5067       return;
5068     }
5069 
5070     // A function cannot capture state in memory if it only reads memory, it can
5071     // however return/throw state and the state might be influenced by the
5072     // pointer value, e.g., loading from a returned pointer might reveal a bit.
5073     if (F.onlyReadsMemory())
5074       State.addKnownBits(NOT_CAPTURED_IN_MEM);
5075 
5076     // A function cannot communicate state back if it does not through
5077     // exceptions and doesn not return values.
5078     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5079       State.addKnownBits(NOT_CAPTURED_IN_RET);
5080 
5081     // Check existing "returned" attributes.
5082     int ArgNo = IRP.getCalleeArgNo();
5083     if (F.doesNotThrow() && ArgNo >= 0) {
5084       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5085         if (F.hasParamAttribute(u, Attribute::Returned)) {
5086           if (u == unsigned(ArgNo))
5087             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5088           else if (F.onlyReadsMemory())
5089             State.addKnownBits(NO_CAPTURE);
5090           else
5091             State.addKnownBits(NOT_CAPTURED_IN_RET);
5092           break;
5093         }
5094     }
5095   }
5096 
5097   /// See AbstractState::getAsStr().
5098   const std::string getAsStr() const override {
5099     if (isKnownNoCapture())
5100       return "known not-captured";
5101     if (isAssumedNoCapture())
5102       return "assumed not-captured";
5103     if (isKnownNoCaptureMaybeReturned())
5104       return "known not-captured-maybe-returned";
5105     if (isAssumedNoCaptureMaybeReturned())
5106       return "assumed not-captured-maybe-returned";
5107     return "assumed-captured";
5108   }
5109 
5110   /// Check the use \p U and update \p State accordingly. Return true if we
5111   /// should continue to update the state.
5112   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5113                 bool &Follow) {
5114     Instruction *UInst = cast<Instruction>(U.getUser());
5115     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5116                       << *UInst << "\n");
5117 
5118     // Deal with ptr2int by following uses.
5119     if (isa<PtrToIntInst>(UInst)) {
5120       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5121       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5122                           /* Return */ true);
5123     }
5124 
5125     // For stores we already checked if we can follow them, if they make it
5126     // here we give up.
5127     if (isa<StoreInst>(UInst))
5128       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5129                           /* Return */ false);
5130 
5131     // Explicitly catch return instructions.
5132     if (isa<ReturnInst>(UInst)) {
5133       if (UInst->getFunction() == getAnchorScope())
5134         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5135                             /* Return */ true);
5136       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5137                           /* Return */ true);
5138     }
5139 
5140     // For now we only use special logic for call sites. However, the tracker
5141     // itself knows about a lot of other non-capturing cases already.
5142     auto *CB = dyn_cast<CallBase>(UInst);
5143     if (!CB || !CB->isArgOperand(&U))
5144       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5145                           /* Return */ true);
5146 
5147     unsigned ArgNo = CB->getArgOperandNo(&U);
5148     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5149     // If we have a abstract no-capture attribute for the argument we can use
5150     // it to justify a non-capture attribute here. This allows recursion!
5151     auto &ArgNoCaptureAA =
5152         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5153     if (ArgNoCaptureAA.isAssumedNoCapture())
5154       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5155                           /* Return */ false);
5156     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5157       Follow = true;
5158       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5159                           /* Return */ false);
5160     }
5161 
5162     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5163     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5164                         /* Return */ true);
5165   }
5166 
5167   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5168   /// \p CapturedInRet, then return true if we should continue updating the
5169   /// state.
5170   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5171                            bool CapturedInInt, bool CapturedInRet) {
5172     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5173                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5174     if (CapturedInMem)
5175       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5176     if (CapturedInInt)
5177       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5178     if (CapturedInRet)
5179       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5180     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5181   }
5182 };
5183 
5184 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5185   const IRPosition &IRP = getIRPosition();
5186   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5187                                   : &IRP.getAssociatedValue();
5188   if (!V)
5189     return indicatePessimisticFixpoint();
5190 
5191   const Function *F =
5192       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5193   assert(F && "Expected a function!");
5194   const IRPosition &FnPos = IRPosition::function(*F);
5195 
5196   AANoCapture::StateType T;
5197 
5198   // Readonly means we cannot capture through memory.
5199   bool IsKnown;
5200   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5201     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5202     if (IsKnown)
5203       addKnownBits(NOT_CAPTURED_IN_MEM);
5204   }
5205 
5206   // Make sure all returned values are different than the underlying value.
5207   // TODO: we could do this in a more sophisticated way inside
5208   //       AAReturnedValues, e.g., track all values that escape through returns
5209   //       directly somehow.
5210   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5211     if (!RVAA.getState().isValidState())
5212       return false;
5213     bool SeenConstant = false;
5214     for (auto &It : RVAA.returned_values()) {
5215       if (isa<Constant>(It.first)) {
5216         if (SeenConstant)
5217           return false;
5218         SeenConstant = true;
5219       } else if (!isa<Argument>(It.first) ||
5220                  It.first == getAssociatedArgument())
5221         return false;
5222     }
5223     return true;
5224   };
5225 
5226   const auto &NoUnwindAA =
5227       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5228   if (NoUnwindAA.isAssumedNoUnwind()) {
5229     bool IsVoidTy = F->getReturnType()->isVoidTy();
5230     const AAReturnedValues *RVAA =
5231         IsVoidTy ? nullptr
5232                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5233 
5234                                                  DepClassTy::OPTIONAL);
5235     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5236       T.addKnownBits(NOT_CAPTURED_IN_RET);
5237       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5238         return ChangeStatus::UNCHANGED;
5239       if (NoUnwindAA.isKnownNoUnwind() &&
5240           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5241         addKnownBits(NOT_CAPTURED_IN_RET);
5242         if (isKnown(NOT_CAPTURED_IN_MEM))
5243           return indicateOptimisticFixpoint();
5244       }
5245     }
5246   }
5247 
5248   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5249     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5250         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5251     return DerefAA.getAssumedDereferenceableBytes();
5252   };
5253 
5254   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5255     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5256     case UseCaptureKind::NO_CAPTURE:
5257       return true;
5258     case UseCaptureKind::MAY_CAPTURE:
5259       return checkUse(A, T, U, Follow);
5260     case UseCaptureKind::PASSTHROUGH:
5261       Follow = true;
5262       return true;
5263     }
5264     llvm_unreachable("Unexpected use capture kind!");
5265   };
5266 
5267   if (!A.checkForAllUses(UseCheck, *this, *V))
5268     return indicatePessimisticFixpoint();
5269 
5270   AANoCapture::StateType &S = getState();
5271   auto Assumed = S.getAssumed();
5272   S.intersectAssumedBits(T.getAssumed());
5273   if (!isAssumedNoCaptureMaybeReturned())
5274     return indicatePessimisticFixpoint();
5275   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5276                                    : ChangeStatus::CHANGED;
5277 }
5278 
5279 /// NoCapture attribute for function arguments.
5280 struct AANoCaptureArgument final : AANoCaptureImpl {
5281   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5282       : AANoCaptureImpl(IRP, A) {}
5283 
5284   /// See AbstractAttribute::trackStatistics()
5285   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5286 };
5287 
5288 /// NoCapture attribute for call site arguments.
5289 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5290   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5291       : AANoCaptureImpl(IRP, A) {}
5292 
5293   /// See AbstractAttribute::initialize(...).
5294   void initialize(Attributor &A) override {
5295     if (Argument *Arg = getAssociatedArgument())
5296       if (Arg->hasByValAttr())
5297         indicateOptimisticFixpoint();
5298     AANoCaptureImpl::initialize(A);
5299   }
5300 
5301   /// See AbstractAttribute::updateImpl(...).
5302   ChangeStatus updateImpl(Attributor &A) override {
5303     // TODO: Once we have call site specific value information we can provide
5304     //       call site specific liveness information and then it makes
5305     //       sense to specialize attributes for call sites arguments instead of
5306     //       redirecting requests to the callee argument.
5307     Argument *Arg = getAssociatedArgument();
5308     if (!Arg)
5309       return indicatePessimisticFixpoint();
5310     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5311     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5312     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5313   }
5314 
5315   /// See AbstractAttribute::trackStatistics()
5316   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5317 };
5318 
5319 /// NoCapture attribute for floating values.
5320 struct AANoCaptureFloating final : AANoCaptureImpl {
5321   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5322       : AANoCaptureImpl(IRP, A) {}
5323 
5324   /// See AbstractAttribute::trackStatistics()
5325   void trackStatistics() const override {
5326     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5327   }
5328 };
5329 
5330 /// NoCapture attribute for function return value.
5331 struct AANoCaptureReturned final : AANoCaptureImpl {
5332   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5333       : AANoCaptureImpl(IRP, A) {
5334     llvm_unreachable("NoCapture is not applicable to function returns!");
5335   }
5336 
5337   /// See AbstractAttribute::initialize(...).
5338   void initialize(Attributor &A) override {
5339     llvm_unreachable("NoCapture is not applicable to function returns!");
5340   }
5341 
5342   /// See AbstractAttribute::updateImpl(...).
5343   ChangeStatus updateImpl(Attributor &A) override {
5344     llvm_unreachable("NoCapture is not applicable to function returns!");
5345   }
5346 
5347   /// See AbstractAttribute::trackStatistics()
5348   void trackStatistics() const override {}
5349 };
5350 
5351 /// NoCapture attribute deduction for a call site return value.
5352 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5353   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5354       : AANoCaptureImpl(IRP, A) {}
5355 
5356   /// See AbstractAttribute::initialize(...).
5357   void initialize(Attributor &A) override {
5358     const Function *F = getAnchorScope();
5359     // Check what state the associated function can actually capture.
5360     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5361   }
5362 
5363   /// See AbstractAttribute::trackStatistics()
5364   void trackStatistics() const override {
5365     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5366   }
5367 };
5368 } // namespace
5369 
5370 /// ------------------ Value Simplify Attribute ----------------------------
5371 
5372 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5373   // FIXME: Add a typecast support.
5374   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5375       SimplifiedAssociatedValue, Other, Ty);
5376   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5377     return false;
5378 
5379   LLVM_DEBUG({
5380     if (SimplifiedAssociatedValue)
5381       dbgs() << "[ValueSimplify] is assumed to be "
5382              << **SimplifiedAssociatedValue << "\n";
5383     else
5384       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5385   });
5386   return true;
5387 }
5388 
5389 namespace {
5390 struct AAValueSimplifyImpl : AAValueSimplify {
5391   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5392       : AAValueSimplify(IRP, A) {}
5393 
5394   /// See AbstractAttribute::initialize(...).
5395   void initialize(Attributor &A) override {
5396     if (getAssociatedValue().getType()->isVoidTy())
5397       indicatePessimisticFixpoint();
5398     if (A.hasSimplificationCallback(getIRPosition()))
5399       indicatePessimisticFixpoint();
5400   }
5401 
5402   /// See AbstractAttribute::getAsStr().
5403   const std::string getAsStr() const override {
5404     LLVM_DEBUG({
5405       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5406       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5407         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5408     });
5409     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5410                           : "not-simple";
5411   }
5412 
5413   /// See AbstractAttribute::trackStatistics()
5414   void trackStatistics() const override {}
5415 
5416   /// See AAValueSimplify::getAssumedSimplifiedValue()
5417   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5418     return SimplifiedAssociatedValue;
5419   }
5420 
5421   /// Ensure the return value is \p V with type \p Ty, if not possible return
5422   /// nullptr. If \p Check is true we will only verify such an operation would
5423   /// suceed and return a non-nullptr value if that is the case. No IR is
5424   /// generated or modified.
5425   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5426                            bool Check) {
5427     if (auto *TypedV = AA::getWithType(V, Ty))
5428       return TypedV;
5429     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5430       return Check ? &V
5431                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5432                                                                       "", CtxI);
5433     return nullptr;
5434   }
5435 
5436   /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble.
5437   /// If \p Check is true we will only verify such an operation would suceed and
5438   /// return a non-nullptr value if that is the case. No IR is generated or
5439   /// modified.
5440   static Value *reproduceInst(Attributor &A,
5441                               const AbstractAttribute &QueryingAA,
5442                               Instruction &I, Type &Ty, Instruction *CtxI,
5443                               bool Check, ValueToValueMapTy &VMap) {
5444     assert(CtxI && "Cannot reproduce an instruction without context!");
5445     if (Check && (I.mayReadFromMemory() ||
5446                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5447                                                 /* TLI */ nullptr)))
5448       return nullptr;
5449     for (Value *Op : I.operands()) {
5450       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5451       if (!NewOp) {
5452         assert(Check && "Manifest of new value unexpectedly failed!");
5453         return nullptr;
5454       }
5455       if (!Check)
5456         VMap[Op] = NewOp;
5457     }
5458     if (Check)
5459       return &I;
5460 
5461     Instruction *CloneI = I.clone();
5462     // TODO: Try to salvage debug information here.
5463     CloneI->setDebugLoc(DebugLoc());
5464     VMap[&I] = CloneI;
5465     CloneI->insertBefore(CtxI);
5466     RemapInstruction(CloneI, VMap);
5467     return CloneI;
5468   }
5469 
5470   /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble.
5471   /// If \p Check is true we will only verify such an operation would suceed and
5472   /// return a non-nullptr value if that is the case. No IR is generated or
5473   /// modified.
5474   static Value *reproduceValue(Attributor &A,
5475                                const AbstractAttribute &QueryingAA, Value &V,
5476                                Type &Ty, Instruction *CtxI, bool Check,
5477                                ValueToValueMapTy &VMap) {
5478     if (const auto &NewV = VMap.lookup(&V))
5479       return NewV;
5480     bool UsedAssumedInformation = false;
5481     Optional<Value *> SimpleV =
5482         A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
5483     if (!SimpleV)
5484       return PoisonValue::get(&Ty);
5485     Value *EffectiveV = &V;
5486     if (SimpleV.getValue())
5487       EffectiveV = SimpleV.getValue();
5488     if (auto *C = dyn_cast<Constant>(EffectiveV))
5489       if (!C->canTrap())
5490         return C;
5491     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5492                                       A.getInfoCache()))
5493       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5494     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5495       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5496         return ensureType(A, *NewV, Ty, CtxI, Check);
5497     return nullptr;
5498   }
5499 
5500   /// Return a value we can use as replacement for the associated one, or
5501   /// nullptr if we don't have one that makes sense.
5502   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5503     Value *NewV = SimplifiedAssociatedValue
5504                       ? SimplifiedAssociatedValue.getValue()
5505                       : UndefValue::get(getAssociatedType());
5506     if (NewV && NewV != &getAssociatedValue()) {
5507       ValueToValueMapTy VMap;
5508       // First verify we can reprduce the value with the required type at the
5509       // context location before we actually start modifying the IR.
5510       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5511                          /* CheckOnly */ true, VMap))
5512         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5513                               /* CheckOnly */ false, VMap);
5514     }
5515     return nullptr;
5516   }
5517 
5518   /// Helper function for querying AAValueSimplify and updating candicate.
5519   /// \param IRP The value position we are trying to unify with SimplifiedValue
5520   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5521                       const IRPosition &IRP, bool Simplify = true) {
5522     bool UsedAssumedInformation = false;
5523     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5524     if (Simplify)
5525       QueryingValueSimplified =
5526           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5527     return unionAssumed(QueryingValueSimplified);
5528   }
5529 
5530   /// Returns a candidate is found or not
5531   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5532     if (!getAssociatedValue().getType()->isIntegerTy())
5533       return false;
5534 
5535     // This will also pass the call base context.
5536     const auto &AA =
5537         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5538 
5539     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5540 
5541     if (!COpt) {
5542       SimplifiedAssociatedValue = llvm::None;
5543       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5544       return true;
5545     }
5546     if (auto *C = *COpt) {
5547       SimplifiedAssociatedValue = C;
5548       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5549       return true;
5550     }
5551     return false;
5552   }
5553 
5554   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5555     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5556       return true;
5557     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5558       return true;
5559     return false;
5560   }
5561 
5562   /// See AbstractAttribute::manifest(...).
5563   ChangeStatus manifest(Attributor &A) override {
5564     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5565     for (auto &U : getAssociatedValue().uses()) {
5566       // Check if we need to adjust the insertion point to make sure the IR is
5567       // valid.
5568       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5569       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5570         IP = PHI->getIncomingBlock(U)->getTerminator();
5571       if (auto *NewV = manifestReplacementValue(A, IP)) {
5572         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5573                           << " -> " << *NewV << " :: " << *this << "\n");
5574         if (A.changeUseAfterManifest(U, *NewV))
5575           Changed = ChangeStatus::CHANGED;
5576       }
5577     }
5578 
5579     return Changed | AAValueSimplify::manifest(A);
5580   }
5581 
5582   /// See AbstractState::indicatePessimisticFixpoint(...).
5583   ChangeStatus indicatePessimisticFixpoint() override {
5584     SimplifiedAssociatedValue = &getAssociatedValue();
5585     return AAValueSimplify::indicatePessimisticFixpoint();
5586   }
5587 };
5588 
5589 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5590   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5591       : AAValueSimplifyImpl(IRP, A) {}
5592 
5593   void initialize(Attributor &A) override {
5594     AAValueSimplifyImpl::initialize(A);
5595     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5596       indicatePessimisticFixpoint();
5597     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5598                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5599                 /* IgnoreSubsumingPositions */ true))
5600       indicatePessimisticFixpoint();
5601   }
5602 
5603   /// See AbstractAttribute::updateImpl(...).
5604   ChangeStatus updateImpl(Attributor &A) override {
5605     // Byval is only replacable if it is readonly otherwise we would write into
5606     // the replaced value and not the copy that byval creates implicitly.
5607     Argument *Arg = getAssociatedArgument();
5608     if (Arg->hasByValAttr()) {
5609       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5610       //       there is no race by not copying a constant byval.
5611       bool IsKnown;
5612       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5613         return indicatePessimisticFixpoint();
5614     }
5615 
5616     auto Before = SimplifiedAssociatedValue;
5617 
5618     auto PredForCallSite = [&](AbstractCallSite ACS) {
5619       const IRPosition &ACSArgPos =
5620           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5621       // Check if a coresponding argument was found or if it is on not
5622       // associated (which can happen for callback calls).
5623       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5624         return false;
5625 
5626       // Simplify the argument operand explicitly and check if the result is
5627       // valid in the current scope. This avoids refering to simplified values
5628       // in other functions, e.g., we don't want to say a an argument in a
5629       // static function is actually an argument in a different function.
5630       bool UsedAssumedInformation = false;
5631       Optional<Constant *> SimpleArgOp =
5632           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5633       if (!SimpleArgOp)
5634         return true;
5635       if (!SimpleArgOp.getValue())
5636         return false;
5637       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5638         return false;
5639       return unionAssumed(*SimpleArgOp);
5640     };
5641 
5642     // Generate a answer specific to a call site context.
5643     bool Success;
5644     bool UsedAssumedInformation = false;
5645     if (hasCallBaseContext() &&
5646         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5647       Success = PredForCallSite(
5648           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5649     else
5650       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5651                                        UsedAssumedInformation);
5652 
5653     if (!Success)
5654       if (!askSimplifiedValueForOtherAAs(A))
5655         return indicatePessimisticFixpoint();
5656 
5657     // If a candicate was found in this update, return CHANGED.
5658     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5659                                                : ChangeStatus ::CHANGED;
5660   }
5661 
5662   /// See AbstractAttribute::trackStatistics()
5663   void trackStatistics() const override {
5664     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5665   }
5666 };
5667 
5668 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5669   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5670       : AAValueSimplifyImpl(IRP, A) {}
5671 
5672   /// See AAValueSimplify::getAssumedSimplifiedValue()
5673   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5674     if (!isValidState())
5675       return nullptr;
5676     return SimplifiedAssociatedValue;
5677   }
5678 
5679   /// See AbstractAttribute::updateImpl(...).
5680   ChangeStatus updateImpl(Attributor &A) override {
5681     auto Before = SimplifiedAssociatedValue;
5682 
5683     auto ReturnInstCB = [&](Instruction &I) {
5684       auto &RI = cast<ReturnInst>(I);
5685       return checkAndUpdate(
5686           A, *this,
5687           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5688     };
5689 
5690     bool UsedAssumedInformation = false;
5691     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5692                                    UsedAssumedInformation))
5693       if (!askSimplifiedValueForOtherAAs(A))
5694         return indicatePessimisticFixpoint();
5695 
5696     // If a candicate was found in this update, return CHANGED.
5697     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5698                                                : ChangeStatus ::CHANGED;
5699   }
5700 
5701   ChangeStatus manifest(Attributor &A) override {
5702     // We queried AAValueSimplify for the returned values so they will be
5703     // replaced if a simplified form was found. Nothing to do here.
5704     return ChangeStatus::UNCHANGED;
5705   }
5706 
5707   /// See AbstractAttribute::trackStatistics()
5708   void trackStatistics() const override {
5709     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5710   }
5711 };
5712 
5713 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5714   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5715       : AAValueSimplifyImpl(IRP, A) {}
5716 
5717   /// See AbstractAttribute::initialize(...).
5718   void initialize(Attributor &A) override {
5719     AAValueSimplifyImpl::initialize(A);
5720     Value &V = getAnchorValue();
5721 
5722     // TODO: add other stuffs
5723     if (isa<Constant>(V))
5724       indicatePessimisticFixpoint();
5725   }
5726 
5727   /// Check if \p Cmp is a comparison we can simplify.
5728   ///
5729   /// We handle multiple cases, one in which at least one operand is an
5730   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5731   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5732   /// will be updated.
5733   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5734     auto Union = [&](Value &V) {
5735       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5736           SimplifiedAssociatedValue, &V, V.getType());
5737       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5738     };
5739 
5740     Value *LHS = Cmp.getOperand(0);
5741     Value *RHS = Cmp.getOperand(1);
5742 
5743     // Simplify the operands first.
5744     bool UsedAssumedInformation = false;
5745     const auto &SimplifiedLHS =
5746         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5747                                *this, UsedAssumedInformation);
5748     if (!SimplifiedLHS)
5749       return true;
5750     if (!SimplifiedLHS.getValue())
5751       return false;
5752     LHS = *SimplifiedLHS;
5753 
5754     const auto &SimplifiedRHS =
5755         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5756                                *this, UsedAssumedInformation);
5757     if (!SimplifiedRHS)
5758       return true;
5759     if (!SimplifiedRHS.getValue())
5760       return false;
5761     RHS = *SimplifiedRHS;
5762 
5763     LLVMContext &Ctx = Cmp.getContext();
5764     // Handle the trivial case first in which we don't even need to think about
5765     // null or non-null.
5766     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5767       Constant *NewVal =
5768           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5769       if (!Union(*NewVal))
5770         return false;
5771       if (!UsedAssumedInformation)
5772         indicateOptimisticFixpoint();
5773       return true;
5774     }
5775 
5776     // From now on we only handle equalities (==, !=).
5777     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5778     if (!ICmp || !ICmp->isEquality())
5779       return false;
5780 
5781     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5782     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5783     if (!LHSIsNull && !RHSIsNull)
5784       return false;
5785 
5786     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5787     // non-nullptr operand and if we assume it's non-null we can conclude the
5788     // result of the comparison.
5789     assert((LHSIsNull || RHSIsNull) &&
5790            "Expected nullptr versus non-nullptr comparison at this point");
5791 
5792     // The index is the operand that we assume is not null.
5793     unsigned PtrIdx = LHSIsNull;
5794     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5795         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5796         DepClassTy::REQUIRED);
5797     if (!PtrNonNullAA.isAssumedNonNull())
5798       return false;
5799     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5800 
5801     // The new value depends on the predicate, true for != and false for ==.
5802     Constant *NewVal = ConstantInt::get(
5803         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5804     if (!Union(*NewVal))
5805       return false;
5806 
5807     if (!UsedAssumedInformation)
5808       indicateOptimisticFixpoint();
5809 
5810     return true;
5811   }
5812 
5813   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5814   /// simplify any operand of the instruction \p I. Return true if successful,
5815   /// in that case SimplifiedAssociatedValue will be updated.
5816   bool handleGenericInst(Attributor &A, Instruction &I) {
5817     bool SomeSimplified = false;
5818     bool UsedAssumedInformation = false;
5819 
5820     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5821     int Idx = 0;
5822     for (Value *Op : I.operands()) {
5823       const auto &SimplifiedOp =
5824           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5825                                  *this, UsedAssumedInformation);
5826       // If we are not sure about any operand we are not sure about the entire
5827       // instruction, we'll wait.
5828       if (!SimplifiedOp)
5829         return true;
5830 
5831       if (SimplifiedOp.getValue())
5832         NewOps[Idx] = SimplifiedOp.getValue();
5833       else
5834         NewOps[Idx] = Op;
5835 
5836       SomeSimplified |= (NewOps[Idx] != Op);
5837       ++Idx;
5838     }
5839 
5840     // We won't bother with the InstSimplify interface if we didn't simplify any
5841     // operand ourselves.
5842     if (!SomeSimplified)
5843       return false;
5844 
5845     InformationCache &InfoCache = A.getInfoCache();
5846     Function *F = I.getFunction();
5847     const auto *DT =
5848         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5849     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5850     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5851     OptimizationRemarkEmitter *ORE = nullptr;
5852 
5853     const DataLayout &DL = I.getModule()->getDataLayout();
5854     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5855     if (Value *SimplifiedI =
5856             simplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5857       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5858           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5859       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5860     }
5861     return false;
5862   }
5863 
5864   /// See AbstractAttribute::updateImpl(...).
5865   ChangeStatus updateImpl(Attributor &A) override {
5866     auto Before = SimplifiedAssociatedValue;
5867 
5868     // Do not simplify loads that are only used in llvm.assume if we cannot also
5869     // remove all stores that may feed into the load. The reason is that the
5870     // assume is probably worth something as long as the stores are around.
5871     if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
5872       InformationCache &InfoCache = A.getInfoCache();
5873       if (InfoCache.isOnlyUsedByAssume(*LI)) {
5874         SmallSetVector<Value *, 4> PotentialCopies;
5875         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
5876         bool UsedAssumedInformation = false;
5877         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
5878                                            PotentialValueOrigins, *this,
5879                                            UsedAssumedInformation,
5880                                            /* OnlyExact */ true)) {
5881           if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
5882                 if (!I)
5883                   return true;
5884                 if (auto *SI = dyn_cast<StoreInst>(I))
5885                   return A.isAssumedDead(SI->getOperandUse(0), this,
5886                                          /* LivenessAA */ nullptr,
5887                                          UsedAssumedInformation,
5888                                          /* CheckBBLivenessOnly */ false);
5889                 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
5890                                        UsedAssumedInformation,
5891                                        /* CheckBBLivenessOnly */ false);
5892               }))
5893             return indicatePessimisticFixpoint();
5894         }
5895       }
5896     }
5897 
5898     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5899                             bool Stripped) -> bool {
5900       auto &AA = A.getAAFor<AAValueSimplify>(
5901           *this, IRPosition::value(V, getCallBaseContext()),
5902           DepClassTy::REQUIRED);
5903       if (!Stripped && this == &AA) {
5904 
5905         if (auto *I = dyn_cast<Instruction>(&V)) {
5906           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5907             if (handleCmp(A, *Cmp))
5908               return true;
5909           if (handleGenericInst(A, *I))
5910             return true;
5911         }
5912         // TODO: Look the instruction and check recursively.
5913 
5914         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5915                           << "\n");
5916         return false;
5917       }
5918       return checkAndUpdate(A, *this,
5919                             IRPosition::value(V, getCallBaseContext()));
5920     };
5921 
5922     bool Dummy = false;
5923     bool UsedAssumedInformation = false;
5924     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5925                                      VisitValueCB, getCtxI(),
5926                                      UsedAssumedInformation,
5927                                      /* UseValueSimplify */ false))
5928       if (!askSimplifiedValueForOtherAAs(A))
5929         return indicatePessimisticFixpoint();
5930 
5931     // If a candicate was found in this update, return CHANGED.
5932     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5933                                                : ChangeStatus ::CHANGED;
5934   }
5935 
5936   /// See AbstractAttribute::trackStatistics()
5937   void trackStatistics() const override {
5938     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5939   }
5940 };
5941 
5942 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5943   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5944       : AAValueSimplifyImpl(IRP, A) {}
5945 
5946   /// See AbstractAttribute::initialize(...).
5947   void initialize(Attributor &A) override {
5948     SimplifiedAssociatedValue = nullptr;
5949     indicateOptimisticFixpoint();
5950   }
5951   /// See AbstractAttribute::initialize(...).
5952   ChangeStatus updateImpl(Attributor &A) override {
5953     llvm_unreachable(
5954         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5955   }
5956   /// See AbstractAttribute::trackStatistics()
5957   void trackStatistics() const override {
5958     STATS_DECLTRACK_FN_ATTR(value_simplify)
5959   }
5960 };
5961 
5962 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5963   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5964       : AAValueSimplifyFunction(IRP, A) {}
5965   /// See AbstractAttribute::trackStatistics()
5966   void trackStatistics() const override {
5967     STATS_DECLTRACK_CS_ATTR(value_simplify)
5968   }
5969 };
5970 
5971 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5972   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5973       : AAValueSimplifyImpl(IRP, A) {}
5974 
5975   void initialize(Attributor &A) override {
5976     AAValueSimplifyImpl::initialize(A);
5977     Function *Fn = getAssociatedFunction();
5978     if (!Fn) {
5979       indicatePessimisticFixpoint();
5980       return;
5981     }
5982     for (Argument &Arg : Fn->args()) {
5983       if (Arg.hasReturnedAttr()) {
5984         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5985                                                  Arg.getArgNo());
5986         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5987             checkAndUpdate(A, *this, IRP))
5988           indicateOptimisticFixpoint();
5989         else
5990           indicatePessimisticFixpoint();
5991         return;
5992       }
5993     }
5994   }
5995 
5996   /// See AbstractAttribute::updateImpl(...).
5997   ChangeStatus updateImpl(Attributor &A) override {
5998     auto Before = SimplifiedAssociatedValue;
5999     auto &RetAA = A.getAAFor<AAReturnedValues>(
6000         *this, IRPosition::function(*getAssociatedFunction()),
6001         DepClassTy::REQUIRED);
6002     auto PredForReturned =
6003         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
6004           bool UsedAssumedInformation = false;
6005           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
6006               &RetVal, *cast<CallBase>(getCtxI()), *this,
6007               UsedAssumedInformation);
6008           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6009               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
6010           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
6011         };
6012     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
6013       if (!askSimplifiedValueForOtherAAs(A))
6014         return indicatePessimisticFixpoint();
6015     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6016                                                : ChangeStatus ::CHANGED;
6017   }
6018 
6019   void trackStatistics() const override {
6020     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6021   }
6022 };
6023 
6024 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6025   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6026       : AAValueSimplifyFloating(IRP, A) {}
6027 
6028   /// See AbstractAttribute::manifest(...).
6029   ChangeStatus manifest(Attributor &A) override {
6030     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6031     // TODO: We should avoid simplification duplication to begin with.
6032     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6033         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6034     if (FloatAA && FloatAA->getState().isValidState())
6035       return Changed;
6036 
6037     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6038       Use &U = cast<CallBase>(&getAnchorValue())
6039                    ->getArgOperandUse(getCallSiteArgNo());
6040       if (A.changeUseAfterManifest(U, *NewV))
6041         Changed = ChangeStatus::CHANGED;
6042     }
6043 
6044     return Changed | AAValueSimplify::manifest(A);
6045   }
6046 
6047   void trackStatistics() const override {
6048     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6049   }
6050 };
6051 } // namespace
6052 
6053 /// ----------------------- Heap-To-Stack Conversion ---------------------------
6054 namespace {
6055 struct AAHeapToStackFunction final : public AAHeapToStack {
6056 
6057   struct AllocationInfo {
6058     /// The call that allocates the memory.
6059     CallBase *const CB;
6060 
6061     /// The library function id for the allocation.
6062     LibFunc LibraryFunctionId = NotLibFunc;
6063 
6064     /// The status wrt. a rewrite.
6065     enum {
6066       STACK_DUE_TO_USE,
6067       STACK_DUE_TO_FREE,
6068       INVALID,
6069     } Status = STACK_DUE_TO_USE;
6070 
6071     /// Flag to indicate if we encountered a use that might free this allocation
6072     /// but which is not in the deallocation infos.
6073     bool HasPotentiallyFreeingUnknownUses = false;
6074 
6075     /// The set of free calls that use this allocation.
6076     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6077   };
6078 
6079   struct DeallocationInfo {
6080     /// The call that deallocates the memory.
6081     CallBase *const CB;
6082 
6083     /// Flag to indicate if we don't know all objects this deallocation might
6084     /// free.
6085     bool MightFreeUnknownObjects = false;
6086 
6087     /// The set of allocation calls that are potentially freed.
6088     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6089   };
6090 
6091   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6092       : AAHeapToStack(IRP, A) {}
6093 
6094   ~AAHeapToStackFunction() {
6095     // Ensure we call the destructor so we release any memory allocated in the
6096     // sets.
6097     for (auto &It : AllocationInfos)
6098       It.second->~AllocationInfo();
6099     for (auto &It : DeallocationInfos)
6100       It.second->~DeallocationInfo();
6101   }
6102 
6103   void initialize(Attributor &A) override {
6104     AAHeapToStack::initialize(A);
6105 
6106     const Function *F = getAnchorScope();
6107     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6108 
6109     auto AllocationIdentifierCB = [&](Instruction &I) {
6110       CallBase *CB = dyn_cast<CallBase>(&I);
6111       if (!CB)
6112         return true;
6113       if (isFreeCall(CB, TLI)) {
6114         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
6115         return true;
6116       }
6117       // To do heap to stack, we need to know that the allocation itself is
6118       // removable once uses are rewritten, and that we can initialize the
6119       // alloca to the same pattern as the original allocation result.
6120       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
6121         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6122         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6123           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6124           AllocationInfos[CB] = AI;
6125           if (TLI)
6126             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6127         }
6128       }
6129       return true;
6130     };
6131 
6132     bool UsedAssumedInformation = false;
6133     bool Success = A.checkForAllCallLikeInstructions(
6134         AllocationIdentifierCB, *this, UsedAssumedInformation,
6135         /* CheckBBLivenessOnly */ false,
6136         /* CheckPotentiallyDead */ true);
6137     (void)Success;
6138     assert(Success && "Did not expect the call base visit callback to fail!");
6139 
6140     Attributor::SimplifictionCallbackTy SCB =
6141         [](const IRPosition &, const AbstractAttribute *,
6142            bool &) -> Optional<Value *> { return nullptr; };
6143     for (const auto &It : AllocationInfos)
6144       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6145                                        SCB);
6146     for (const auto &It : DeallocationInfos)
6147       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6148                                        SCB);
6149   }
6150 
6151   const std::string getAsStr() const override {
6152     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6153     for (const auto &It : AllocationInfos) {
6154       if (It.second->Status == AllocationInfo::INVALID)
6155         ++NumInvalidMallocs;
6156       else
6157         ++NumH2SMallocs;
6158     }
6159     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6160            std::to_string(NumInvalidMallocs);
6161   }
6162 
6163   /// See AbstractAttribute::trackStatistics().
6164   void trackStatistics() const override {
6165     STATS_DECL(
6166         MallocCalls, Function,
6167         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6168     for (auto &It : AllocationInfos)
6169       if (It.second->Status != AllocationInfo::INVALID)
6170         ++BUILD_STAT_NAME(MallocCalls, Function);
6171   }
6172 
6173   bool isAssumedHeapToStack(const CallBase &CB) const override {
6174     if (isValidState())
6175       if (AllocationInfo *AI =
6176               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6177         return AI->Status != AllocationInfo::INVALID;
6178     return false;
6179   }
6180 
6181   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6182     if (!isValidState())
6183       return false;
6184 
6185     for (auto &It : AllocationInfos) {
6186       AllocationInfo &AI = *It.second;
6187       if (AI.Status == AllocationInfo::INVALID)
6188         continue;
6189 
6190       if (AI.PotentialFreeCalls.count(&CB))
6191         return true;
6192     }
6193 
6194     return false;
6195   }
6196 
6197   ChangeStatus manifest(Attributor &A) override {
6198     assert(getState().isValidState() &&
6199            "Attempted to manifest an invalid state!");
6200 
6201     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6202     Function *F = getAnchorScope();
6203     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6204 
6205     for (auto &It : AllocationInfos) {
6206       AllocationInfo &AI = *It.second;
6207       if (AI.Status == AllocationInfo::INVALID)
6208         continue;
6209 
6210       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6211         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6212         A.deleteAfterManifest(*FreeCall);
6213         HasChanged = ChangeStatus::CHANGED;
6214       }
6215 
6216       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6217                         << "\n");
6218 
6219       auto Remark = [&](OptimizationRemark OR) {
6220         LibFunc IsAllocShared;
6221         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6222           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6223             return OR << "Moving globalized variable to the stack.";
6224         return OR << "Moving memory allocation from the heap to the stack.";
6225       };
6226       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6227         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6228       else
6229         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6230 
6231       const DataLayout &DL = A.getInfoCache().getDL();
6232       Value *Size;
6233       Optional<APInt> SizeAPI = getSize(A, *this, AI);
6234       if (SizeAPI) {
6235         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6236       } else {
6237         LLVMContext &Ctx = AI.CB->getContext();
6238         ObjectSizeOpts Opts;
6239         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6240         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6241         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6242                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6243         Size = SizeOffsetPair.first;
6244       }
6245 
6246       Align Alignment(1);
6247       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6248         Alignment = std::max(Alignment, *RetAlign);
6249       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6250         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6251         assert(AlignmentAPI && AlignmentAPI.getValue().getZExtValue() > 0 &&
6252                "Expected an alignment during manifest!");
6253         Alignment = std::max(
6254             Alignment, assumeAligned(AlignmentAPI.getValue().getZExtValue()));
6255       }
6256 
6257       // TODO: Hoist the alloca towards the function entry.
6258       unsigned AS = DL.getAllocaAddrSpace();
6259       Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
6260                                            Size, Alignment, "", AI.CB);
6261 
6262       if (Alloca->getType() != AI.CB->getType())
6263         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6264             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6265 
6266       auto *I8Ty = Type::getInt8Ty(F->getContext());
6267       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6268       assert(InitVal &&
6269              "Must be able to materialize initial memory state of allocation");
6270 
6271       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6272 
6273       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6274         auto *NBB = II->getNormalDest();
6275         BranchInst::Create(NBB, AI.CB->getParent());
6276         A.deleteAfterManifest(*AI.CB);
6277       } else {
6278         A.deleteAfterManifest(*AI.CB);
6279       }
6280 
6281       // Initialize the alloca with the same value as used by the allocation
6282       // function.  We can skip undef as the initial value of an alloc is
6283       // undef, and the memset would simply end up being DSEd.
6284       if (!isa<UndefValue>(InitVal)) {
6285         IRBuilder<> Builder(Alloca->getNextNode());
6286         // TODO: Use alignment above if align!=1
6287         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6288       }
6289       HasChanged = ChangeStatus::CHANGED;
6290     }
6291 
6292     return HasChanged;
6293   }
6294 
6295   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6296                            Value &V) {
6297     bool UsedAssumedInformation = false;
6298     Optional<Constant *> SimpleV =
6299         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6300     if (!SimpleV)
6301       return APInt(64, 0);
6302     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6303       return CI->getValue();
6304     return llvm::None;
6305   }
6306 
6307   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6308                           AllocationInfo &AI) {
6309     auto Mapper = [&](const Value *V) -> const Value * {
6310       bool UsedAssumedInformation = false;
6311       if (Optional<Constant *> SimpleV =
6312               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6313         if (*SimpleV)
6314           return *SimpleV;
6315       return V;
6316     };
6317 
6318     const Function *F = getAnchorScope();
6319     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6320     return getAllocSize(AI.CB, TLI, Mapper);
6321   }
6322 
6323   /// Collection of all malloc-like calls in a function with associated
6324   /// information.
6325   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6326 
6327   /// Collection of all free-like calls in a function with associated
6328   /// information.
6329   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6330 
6331   ChangeStatus updateImpl(Attributor &A) override;
6332 };
6333 
6334 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6335   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6336   const Function *F = getAnchorScope();
6337   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6338 
6339   const auto &LivenessAA =
6340       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6341 
6342   MustBeExecutedContextExplorer &Explorer =
6343       A.getInfoCache().getMustBeExecutedContextExplorer();
6344 
6345   bool StackIsAccessibleByOtherThreads =
6346       A.getInfoCache().stackIsAccessibleByOtherThreads();
6347 
6348   // Flag to ensure we update our deallocation information at most once per
6349   // updateImpl call and only if we use the free check reasoning.
6350   bool HasUpdatedFrees = false;
6351 
6352   auto UpdateFrees = [&]() {
6353     HasUpdatedFrees = true;
6354 
6355     for (auto &It : DeallocationInfos) {
6356       DeallocationInfo &DI = *It.second;
6357       // For now we cannot use deallocations that have unknown inputs, skip
6358       // them.
6359       if (DI.MightFreeUnknownObjects)
6360         continue;
6361 
6362       // No need to analyze dead calls, ignore them instead.
6363       bool UsedAssumedInformation = false;
6364       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6365                           /* CheckBBLivenessOnly */ true))
6366         continue;
6367 
6368       // Use the optimistic version to get the freed objects, ignoring dead
6369       // branches etc.
6370       SmallVector<Value *, 8> Objects;
6371       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6372                                            *this, DI.CB,
6373                                            UsedAssumedInformation)) {
6374         LLVM_DEBUG(
6375             dbgs()
6376             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6377         DI.MightFreeUnknownObjects = true;
6378         continue;
6379       }
6380 
6381       // Check each object explicitly.
6382       for (auto *Obj : Objects) {
6383         // Free of null and undef can be ignored as no-ops (or UB in the latter
6384         // case).
6385         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6386           continue;
6387 
6388         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6389         if (!ObjCB) {
6390           LLVM_DEBUG(dbgs()
6391                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6392           DI.MightFreeUnknownObjects = true;
6393           continue;
6394         }
6395 
6396         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6397         if (!AI) {
6398           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6399                             << "\n");
6400           DI.MightFreeUnknownObjects = true;
6401           continue;
6402         }
6403 
6404         DI.PotentialAllocationCalls.insert(ObjCB);
6405       }
6406     }
6407   };
6408 
6409   auto FreeCheck = [&](AllocationInfo &AI) {
6410     // If the stack is not accessible by other threads, the "must-free" logic
6411     // doesn't apply as the pointer could be shared and needs to be places in
6412     // "shareable" memory.
6413     if (!StackIsAccessibleByOtherThreads) {
6414       auto &NoSyncAA =
6415           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6416       if (!NoSyncAA.isAssumedNoSync()) {
6417         LLVM_DEBUG(
6418             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6419                       "other threads and function is not nosync:\n");
6420         return false;
6421       }
6422     }
6423     if (!HasUpdatedFrees)
6424       UpdateFrees();
6425 
6426     // TODO: Allow multi exit functions that have different free calls.
6427     if (AI.PotentialFreeCalls.size() != 1) {
6428       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6429                         << AI.PotentialFreeCalls.size() << "\n");
6430       return false;
6431     }
6432     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6433     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6434     if (!DI) {
6435       LLVM_DEBUG(
6436           dbgs() << "[H2S] unique free call was not known as deallocation call "
6437                  << *UniqueFree << "\n");
6438       return false;
6439     }
6440     if (DI->MightFreeUnknownObjects) {
6441       LLVM_DEBUG(
6442           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6443       return false;
6444     }
6445     if (DI->PotentialAllocationCalls.empty())
6446       return true;
6447     if (DI->PotentialAllocationCalls.size() > 1) {
6448       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6449                         << DI->PotentialAllocationCalls.size()
6450                         << " different allocations\n");
6451       return false;
6452     }
6453     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6454       LLVM_DEBUG(
6455           dbgs()
6456           << "[H2S] unique free call not known to free this allocation but "
6457           << **DI->PotentialAllocationCalls.begin() << "\n");
6458       return false;
6459     }
6460     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6461     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6462       LLVM_DEBUG(
6463           dbgs()
6464           << "[H2S] unique free call might not be executed with the allocation "
6465           << *UniqueFree << "\n");
6466       return false;
6467     }
6468     return true;
6469   };
6470 
6471   auto UsesCheck = [&](AllocationInfo &AI) {
6472     bool ValidUsesOnly = true;
6473 
6474     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6475       Instruction *UserI = cast<Instruction>(U.getUser());
6476       if (isa<LoadInst>(UserI))
6477         return true;
6478       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6479         if (SI->getValueOperand() == U.get()) {
6480           LLVM_DEBUG(dbgs()
6481                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6482           ValidUsesOnly = false;
6483         } else {
6484           // A store into the malloc'ed memory is fine.
6485         }
6486         return true;
6487       }
6488       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6489         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6490           return true;
6491         if (DeallocationInfos.count(CB)) {
6492           AI.PotentialFreeCalls.insert(CB);
6493           return true;
6494         }
6495 
6496         unsigned ArgNo = CB->getArgOperandNo(&U);
6497 
6498         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6499             *this, IRPosition::callsite_argument(*CB, ArgNo),
6500             DepClassTy::OPTIONAL);
6501 
6502         // If a call site argument use is nofree, we are fine.
6503         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6504             *this, IRPosition::callsite_argument(*CB, ArgNo),
6505             DepClassTy::OPTIONAL);
6506 
6507         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6508         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6509         if (MaybeCaptured ||
6510             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6511              MaybeFreed)) {
6512           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6513 
6514           // Emit a missed remark if this is missed OpenMP globalization.
6515           auto Remark = [&](OptimizationRemarkMissed ORM) {
6516             return ORM
6517                    << "Could not move globalized variable to the stack. "
6518                       "Variable is potentially captured in call. Mark "
6519                       "parameter as `__attribute__((noescape))` to override.";
6520           };
6521 
6522           if (ValidUsesOnly &&
6523               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6524             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6525 
6526           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6527           ValidUsesOnly = false;
6528         }
6529         return true;
6530       }
6531 
6532       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6533           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6534         Follow = true;
6535         return true;
6536       }
6537       // Unknown user for which we can not track uses further (in a way that
6538       // makes sense).
6539       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6540       ValidUsesOnly = false;
6541       return true;
6542     };
6543     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6544       return false;
6545     return ValidUsesOnly;
6546   };
6547 
6548   // The actual update starts here. We look at all allocations and depending on
6549   // their status perform the appropriate check(s).
6550   for (auto &It : AllocationInfos) {
6551     AllocationInfo &AI = *It.second;
6552     if (AI.Status == AllocationInfo::INVALID)
6553       continue;
6554 
6555     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6556       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6557       if (!APAlign) {
6558         // Can't generate an alloca which respects the required alignment
6559         // on the allocation.
6560         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6561                           << "\n");
6562         AI.Status = AllocationInfo::INVALID;
6563         Changed = ChangeStatus::CHANGED;
6564         continue;
6565       } else {
6566         if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6567             !APAlign->isPowerOf2()) {
6568           LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6569                             << "\n");
6570           AI.Status = AllocationInfo::INVALID;
6571           Changed = ChangeStatus::CHANGED;
6572           continue;
6573         }
6574       }
6575     }
6576 
6577     if (MaxHeapToStackSize != -1) {
6578       Optional<APInt> Size = getSize(A, *this, AI);
6579       if (!Size || Size.getValue().ugt(MaxHeapToStackSize)) {
6580         LLVM_DEBUG({
6581           if (!Size)
6582             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6583           else
6584             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6585                    << MaxHeapToStackSize << "\n";
6586         });
6587 
6588         AI.Status = AllocationInfo::INVALID;
6589         Changed = ChangeStatus::CHANGED;
6590         continue;
6591       }
6592     }
6593 
6594     switch (AI.Status) {
6595     case AllocationInfo::STACK_DUE_TO_USE:
6596       if (UsesCheck(AI))
6597         continue;
6598       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6599       LLVM_FALLTHROUGH;
6600     case AllocationInfo::STACK_DUE_TO_FREE:
6601       if (FreeCheck(AI))
6602         continue;
6603       AI.Status = AllocationInfo::INVALID;
6604       Changed = ChangeStatus::CHANGED;
6605       continue;
6606     case AllocationInfo::INVALID:
6607       llvm_unreachable("Invalid allocations should never reach this point!");
6608     };
6609   }
6610 
6611   return Changed;
6612 }
6613 } // namespace
6614 
6615 /// ----------------------- Privatizable Pointers ------------------------------
6616 namespace {
6617 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6618   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6619       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6620 
6621   ChangeStatus indicatePessimisticFixpoint() override {
6622     AAPrivatizablePtr::indicatePessimisticFixpoint();
6623     PrivatizableType = nullptr;
6624     return ChangeStatus::CHANGED;
6625   }
6626 
6627   /// Identify the type we can chose for a private copy of the underlying
6628   /// argument. None means it is not clear yet, nullptr means there is none.
6629   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6630 
6631   /// Return a privatizable type that encloses both T0 and T1.
6632   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6633   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6634     if (!T0)
6635       return T1;
6636     if (!T1)
6637       return T0;
6638     if (T0 == T1)
6639       return T0;
6640     return nullptr;
6641   }
6642 
6643   Optional<Type *> getPrivatizableType() const override {
6644     return PrivatizableType;
6645   }
6646 
6647   const std::string getAsStr() const override {
6648     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6649   }
6650 
6651 protected:
6652   Optional<Type *> PrivatizableType;
6653 };
6654 
6655 // TODO: Do this for call site arguments (probably also other values) as well.
6656 
6657 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6658   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6659       : AAPrivatizablePtrImpl(IRP, A) {}
6660 
6661   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6662   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6663     // If this is a byval argument and we know all the call sites (so we can
6664     // rewrite them), there is no need to check them explicitly.
6665     bool UsedAssumedInformation = false;
6666     SmallVector<Attribute, 1> Attrs;
6667     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6668     if (!Attrs.empty() &&
6669         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6670                                true, UsedAssumedInformation))
6671       return Attrs[0].getValueAsType();
6672 
6673     Optional<Type *> Ty;
6674     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6675 
6676     // Make sure the associated call site argument has the same type at all call
6677     // sites and it is an allocation we know is safe to privatize, for now that
6678     // means we only allow alloca instructions.
6679     // TODO: We can additionally analyze the accesses in the callee to  create
6680     //       the type from that information instead. That is a little more
6681     //       involved and will be done in a follow up patch.
6682     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6683       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6684       // Check if a coresponding argument was found or if it is one not
6685       // associated (which can happen for callback calls).
6686       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6687         return false;
6688 
6689       // Check that all call sites agree on a type.
6690       auto &PrivCSArgAA =
6691           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6692       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6693 
6694       LLVM_DEBUG({
6695         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6696         if (CSTy && CSTy.getValue())
6697           CSTy.getValue()->print(dbgs());
6698         else if (CSTy)
6699           dbgs() << "<nullptr>";
6700         else
6701           dbgs() << "<none>";
6702       });
6703 
6704       Ty = combineTypes(Ty, CSTy);
6705 
6706       LLVM_DEBUG({
6707         dbgs() << " : New Type: ";
6708         if (Ty && Ty.getValue())
6709           Ty.getValue()->print(dbgs());
6710         else if (Ty)
6711           dbgs() << "<nullptr>";
6712         else
6713           dbgs() << "<none>";
6714         dbgs() << "\n";
6715       });
6716 
6717       return !Ty || Ty.getValue();
6718     };
6719 
6720     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6721                                 UsedAssumedInformation))
6722       return nullptr;
6723     return Ty;
6724   }
6725 
6726   /// See AbstractAttribute::updateImpl(...).
6727   ChangeStatus updateImpl(Attributor &A) override {
6728     PrivatizableType = identifyPrivatizableType(A);
6729     if (!PrivatizableType)
6730       return ChangeStatus::UNCHANGED;
6731     if (!PrivatizableType.getValue())
6732       return indicatePessimisticFixpoint();
6733 
6734     // The dependence is optional so we don't give up once we give up on the
6735     // alignment.
6736     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6737                         DepClassTy::OPTIONAL);
6738 
6739     // Avoid arguments with padding for now.
6740     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6741         !ArgumentPromotionPass::isDenselyPacked(*PrivatizableType,
6742                                                 A.getInfoCache().getDL())) {
6743       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6744       return indicatePessimisticFixpoint();
6745     }
6746 
6747     // Collect the types that will replace the privatizable type in the function
6748     // signature.
6749     SmallVector<Type *, 16> ReplacementTypes;
6750     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6751 
6752     // Verify callee and caller agree on how the promoted argument would be
6753     // passed.
6754     Function &Fn = *getIRPosition().getAnchorScope();
6755     const auto *TTI =
6756         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6757     if (!TTI) {
6758       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6759                         << Fn.getName() << "\n");
6760       return indicatePessimisticFixpoint();
6761     }
6762 
6763     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6764       CallBase *CB = ACS.getInstruction();
6765       return TTI->areTypesABICompatible(
6766           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6767     };
6768     bool UsedAssumedInformation = false;
6769     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6770                                 UsedAssumedInformation)) {
6771       LLVM_DEBUG(
6772           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6773                  << Fn.getName() << "\n");
6774       return indicatePessimisticFixpoint();
6775     }
6776 
6777     // Register a rewrite of the argument.
6778     Argument *Arg = getAssociatedArgument();
6779     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6780       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6781       return indicatePessimisticFixpoint();
6782     }
6783 
6784     unsigned ArgNo = Arg->getArgNo();
6785 
6786     // Helper to check if for the given call site the associated argument is
6787     // passed to a callback where the privatization would be different.
6788     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6789       SmallVector<const Use *, 4> CallbackUses;
6790       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6791       for (const Use *U : CallbackUses) {
6792         AbstractCallSite CBACS(U);
6793         assert(CBACS && CBACS.isCallbackCall());
6794         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6795           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6796 
6797           LLVM_DEBUG({
6798             dbgs()
6799                 << "[AAPrivatizablePtr] Argument " << *Arg
6800                 << "check if can be privatized in the context of its parent ("
6801                 << Arg->getParent()->getName()
6802                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6803                    "callback ("
6804                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6805                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6806                 << CBACS.getCallArgOperand(CBArg) << " vs "
6807                 << CB.getArgOperand(ArgNo) << "\n"
6808                 << "[AAPrivatizablePtr] " << CBArg << " : "
6809                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6810           });
6811 
6812           if (CBArgNo != int(ArgNo))
6813             continue;
6814           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6815               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6816           if (CBArgPrivAA.isValidState()) {
6817             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6818             if (!CBArgPrivTy)
6819               continue;
6820             if (CBArgPrivTy.getValue() == PrivatizableType)
6821               continue;
6822           }
6823 
6824           LLVM_DEBUG({
6825             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6826                    << " cannot be privatized in the context of its parent ("
6827                    << Arg->getParent()->getName()
6828                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6829                       "callback ("
6830                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6831                    << ").\n[AAPrivatizablePtr] for which the argument "
6832                       "privatization is not compatible.\n";
6833           });
6834           return false;
6835         }
6836       }
6837       return true;
6838     };
6839 
6840     // Helper to check if for the given call site the associated argument is
6841     // passed to a direct call where the privatization would be different.
6842     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6843       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6844       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6845       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6846              "Expected a direct call operand for callback call operand");
6847 
6848       LLVM_DEBUG({
6849         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6850                << " check if be privatized in the context of its parent ("
6851                << Arg->getParent()->getName()
6852                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6853                   "direct call of ("
6854                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6855                << ").\n";
6856       });
6857 
6858       Function *DCCallee = DC->getCalledFunction();
6859       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6860         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6861             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6862             DepClassTy::REQUIRED);
6863         if (DCArgPrivAA.isValidState()) {
6864           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6865           if (!DCArgPrivTy)
6866             return true;
6867           if (DCArgPrivTy.getValue() == PrivatizableType)
6868             return true;
6869         }
6870       }
6871 
6872       LLVM_DEBUG({
6873         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6874                << " cannot be privatized in the context of its parent ("
6875                << Arg->getParent()->getName()
6876                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6877                   "direct call of ("
6878                << ACS.getInstruction()->getCalledFunction()->getName()
6879                << ").\n[AAPrivatizablePtr] for which the argument "
6880                   "privatization is not compatible.\n";
6881       });
6882       return false;
6883     };
6884 
6885     // Helper to check if the associated argument is used at the given abstract
6886     // call site in a way that is incompatible with the privatization assumed
6887     // here.
6888     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6889       if (ACS.isDirectCall())
6890         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6891       if (ACS.isCallbackCall())
6892         return IsCompatiblePrivArgOfDirectCS(ACS);
6893       return false;
6894     };
6895 
6896     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6897                                 UsedAssumedInformation))
6898       return indicatePessimisticFixpoint();
6899 
6900     return ChangeStatus::UNCHANGED;
6901   }
6902 
6903   /// Given a type to private \p PrivType, collect the constituates (which are
6904   /// used) in \p ReplacementTypes.
6905   static void
6906   identifyReplacementTypes(Type *PrivType,
6907                            SmallVectorImpl<Type *> &ReplacementTypes) {
6908     // TODO: For now we expand the privatization type to the fullest which can
6909     //       lead to dead arguments that need to be removed later.
6910     assert(PrivType && "Expected privatizable type!");
6911 
6912     // Traverse the type, extract constituate types on the outermost level.
6913     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6914       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6915         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6916     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6917       ReplacementTypes.append(PrivArrayType->getNumElements(),
6918                               PrivArrayType->getElementType());
6919     } else {
6920       ReplacementTypes.push_back(PrivType);
6921     }
6922   }
6923 
6924   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6925   /// The values needed are taken from the arguments of \p F starting at
6926   /// position \p ArgNo.
6927   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6928                                    unsigned ArgNo, Instruction &IP) {
6929     assert(PrivType && "Expected privatizable type!");
6930 
6931     IRBuilder<NoFolder> IRB(&IP);
6932     const DataLayout &DL = F.getParent()->getDataLayout();
6933 
6934     // Traverse the type, build GEPs and stores.
6935     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6936       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6937       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6938         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6939         Value *Ptr =
6940             constructPointer(PointeeTy, PrivType, &Base,
6941                              PrivStructLayout->getElementOffset(u), IRB, DL);
6942         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6943       }
6944     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6945       Type *PointeeTy = PrivArrayType->getElementType();
6946       Type *PointeePtrTy = PointeeTy->getPointerTo();
6947       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6948       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6949         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6950                                       u * PointeeTySize, IRB, DL);
6951         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6952       }
6953     } else {
6954       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6955     }
6956   }
6957 
6958   /// Extract values from \p Base according to the type \p PrivType at the
6959   /// call position \p ACS. The values are appended to \p ReplacementValues.
6960   void createReplacementValues(Align Alignment, Type *PrivType,
6961                                AbstractCallSite ACS, Value *Base,
6962                                SmallVectorImpl<Value *> &ReplacementValues) {
6963     assert(Base && "Expected base value!");
6964     assert(PrivType && "Expected privatizable type!");
6965     Instruction *IP = ACS.getInstruction();
6966 
6967     IRBuilder<NoFolder> IRB(IP);
6968     const DataLayout &DL = IP->getModule()->getDataLayout();
6969 
6970     Type *PrivPtrType = PrivType->getPointerTo();
6971     if (Base->getType() != PrivPtrType)
6972       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6973           Base, PrivPtrType, "", ACS.getInstruction());
6974 
6975     // Traverse the type, build GEPs and loads.
6976     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6977       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6978       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6979         Type *PointeeTy = PrivStructType->getElementType(u);
6980         Value *Ptr =
6981             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6982                              PrivStructLayout->getElementOffset(u), IRB, DL);
6983         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6984         L->setAlignment(Alignment);
6985         ReplacementValues.push_back(L);
6986       }
6987     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6988       Type *PointeeTy = PrivArrayType->getElementType();
6989       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6990       Type *PointeePtrTy = PointeeTy->getPointerTo();
6991       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6992         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6993                                       u * PointeeTySize, IRB, DL);
6994         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6995         L->setAlignment(Alignment);
6996         ReplacementValues.push_back(L);
6997       }
6998     } else {
6999       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
7000       L->setAlignment(Alignment);
7001       ReplacementValues.push_back(L);
7002     }
7003   }
7004 
7005   /// See AbstractAttribute::manifest(...)
7006   ChangeStatus manifest(Attributor &A) override {
7007     if (!PrivatizableType)
7008       return ChangeStatus::UNCHANGED;
7009     assert(PrivatizableType.getValue() && "Expected privatizable type!");
7010 
7011     // Collect all tail calls in the function as we cannot allow new allocas to
7012     // escape into tail recursion.
7013     // TODO: Be smarter about new allocas escaping into tail calls.
7014     SmallVector<CallInst *, 16> TailCalls;
7015     bool UsedAssumedInformation = false;
7016     if (!A.checkForAllInstructions(
7017             [&](Instruction &I) {
7018               CallInst &CI = cast<CallInst>(I);
7019               if (CI.isTailCall())
7020                 TailCalls.push_back(&CI);
7021               return true;
7022             },
7023             *this, {Instruction::Call}, UsedAssumedInformation))
7024       return ChangeStatus::UNCHANGED;
7025 
7026     Argument *Arg = getAssociatedArgument();
7027     // Query AAAlign attribute for alignment of associated argument to
7028     // determine the best alignment of loads.
7029     const auto &AlignAA =
7030         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7031 
7032     // Callback to repair the associated function. A new alloca is placed at the
7033     // beginning and initialized with the values passed through arguments. The
7034     // new alloca replaces the use of the old pointer argument.
7035     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7036         [=](const Attributor::ArgumentReplacementInfo &ARI,
7037             Function &ReplacementFn, Function::arg_iterator ArgIt) {
7038           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7039           Instruction *IP = &*EntryBB.getFirstInsertionPt();
7040           const DataLayout &DL = IP->getModule()->getDataLayout();
7041           unsigned AS = DL.getAllocaAddrSpace();
7042           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
7043                                            Arg->getName() + ".priv", IP);
7044           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
7045                                ArgIt->getArgNo(), *IP);
7046 
7047           if (AI->getType() != Arg->getType())
7048             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7049                 AI, Arg->getType(), "", IP);
7050           Arg->replaceAllUsesWith(AI);
7051 
7052           for (CallInst *CI : TailCalls)
7053             CI->setTailCall(false);
7054         };
7055 
7056     // Callback to repair a call site of the associated function. The elements
7057     // of the privatizable type are loaded prior to the call and passed to the
7058     // new function version.
7059     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7060         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
7061                       AbstractCallSite ACS,
7062                       SmallVectorImpl<Value *> &NewArgOperands) {
7063           // When no alignment is specified for the load instruction,
7064           // natural alignment is assumed.
7065           createReplacementValues(
7066               AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
7067               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7068               NewArgOperands);
7069         };
7070 
7071     // Collect the types that will replace the privatizable type in the function
7072     // signature.
7073     SmallVector<Type *, 16> ReplacementTypes;
7074     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7075 
7076     // Register a rewrite of the argument.
7077     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7078                                            std::move(FnRepairCB),
7079                                            std::move(ACSRepairCB)))
7080       return ChangeStatus::CHANGED;
7081     return ChangeStatus::UNCHANGED;
7082   }
7083 
7084   /// See AbstractAttribute::trackStatistics()
7085   void trackStatistics() const override {
7086     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7087   }
7088 };
7089 
7090 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7091   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7092       : AAPrivatizablePtrImpl(IRP, A) {}
7093 
7094   /// See AbstractAttribute::initialize(...).
7095   virtual void initialize(Attributor &A) override {
7096     // TODO: We can privatize more than arguments.
7097     indicatePessimisticFixpoint();
7098   }
7099 
7100   ChangeStatus updateImpl(Attributor &A) override {
7101     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7102                      "updateImpl will not be called");
7103   }
7104 
7105   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7106   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
7107     Value *Obj = getUnderlyingObject(&getAssociatedValue());
7108     if (!Obj) {
7109       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7110       return nullptr;
7111     }
7112 
7113     if (auto *AI = dyn_cast<AllocaInst>(Obj))
7114       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7115         if (CI->isOne())
7116           return AI->getAllocatedType();
7117     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7118       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7119           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7120       if (PrivArgAA.isAssumedPrivatizablePtr())
7121         return PrivArgAA.getPrivatizableType();
7122     }
7123 
7124     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7125                          "alloca nor privatizable argument: "
7126                       << *Obj << "!\n");
7127     return nullptr;
7128   }
7129 
7130   /// See AbstractAttribute::trackStatistics()
7131   void trackStatistics() const override {
7132     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7133   }
7134 };
7135 
7136 struct AAPrivatizablePtrCallSiteArgument final
7137     : public AAPrivatizablePtrFloating {
7138   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7139       : AAPrivatizablePtrFloating(IRP, A) {}
7140 
7141   /// See AbstractAttribute::initialize(...).
7142   void initialize(Attributor &A) override {
7143     if (getIRPosition().hasAttr(Attribute::ByVal))
7144       indicateOptimisticFixpoint();
7145   }
7146 
7147   /// See AbstractAttribute::updateImpl(...).
7148   ChangeStatus updateImpl(Attributor &A) override {
7149     PrivatizableType = identifyPrivatizableType(A);
7150     if (!PrivatizableType)
7151       return ChangeStatus::UNCHANGED;
7152     if (!PrivatizableType.getValue())
7153       return indicatePessimisticFixpoint();
7154 
7155     const IRPosition &IRP = getIRPosition();
7156     auto &NoCaptureAA =
7157         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
7158     if (!NoCaptureAA.isAssumedNoCapture()) {
7159       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7160       return indicatePessimisticFixpoint();
7161     }
7162 
7163     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
7164     if (!NoAliasAA.isAssumedNoAlias()) {
7165       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7166       return indicatePessimisticFixpoint();
7167     }
7168 
7169     bool IsKnown;
7170     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7171       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7172       return indicatePessimisticFixpoint();
7173     }
7174 
7175     return ChangeStatus::UNCHANGED;
7176   }
7177 
7178   /// See AbstractAttribute::trackStatistics()
7179   void trackStatistics() const override {
7180     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7181   }
7182 };
7183 
7184 struct AAPrivatizablePtrCallSiteReturned final
7185     : public AAPrivatizablePtrFloating {
7186   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7187       : AAPrivatizablePtrFloating(IRP, A) {}
7188 
7189   /// See AbstractAttribute::initialize(...).
7190   void initialize(Attributor &A) override {
7191     // TODO: We can privatize more than arguments.
7192     indicatePessimisticFixpoint();
7193   }
7194 
7195   /// See AbstractAttribute::trackStatistics()
7196   void trackStatistics() const override {
7197     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7198   }
7199 };
7200 
7201 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7202   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7203       : AAPrivatizablePtrFloating(IRP, A) {}
7204 
7205   /// See AbstractAttribute::initialize(...).
7206   void initialize(Attributor &A) override {
7207     // TODO: We can privatize more than arguments.
7208     indicatePessimisticFixpoint();
7209   }
7210 
7211   /// See AbstractAttribute::trackStatistics()
7212   void trackStatistics() const override {
7213     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7214   }
7215 };
7216 } // namespace
7217 
7218 /// -------------------- Memory Behavior Attributes ----------------------------
7219 /// Includes read-none, read-only, and write-only.
7220 /// ----------------------------------------------------------------------------
7221 namespace {
7222 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7223   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7224       : AAMemoryBehavior(IRP, A) {}
7225 
7226   /// See AbstractAttribute::initialize(...).
7227   void initialize(Attributor &A) override {
7228     intersectAssumedBits(BEST_STATE);
7229     getKnownStateFromValue(getIRPosition(), getState());
7230     AAMemoryBehavior::initialize(A);
7231   }
7232 
7233   /// Return the memory behavior information encoded in the IR for \p IRP.
7234   static void getKnownStateFromValue(const IRPosition &IRP,
7235                                      BitIntegerState &State,
7236                                      bool IgnoreSubsumingPositions = false) {
7237     SmallVector<Attribute, 2> Attrs;
7238     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7239     for (const Attribute &Attr : Attrs) {
7240       switch (Attr.getKindAsEnum()) {
7241       case Attribute::ReadNone:
7242         State.addKnownBits(NO_ACCESSES);
7243         break;
7244       case Attribute::ReadOnly:
7245         State.addKnownBits(NO_WRITES);
7246         break;
7247       case Attribute::WriteOnly:
7248         State.addKnownBits(NO_READS);
7249         break;
7250       default:
7251         llvm_unreachable("Unexpected attribute!");
7252       }
7253     }
7254 
7255     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7256       if (!I->mayReadFromMemory())
7257         State.addKnownBits(NO_READS);
7258       if (!I->mayWriteToMemory())
7259         State.addKnownBits(NO_WRITES);
7260     }
7261   }
7262 
7263   /// See AbstractAttribute::getDeducedAttributes(...).
7264   void getDeducedAttributes(LLVMContext &Ctx,
7265                             SmallVectorImpl<Attribute> &Attrs) const override {
7266     assert(Attrs.size() == 0);
7267     if (isAssumedReadNone())
7268       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7269     else if (isAssumedReadOnly())
7270       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7271     else if (isAssumedWriteOnly())
7272       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7273     assert(Attrs.size() <= 1);
7274   }
7275 
7276   /// See AbstractAttribute::manifest(...).
7277   ChangeStatus manifest(Attributor &A) override {
7278     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7279       return ChangeStatus::UNCHANGED;
7280 
7281     const IRPosition &IRP = getIRPosition();
7282 
7283     // Check if we would improve the existing attributes first.
7284     SmallVector<Attribute, 4> DeducedAttrs;
7285     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7286     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7287           return IRP.hasAttr(Attr.getKindAsEnum(),
7288                              /* IgnoreSubsumingPositions */ true);
7289         }))
7290       return ChangeStatus::UNCHANGED;
7291 
7292     // Clear existing attributes.
7293     IRP.removeAttrs(AttrKinds);
7294 
7295     // Use the generic manifest method.
7296     return IRAttribute::manifest(A);
7297   }
7298 
7299   /// See AbstractState::getAsStr().
7300   const std::string getAsStr() const override {
7301     if (isAssumedReadNone())
7302       return "readnone";
7303     if (isAssumedReadOnly())
7304       return "readonly";
7305     if (isAssumedWriteOnly())
7306       return "writeonly";
7307     return "may-read/write";
7308   }
7309 
7310   /// The set of IR attributes AAMemoryBehavior deals with.
7311   static const Attribute::AttrKind AttrKinds[3];
7312 };
7313 
7314 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7315     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7316 
7317 /// Memory behavior attribute for a floating value.
7318 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7319   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7320       : AAMemoryBehaviorImpl(IRP, A) {}
7321 
7322   /// See AbstractAttribute::updateImpl(...).
7323   ChangeStatus updateImpl(Attributor &A) override;
7324 
7325   /// See AbstractAttribute::trackStatistics()
7326   void trackStatistics() const override {
7327     if (isAssumedReadNone())
7328       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7329     else if (isAssumedReadOnly())
7330       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7331     else if (isAssumedWriteOnly())
7332       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7333   }
7334 
7335 private:
7336   /// Return true if users of \p UserI might access the underlying
7337   /// variable/location described by \p U and should therefore be analyzed.
7338   bool followUsersOfUseIn(Attributor &A, const Use &U,
7339                           const Instruction *UserI);
7340 
7341   /// Update the state according to the effect of use \p U in \p UserI.
7342   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7343 };
7344 
7345 /// Memory behavior attribute for function argument.
7346 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7347   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7348       : AAMemoryBehaviorFloating(IRP, A) {}
7349 
7350   /// See AbstractAttribute::initialize(...).
7351   void initialize(Attributor &A) override {
7352     intersectAssumedBits(BEST_STATE);
7353     const IRPosition &IRP = getIRPosition();
7354     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7355     // can query it when we use has/getAttr. That would allow us to reuse the
7356     // initialize of the base class here.
7357     bool HasByVal =
7358         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7359     getKnownStateFromValue(IRP, getState(),
7360                            /* IgnoreSubsumingPositions */ HasByVal);
7361 
7362     // Initialize the use vector with all direct uses of the associated value.
7363     Argument *Arg = getAssociatedArgument();
7364     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7365       indicatePessimisticFixpoint();
7366   }
7367 
7368   ChangeStatus manifest(Attributor &A) override {
7369     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7370     if (!getAssociatedValue().getType()->isPointerTy())
7371       return ChangeStatus::UNCHANGED;
7372 
7373     // TODO: From readattrs.ll: "inalloca parameters are always
7374     //                           considered written"
7375     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7376       removeKnownBits(NO_WRITES);
7377       removeAssumedBits(NO_WRITES);
7378     }
7379     return AAMemoryBehaviorFloating::manifest(A);
7380   }
7381 
7382   /// See AbstractAttribute::trackStatistics()
7383   void trackStatistics() const override {
7384     if (isAssumedReadNone())
7385       STATS_DECLTRACK_ARG_ATTR(readnone)
7386     else if (isAssumedReadOnly())
7387       STATS_DECLTRACK_ARG_ATTR(readonly)
7388     else if (isAssumedWriteOnly())
7389       STATS_DECLTRACK_ARG_ATTR(writeonly)
7390   }
7391 };
7392 
7393 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7394   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7395       : AAMemoryBehaviorArgument(IRP, A) {}
7396 
7397   /// See AbstractAttribute::initialize(...).
7398   void initialize(Attributor &A) override {
7399     // If we don't have an associated attribute this is either a variadic call
7400     // or an indirect call, either way, nothing to do here.
7401     Argument *Arg = getAssociatedArgument();
7402     if (!Arg) {
7403       indicatePessimisticFixpoint();
7404       return;
7405     }
7406     if (Arg->hasByValAttr()) {
7407       addKnownBits(NO_WRITES);
7408       removeKnownBits(NO_READS);
7409       removeAssumedBits(NO_READS);
7410     }
7411     AAMemoryBehaviorArgument::initialize(A);
7412     if (getAssociatedFunction()->isDeclaration())
7413       indicatePessimisticFixpoint();
7414   }
7415 
7416   /// See AbstractAttribute::updateImpl(...).
7417   ChangeStatus updateImpl(Attributor &A) override {
7418     // TODO: Once we have call site specific value information we can provide
7419     //       call site specific liveness liveness information and then it makes
7420     //       sense to specialize attributes for call sites arguments instead of
7421     //       redirecting requests to the callee argument.
7422     Argument *Arg = getAssociatedArgument();
7423     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7424     auto &ArgAA =
7425         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7426     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7427   }
7428 
7429   /// See AbstractAttribute::trackStatistics()
7430   void trackStatistics() const override {
7431     if (isAssumedReadNone())
7432       STATS_DECLTRACK_CSARG_ATTR(readnone)
7433     else if (isAssumedReadOnly())
7434       STATS_DECLTRACK_CSARG_ATTR(readonly)
7435     else if (isAssumedWriteOnly())
7436       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7437   }
7438 };
7439 
7440 /// Memory behavior attribute for a call site return position.
7441 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7442   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7443       : AAMemoryBehaviorFloating(IRP, A) {}
7444 
7445   /// See AbstractAttribute::initialize(...).
7446   void initialize(Attributor &A) override {
7447     AAMemoryBehaviorImpl::initialize(A);
7448     Function *F = getAssociatedFunction();
7449     if (!F || F->isDeclaration())
7450       indicatePessimisticFixpoint();
7451   }
7452 
7453   /// See AbstractAttribute::manifest(...).
7454   ChangeStatus manifest(Attributor &A) override {
7455     // We do not annotate returned values.
7456     return ChangeStatus::UNCHANGED;
7457   }
7458 
7459   /// See AbstractAttribute::trackStatistics()
7460   void trackStatistics() const override {}
7461 };
7462 
7463 /// An AA to represent the memory behavior function attributes.
7464 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7465   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7466       : AAMemoryBehaviorImpl(IRP, A) {}
7467 
7468   /// See AbstractAttribute::updateImpl(Attributor &A).
7469   virtual ChangeStatus updateImpl(Attributor &A) override;
7470 
7471   /// See AbstractAttribute::manifest(...).
7472   ChangeStatus manifest(Attributor &A) override {
7473     Function &F = cast<Function>(getAnchorValue());
7474     if (isAssumedReadNone()) {
7475       F.removeFnAttr(Attribute::ArgMemOnly);
7476       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7477       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7478     }
7479     return AAMemoryBehaviorImpl::manifest(A);
7480   }
7481 
7482   /// See AbstractAttribute::trackStatistics()
7483   void trackStatistics() const override {
7484     if (isAssumedReadNone())
7485       STATS_DECLTRACK_FN_ATTR(readnone)
7486     else if (isAssumedReadOnly())
7487       STATS_DECLTRACK_FN_ATTR(readonly)
7488     else if (isAssumedWriteOnly())
7489       STATS_DECLTRACK_FN_ATTR(writeonly)
7490   }
7491 };
7492 
7493 /// AAMemoryBehavior attribute for call sites.
7494 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7495   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7496       : AAMemoryBehaviorImpl(IRP, A) {}
7497 
7498   /// See AbstractAttribute::initialize(...).
7499   void initialize(Attributor &A) override {
7500     AAMemoryBehaviorImpl::initialize(A);
7501     Function *F = getAssociatedFunction();
7502     if (!F || F->isDeclaration())
7503       indicatePessimisticFixpoint();
7504   }
7505 
7506   /// See AbstractAttribute::updateImpl(...).
7507   ChangeStatus updateImpl(Attributor &A) override {
7508     // TODO: Once we have call site specific value information we can provide
7509     //       call site specific liveness liveness information and then it makes
7510     //       sense to specialize attributes for call sites arguments instead of
7511     //       redirecting requests to the callee argument.
7512     Function *F = getAssociatedFunction();
7513     const IRPosition &FnPos = IRPosition::function(*F);
7514     auto &FnAA =
7515         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7516     return clampStateAndIndicateChange(getState(), FnAA.getState());
7517   }
7518 
7519   /// See AbstractAttribute::trackStatistics()
7520   void trackStatistics() const override {
7521     if (isAssumedReadNone())
7522       STATS_DECLTRACK_CS_ATTR(readnone)
7523     else if (isAssumedReadOnly())
7524       STATS_DECLTRACK_CS_ATTR(readonly)
7525     else if (isAssumedWriteOnly())
7526       STATS_DECLTRACK_CS_ATTR(writeonly)
7527   }
7528 };
7529 
7530 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7531 
7532   // The current assumed state used to determine a change.
7533   auto AssumedState = getAssumed();
7534 
7535   auto CheckRWInst = [&](Instruction &I) {
7536     // If the instruction has an own memory behavior state, use it to restrict
7537     // the local state. No further analysis is required as the other memory
7538     // state is as optimistic as it gets.
7539     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7540       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7541           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7542       intersectAssumedBits(MemBehaviorAA.getAssumed());
7543       return !isAtFixpoint();
7544     }
7545 
7546     // Remove access kind modifiers if necessary.
7547     if (I.mayReadFromMemory())
7548       removeAssumedBits(NO_READS);
7549     if (I.mayWriteToMemory())
7550       removeAssumedBits(NO_WRITES);
7551     return !isAtFixpoint();
7552   };
7553 
7554   bool UsedAssumedInformation = false;
7555   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7556                                           UsedAssumedInformation))
7557     return indicatePessimisticFixpoint();
7558 
7559   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7560                                         : ChangeStatus::UNCHANGED;
7561 }
7562 
7563 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7564 
7565   const IRPosition &IRP = getIRPosition();
7566   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7567   AAMemoryBehavior::StateType &S = getState();
7568 
7569   // First, check the function scope. We take the known information and we avoid
7570   // work if the assumed information implies the current assumed information for
7571   // this attribute. This is a valid for all but byval arguments.
7572   Argument *Arg = IRP.getAssociatedArgument();
7573   AAMemoryBehavior::base_t FnMemAssumedState =
7574       AAMemoryBehavior::StateType::getWorstState();
7575   if (!Arg || !Arg->hasByValAttr()) {
7576     const auto &FnMemAA =
7577         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7578     FnMemAssumedState = FnMemAA.getAssumed();
7579     S.addKnownBits(FnMemAA.getKnown());
7580     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7581       return ChangeStatus::UNCHANGED;
7582   }
7583 
7584   // The current assumed state used to determine a change.
7585   auto AssumedState = S.getAssumed();
7586 
7587   // Make sure the value is not captured (except through "return"), if
7588   // it is, any information derived would be irrelevant anyway as we cannot
7589   // check the potential aliases introduced by the capture. However, no need
7590   // to fall back to anythign less optimistic than the function state.
7591   const auto &ArgNoCaptureAA =
7592       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7593   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7594     S.intersectAssumedBits(FnMemAssumedState);
7595     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7596                                           : ChangeStatus::UNCHANGED;
7597   }
7598 
7599   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7600   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7601     Instruction *UserI = cast<Instruction>(U.getUser());
7602     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7603                       << " \n");
7604 
7605     // Droppable users, e.g., llvm::assume does not actually perform any action.
7606     if (UserI->isDroppable())
7607       return true;
7608 
7609     // Check if the users of UserI should also be visited.
7610     Follow = followUsersOfUseIn(A, U, UserI);
7611 
7612     // If UserI might touch memory we analyze the use in detail.
7613     if (UserI->mayReadOrWriteMemory())
7614       analyzeUseIn(A, U, UserI);
7615 
7616     return !isAtFixpoint();
7617   };
7618 
7619   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7620     return indicatePessimisticFixpoint();
7621 
7622   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7623                                         : ChangeStatus::UNCHANGED;
7624 }
7625 
7626 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7627                                                   const Instruction *UserI) {
7628   // The loaded value is unrelated to the pointer argument, no need to
7629   // follow the users of the load.
7630   if (isa<LoadInst>(UserI))
7631     return false;
7632 
7633   // By default we follow all uses assuming UserI might leak information on U,
7634   // we have special handling for call sites operands though.
7635   const auto *CB = dyn_cast<CallBase>(UserI);
7636   if (!CB || !CB->isArgOperand(&U))
7637     return true;
7638 
7639   // If the use is a call argument known not to be captured, the users of
7640   // the call do not need to be visited because they have to be unrelated to
7641   // the input. Note that this check is not trivial even though we disallow
7642   // general capturing of the underlying argument. The reason is that the
7643   // call might the argument "through return", which we allow and for which we
7644   // need to check call users.
7645   if (U.get()->getType()->isPointerTy()) {
7646     unsigned ArgNo = CB->getArgOperandNo(&U);
7647     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7648         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7649     return !ArgNoCaptureAA.isAssumedNoCapture();
7650   }
7651 
7652   return true;
7653 }
7654 
7655 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7656                                             const Instruction *UserI) {
7657   assert(UserI->mayReadOrWriteMemory());
7658 
7659   switch (UserI->getOpcode()) {
7660   default:
7661     // TODO: Handle all atomics and other side-effect operations we know of.
7662     break;
7663   case Instruction::Load:
7664     // Loads cause the NO_READS property to disappear.
7665     removeAssumedBits(NO_READS);
7666     return;
7667 
7668   case Instruction::Store:
7669     // Stores cause the NO_WRITES property to disappear if the use is the
7670     // pointer operand. Note that while capturing was taken care of somewhere
7671     // else we need to deal with stores of the value that is not looked through.
7672     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7673       removeAssumedBits(NO_WRITES);
7674     else
7675       indicatePessimisticFixpoint();
7676     return;
7677 
7678   case Instruction::Call:
7679   case Instruction::CallBr:
7680   case Instruction::Invoke: {
7681     // For call sites we look at the argument memory behavior attribute (this
7682     // could be recursive!) in order to restrict our own state.
7683     const auto *CB = cast<CallBase>(UserI);
7684 
7685     // Give up on operand bundles.
7686     if (CB->isBundleOperand(&U)) {
7687       indicatePessimisticFixpoint();
7688       return;
7689     }
7690 
7691     // Calling a function does read the function pointer, maybe write it if the
7692     // function is self-modifying.
7693     if (CB->isCallee(&U)) {
7694       removeAssumedBits(NO_READS);
7695       break;
7696     }
7697 
7698     // Adjust the possible access behavior based on the information on the
7699     // argument.
7700     IRPosition Pos;
7701     if (U.get()->getType()->isPointerTy())
7702       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7703     else
7704       Pos = IRPosition::callsite_function(*CB);
7705     const auto &MemBehaviorAA =
7706         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7707     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7708     // and at least "known".
7709     intersectAssumedBits(MemBehaviorAA.getAssumed());
7710     return;
7711   }
7712   };
7713 
7714   // Generally, look at the "may-properties" and adjust the assumed state if we
7715   // did not trigger special handling before.
7716   if (UserI->mayReadFromMemory())
7717     removeAssumedBits(NO_READS);
7718   if (UserI->mayWriteToMemory())
7719     removeAssumedBits(NO_WRITES);
7720 }
7721 } // namespace
7722 
7723 /// -------------------- Memory Locations Attributes ---------------------------
7724 /// Includes read-none, argmemonly, inaccessiblememonly,
7725 /// inaccessiblememorargmemonly
7726 /// ----------------------------------------------------------------------------
7727 
7728 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7729     AAMemoryLocation::MemoryLocationsKind MLK) {
7730   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7731     return "all memory";
7732   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7733     return "no memory";
7734   std::string S = "memory:";
7735   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7736     S += "stack,";
7737   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7738     S += "constant,";
7739   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7740     S += "internal global,";
7741   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7742     S += "external global,";
7743   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7744     S += "argument,";
7745   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7746     S += "inaccessible,";
7747   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7748     S += "malloced,";
7749   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7750     S += "unknown,";
7751   S.pop_back();
7752   return S;
7753 }
7754 
7755 namespace {
7756 struct AAMemoryLocationImpl : public AAMemoryLocation {
7757 
7758   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7759       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7760     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7761       AccessKind2Accesses[u] = nullptr;
7762   }
7763 
7764   ~AAMemoryLocationImpl() {
7765     // The AccessSets are allocated via a BumpPtrAllocator, we call
7766     // the destructor manually.
7767     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7768       if (AccessKind2Accesses[u])
7769         AccessKind2Accesses[u]->~AccessSet();
7770   }
7771 
7772   /// See AbstractAttribute::initialize(...).
7773   void initialize(Attributor &A) override {
7774     intersectAssumedBits(BEST_STATE);
7775     getKnownStateFromValue(A, getIRPosition(), getState());
7776     AAMemoryLocation::initialize(A);
7777   }
7778 
7779   /// Return the memory behavior information encoded in the IR for \p IRP.
7780   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7781                                      BitIntegerState &State,
7782                                      bool IgnoreSubsumingPositions = false) {
7783     // For internal functions we ignore `argmemonly` and
7784     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7785     // constant propagation. It is unclear if this is the best way but it is
7786     // unlikely this will cause real performance problems. If we are deriving
7787     // attributes for the anchor function we even remove the attribute in
7788     // addition to ignoring it.
7789     bool UseArgMemOnly = true;
7790     Function *AnchorFn = IRP.getAnchorScope();
7791     if (AnchorFn && A.isRunOn(*AnchorFn))
7792       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7793 
7794     SmallVector<Attribute, 2> Attrs;
7795     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7796     for (const Attribute &Attr : Attrs) {
7797       switch (Attr.getKindAsEnum()) {
7798       case Attribute::ReadNone:
7799         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7800         break;
7801       case Attribute::InaccessibleMemOnly:
7802         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7803         break;
7804       case Attribute::ArgMemOnly:
7805         if (UseArgMemOnly)
7806           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7807         else
7808           IRP.removeAttrs({Attribute::ArgMemOnly});
7809         break;
7810       case Attribute::InaccessibleMemOrArgMemOnly:
7811         if (UseArgMemOnly)
7812           State.addKnownBits(inverseLocation(
7813               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7814         else
7815           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7816         break;
7817       default:
7818         llvm_unreachable("Unexpected attribute!");
7819       }
7820     }
7821   }
7822 
7823   /// See AbstractAttribute::getDeducedAttributes(...).
7824   void getDeducedAttributes(LLVMContext &Ctx,
7825                             SmallVectorImpl<Attribute> &Attrs) const override {
7826     assert(Attrs.size() == 0);
7827     if (isAssumedReadNone()) {
7828       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7829     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7830       if (isAssumedInaccessibleMemOnly())
7831         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7832       else if (isAssumedArgMemOnly())
7833         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7834       else if (isAssumedInaccessibleOrArgMemOnly())
7835         Attrs.push_back(
7836             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7837     }
7838     assert(Attrs.size() <= 1);
7839   }
7840 
7841   /// See AbstractAttribute::manifest(...).
7842   ChangeStatus manifest(Attributor &A) override {
7843     const IRPosition &IRP = getIRPosition();
7844 
7845     // Check if we would improve the existing attributes first.
7846     SmallVector<Attribute, 4> DeducedAttrs;
7847     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7848     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7849           return IRP.hasAttr(Attr.getKindAsEnum(),
7850                              /* IgnoreSubsumingPositions */ true);
7851         }))
7852       return ChangeStatus::UNCHANGED;
7853 
7854     // Clear existing attributes.
7855     IRP.removeAttrs(AttrKinds);
7856     if (isAssumedReadNone())
7857       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7858 
7859     // Use the generic manifest method.
7860     return IRAttribute::manifest(A);
7861   }
7862 
7863   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7864   bool checkForAllAccessesToMemoryKind(
7865       function_ref<bool(const Instruction *, const Value *, AccessKind,
7866                         MemoryLocationsKind)>
7867           Pred,
7868       MemoryLocationsKind RequestedMLK) const override {
7869     if (!isValidState())
7870       return false;
7871 
7872     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7873     if (AssumedMLK == NO_LOCATIONS)
7874       return true;
7875 
7876     unsigned Idx = 0;
7877     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7878          CurMLK *= 2, ++Idx) {
7879       if (CurMLK & RequestedMLK)
7880         continue;
7881 
7882       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7883         for (const AccessInfo &AI : *Accesses)
7884           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7885             return false;
7886     }
7887 
7888     return true;
7889   }
7890 
7891   ChangeStatus indicatePessimisticFixpoint() override {
7892     // If we give up and indicate a pessimistic fixpoint this instruction will
7893     // become an access for all potential access kinds:
7894     // TODO: Add pointers for argmemonly and globals to improve the results of
7895     //       checkForAllAccessesToMemoryKind.
7896     bool Changed = false;
7897     MemoryLocationsKind KnownMLK = getKnown();
7898     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7899     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7900       if (!(CurMLK & KnownMLK))
7901         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7902                                   getAccessKindFromInst(I));
7903     return AAMemoryLocation::indicatePessimisticFixpoint();
7904   }
7905 
7906 protected:
7907   /// Helper struct to tie together an instruction that has a read or write
7908   /// effect with the pointer it accesses (if any).
7909   struct AccessInfo {
7910 
7911     /// The instruction that caused the access.
7912     const Instruction *I;
7913 
7914     /// The base pointer that is accessed, or null if unknown.
7915     const Value *Ptr;
7916 
7917     /// The kind of access (read/write/read+write).
7918     AccessKind Kind;
7919 
7920     bool operator==(const AccessInfo &RHS) const {
7921       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7922     }
7923     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7924       if (LHS.I != RHS.I)
7925         return LHS.I < RHS.I;
7926       if (LHS.Ptr != RHS.Ptr)
7927         return LHS.Ptr < RHS.Ptr;
7928       if (LHS.Kind != RHS.Kind)
7929         return LHS.Kind < RHS.Kind;
7930       return false;
7931     }
7932   };
7933 
7934   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7935   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7936   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7937   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7938 
7939   /// Categorize the pointer arguments of CB that might access memory in
7940   /// AccessedLoc and update the state and access map accordingly.
7941   void
7942   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7943                                      AAMemoryLocation::StateType &AccessedLocs,
7944                                      bool &Changed);
7945 
7946   /// Return the kind(s) of location that may be accessed by \p V.
7947   AAMemoryLocation::MemoryLocationsKind
7948   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7949 
7950   /// Return the access kind as determined by \p I.
7951   AccessKind getAccessKindFromInst(const Instruction *I) {
7952     AccessKind AK = READ_WRITE;
7953     if (I) {
7954       AK = I->mayReadFromMemory() ? READ : NONE;
7955       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7956     }
7957     return AK;
7958   }
7959 
7960   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7961   /// an access of kind \p AK to a \p MLK memory location with the access
7962   /// pointer \p Ptr.
7963   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7964                                  MemoryLocationsKind MLK, const Instruction *I,
7965                                  const Value *Ptr, bool &Changed,
7966                                  AccessKind AK = READ_WRITE) {
7967 
7968     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7969     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7970     if (!Accesses)
7971       Accesses = new (Allocator) AccessSet();
7972     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7973     State.removeAssumedBits(MLK);
7974   }
7975 
7976   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7977   /// arguments, and update the state and access map accordingly.
7978   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7979                           AAMemoryLocation::StateType &State, bool &Changed);
7980 
7981   /// Used to allocate access sets.
7982   BumpPtrAllocator &Allocator;
7983 
7984   /// The set of IR attributes AAMemoryLocation deals with.
7985   static const Attribute::AttrKind AttrKinds[4];
7986 };
7987 
7988 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7989     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7990     Attribute::InaccessibleMemOrArgMemOnly};
7991 
7992 void AAMemoryLocationImpl::categorizePtrValue(
7993     Attributor &A, const Instruction &I, const Value &Ptr,
7994     AAMemoryLocation::StateType &State, bool &Changed) {
7995   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7996                     << Ptr << " ["
7997                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7998 
7999   SmallVector<Value *, 8> Objects;
8000   bool UsedAssumedInformation = false;
8001   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
8002                                        UsedAssumedInformation,
8003                                        AA::Intraprocedural)) {
8004     LLVM_DEBUG(
8005         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8006     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8007                               getAccessKindFromInst(&I));
8008     return;
8009   }
8010 
8011   for (Value *Obj : Objects) {
8012     // TODO: recognize the TBAA used for constant accesses.
8013     MemoryLocationsKind MLK = NO_LOCATIONS;
8014     if (isa<UndefValue>(Obj))
8015       continue;
8016     if (isa<Argument>(Obj)) {
8017       // TODO: For now we do not treat byval arguments as local copies performed
8018       // on the call edge, though, we should. To make that happen we need to
8019       // teach various passes, e.g., DSE, about the copy effect of a byval. That
8020       // would also allow us to mark functions only accessing byval arguments as
8021       // readnone again, atguably their acceses have no effect outside of the
8022       // function, like accesses to allocas.
8023       MLK = NO_ARGUMENT_MEM;
8024     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
8025       // Reading constant memory is not treated as a read "effect" by the
8026       // function attr pass so we won't neither. Constants defined by TBAA are
8027       // similar. (We know we do not write it because it is constant.)
8028       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8029         if (GVar->isConstant())
8030           continue;
8031 
8032       if (GV->hasLocalLinkage())
8033         MLK = NO_GLOBAL_INTERNAL_MEM;
8034       else
8035         MLK = NO_GLOBAL_EXTERNAL_MEM;
8036     } else if (isa<ConstantPointerNull>(Obj) &&
8037                !NullPointerIsDefined(getAssociatedFunction(),
8038                                      Ptr.getType()->getPointerAddressSpace())) {
8039       continue;
8040     } else if (isa<AllocaInst>(Obj)) {
8041       MLK = NO_LOCAL_MEM;
8042     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
8043       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
8044           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
8045       if (NoAliasAA.isAssumedNoAlias())
8046         MLK = NO_MALLOCED_MEM;
8047       else
8048         MLK = NO_UNKOWN_MEM;
8049     } else {
8050       MLK = NO_UNKOWN_MEM;
8051     }
8052 
8053     assert(MLK != NO_LOCATIONS && "No location specified!");
8054     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8055                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
8056                       << "\n");
8057     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
8058                               getAccessKindFromInst(&I));
8059   }
8060 
8061   LLVM_DEBUG(
8062       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8063              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8064 }
8065 
8066 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8067     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8068     bool &Changed) {
8069   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8070 
8071     // Skip non-pointer arguments.
8072     const Value *ArgOp = CB.getArgOperand(ArgNo);
8073     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8074       continue;
8075 
8076     // Skip readnone arguments.
8077     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8078     const auto &ArgOpMemLocationAA =
8079         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8080 
8081     if (ArgOpMemLocationAA.isAssumedReadNone())
8082       continue;
8083 
8084     // Categorize potentially accessed pointer arguments as if there was an
8085     // access instruction with them as pointer.
8086     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8087   }
8088 }
8089 
8090 AAMemoryLocation::MemoryLocationsKind
8091 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8092                                                   bool &Changed) {
8093   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8094                     << I << "\n");
8095 
8096   AAMemoryLocation::StateType AccessedLocs;
8097   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8098 
8099   if (auto *CB = dyn_cast<CallBase>(&I)) {
8100 
8101     // First check if we assume any memory is access is visible.
8102     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8103         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8104     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8105                       << " [" << CBMemLocationAA << "]\n");
8106 
8107     if (CBMemLocationAA.isAssumedReadNone())
8108       return NO_LOCATIONS;
8109 
8110     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
8111       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8112                                 Changed, getAccessKindFromInst(&I));
8113       return AccessedLocs.getAssumed();
8114     }
8115 
8116     uint32_t CBAssumedNotAccessedLocs =
8117         CBMemLocationAA.getAssumedNotAccessedLocation();
8118 
8119     // Set the argmemonly and global bit as we handle them separately below.
8120     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8121         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8122 
8123     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8124       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8125         continue;
8126       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8127                                 getAccessKindFromInst(&I));
8128     }
8129 
8130     // Now handle global memory if it might be accessed. This is slightly tricky
8131     // as NO_GLOBAL_MEM has multiple bits set.
8132     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8133     if (HasGlobalAccesses) {
8134       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8135                             AccessKind Kind, MemoryLocationsKind MLK) {
8136         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8137                                   getAccessKindFromInst(&I));
8138         return true;
8139       };
8140       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8141               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8142         return AccessedLocs.getWorstState();
8143     }
8144 
8145     LLVM_DEBUG(
8146         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8147                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8148 
8149     // Now handle argument memory if it might be accessed.
8150     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8151     if (HasArgAccesses)
8152       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8153 
8154     LLVM_DEBUG(
8155         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8156                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8157 
8158     return AccessedLocs.getAssumed();
8159   }
8160 
8161   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8162     LLVM_DEBUG(
8163         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8164                << I << " [" << *Ptr << "]\n");
8165     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8166     return AccessedLocs.getAssumed();
8167   }
8168 
8169   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8170                     << I << "\n");
8171   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8172                             getAccessKindFromInst(&I));
8173   return AccessedLocs.getAssumed();
8174 }
8175 
8176 /// An AA to represent the memory behavior function attributes.
8177 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8178   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8179       : AAMemoryLocationImpl(IRP, A) {}
8180 
8181   /// See AbstractAttribute::updateImpl(Attributor &A).
8182   virtual ChangeStatus updateImpl(Attributor &A) override {
8183 
8184     const auto &MemBehaviorAA =
8185         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8186     if (MemBehaviorAA.isAssumedReadNone()) {
8187       if (MemBehaviorAA.isKnownReadNone())
8188         return indicateOptimisticFixpoint();
8189       assert(isAssumedReadNone() &&
8190              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8191       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8192       return ChangeStatus::UNCHANGED;
8193     }
8194 
8195     // The current assumed state used to determine a change.
8196     auto AssumedState = getAssumed();
8197     bool Changed = false;
8198 
8199     auto CheckRWInst = [&](Instruction &I) {
8200       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8201       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8202                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8203       removeAssumedBits(inverseLocation(MLK, false, false));
8204       // Stop once only the valid bit set in the *not assumed location*, thus
8205       // once we don't actually exclude any memory locations in the state.
8206       return getAssumedNotAccessedLocation() != VALID_STATE;
8207     };
8208 
8209     bool UsedAssumedInformation = false;
8210     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8211                                             UsedAssumedInformation))
8212       return indicatePessimisticFixpoint();
8213 
8214     Changed |= AssumedState != getAssumed();
8215     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8216   }
8217 
8218   /// See AbstractAttribute::trackStatistics()
8219   void trackStatistics() const override {
8220     if (isAssumedReadNone())
8221       STATS_DECLTRACK_FN_ATTR(readnone)
8222     else if (isAssumedArgMemOnly())
8223       STATS_DECLTRACK_FN_ATTR(argmemonly)
8224     else if (isAssumedInaccessibleMemOnly())
8225       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8226     else if (isAssumedInaccessibleOrArgMemOnly())
8227       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8228   }
8229 };
8230 
8231 /// AAMemoryLocation attribute for call sites.
8232 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8233   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8234       : AAMemoryLocationImpl(IRP, A) {}
8235 
8236   /// See AbstractAttribute::initialize(...).
8237   void initialize(Attributor &A) override {
8238     AAMemoryLocationImpl::initialize(A);
8239     Function *F = getAssociatedFunction();
8240     if (!F || F->isDeclaration())
8241       indicatePessimisticFixpoint();
8242   }
8243 
8244   /// See AbstractAttribute::updateImpl(...).
8245   ChangeStatus updateImpl(Attributor &A) override {
8246     // TODO: Once we have call site specific value information we can provide
8247     //       call site specific liveness liveness information and then it makes
8248     //       sense to specialize attributes for call sites arguments instead of
8249     //       redirecting requests to the callee argument.
8250     Function *F = getAssociatedFunction();
8251     const IRPosition &FnPos = IRPosition::function(*F);
8252     auto &FnAA =
8253         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8254     bool Changed = false;
8255     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8256                           AccessKind Kind, MemoryLocationsKind MLK) {
8257       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8258                                 getAccessKindFromInst(I));
8259       return true;
8260     };
8261     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8262       return indicatePessimisticFixpoint();
8263     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8264   }
8265 
8266   /// See AbstractAttribute::trackStatistics()
8267   void trackStatistics() const override {
8268     if (isAssumedReadNone())
8269       STATS_DECLTRACK_CS_ATTR(readnone)
8270   }
8271 };
8272 } // namespace
8273 
8274 /// ------------------ Value Constant Range Attribute -------------------------
8275 
8276 namespace {
8277 struct AAValueConstantRangeImpl : AAValueConstantRange {
8278   using StateType = IntegerRangeState;
8279   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8280       : AAValueConstantRange(IRP, A) {}
8281 
8282   /// See AbstractAttribute::initialize(..).
8283   void initialize(Attributor &A) override {
8284     if (A.hasSimplificationCallback(getIRPosition())) {
8285       indicatePessimisticFixpoint();
8286       return;
8287     }
8288 
8289     // Intersect a range given by SCEV.
8290     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8291 
8292     // Intersect a range given by LVI.
8293     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8294   }
8295 
8296   /// See AbstractAttribute::getAsStr().
8297   const std::string getAsStr() const override {
8298     std::string Str;
8299     llvm::raw_string_ostream OS(Str);
8300     OS << "range(" << getBitWidth() << ")<";
8301     getKnown().print(OS);
8302     OS << " / ";
8303     getAssumed().print(OS);
8304     OS << ">";
8305     return OS.str();
8306   }
8307 
8308   /// Helper function to get a SCEV expr for the associated value at program
8309   /// point \p I.
8310   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8311     if (!getAnchorScope())
8312       return nullptr;
8313 
8314     ScalarEvolution *SE =
8315         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8316             *getAnchorScope());
8317 
8318     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8319         *getAnchorScope());
8320 
8321     if (!SE || !LI)
8322       return nullptr;
8323 
8324     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8325     if (!I)
8326       return S;
8327 
8328     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8329   }
8330 
8331   /// Helper function to get a range from SCEV for the associated value at
8332   /// program point \p I.
8333   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8334                                          const Instruction *I = nullptr) const {
8335     if (!getAnchorScope())
8336       return getWorstState(getBitWidth());
8337 
8338     ScalarEvolution *SE =
8339         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8340             *getAnchorScope());
8341 
8342     const SCEV *S = getSCEV(A, I);
8343     if (!SE || !S)
8344       return getWorstState(getBitWidth());
8345 
8346     return SE->getUnsignedRange(S);
8347   }
8348 
8349   /// Helper function to get a range from LVI for the associated value at
8350   /// program point \p I.
8351   ConstantRange
8352   getConstantRangeFromLVI(Attributor &A,
8353                           const Instruction *CtxI = nullptr) const {
8354     if (!getAnchorScope())
8355       return getWorstState(getBitWidth());
8356 
8357     LazyValueInfo *LVI =
8358         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8359             *getAnchorScope());
8360 
8361     if (!LVI || !CtxI)
8362       return getWorstState(getBitWidth());
8363     return LVI->getConstantRange(&getAssociatedValue(),
8364                                  const_cast<Instruction *>(CtxI));
8365   }
8366 
8367   /// Return true if \p CtxI is valid for querying outside analyses.
8368   /// This basically makes sure we do not ask intra-procedural analysis
8369   /// about a context in the wrong function or a context that violates
8370   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8371   /// if the original context of this AA is OK or should be considered invalid.
8372   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8373                                                const Instruction *CtxI,
8374                                                bool AllowAACtxI) const {
8375     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8376       return false;
8377 
8378     // Our context might be in a different function, neither intra-procedural
8379     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8380     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8381       return false;
8382 
8383     // If the context is not dominated by the value there are paths to the
8384     // context that do not define the value. This cannot be handled by
8385     // LazyValueInfo so we need to bail.
8386     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8387       InformationCache &InfoCache = A.getInfoCache();
8388       const DominatorTree *DT =
8389           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8390               *I->getFunction());
8391       return DT && DT->dominates(I, CtxI);
8392     }
8393 
8394     return true;
8395   }
8396 
8397   /// See AAValueConstantRange::getKnownConstantRange(..).
8398   ConstantRange
8399   getKnownConstantRange(Attributor &A,
8400                         const Instruction *CtxI = nullptr) const override {
8401     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8402                                                  /* AllowAACtxI */ false))
8403       return getKnown();
8404 
8405     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8406     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8407     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8408   }
8409 
8410   /// See AAValueConstantRange::getAssumedConstantRange(..).
8411   ConstantRange
8412   getAssumedConstantRange(Attributor &A,
8413                           const Instruction *CtxI = nullptr) const override {
8414     // TODO: Make SCEV use Attributor assumption.
8415     //       We may be able to bound a variable range via assumptions in
8416     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8417     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8418     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8419                                                  /* AllowAACtxI */ false))
8420       return getAssumed();
8421 
8422     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8423     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8424     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8425   }
8426 
8427   /// Helper function to create MDNode for range metadata.
8428   static MDNode *
8429   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8430                             const ConstantRange &AssumedConstantRange) {
8431     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8432                                   Ty, AssumedConstantRange.getLower())),
8433                               ConstantAsMetadata::get(ConstantInt::get(
8434                                   Ty, AssumedConstantRange.getUpper()))};
8435     return MDNode::get(Ctx, LowAndHigh);
8436   }
8437 
8438   /// Return true if \p Assumed is included in \p KnownRanges.
8439   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8440 
8441     if (Assumed.isFullSet())
8442       return false;
8443 
8444     if (!KnownRanges)
8445       return true;
8446 
8447     // If multiple ranges are annotated in IR, we give up to annotate assumed
8448     // range for now.
8449 
8450     // TODO:  If there exists a known range which containts assumed range, we
8451     // can say assumed range is better.
8452     if (KnownRanges->getNumOperands() > 2)
8453       return false;
8454 
8455     ConstantInt *Lower =
8456         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8457     ConstantInt *Upper =
8458         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8459 
8460     ConstantRange Known(Lower->getValue(), Upper->getValue());
8461     return Known.contains(Assumed) && Known != Assumed;
8462   }
8463 
8464   /// Helper function to set range metadata.
8465   static bool
8466   setRangeMetadataIfisBetterRange(Instruction *I,
8467                                   const ConstantRange &AssumedConstantRange) {
8468     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8469     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8470       if (!AssumedConstantRange.isEmptySet()) {
8471         I->setMetadata(LLVMContext::MD_range,
8472                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8473                                                  AssumedConstantRange));
8474         return true;
8475       }
8476     }
8477     return false;
8478   }
8479 
8480   /// See AbstractAttribute::manifest()
8481   ChangeStatus manifest(Attributor &A) override {
8482     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8483     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8484     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8485 
8486     auto &V = getAssociatedValue();
8487     if (!AssumedConstantRange.isEmptySet() &&
8488         !AssumedConstantRange.isSingleElement()) {
8489       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8490         assert(I == getCtxI() && "Should not annotate an instruction which is "
8491                                  "not the context instruction");
8492         if (isa<CallInst>(I) || isa<LoadInst>(I))
8493           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8494             Changed = ChangeStatus::CHANGED;
8495       }
8496     }
8497 
8498     return Changed;
8499   }
8500 };
8501 
8502 struct AAValueConstantRangeArgument final
8503     : AAArgumentFromCallSiteArguments<
8504           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8505           true /* BridgeCallBaseContext */> {
8506   using Base = AAArgumentFromCallSiteArguments<
8507       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8508       true /* BridgeCallBaseContext */>;
8509   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8510       : Base(IRP, A) {}
8511 
8512   /// See AbstractAttribute::initialize(..).
8513   void initialize(Attributor &A) override {
8514     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8515       indicatePessimisticFixpoint();
8516     } else {
8517       Base::initialize(A);
8518     }
8519   }
8520 
8521   /// See AbstractAttribute::trackStatistics()
8522   void trackStatistics() const override {
8523     STATS_DECLTRACK_ARG_ATTR(value_range)
8524   }
8525 };
8526 
8527 struct AAValueConstantRangeReturned
8528     : AAReturnedFromReturnedValues<AAValueConstantRange,
8529                                    AAValueConstantRangeImpl,
8530                                    AAValueConstantRangeImpl::StateType,
8531                                    /* PropogateCallBaseContext */ true> {
8532   using Base =
8533       AAReturnedFromReturnedValues<AAValueConstantRange,
8534                                    AAValueConstantRangeImpl,
8535                                    AAValueConstantRangeImpl::StateType,
8536                                    /* PropogateCallBaseContext */ true>;
8537   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8538       : Base(IRP, A) {}
8539 
8540   /// See AbstractAttribute::initialize(...).
8541   void initialize(Attributor &A) override {}
8542 
8543   /// See AbstractAttribute::trackStatistics()
8544   void trackStatistics() const override {
8545     STATS_DECLTRACK_FNRET_ATTR(value_range)
8546   }
8547 };
8548 
8549 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8550   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8551       : AAValueConstantRangeImpl(IRP, A) {}
8552 
8553   /// See AbstractAttribute::initialize(...).
8554   void initialize(Attributor &A) override {
8555     AAValueConstantRangeImpl::initialize(A);
8556     if (isAtFixpoint())
8557       return;
8558 
8559     Value &V = getAssociatedValue();
8560 
8561     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8562       unionAssumed(ConstantRange(C->getValue()));
8563       indicateOptimisticFixpoint();
8564       return;
8565     }
8566 
8567     if (isa<UndefValue>(&V)) {
8568       // Collapse the undef state to 0.
8569       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8570       indicateOptimisticFixpoint();
8571       return;
8572     }
8573 
8574     if (isa<CallBase>(&V))
8575       return;
8576 
8577     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8578       return;
8579 
8580     // If it is a load instruction with range metadata, use it.
8581     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8582       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8583         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8584         return;
8585       }
8586 
8587     // We can work with PHI and select instruction as we traverse their operands
8588     // during update.
8589     if (isa<SelectInst>(V) || isa<PHINode>(V))
8590       return;
8591 
8592     // Otherwise we give up.
8593     indicatePessimisticFixpoint();
8594 
8595     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8596                       << getAssociatedValue() << "\n");
8597   }
8598 
8599   bool calculateBinaryOperator(
8600       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8601       const Instruction *CtxI,
8602       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8603     Value *LHS = BinOp->getOperand(0);
8604     Value *RHS = BinOp->getOperand(1);
8605 
8606     // Simplify the operands first.
8607     bool UsedAssumedInformation = false;
8608     const auto &SimplifiedLHS =
8609         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8610                                *this, UsedAssumedInformation);
8611     if (!SimplifiedLHS)
8612       return true;
8613     if (!SimplifiedLHS.getValue())
8614       return false;
8615     LHS = *SimplifiedLHS;
8616 
8617     const auto &SimplifiedRHS =
8618         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8619                                *this, UsedAssumedInformation);
8620     if (!SimplifiedRHS)
8621       return true;
8622     if (!SimplifiedRHS.getValue())
8623       return false;
8624     RHS = *SimplifiedRHS;
8625 
8626     // TODO: Allow non integers as well.
8627     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8628       return false;
8629 
8630     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8631         *this, IRPosition::value(*LHS, getCallBaseContext()),
8632         DepClassTy::REQUIRED);
8633     QuerriedAAs.push_back(&LHSAA);
8634     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8635 
8636     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8637         *this, IRPosition::value(*RHS, getCallBaseContext()),
8638         DepClassTy::REQUIRED);
8639     QuerriedAAs.push_back(&RHSAA);
8640     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8641 
8642     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8643 
8644     T.unionAssumed(AssumedRange);
8645 
8646     // TODO: Track a known state too.
8647 
8648     return T.isValidState();
8649   }
8650 
8651   bool calculateCastInst(
8652       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8653       const Instruction *CtxI,
8654       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8655     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8656     // TODO: Allow non integers as well.
8657     Value *OpV = CastI->getOperand(0);
8658 
8659     // Simplify the operand first.
8660     bool UsedAssumedInformation = false;
8661     const auto &SimplifiedOpV =
8662         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8663                                *this, UsedAssumedInformation);
8664     if (!SimplifiedOpV)
8665       return true;
8666     if (!SimplifiedOpV.getValue())
8667       return false;
8668     OpV = *SimplifiedOpV;
8669 
8670     if (!OpV->getType()->isIntegerTy())
8671       return false;
8672 
8673     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8674         *this, IRPosition::value(*OpV, getCallBaseContext()),
8675         DepClassTy::REQUIRED);
8676     QuerriedAAs.push_back(&OpAA);
8677     T.unionAssumed(
8678         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8679     return T.isValidState();
8680   }
8681 
8682   bool
8683   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8684                    const Instruction *CtxI,
8685                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8686     Value *LHS = CmpI->getOperand(0);
8687     Value *RHS = CmpI->getOperand(1);
8688 
8689     // Simplify the operands first.
8690     bool UsedAssumedInformation = false;
8691     const auto &SimplifiedLHS =
8692         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8693                                *this, UsedAssumedInformation);
8694     if (!SimplifiedLHS)
8695       return true;
8696     if (!SimplifiedLHS.getValue())
8697       return false;
8698     LHS = *SimplifiedLHS;
8699 
8700     const auto &SimplifiedRHS =
8701         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8702                                *this, UsedAssumedInformation);
8703     if (!SimplifiedRHS)
8704       return true;
8705     if (!SimplifiedRHS.getValue())
8706       return false;
8707     RHS = *SimplifiedRHS;
8708 
8709     // TODO: Allow non integers as well.
8710     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8711       return false;
8712 
8713     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8714         *this, IRPosition::value(*LHS, getCallBaseContext()),
8715         DepClassTy::REQUIRED);
8716     QuerriedAAs.push_back(&LHSAA);
8717     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8718         *this, IRPosition::value(*RHS, getCallBaseContext()),
8719         DepClassTy::REQUIRED);
8720     QuerriedAAs.push_back(&RHSAA);
8721     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8722     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8723 
8724     // If one of them is empty set, we can't decide.
8725     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8726       return true;
8727 
8728     bool MustTrue = false, MustFalse = false;
8729 
8730     auto AllowedRegion =
8731         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8732 
8733     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8734       MustFalse = true;
8735 
8736     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8737       MustTrue = true;
8738 
8739     assert((!MustTrue || !MustFalse) &&
8740            "Either MustTrue or MustFalse should be false!");
8741 
8742     if (MustTrue)
8743       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8744     else if (MustFalse)
8745       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8746     else
8747       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8748 
8749     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8750                       << " " << RHSAA << "\n");
8751 
8752     // TODO: Track a known state too.
8753     return T.isValidState();
8754   }
8755 
8756   /// See AbstractAttribute::updateImpl(...).
8757   ChangeStatus updateImpl(Attributor &A) override {
8758     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8759                             IntegerRangeState &T, bool Stripped) -> bool {
8760       Instruction *I = dyn_cast<Instruction>(&V);
8761       if (!I || isa<CallBase>(I)) {
8762 
8763         // Simplify the operand first.
8764         bool UsedAssumedInformation = false;
8765         const auto &SimplifiedOpV =
8766             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8767                                    *this, UsedAssumedInformation);
8768         if (!SimplifiedOpV)
8769           return true;
8770         if (!SimplifiedOpV.getValue())
8771           return false;
8772         Value *VPtr = *SimplifiedOpV;
8773 
8774         // If the value is not instruction, we query AA to Attributor.
8775         const auto &AA = A.getAAFor<AAValueConstantRange>(
8776             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8777             DepClassTy::REQUIRED);
8778 
8779         // Clamp operator is not used to utilize a program point CtxI.
8780         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8781 
8782         return T.isValidState();
8783       }
8784 
8785       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8786       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8787         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8788           return false;
8789       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8790         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8791           return false;
8792       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8793         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8794           return false;
8795       } else {
8796         // Give up with other instructions.
8797         // TODO: Add other instructions
8798 
8799         T.indicatePessimisticFixpoint();
8800         return false;
8801       }
8802 
8803       // Catch circular reasoning in a pessimistic way for now.
8804       // TODO: Check how the range evolves and if we stripped anything, see also
8805       //       AADereferenceable or AAAlign for similar situations.
8806       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8807         if (QueriedAA != this)
8808           continue;
8809         // If we are in a stady state we do not need to worry.
8810         if (T.getAssumed() == getState().getAssumed())
8811           continue;
8812         T.indicatePessimisticFixpoint();
8813       }
8814 
8815       return T.isValidState();
8816     };
8817 
8818     IntegerRangeState T(getBitWidth());
8819 
8820     bool UsedAssumedInformation = false;
8821     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8822                                                   VisitValueCB, getCtxI(),
8823                                                   UsedAssumedInformation,
8824                                                   /* UseValueSimplify */ false))
8825       return indicatePessimisticFixpoint();
8826 
8827     // Ensure that long def-use chains can't cause circular reasoning either by
8828     // introducing a cutoff below.
8829     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8830       return ChangeStatus::UNCHANGED;
8831     if (++NumChanges > MaxNumChanges) {
8832       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8833                         << " but only " << MaxNumChanges
8834                         << " are allowed to avoid cyclic reasoning.");
8835       return indicatePessimisticFixpoint();
8836     }
8837     return ChangeStatus::CHANGED;
8838   }
8839 
8840   /// See AbstractAttribute::trackStatistics()
8841   void trackStatistics() const override {
8842     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8843   }
8844 
8845   /// Tracker to bail after too many widening steps of the constant range.
8846   int NumChanges = 0;
8847 
8848   /// Upper bound for the number of allowed changes (=widening steps) for the
8849   /// constant range before we give up.
8850   static constexpr int MaxNumChanges = 5;
8851 };
8852 
8853 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8854   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8855       : AAValueConstantRangeImpl(IRP, A) {}
8856 
8857   /// See AbstractAttribute::initialize(...).
8858   ChangeStatus updateImpl(Attributor &A) override {
8859     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8860                      "not be called");
8861   }
8862 
8863   /// See AbstractAttribute::trackStatistics()
8864   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8865 };
8866 
8867 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8868   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8869       : AAValueConstantRangeFunction(IRP, A) {}
8870 
8871   /// See AbstractAttribute::trackStatistics()
8872   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8873 };
8874 
8875 struct AAValueConstantRangeCallSiteReturned
8876     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8877                                      AAValueConstantRangeImpl,
8878                                      AAValueConstantRangeImpl::StateType,
8879                                      /* IntroduceCallBaseContext */ true> {
8880   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8881       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8882                                        AAValueConstantRangeImpl,
8883                                        AAValueConstantRangeImpl::StateType,
8884                                        /* IntroduceCallBaseContext */ true>(IRP,
8885                                                                             A) {
8886   }
8887 
8888   /// See AbstractAttribute::initialize(...).
8889   void initialize(Attributor &A) override {
8890     // If it is a load instruction with range metadata, use the metadata.
8891     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8892       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8893         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8894 
8895     AAValueConstantRangeImpl::initialize(A);
8896   }
8897 
8898   /// See AbstractAttribute::trackStatistics()
8899   void trackStatistics() const override {
8900     STATS_DECLTRACK_CSRET_ATTR(value_range)
8901   }
8902 };
8903 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8904   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8905       : AAValueConstantRangeFloating(IRP, A) {}
8906 
8907   /// See AbstractAttribute::manifest()
8908   ChangeStatus manifest(Attributor &A) override {
8909     return ChangeStatus::UNCHANGED;
8910   }
8911 
8912   /// See AbstractAttribute::trackStatistics()
8913   void trackStatistics() const override {
8914     STATS_DECLTRACK_CSARG_ATTR(value_range)
8915   }
8916 };
8917 } // namespace
8918 
8919 /// ------------------ Potential Values Attribute -------------------------
8920 
8921 namespace {
8922 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8923   using StateType = PotentialConstantIntValuesState;
8924 
8925   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8926       : AAPotentialConstantValues(IRP, A) {}
8927 
8928   /// See AbstractAttribute::initialize(..).
8929   void initialize(Attributor &A) override {
8930     if (A.hasSimplificationCallback(getIRPosition()))
8931       indicatePessimisticFixpoint();
8932     else
8933       AAPotentialConstantValues::initialize(A);
8934   }
8935 
8936   /// See AbstractAttribute::getAsStr().
8937   const std::string getAsStr() const override {
8938     std::string Str;
8939     llvm::raw_string_ostream OS(Str);
8940     OS << getState();
8941     return OS.str();
8942   }
8943 
8944   /// See AbstractAttribute::updateImpl(...).
8945   ChangeStatus updateImpl(Attributor &A) override {
8946     return indicatePessimisticFixpoint();
8947   }
8948 };
8949 
8950 struct AAPotentialConstantValuesArgument final
8951     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8952                                       AAPotentialConstantValuesImpl,
8953                                       PotentialConstantIntValuesState> {
8954   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8955                                                AAPotentialConstantValuesImpl,
8956                                                PotentialConstantIntValuesState>;
8957   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
8958       : Base(IRP, A) {}
8959 
8960   /// See AbstractAttribute::initialize(..).
8961   void initialize(Attributor &A) override {
8962     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8963       indicatePessimisticFixpoint();
8964     } else {
8965       Base::initialize(A);
8966     }
8967   }
8968 
8969   /// See AbstractAttribute::trackStatistics()
8970   void trackStatistics() const override {
8971     STATS_DECLTRACK_ARG_ATTR(potential_values)
8972   }
8973 };
8974 
8975 struct AAPotentialConstantValuesReturned
8976     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
8977                                    AAPotentialConstantValuesImpl> {
8978   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
8979                                             AAPotentialConstantValuesImpl>;
8980   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
8981       : Base(IRP, A) {}
8982 
8983   /// See AbstractAttribute::trackStatistics()
8984   void trackStatistics() const override {
8985     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8986   }
8987 };
8988 
8989 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
8990   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
8991       : AAPotentialConstantValuesImpl(IRP, A) {}
8992 
8993   /// See AbstractAttribute::initialize(..).
8994   void initialize(Attributor &A) override {
8995     AAPotentialConstantValuesImpl::initialize(A);
8996     if (isAtFixpoint())
8997       return;
8998 
8999     Value &V = getAssociatedValue();
9000 
9001     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9002       unionAssumed(C->getValue());
9003       indicateOptimisticFixpoint();
9004       return;
9005     }
9006 
9007     if (isa<UndefValue>(&V)) {
9008       unionAssumedWithUndef();
9009       indicateOptimisticFixpoint();
9010       return;
9011     }
9012 
9013     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9014       return;
9015 
9016     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9017       return;
9018 
9019     indicatePessimisticFixpoint();
9020 
9021     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9022                       << getAssociatedValue() << "\n");
9023   }
9024 
9025   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9026                                 const APInt &RHS) {
9027     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9028   }
9029 
9030   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9031                                  uint32_t ResultBitWidth) {
9032     Instruction::CastOps CastOp = CI->getOpcode();
9033     switch (CastOp) {
9034     default:
9035       llvm_unreachable("unsupported or not integer cast");
9036     case Instruction::Trunc:
9037       return Src.trunc(ResultBitWidth);
9038     case Instruction::SExt:
9039       return Src.sext(ResultBitWidth);
9040     case Instruction::ZExt:
9041       return Src.zext(ResultBitWidth);
9042     case Instruction::BitCast:
9043       return Src;
9044     }
9045   }
9046 
9047   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9048                                        const APInt &LHS, const APInt &RHS,
9049                                        bool &SkipOperation, bool &Unsupported) {
9050     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9051     // Unsupported is set to true when the binary operator is not supported.
9052     // SkipOperation is set to true when UB occur with the given operand pair
9053     // (LHS, RHS).
9054     // TODO: we should look at nsw and nuw keywords to handle operations
9055     //       that create poison or undef value.
9056     switch (BinOpcode) {
9057     default:
9058       Unsupported = true;
9059       return LHS;
9060     case Instruction::Add:
9061       return LHS + RHS;
9062     case Instruction::Sub:
9063       return LHS - RHS;
9064     case Instruction::Mul:
9065       return LHS * RHS;
9066     case Instruction::UDiv:
9067       if (RHS.isZero()) {
9068         SkipOperation = true;
9069         return LHS;
9070       }
9071       return LHS.udiv(RHS);
9072     case Instruction::SDiv:
9073       if (RHS.isZero()) {
9074         SkipOperation = true;
9075         return LHS;
9076       }
9077       return LHS.sdiv(RHS);
9078     case Instruction::URem:
9079       if (RHS.isZero()) {
9080         SkipOperation = true;
9081         return LHS;
9082       }
9083       return LHS.urem(RHS);
9084     case Instruction::SRem:
9085       if (RHS.isZero()) {
9086         SkipOperation = true;
9087         return LHS;
9088       }
9089       return LHS.srem(RHS);
9090     case Instruction::Shl:
9091       return LHS.shl(RHS);
9092     case Instruction::LShr:
9093       return LHS.lshr(RHS);
9094     case Instruction::AShr:
9095       return LHS.ashr(RHS);
9096     case Instruction::And:
9097       return LHS & RHS;
9098     case Instruction::Or:
9099       return LHS | RHS;
9100     case Instruction::Xor:
9101       return LHS ^ RHS;
9102     }
9103   }
9104 
9105   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9106                                            const APInt &LHS, const APInt &RHS) {
9107     bool SkipOperation = false;
9108     bool Unsupported = false;
9109     APInt Result =
9110         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9111     if (Unsupported)
9112       return false;
9113     // If SkipOperation is true, we can ignore this operand pair (L, R).
9114     if (!SkipOperation)
9115       unionAssumed(Result);
9116     return isValidState();
9117   }
9118 
9119   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9120     auto AssumedBefore = getAssumed();
9121     Value *LHS = ICI->getOperand(0);
9122     Value *RHS = ICI->getOperand(1);
9123 
9124     // Simplify the operands first.
9125     bool UsedAssumedInformation = false;
9126     const auto &SimplifiedLHS =
9127         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9128                                *this, UsedAssumedInformation);
9129     if (!SimplifiedLHS)
9130       return ChangeStatus::UNCHANGED;
9131     if (!SimplifiedLHS.getValue())
9132       return indicatePessimisticFixpoint();
9133     LHS = *SimplifiedLHS;
9134 
9135     const auto &SimplifiedRHS =
9136         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9137                                *this, UsedAssumedInformation);
9138     if (!SimplifiedRHS)
9139       return ChangeStatus::UNCHANGED;
9140     if (!SimplifiedRHS.getValue())
9141       return indicatePessimisticFixpoint();
9142     RHS = *SimplifiedRHS;
9143 
9144     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9145       return indicatePessimisticFixpoint();
9146 
9147     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9148         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9149     if (!LHSAA.isValidState())
9150       return indicatePessimisticFixpoint();
9151 
9152     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9153         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9154     if (!RHSAA.isValidState())
9155       return indicatePessimisticFixpoint();
9156 
9157     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9158     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9159 
9160     // TODO: make use of undef flag to limit potential values aggressively.
9161     bool MaybeTrue = false, MaybeFalse = false;
9162     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9163     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9164       // The result of any comparison between undefs can be soundly replaced
9165       // with undef.
9166       unionAssumedWithUndef();
9167     } else if (LHSAA.undefIsContained()) {
9168       for (const APInt &R : RHSAAPVS) {
9169         bool CmpResult = calculateICmpInst(ICI, Zero, R);
9170         MaybeTrue |= CmpResult;
9171         MaybeFalse |= !CmpResult;
9172         if (MaybeTrue & MaybeFalse)
9173           return indicatePessimisticFixpoint();
9174       }
9175     } else if (RHSAA.undefIsContained()) {
9176       for (const APInt &L : LHSAAPVS) {
9177         bool CmpResult = calculateICmpInst(ICI, L, Zero);
9178         MaybeTrue |= CmpResult;
9179         MaybeFalse |= !CmpResult;
9180         if (MaybeTrue & MaybeFalse)
9181           return indicatePessimisticFixpoint();
9182       }
9183     } else {
9184       for (const APInt &L : LHSAAPVS) {
9185         for (const APInt &R : RHSAAPVS) {
9186           bool CmpResult = calculateICmpInst(ICI, L, R);
9187           MaybeTrue |= CmpResult;
9188           MaybeFalse |= !CmpResult;
9189           if (MaybeTrue & MaybeFalse)
9190             return indicatePessimisticFixpoint();
9191         }
9192       }
9193     }
9194     if (MaybeTrue)
9195       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9196     if (MaybeFalse)
9197       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9198     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9199                                          : ChangeStatus::CHANGED;
9200   }
9201 
9202   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9203     auto AssumedBefore = getAssumed();
9204     Value *LHS = SI->getTrueValue();
9205     Value *RHS = SI->getFalseValue();
9206 
9207     // Simplify the operands first.
9208     bool UsedAssumedInformation = false;
9209     const auto &SimplifiedLHS =
9210         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9211                                *this, UsedAssumedInformation);
9212     if (!SimplifiedLHS)
9213       return ChangeStatus::UNCHANGED;
9214     if (!SimplifiedLHS.getValue())
9215       return indicatePessimisticFixpoint();
9216     LHS = *SimplifiedLHS;
9217 
9218     const auto &SimplifiedRHS =
9219         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9220                                *this, UsedAssumedInformation);
9221     if (!SimplifiedRHS)
9222       return ChangeStatus::UNCHANGED;
9223     if (!SimplifiedRHS.getValue())
9224       return indicatePessimisticFixpoint();
9225     RHS = *SimplifiedRHS;
9226 
9227     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9228       return indicatePessimisticFixpoint();
9229 
9230     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9231                                                   UsedAssumedInformation);
9232 
9233     // Check if we only need one operand.
9234     bool OnlyLeft = false, OnlyRight = false;
9235     if (C && *C && (*C)->isOneValue())
9236       OnlyLeft = true;
9237     else if (C && *C && (*C)->isZeroValue())
9238       OnlyRight = true;
9239 
9240     const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
9241     if (!OnlyRight) {
9242       LHSAA = &A.getAAFor<AAPotentialConstantValues>(
9243           *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9244       if (!LHSAA->isValidState())
9245         return indicatePessimisticFixpoint();
9246     }
9247     if (!OnlyLeft) {
9248       RHSAA = &A.getAAFor<AAPotentialConstantValues>(
9249           *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9250       if (!RHSAA->isValidState())
9251         return indicatePessimisticFixpoint();
9252     }
9253 
9254     if (!LHSAA || !RHSAA) {
9255       // select (true/false), lhs, rhs
9256       auto *OpAA = LHSAA ? LHSAA : RHSAA;
9257 
9258       if (OpAA->undefIsContained())
9259         unionAssumedWithUndef();
9260       else
9261         unionAssumed(*OpAA);
9262 
9263     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9264       // select i1 *, undef , undef => undef
9265       unionAssumedWithUndef();
9266     } else {
9267       unionAssumed(*LHSAA);
9268       unionAssumed(*RHSAA);
9269     }
9270     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9271                                          : ChangeStatus::CHANGED;
9272   }
9273 
9274   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9275     auto AssumedBefore = getAssumed();
9276     if (!CI->isIntegerCast())
9277       return indicatePessimisticFixpoint();
9278     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9279     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9280     Value *Src = CI->getOperand(0);
9281 
9282     // Simplify the operand first.
9283     bool UsedAssumedInformation = false;
9284     const auto &SimplifiedSrc =
9285         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9286                                *this, UsedAssumedInformation);
9287     if (!SimplifiedSrc)
9288       return ChangeStatus::UNCHANGED;
9289     if (!SimplifiedSrc.getValue())
9290       return indicatePessimisticFixpoint();
9291     Src = *SimplifiedSrc;
9292 
9293     auto &SrcAA = A.getAAFor<AAPotentialConstantValues>(
9294         *this, IRPosition::value(*Src), DepClassTy::REQUIRED);
9295     if (!SrcAA.isValidState())
9296       return indicatePessimisticFixpoint();
9297     const SetTy &SrcAAPVS = SrcAA.getAssumedSet();
9298     if (SrcAA.undefIsContained())
9299       unionAssumedWithUndef();
9300     else {
9301       for (const APInt &S : SrcAAPVS) {
9302         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9303         unionAssumed(T);
9304       }
9305     }
9306     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9307                                          : ChangeStatus::CHANGED;
9308   }
9309 
9310   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9311     auto AssumedBefore = getAssumed();
9312     Value *LHS = BinOp->getOperand(0);
9313     Value *RHS = BinOp->getOperand(1);
9314 
9315     // Simplify the operands first.
9316     bool UsedAssumedInformation = false;
9317     const auto &SimplifiedLHS =
9318         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9319                                *this, UsedAssumedInformation);
9320     if (!SimplifiedLHS)
9321       return ChangeStatus::UNCHANGED;
9322     if (!SimplifiedLHS.getValue())
9323       return indicatePessimisticFixpoint();
9324     LHS = *SimplifiedLHS;
9325 
9326     const auto &SimplifiedRHS =
9327         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9328                                *this, UsedAssumedInformation);
9329     if (!SimplifiedRHS)
9330       return ChangeStatus::UNCHANGED;
9331     if (!SimplifiedRHS.getValue())
9332       return indicatePessimisticFixpoint();
9333     RHS = *SimplifiedRHS;
9334 
9335     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9336       return indicatePessimisticFixpoint();
9337 
9338     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9339         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9340     if (!LHSAA.isValidState())
9341       return indicatePessimisticFixpoint();
9342 
9343     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9344         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9345     if (!RHSAA.isValidState())
9346       return indicatePessimisticFixpoint();
9347 
9348     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9349     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9350     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9351 
9352     // TODO: make use of undef flag to limit potential values aggressively.
9353     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9354       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9355         return indicatePessimisticFixpoint();
9356     } else if (LHSAA.undefIsContained()) {
9357       for (const APInt &R : RHSAAPVS) {
9358         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9359           return indicatePessimisticFixpoint();
9360       }
9361     } else if (RHSAA.undefIsContained()) {
9362       for (const APInt &L : LHSAAPVS) {
9363         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9364           return indicatePessimisticFixpoint();
9365       }
9366     } else {
9367       for (const APInt &L : LHSAAPVS) {
9368         for (const APInt &R : RHSAAPVS) {
9369           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9370             return indicatePessimisticFixpoint();
9371         }
9372       }
9373     }
9374     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9375                                          : ChangeStatus::CHANGED;
9376   }
9377 
9378   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9379     auto AssumedBefore = getAssumed();
9380     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9381       Value *IncomingValue = PHI->getIncomingValue(u);
9382 
9383       // Simplify the operand first.
9384       bool UsedAssumedInformation = false;
9385       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9386           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9387           UsedAssumedInformation);
9388       if (!SimplifiedIncomingValue)
9389         continue;
9390       if (!SimplifiedIncomingValue.getValue())
9391         return indicatePessimisticFixpoint();
9392       IncomingValue = *SimplifiedIncomingValue;
9393 
9394       auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9395           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9396       if (!PotentialValuesAA.isValidState())
9397         return indicatePessimisticFixpoint();
9398       if (PotentialValuesAA.undefIsContained())
9399         unionAssumedWithUndef();
9400       else
9401         unionAssumed(PotentialValuesAA.getAssumed());
9402     }
9403     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9404                                          : ChangeStatus::CHANGED;
9405   }
9406 
9407   /// See AbstractAttribute::updateImpl(...).
9408   ChangeStatus updateImpl(Attributor &A) override {
9409     Value &V = getAssociatedValue();
9410     Instruction *I = dyn_cast<Instruction>(&V);
9411 
9412     if (auto *ICI = dyn_cast<ICmpInst>(I))
9413       return updateWithICmpInst(A, ICI);
9414 
9415     if (auto *SI = dyn_cast<SelectInst>(I))
9416       return updateWithSelectInst(A, SI);
9417 
9418     if (auto *CI = dyn_cast<CastInst>(I))
9419       return updateWithCastInst(A, CI);
9420 
9421     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9422       return updateWithBinaryOperator(A, BinOp);
9423 
9424     if (auto *PHI = dyn_cast<PHINode>(I))
9425       return updateWithPHINode(A, PHI);
9426 
9427     return indicatePessimisticFixpoint();
9428   }
9429 
9430   /// See AbstractAttribute::trackStatistics()
9431   void trackStatistics() const override {
9432     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9433   }
9434 };
9435 
9436 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9437   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9438       : AAPotentialConstantValuesImpl(IRP, A) {}
9439 
9440   /// See AbstractAttribute::initialize(...).
9441   ChangeStatus updateImpl(Attributor &A) override {
9442     llvm_unreachable(
9443         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9444         "not be called");
9445   }
9446 
9447   /// See AbstractAttribute::trackStatistics()
9448   void trackStatistics() const override {
9449     STATS_DECLTRACK_FN_ATTR(potential_values)
9450   }
9451 };
9452 
9453 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9454   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9455       : AAPotentialConstantValuesFunction(IRP, A) {}
9456 
9457   /// See AbstractAttribute::trackStatistics()
9458   void trackStatistics() const override {
9459     STATS_DECLTRACK_CS_ATTR(potential_values)
9460   }
9461 };
9462 
9463 struct AAPotentialConstantValuesCallSiteReturned
9464     : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9465                                      AAPotentialConstantValuesImpl> {
9466   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9467                                             Attributor &A)
9468       : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9469                                        AAPotentialConstantValuesImpl>(IRP, A) {}
9470 
9471   /// See AbstractAttribute::trackStatistics()
9472   void trackStatistics() const override {
9473     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9474   }
9475 };
9476 
9477 struct AAPotentialConstantValuesCallSiteArgument
9478     : AAPotentialConstantValuesFloating {
9479   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9480                                             Attributor &A)
9481       : AAPotentialConstantValuesFloating(IRP, A) {}
9482 
9483   /// See AbstractAttribute::initialize(..).
9484   void initialize(Attributor &A) override {
9485     AAPotentialConstantValuesImpl::initialize(A);
9486     if (isAtFixpoint())
9487       return;
9488 
9489     Value &V = getAssociatedValue();
9490 
9491     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9492       unionAssumed(C->getValue());
9493       indicateOptimisticFixpoint();
9494       return;
9495     }
9496 
9497     if (isa<UndefValue>(&V)) {
9498       unionAssumedWithUndef();
9499       indicateOptimisticFixpoint();
9500       return;
9501     }
9502   }
9503 
9504   /// See AbstractAttribute::updateImpl(...).
9505   ChangeStatus updateImpl(Attributor &A) override {
9506     Value &V = getAssociatedValue();
9507     auto AssumedBefore = getAssumed();
9508     auto &AA = A.getAAFor<AAPotentialConstantValues>(
9509         *this, IRPosition::value(V), DepClassTy::REQUIRED);
9510     const auto &S = AA.getAssumed();
9511     unionAssumed(S);
9512     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9513                                          : ChangeStatus::CHANGED;
9514   }
9515 
9516   /// See AbstractAttribute::trackStatistics()
9517   void trackStatistics() const override {
9518     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9519   }
9520 };
9521 
9522 /// ------------------------ NoUndef Attribute ---------------------------------
9523 struct AANoUndefImpl : AANoUndef {
9524   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9525 
9526   /// See AbstractAttribute::initialize(...).
9527   void initialize(Attributor &A) override {
9528     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9529       indicateOptimisticFixpoint();
9530       return;
9531     }
9532     Value &V = getAssociatedValue();
9533     if (isa<UndefValue>(V))
9534       indicatePessimisticFixpoint();
9535     else if (isa<FreezeInst>(V))
9536       indicateOptimisticFixpoint();
9537     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9538              isGuaranteedNotToBeUndefOrPoison(&V))
9539       indicateOptimisticFixpoint();
9540     else
9541       AANoUndef::initialize(A);
9542   }
9543 
9544   /// See followUsesInMBEC
9545   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9546                        AANoUndef::StateType &State) {
9547     const Value *UseV = U->get();
9548     const DominatorTree *DT = nullptr;
9549     AssumptionCache *AC = nullptr;
9550     InformationCache &InfoCache = A.getInfoCache();
9551     if (Function *F = getAnchorScope()) {
9552       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9553       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9554     }
9555     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9556     bool TrackUse = false;
9557     // Track use for instructions which must produce undef or poison bits when
9558     // at least one operand contains such bits.
9559     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9560       TrackUse = true;
9561     return TrackUse;
9562   }
9563 
9564   /// See AbstractAttribute::getAsStr().
9565   const std::string getAsStr() const override {
9566     return getAssumed() ? "noundef" : "may-undef-or-poison";
9567   }
9568 
9569   ChangeStatus manifest(Attributor &A) override {
9570     // We don't manifest noundef attribute for dead positions because the
9571     // associated values with dead positions would be replaced with undef
9572     // values.
9573     bool UsedAssumedInformation = false;
9574     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9575                         UsedAssumedInformation))
9576       return ChangeStatus::UNCHANGED;
9577     // A position whose simplified value does not have any value is
9578     // considered to be dead. We don't manifest noundef in such positions for
9579     // the same reason above.
9580     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9581              .has_value())
9582       return ChangeStatus::UNCHANGED;
9583     return AANoUndef::manifest(A);
9584   }
9585 };
9586 
9587 struct AANoUndefFloating : public AANoUndefImpl {
9588   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9589       : AANoUndefImpl(IRP, A) {}
9590 
9591   /// See AbstractAttribute::initialize(...).
9592   void initialize(Attributor &A) override {
9593     AANoUndefImpl::initialize(A);
9594     if (!getState().isAtFixpoint())
9595       if (Instruction *CtxI = getCtxI())
9596         followUsesInMBEC(*this, A, getState(), *CtxI);
9597   }
9598 
9599   /// See AbstractAttribute::updateImpl(...).
9600   ChangeStatus updateImpl(Attributor &A) override {
9601     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9602                             AANoUndef::StateType &T, bool Stripped) -> bool {
9603       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9604                                              DepClassTy::REQUIRED);
9605       if (!Stripped && this == &AA) {
9606         T.indicatePessimisticFixpoint();
9607       } else {
9608         const AANoUndef::StateType &S =
9609             static_cast<const AANoUndef::StateType &>(AA.getState());
9610         T ^= S;
9611       }
9612       return T.isValidState();
9613     };
9614 
9615     StateType T;
9616     bool UsedAssumedInformation = false;
9617     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9618                                           VisitValueCB, getCtxI(),
9619                                           UsedAssumedInformation))
9620       return indicatePessimisticFixpoint();
9621 
9622     return clampStateAndIndicateChange(getState(), T);
9623   }
9624 
9625   /// See AbstractAttribute::trackStatistics()
9626   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9627 };
9628 
9629 struct AANoUndefReturned final
9630     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9631   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9632       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9633 
9634   /// See AbstractAttribute::trackStatistics()
9635   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9636 };
9637 
9638 struct AANoUndefArgument final
9639     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9640   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9641       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9642 
9643   /// See AbstractAttribute::trackStatistics()
9644   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9645 };
9646 
9647 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9648   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9649       : AANoUndefFloating(IRP, A) {}
9650 
9651   /// See AbstractAttribute::trackStatistics()
9652   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9653 };
9654 
9655 struct AANoUndefCallSiteReturned final
9656     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9657   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9658       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9659 
9660   /// See AbstractAttribute::trackStatistics()
9661   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9662 };
9663 
9664 struct AACallEdgesImpl : public AACallEdges {
9665   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9666 
9667   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9668     return CalledFunctions;
9669   }
9670 
9671   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9672 
9673   virtual bool hasNonAsmUnknownCallee() const override {
9674     return HasUnknownCalleeNonAsm;
9675   }
9676 
9677   const std::string getAsStr() const override {
9678     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9679            std::to_string(CalledFunctions.size()) + "]";
9680   }
9681 
9682   void trackStatistics() const override {}
9683 
9684 protected:
9685   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9686     if (CalledFunctions.insert(Fn)) {
9687       Change = ChangeStatus::CHANGED;
9688       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9689                         << "\n");
9690     }
9691   }
9692 
9693   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9694     if (!HasUnknownCallee)
9695       Change = ChangeStatus::CHANGED;
9696     if (NonAsm && !HasUnknownCalleeNonAsm)
9697       Change = ChangeStatus::CHANGED;
9698     HasUnknownCalleeNonAsm |= NonAsm;
9699     HasUnknownCallee = true;
9700   }
9701 
9702 private:
9703   /// Optimistic set of functions that might be called by this position.
9704   SetVector<Function *> CalledFunctions;
9705 
9706   /// Is there any call with a unknown callee.
9707   bool HasUnknownCallee = false;
9708 
9709   /// Is there any call with a unknown callee, excluding any inline asm.
9710   bool HasUnknownCalleeNonAsm = false;
9711 };
9712 
9713 struct AACallEdgesCallSite : public AACallEdgesImpl {
9714   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9715       : AACallEdgesImpl(IRP, A) {}
9716   /// See AbstractAttribute::updateImpl(...).
9717   ChangeStatus updateImpl(Attributor &A) override {
9718     ChangeStatus Change = ChangeStatus::UNCHANGED;
9719 
9720     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9721                           bool Stripped) -> bool {
9722       if (Function *Fn = dyn_cast<Function>(&V)) {
9723         addCalledFunction(Fn, Change);
9724       } else {
9725         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9726         setHasUnknownCallee(true, Change);
9727       }
9728 
9729       // Explore all values.
9730       return true;
9731     };
9732 
9733     // Process any value that we might call.
9734     auto ProcessCalledOperand = [&](Value *V) {
9735       bool DummyValue = false;
9736       bool UsedAssumedInformation = false;
9737       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9738                                        DummyValue, VisitValue, nullptr,
9739                                        UsedAssumedInformation, false)) {
9740         // If we haven't gone through all values, assume that there are unknown
9741         // callees.
9742         setHasUnknownCallee(true, Change);
9743       }
9744     };
9745 
9746     CallBase *CB = cast<CallBase>(getCtxI());
9747 
9748     if (CB->isInlineAsm()) {
9749       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9750           !hasAssumption(*CB, "ompx_no_call_asm"))
9751         setHasUnknownCallee(false, Change);
9752       return Change;
9753     }
9754 
9755     // Process callee metadata if available.
9756     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9757       for (auto &Op : MD->operands()) {
9758         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9759         if (Callee)
9760           addCalledFunction(Callee, Change);
9761       }
9762       return Change;
9763     }
9764 
9765     // The most simple case.
9766     ProcessCalledOperand(CB->getCalledOperand());
9767 
9768     // Process callback functions.
9769     SmallVector<const Use *, 4u> CallbackUses;
9770     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9771     for (const Use *U : CallbackUses)
9772       ProcessCalledOperand(U->get());
9773 
9774     return Change;
9775   }
9776 };
9777 
9778 struct AACallEdgesFunction : public AACallEdgesImpl {
9779   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9780       : AACallEdgesImpl(IRP, A) {}
9781 
9782   /// See AbstractAttribute::updateImpl(...).
9783   ChangeStatus updateImpl(Attributor &A) override {
9784     ChangeStatus Change = ChangeStatus::UNCHANGED;
9785 
9786     auto ProcessCallInst = [&](Instruction &Inst) {
9787       CallBase &CB = cast<CallBase>(Inst);
9788 
9789       auto &CBEdges = A.getAAFor<AACallEdges>(
9790           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9791       if (CBEdges.hasNonAsmUnknownCallee())
9792         setHasUnknownCallee(true, Change);
9793       if (CBEdges.hasUnknownCallee())
9794         setHasUnknownCallee(false, Change);
9795 
9796       for (Function *F : CBEdges.getOptimisticEdges())
9797         addCalledFunction(F, Change);
9798 
9799       return true;
9800     };
9801 
9802     // Visit all callable instructions.
9803     bool UsedAssumedInformation = false;
9804     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9805                                            UsedAssumedInformation,
9806                                            /* CheckBBLivenessOnly */ true)) {
9807       // If we haven't looked at all call like instructions, assume that there
9808       // are unknown callees.
9809       setHasUnknownCallee(true, Change);
9810     }
9811 
9812     return Change;
9813   }
9814 };
9815 
9816 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9817 private:
9818   struct QuerySet {
9819     void markReachable(const Function &Fn) {
9820       Reachable.insert(&Fn);
9821       Unreachable.erase(&Fn);
9822     }
9823 
9824     /// If there is no information about the function None is returned.
9825     Optional<bool> isCachedReachable(const Function &Fn) {
9826       // Assume that we can reach the function.
9827       // TODO: Be more specific with the unknown callee.
9828       if (CanReachUnknownCallee)
9829         return true;
9830 
9831       if (Reachable.count(&Fn))
9832         return true;
9833 
9834       if (Unreachable.count(&Fn))
9835         return false;
9836 
9837       return llvm::None;
9838     }
9839 
9840     /// Set of functions that we know for sure is reachable.
9841     DenseSet<const Function *> Reachable;
9842 
9843     /// Set of functions that are unreachable, but might become reachable.
9844     DenseSet<const Function *> Unreachable;
9845 
9846     /// If we can reach a function with a call to a unknown function we assume
9847     /// that we can reach any function.
9848     bool CanReachUnknownCallee = false;
9849   };
9850 
9851   struct QueryResolver : public QuerySet {
9852     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9853                         ArrayRef<const AACallEdges *> AAEdgesList) {
9854       ChangeStatus Change = ChangeStatus::UNCHANGED;
9855 
9856       for (auto *AAEdges : AAEdgesList) {
9857         if (AAEdges->hasUnknownCallee()) {
9858           if (!CanReachUnknownCallee)
9859             Change = ChangeStatus::CHANGED;
9860           CanReachUnknownCallee = true;
9861           return Change;
9862         }
9863       }
9864 
9865       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9866         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9867           Change = ChangeStatus::CHANGED;
9868           markReachable(*Fn);
9869         }
9870       }
9871       return Change;
9872     }
9873 
9874     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9875                      ArrayRef<const AACallEdges *> AAEdgesList,
9876                      const Function &Fn) {
9877       Optional<bool> Cached = isCachedReachable(Fn);
9878       if (Cached)
9879         return Cached.getValue();
9880 
9881       // The query was not cached, thus it is new. We need to request an update
9882       // explicitly to make sure this the information is properly run to a
9883       // fixpoint.
9884       A.registerForUpdate(AA);
9885 
9886       // We need to assume that this function can't reach Fn to prevent
9887       // an infinite loop if this function is recursive.
9888       Unreachable.insert(&Fn);
9889 
9890       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9891       if (Result)
9892         markReachable(Fn);
9893       return Result;
9894     }
9895 
9896     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9897                           ArrayRef<const AACallEdges *> AAEdgesList,
9898                           const Function &Fn) const {
9899 
9900       // Handle the most trivial case first.
9901       for (auto *AAEdges : AAEdgesList) {
9902         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9903 
9904         if (Edges.count(const_cast<Function *>(&Fn)))
9905           return true;
9906       }
9907 
9908       SmallVector<const AAFunctionReachability *, 8> Deps;
9909       for (auto &AAEdges : AAEdgesList) {
9910         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9911 
9912         for (Function *Edge : Edges) {
9913           // Functions that do not call back into the module can be ignored.
9914           if (Edge->hasFnAttribute(Attribute::NoCallback))
9915             continue;
9916 
9917           // We don't need a dependency if the result is reachable.
9918           const AAFunctionReachability &EdgeReachability =
9919               A.getAAFor<AAFunctionReachability>(
9920                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9921           Deps.push_back(&EdgeReachability);
9922 
9923           if (EdgeReachability.canReach(A, Fn))
9924             return true;
9925         }
9926       }
9927 
9928       // The result is false for now, set dependencies and leave.
9929       for (auto *Dep : Deps)
9930         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9931 
9932       return false;
9933     }
9934   };
9935 
9936   /// Get call edges that can be reached by this instruction.
9937   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9938                              const Instruction &Inst,
9939                              SmallVector<const AACallEdges *> &Result) const {
9940     // Determine call like instructions that we can reach from the inst.
9941     auto CheckCallBase = [&](Instruction &CBInst) {
9942       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9943         return true;
9944 
9945       auto &CB = cast<CallBase>(CBInst);
9946       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9947           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9948 
9949       Result.push_back(&AAEdges);
9950       return true;
9951     };
9952 
9953     bool UsedAssumedInformation = false;
9954     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9955                                              UsedAssumedInformation,
9956                                              /* CheckBBLivenessOnly */ true);
9957   }
9958 
9959 public:
9960   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9961       : AAFunctionReachability(IRP, A) {}
9962 
9963   bool canReach(Attributor &A, const Function &Fn) const override {
9964     if (!isValidState())
9965       return true;
9966 
9967     const AACallEdges &AAEdges =
9968         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9969 
9970     // Attributor returns attributes as const, so this function has to be
9971     // const for users of this attribute to use it without having to do
9972     // a const_cast.
9973     // This is a hack for us to be able to cache queries.
9974     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9975     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9976                                                           {&AAEdges}, Fn);
9977 
9978     return Result;
9979   }
9980 
9981   /// Can \p CB reach \p Fn
9982   bool canReach(Attributor &A, CallBase &CB,
9983                 const Function &Fn) const override {
9984     if (!isValidState())
9985       return true;
9986 
9987     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9988         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9989 
9990     // Attributor returns attributes as const, so this function has to be
9991     // const for users of this attribute to use it without having to do
9992     // a const_cast.
9993     // This is a hack for us to be able to cache queries.
9994     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9995     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9996 
9997     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9998 
9999     return Result;
10000   }
10001 
10002   bool instructionCanReach(Attributor &A, const Instruction &Inst,
10003                            const Function &Fn,
10004                            bool UseBackwards) const override {
10005     if (!isValidState())
10006       return true;
10007 
10008     if (UseBackwards)
10009       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
10010 
10011     const auto &Reachability = A.getAAFor<AAReachability>(
10012         *this, IRPosition::function(*getAssociatedFunction()),
10013         DepClassTy::REQUIRED);
10014 
10015     SmallVector<const AACallEdges *> CallEdges;
10016     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
10017     // Attributor returns attributes as const, so this function has to be
10018     // const for users of this attribute to use it without having to do
10019     // a const_cast.
10020     // This is a hack for us to be able to cache queries.
10021     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10022     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
10023     if (!AllKnown)
10024       InstQSet.CanReachUnknownCallee = true;
10025 
10026     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
10027   }
10028 
10029   /// See AbstractAttribute::updateImpl(...).
10030   ChangeStatus updateImpl(Attributor &A) override {
10031     const AACallEdges &AAEdges =
10032         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10033     ChangeStatus Change = ChangeStatus::UNCHANGED;
10034 
10035     Change |= WholeFunction.update(A, *this, {&AAEdges});
10036 
10037     for (auto &CBPair : CBQueries) {
10038       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10039           *this, IRPosition::callsite_function(*CBPair.first),
10040           DepClassTy::REQUIRED);
10041 
10042       Change |= CBPair.second.update(A, *this, {&AAEdges});
10043     }
10044 
10045     // Update the Instruction queries.
10046     if (!InstQueries.empty()) {
10047       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
10048           *this, IRPosition::function(*getAssociatedFunction()),
10049           DepClassTy::REQUIRED);
10050 
10051       // Check for local callbases first.
10052       for (auto &InstPair : InstQueries) {
10053         SmallVector<const AACallEdges *> CallEdges;
10054         bool AllKnown =
10055             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
10056         // Update will return change if we this effects any queries.
10057         if (!AllKnown)
10058           InstPair.second.CanReachUnknownCallee = true;
10059         Change |= InstPair.second.update(A, *this, CallEdges);
10060       }
10061     }
10062 
10063     return Change;
10064   }
10065 
10066   const std::string getAsStr() const override {
10067     size_t QueryCount =
10068         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
10069 
10070     return "FunctionReachability [" +
10071            std::to_string(WholeFunction.Reachable.size()) + "," +
10072            std::to_string(QueryCount) + "]";
10073   }
10074 
10075   void trackStatistics() const override {}
10076 
10077 private:
10078   bool canReachUnknownCallee() const override {
10079     return WholeFunction.CanReachUnknownCallee;
10080   }
10081 
10082   /// Used to answer if a the whole function can reacha a specific function.
10083   QueryResolver WholeFunction;
10084 
10085   /// Used to answer if a call base inside this function can reach a specific
10086   /// function.
10087   MapVector<const CallBase *, QueryResolver> CBQueries;
10088 
10089   /// This is for instruction queries than scan "forward".
10090   MapVector<const Instruction *, QueryResolver> InstQueries;
10091 };
10092 } // namespace
10093 
10094 /// ---------------------- Assumption Propagation ------------------------------
10095 namespace {
10096 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10097   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10098                        const DenseSet<StringRef> &Known)
10099       : AAAssumptionInfo(IRP, A, Known) {}
10100 
10101   bool hasAssumption(const StringRef Assumption) const override {
10102     return isValidState() && setContains(Assumption);
10103   }
10104 
10105   /// See AbstractAttribute::getAsStr()
10106   const std::string getAsStr() const override {
10107     const SetContents &Known = getKnown();
10108     const SetContents &Assumed = getAssumed();
10109 
10110     const std::string KnownStr =
10111         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10112     const std::string AssumedStr =
10113         (Assumed.isUniversal())
10114             ? "Universal"
10115             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10116 
10117     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10118   }
10119 };
10120 
10121 /// Propagates assumption information from parent functions to all of their
10122 /// successors. An assumption can be propagated if the containing function
10123 /// dominates the called function.
10124 ///
10125 /// We start with a "known" set of assumptions already valid for the associated
10126 /// function and an "assumed" set that initially contains all possible
10127 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10128 /// contents as concrete values are known. The concrete values are seeded by the
10129 /// first nodes that are either entries into the call graph, or contains no
10130 /// assumptions. Each node is updated as the intersection of the assumed state
10131 /// with all of its predecessors.
10132 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10133   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10134       : AAAssumptionInfoImpl(IRP, A,
10135                              getAssumptions(*IRP.getAssociatedFunction())) {}
10136 
10137   /// See AbstractAttribute::manifest(...).
10138   ChangeStatus manifest(Attributor &A) override {
10139     const auto &Assumptions = getKnown();
10140 
10141     // Don't manifest a universal set if it somehow made it here.
10142     if (Assumptions.isUniversal())
10143       return ChangeStatus::UNCHANGED;
10144 
10145     Function *AssociatedFunction = getAssociatedFunction();
10146 
10147     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10148 
10149     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10150   }
10151 
10152   /// See AbstractAttribute::updateImpl(...).
10153   ChangeStatus updateImpl(Attributor &A) override {
10154     bool Changed = false;
10155 
10156     auto CallSitePred = [&](AbstractCallSite ACS) {
10157       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10158           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10159           DepClassTy::REQUIRED);
10160       // Get the set of assumptions shared by all of this function's callers.
10161       Changed |= getIntersection(AssumptionAA.getAssumed());
10162       return !getAssumed().empty() || !getKnown().empty();
10163     };
10164 
10165     bool UsedAssumedInformation = false;
10166     // Get the intersection of all assumptions held by this node's predecessors.
10167     // If we don't know all the call sites then this is either an entry into the
10168     // call graph or an empty node. This node is known to only contain its own
10169     // assumptions and can be propagated to its successors.
10170     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10171                                 UsedAssumedInformation))
10172       return indicatePessimisticFixpoint();
10173 
10174     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10175   }
10176 
10177   void trackStatistics() const override {}
10178 };
10179 
10180 /// Assumption Info defined for call sites.
10181 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10182 
10183   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10184       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10185 
10186   /// See AbstractAttribute::initialize(...).
10187   void initialize(Attributor &A) override {
10188     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10189     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10190   }
10191 
10192   /// See AbstractAttribute::manifest(...).
10193   ChangeStatus manifest(Attributor &A) override {
10194     // Don't manifest a universal set if it somehow made it here.
10195     if (getKnown().isUniversal())
10196       return ChangeStatus::UNCHANGED;
10197 
10198     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10199     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10200 
10201     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10202   }
10203 
10204   /// See AbstractAttribute::updateImpl(...).
10205   ChangeStatus updateImpl(Attributor &A) override {
10206     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10207     auto &AssumptionAA =
10208         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10209     bool Changed = getIntersection(AssumptionAA.getAssumed());
10210     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10211   }
10212 
10213   /// See AbstractAttribute::trackStatistics()
10214   void trackStatistics() const override {}
10215 
10216 private:
10217   /// Helper to initialized the known set as all the assumptions this call and
10218   /// the callee contain.
10219   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10220     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10221     auto Assumptions = getAssumptions(CB);
10222     if (Function *F = IRP.getAssociatedFunction())
10223       set_union(Assumptions, getAssumptions(*F));
10224     if (Function *F = IRP.getAssociatedFunction())
10225       set_union(Assumptions, getAssumptions(*F));
10226     return Assumptions;
10227   }
10228 };
10229 } // namespace
10230 
10231 AACallGraphNode *AACallEdgeIterator::operator*() const {
10232   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10233       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10234 }
10235 
10236 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10237 
10238 const char AAReturnedValues::ID = 0;
10239 const char AANoUnwind::ID = 0;
10240 const char AANoSync::ID = 0;
10241 const char AANoFree::ID = 0;
10242 const char AANonNull::ID = 0;
10243 const char AANoRecurse::ID = 0;
10244 const char AAWillReturn::ID = 0;
10245 const char AAUndefinedBehavior::ID = 0;
10246 const char AANoAlias::ID = 0;
10247 const char AAReachability::ID = 0;
10248 const char AANoReturn::ID = 0;
10249 const char AAIsDead::ID = 0;
10250 const char AADereferenceable::ID = 0;
10251 const char AAAlign::ID = 0;
10252 const char AAInstanceInfo::ID = 0;
10253 const char AANoCapture::ID = 0;
10254 const char AAValueSimplify::ID = 0;
10255 const char AAHeapToStack::ID = 0;
10256 const char AAPrivatizablePtr::ID = 0;
10257 const char AAMemoryBehavior::ID = 0;
10258 const char AAMemoryLocation::ID = 0;
10259 const char AAValueConstantRange::ID = 0;
10260 const char AAPotentialConstantValues::ID = 0;
10261 const char AANoUndef::ID = 0;
10262 const char AACallEdges::ID = 0;
10263 const char AAFunctionReachability::ID = 0;
10264 const char AAPointerInfo::ID = 0;
10265 const char AAAssumptionInfo::ID = 0;
10266 
10267 // Macro magic to create the static generator function for attributes that
10268 // follow the naming scheme.
10269 
10270 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10271   case IRPosition::PK:                                                         \
10272     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10273 
10274 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10275   case IRPosition::PK:                                                         \
10276     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10277     ++NumAAs;                                                                  \
10278     break;
10279 
10280 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10281   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10282     CLASS *AA = nullptr;                                                       \
10283     switch (IRP.getPositionKind()) {                                           \
10284       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10285       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10286       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10287       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10288       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10289       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10290       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10291       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10292     }                                                                          \
10293     return *AA;                                                                \
10294   }
10295 
10296 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10297   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10298     CLASS *AA = nullptr;                                                       \
10299     switch (IRP.getPositionKind()) {                                           \
10300       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10301       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10302       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10303       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10304       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10305       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10306       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10307       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10308     }                                                                          \
10309     return *AA;                                                                \
10310   }
10311 
10312 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10313   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10314     CLASS *AA = nullptr;                                                       \
10315     switch (IRP.getPositionKind()) {                                           \
10316       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10317       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10318       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10319       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10320       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10321       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10322       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10323       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10324     }                                                                          \
10325     return *AA;                                                                \
10326   }
10327 
10328 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10329   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10330     CLASS *AA = nullptr;                                                       \
10331     switch (IRP.getPositionKind()) {                                           \
10332       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10333       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10334       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10335       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10336       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10337       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10338       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10339       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10340     }                                                                          \
10341     return *AA;                                                                \
10342   }
10343 
10344 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10345   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10346     CLASS *AA = nullptr;                                                       \
10347     switch (IRP.getPositionKind()) {                                           \
10348       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10349       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10350       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10351       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10352       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10353       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10354       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10355       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10356     }                                                                          \
10357     return *AA;                                                                \
10358   }
10359 
10360 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10361 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10362 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10363 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10364 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10365 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10366 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10367 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10368 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10369 
10370 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10371 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10372 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10373 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10374 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10375 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10376 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10377 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10378 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10379 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10380 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10381 
10382 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10383 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10384 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10385 
10386 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10387 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10388 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10389 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10390 
10391 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10392 
10393 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10394 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10395 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10396 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10397 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10398 #undef SWITCH_PK_CREATE
10399 #undef SWITCH_PK_INV
10400