1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetOperations.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/LazyValueInfo.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
32 #include "llvm/Analysis/ScalarEvolution.h"
33 #include "llvm/Analysis/TargetTransformInfo.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Assumptions.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/GlobalValue.h"
40 #include "llvm/IR/IRBuilder.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/NoFolder.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/IR/ValueHandle.h"
47 #include "llvm/Support/Alignment.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/GraphWriter.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
55 #include "llvm/Transforms/Utils/Local.h"
56 #include "llvm/Transforms/Utils/ValueMapper.h"
57 #include <cassert>
58 
59 using namespace llvm;
60 
61 #define DEBUG_TYPE "attributor"
62 
63 static cl::opt<bool> ManifestInternal(
64     "attributor-manifest-internal", cl::Hidden,
65     cl::desc("Manifest Attributor internal string attributes."),
66     cl::init(false));
67 
68 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
69                                        cl::Hidden);
70 
71 template <>
72 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
73 
74 static cl::opt<unsigned, true> MaxPotentialValues(
75     "attributor-max-potential-values", cl::Hidden,
76     cl::desc("Maximum number of potential values to be "
77              "tracked for each position."),
78     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
79     cl::init(7));
80 
81 static cl::opt<unsigned> MaxInterferingAccesses(
82     "attributor-max-interfering-accesses", cl::Hidden,
83     cl::desc("Maximum number of interfering accesses to "
84              "check before assuming all might interfere."),
85     cl::init(6));
86 
87 STATISTIC(NumAAs, "Number of abstract attributes created");
88 
89 // Some helper macros to deal with statistics tracking.
90 //
91 // Usage:
92 // For simple IR attribute tracking overload trackStatistics in the abstract
93 // attribute and choose the right STATS_DECLTRACK_********* macro,
94 // e.g.,:
95 //  void trackStatistics() const override {
96 //    STATS_DECLTRACK_ARG_ATTR(returned)
97 //  }
98 // If there is a single "increment" site one can use the macro
99 // STATS_DECLTRACK with a custom message. If there are multiple increment
100 // sites, STATS_DECL and STATS_TRACK can also be used separately.
101 //
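// For illustration only, an attribute with two separate increment sites could
// declare the statistic once and increment it from both places (the guarding
// conditions below are hypothetical):
//  void trackStatistics() const override {
//    STATS_DECL(nofree, Function, BUILD_STAT_MSG_IR_ATTR(functions, nofree))
//    if (ManifestedAsAttribute)
//      STATS_TRACK(nofree, Function)
//    if (ManifestedAsIRChange)
//      STATS_TRACK(nofree, Function)
//  }
//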
102 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
103   ("Number of " #TYPE " marked '" #NAME "'")
104 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
105 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
106 #define STATS_DECL(NAME, TYPE, MSG)                                            \
107   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
108 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
109 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
110   {                                                                            \
111     STATS_DECL(NAME, TYPE, MSG)                                                \
112     STATS_TRACK(NAME, TYPE)                                                    \
113   }
114 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
115   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
116 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
117   STATS_DECLTRACK(NAME, CSArguments,                                           \
118                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
119 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
120   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
121 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
122   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
123 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
124   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
125                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
126 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
127   STATS_DECLTRACK(NAME, CSReturn,                                              \
128                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
129 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
130   STATS_DECLTRACK(NAME, Floating,                                              \
131                   ("Number of floating values known to be '" #NAME "'"))
132 
133 // Specialization of the operator<< for abstract attribute subclasses. This
134 // disambiguates situations where multiple operators are applicable.
135 namespace llvm {
136 #define PIPE_OPERATOR(CLASS)                                                   \
137   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
138     return OS << static_cast<const AbstractAttribute &>(AA);                   \
139   }
140 
141 PIPE_OPERATOR(AAIsDead)
142 PIPE_OPERATOR(AANoUnwind)
143 PIPE_OPERATOR(AANoSync)
144 PIPE_OPERATOR(AANoRecurse)
145 PIPE_OPERATOR(AAWillReturn)
146 PIPE_OPERATOR(AANoReturn)
147 PIPE_OPERATOR(AAReturnedValues)
148 PIPE_OPERATOR(AANonNull)
149 PIPE_OPERATOR(AANoAlias)
150 PIPE_OPERATOR(AADereferenceable)
151 PIPE_OPERATOR(AAAlign)
152 PIPE_OPERATOR(AAInstanceInfo)
153 PIPE_OPERATOR(AANoCapture)
154 PIPE_OPERATOR(AAValueSimplify)
155 PIPE_OPERATOR(AANoFree)
156 PIPE_OPERATOR(AAHeapToStack)
157 PIPE_OPERATOR(AAReachability)
158 PIPE_OPERATOR(AAMemoryBehavior)
159 PIPE_OPERATOR(AAMemoryLocation)
160 PIPE_OPERATOR(AAValueConstantRange)
161 PIPE_OPERATOR(AAPrivatizablePtr)
162 PIPE_OPERATOR(AAUndefinedBehavior)
163 PIPE_OPERATOR(AAPotentialConstantValues)
164 PIPE_OPERATOR(AANoUndef)
165 PIPE_OPERATOR(AACallEdges)
166 PIPE_OPERATOR(AAFunctionReachability)
167 PIPE_OPERATOR(AAPointerInfo)
168 PIPE_OPERATOR(AAAssumptionInfo)
169 
170 #undef PIPE_OPERATOR
171 
172 template <>
173 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
174                                                      const DerefState &R) {
175   ChangeStatus CS0 =
176       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
177   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
178   return CS0 | CS1;
179 }
180 
181 } // namespace llvm
182 
183 /// Get the pointer operand of a memory accessing instruction. If \p I is
184 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile
185 /// is set to false and the instruction is volatile, return nullptr.
186 static const Value *getPointerOperand(const Instruction *I,
187                                       bool AllowVolatile) {
188   if (!AllowVolatile && I->isVolatile())
189     return nullptr;
190 
191   if (auto *LI = dyn_cast<LoadInst>(I)) {
192     return LI->getPointerOperand();
193   }
194 
195   if (auto *SI = dyn_cast<StoreInst>(I)) {
196     return SI->getPointerOperand();
197   }
198 
199   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
200     return CXI->getPointerOperand();
201   }
202 
203   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
204     return RMWI->getPointerOperand();
205   }
206 
207   return nullptr;
208 }
209 
210 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
211 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
212 /// getelementptr instructions that traverse the natural type of \p Ptr if
213 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
214 /// through a cast to i8*.
215 ///
216 /// TODO: This could probably live somewhere more prominently if it doesn't
217 ///       already exist.
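///
/// For illustration only: with \p PtrElemTy = { i32, i32 }, an \p Offset of 4,
/// and an i8* \p ResTy, this emits roughly
///   %p.0.1 = getelementptr { i32, i32 }, { i32, i32 }* %p, i64 0, i32 1
///   %p.0.1.cast = bitcast i32* %p.0.1 to i8*
/// whereas any offset that cannot be covered by traversing the type is applied
/// through a byte-wise GEP on an i8* cast of the pointer.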
218 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
219                                int64_t Offset, IRBuilder<NoFolder> &IRB,
220                                const DataLayout &DL) {
221   assert(Offset >= 0 && "Negative offset not supported yet!");
222   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
223                     << "-bytes as " << *ResTy << "\n");
224 
225   if (Offset) {
226     Type *Ty = PtrElemTy;
227     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
228     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
229 
230     SmallVector<Value *, 4> ValIndices;
231     std::string GEPName = Ptr->getName().str();
232     for (const APInt &Index : IntIndices) {
233       ValIndices.push_back(IRB.getInt(Index));
234       GEPName += "." + std::to_string(Index.getZExtValue());
235     }
236 
237     // Create a GEP for the indices collected above.
238     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
239 
240     // If an offset is left we use byte-wise adjustment.
241     if (IntOffset != 0) {
242       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
243       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
244                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
245     }
246   }
247 
248   // Ensure the result has the requested type.
249   Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
250                                                 Ptr->getName() + ".cast");
251 
252   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
253   return Ptr;
254 }
255 
256 /// Recursively visit all values that might become \p IRP at some point. This
257 /// will be done by looking through cast instructions, selects, phis, and calls
258 /// with the "returned" attribute. Once we cannot look through the value any
259 /// further, the callback \p VisitValueCB is invoked and passed the current
260 /// value, the \p State, and a flag to indicate if we stripped anything.
261 /// Stripped means that we unpacked the value associated with \p IRP at least
262 /// once. Note that the value used for the callback may still be the value
263 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
264 /// we will never visit more values than specified by \p MaxValues.
265 /// If \p VS does not contain the Interprocedural bit, only values valid in the
266 /// scope of \p CtxI will be visited and simplification into other scopes is
267 /// prevented.
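///
/// A minimal usage sketch; it assumes an Attributor \p A, a position, a
/// querying attribute, a context instruction, and a UsedAssumedInformation
/// flag are in scope (illustrative only):
/// \code
///   SmallVector<Value *> Leaves;
///   auto VisitValueCB = [](Value &V, const Instruction *,
///                          SmallVector<Value *> &Leaves, bool) {
///     Leaves.push_back(&V);
///     return true;
///   };
///   genericValueTraversal<SmallVector<Value *>>(A, IRP, QueryingAA, Leaves,
///                                               VisitValueCB, CtxI,
///                                               UsedAssumedInformation);
/// \endcode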
268 template <typename StateTy>
269 static bool genericValueTraversal(
270     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
271     StateTy &State,
272     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
273         VisitValueCB,
274     const Instruction *CtxI, bool &UsedAssumedInformation,
275     bool UseValueSimplify = true, int MaxValues = 16,
276     function_ref<Value *(Value *)> StripCB = nullptr,
277     AA::ValueScope VS = AA::Interprocedural) {
278 
279   struct LivenessInfo {
280     const AAIsDead *LivenessAA = nullptr;
281     bool AnyDead = false;
282   };
283   SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
284   auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
285     LivenessInfo &LI = LivenessAAs[&F];
286     if (!LI.LivenessAA)
287       LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
288                                             DepClassTy::NONE);
289     return LI;
290   };
291 
292   Value *InitialV = &IRP.getAssociatedValue();
293   using Item = std::pair<Value *, const Instruction *>;
294   SmallSet<Item, 16> Visited;
295   SmallVector<Item, 16> Worklist;
296   Worklist.push_back({InitialV, CtxI});
297 
298   int Iteration = 0;
299   do {
300     Item I = Worklist.pop_back_val();
301     Value *V = I.first;
302     CtxI = I.second;
303     if (StripCB)
304       V = StripCB(V);
305 
306     // Check if we should process the current value. To prevent endless
307     // recursion, keep a record of the values we followed!
308     if (!Visited.insert(I).second)
309       continue;
310 
311     // Make sure we limit the compile time for complex expressions.
312     if (Iteration++ >= MaxValues) {
313       LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
314                         << Iteration << "!\n");
315       return false;
316     }
317 
318     // Explicitly look through calls with a "returned" attribute if we do
319     // not have a pointer, as stripPointerCasts only works on pointers.
320     Value *NewV = nullptr;
321     if (V->getType()->isPointerTy()) {
322       NewV = V->stripPointerCasts();
323     } else {
324       auto *CB = dyn_cast<CallBase>(V);
325       if (CB && CB->getCalledFunction()) {
326         for (Argument &Arg : CB->getCalledFunction()->args())
327           if (Arg.hasReturnedAttr()) {
328             NewV = CB->getArgOperand(Arg.getArgNo());
329             break;
330           }
331       }
332     }
333     if (NewV && NewV != V) {
334       Worklist.push_back({NewV, CtxI});
335       continue;
336     }
337 
338     // Look through select instructions, visit assumed potential values.
339     if (auto *SI = dyn_cast<SelectInst>(V)) {
340       Optional<Constant *> C = A.getAssumedConstant(
341           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
342       bool NoValueYet = !C.hasValue();
343       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
344         continue;
345       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
346         if (CI->isZero())
347           Worklist.push_back({SI->getFalseValue(), CtxI});
348         else
349           Worklist.push_back({SI->getTrueValue(), CtxI});
350         continue;
351       }
352       // We could not simplify the condition, assume both values.
353       Worklist.push_back({SI->getTrueValue(), CtxI});
354       Worklist.push_back({SI->getFalseValue(), CtxI});
355       continue;
356     }
357 
358     // Look through phi nodes, visit all live operands.
359     if (auto *PHI = dyn_cast<PHINode>(V)) {
360       LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
361       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
362         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
363         if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
364           LI.AnyDead = true;
365           UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
366           continue;
367         }
368         Worklist.push_back(
369             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
370       }
371       continue;
372     }
373 
374     if (auto *Arg = dyn_cast<Argument>(V)) {
375       if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
376         SmallVector<Item> CallSiteValues;
377         bool UsedAssumedInformation = false;
378         if (A.checkForAllCallSites(
379                 [&](AbstractCallSite ACS) {
380                   // Callbacks might not have a corresponding call site operand,
381                   // stick with the argument in that case.
382                   Value *CSOp = ACS.getCallArgOperand(*Arg);
383                   if (!CSOp)
384                     return false;
385                   CallSiteValues.push_back({CSOp, ACS.getInstruction()});
386                   return true;
387                 },
388                 *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
389           Worklist.append(CallSiteValues);
390           continue;
391         }
392       }
393     }
394 
395     if (UseValueSimplify && !isa<Constant>(V)) {
396       Optional<Value *> SimpleV =
397           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
398       if (!SimpleV.hasValue())
399         continue;
400       Value *NewV = SimpleV.getValue();
401       if (NewV && NewV != V) {
402         if ((VS & AA::Interprocedural) || !CtxI ||
403             AA::isValidInScope(*NewV, CtxI->getFunction())) {
404           Worklist.push_back({NewV, CtxI});
405           continue;
406         }
407       }
408     }
409 
410     if (auto *LI = dyn_cast<LoadInst>(V)) {
411       bool UsedAssumedInformation = false;
412       // If we ask for the potentially loaded values from the initial pointer we
413       // will simply end up here again. The load is as far as we can make it.
414       if (LI->getPointerOperand() != InitialV) {
415         SmallSetVector<Value *, 4> PotentialCopies;
416         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
417         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
418                                            PotentialValueOrigins, QueryingAA,
419                                            UsedAssumedInformation,
420                                            /* OnlyExact */ true)) {
421           // Values have to be dynamically unique or we lose the fact that a
422           // single llvm::Value might represent two runtime values (e.g., stack
423           // locations in different recursive calls).
424           bool DynamicallyUnique =
425               llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
426                 return AA::isDynamicallyUnique(A, QueryingAA, *PC);
427               });
428           if (DynamicallyUnique &&
429               ((VS & AA::Interprocedural) || !CtxI ||
430                llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
431                  return AA::isValidInScope(*PC, CtxI->getFunction());
432                }))) {
433             for (auto *PotentialCopy : PotentialCopies)
434               Worklist.push_back({PotentialCopy, CtxI});
435             continue;
436           }
437         }
438       }
439     }
440 
441     // Once a leaf is reached we inform the user through the callback.
442     if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
443       LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
444                         << *V << "!\n");
445       return false;
446     }
447   } while (!Worklist.empty());
448 
449   // If we actually used liveness information we have to record a dependence.
450   for (auto &It : LivenessAAs)
451     if (It.second.AnyDead)
452       A.recordDependence(*It.second.LivenessAA, QueryingAA,
453                          DepClassTy::OPTIONAL);
454 
455   // All values have been visited.
456   return true;
457 }
458 
459 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
460                                      SmallVectorImpl<Value *> &Objects,
461                                      const AbstractAttribute &QueryingAA,
462                                      const Instruction *CtxI,
463                                      bool &UsedAssumedInformation,
464                                      AA::ValueScope VS) {
465   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
466   SmallPtrSet<Value *, 8> SeenObjects;
467   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
468                                      SmallVectorImpl<Value *> &Objects,
469                                      bool) -> bool {
470     if (SeenObjects.insert(&Val).second)
471       Objects.push_back(&Val);
472     return true;
473   };
474   if (!genericValueTraversal<decltype(Objects)>(
475           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
476           UsedAssumedInformation, true, 32, StripCB, VS))
477     return false;
478   return true;
479 }
480 
481 static const Value *
482 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
483                           const Value *Val, const DataLayout &DL, APInt &Offset,
484                           bool GetMinOffset, bool AllowNonInbounds,
485                           bool UseAssumed = false) {
486 
487   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
488     const IRPosition &Pos = IRPosition::value(V);
489     // Only track dependence if we are going to use the assumed info.
490     const AAValueConstantRange &ValueConstantRangeAA =
491         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
492                                          UseAssumed ? DepClassTy::OPTIONAL
493                                                     : DepClassTy::NONE);
494     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
495                                      : ValueConstantRangeAA.getKnown();
496     if (Range.isFullSet())
497       return false;
498 
499     // Depending on GetMinOffset, use the signed minimum or maximum of the
500     // range as a conservative bound for the accumulated offset.
501     if (GetMinOffset)
502       ROffset = Range.getSignedMin();
503     else
504       ROffset = Range.getSignedMax();
505     return true;
506   };
507 
508   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
509                                                 /* AllowInvariant */ true,
510                                                 AttributorAnalysis);
511 }
512 
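/// Strip \p Ptr down to a minimal base value and accumulate the byte offset
/// into \p BytesOffset. For illustration only, given
///   %q = getelementptr inbounds i32, i32* %p, i64 3
/// a query on %q yields %p as the base and a BytesOffset of 12.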
513 static const Value *
514 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
515                         const Value *Ptr, int64_t &BytesOffset,
516                         const DataLayout &DL, bool AllowNonInbounds = false) {
517   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
518   const Value *Base =
519       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
520                                 /* GetMinOffset */ true, AllowNonInbounds);
521 
522   BytesOffset = OffsetAPInt.getSExtValue();
523   return Base;
524 }
525 
526 /// Clamp the information known for all returned values of a function
527 /// (identified by \p QueryingAA) into \p S.
528 template <typename AAType, typename StateType = typename AAType::StateType>
529 static void clampReturnedValueStates(
530     Attributor &A, const AAType &QueryingAA, StateType &S,
531     const IRPosition::CallBaseContext *CBContext = nullptr) {
532   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
533                     << QueryingAA << " into " << S << "\n");
534 
535   assert((QueryingAA.getIRPosition().getPositionKind() ==
536               IRPosition::IRP_RETURNED ||
537           QueryingAA.getIRPosition().getPositionKind() ==
538               IRPosition::IRP_CALL_SITE_RETURNED) &&
539          "Can only clamp returned value states for a function returned or call "
540          "site returned position!");
541 
542   // Use an optional state as there might not be any return values and we want
543   // to join (IntegerState::operator&) the state of all there are.
544   Optional<StateType> T;
545 
546   // Callback for each possibly returned value.
547   auto CheckReturnValue = [&](Value &RV) -> bool {
548     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
549     const AAType &AA =
550         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
551     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
552                       << " @ " << RVPos << "\n");
553     const StateType &AAS = AA.getState();
554     if (!T.hasValue())
555       T = StateType::getBestState(AAS);
556     *T &= AAS;
557     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
558                       << "\n");
559     return T->isValidState();
560   };
561 
562   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
563     S.indicatePessimisticFixpoint();
564   else if (T.hasValue())
565     S ^= *T;
566 }
567 
568 namespace {
569 /// Helper class for generic deduction: return value -> returned position.
570 template <typename AAType, typename BaseType,
571           typename StateType = typename BaseType::StateType,
572           bool PropagateCallBaseContext = false>
573 struct AAReturnedFromReturnedValues : public BaseType {
574   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
575       : BaseType(IRP, A) {}
576 
577   /// See AbstractAttribute::updateImpl(...).
578   ChangeStatus updateImpl(Attributor &A) override {
579     StateType S(StateType::getBestState(this->getState()));
580     clampReturnedValueStates<AAType, StateType>(
581         A, *this, S,
582         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
583     // TODO: If we know we visited all returned values, thus none are assumed
584     // dead, we can take the known information from the state T.
585     return clampStateAndIndicateChange<StateType>(this->getState(), S);
586   }
587 };
588 
589 /// Clamp the information known at all call sites for a given argument
590 /// (identified by \p QueryingAA) into \p S.
591 template <typename AAType, typename StateType = typename AAType::StateType>
592 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
593                                         StateType &S) {
594   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
595                     << QueryingAA << " into " << S << "\n");
596 
597   assert(QueryingAA.getIRPosition().getPositionKind() ==
598              IRPosition::IRP_ARGUMENT &&
599          "Can only clamp call site argument states for an argument position!");
600 
601   // Use an optional state as there might not be any call sites and we want
602   // to join (IntegerState::operator&) the state of all there are.
603   Optional<StateType> T;
604 
605   // The argument number which is also the call site argument number.
606   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
607 
608   auto CallSiteCheck = [&](AbstractCallSite ACS) {
609     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
610     // Check if a corresponding argument was found or if it is not associated
611     // (which can happen for callback calls).
612     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
613       return false;
614 
615     const AAType &AA =
616         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
617     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
618                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
619     const StateType &AAS = AA.getState();
620     if (!T.hasValue())
621       T = StateType::getBestState(AAS);
622     *T &= AAS;
623     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
624                       << "\n");
625     return T->isValidState();
626   };
627 
628   bool UsedAssumedInformation = false;
629   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
630                               UsedAssumedInformation))
631     S.indicatePessimisticFixpoint();
632   else if (T.hasValue())
633     S ^= *T;
634 }
635 
636 /// This function is the bridge between argument position and the call base
637 /// context.
638 template <typename AAType, typename BaseType,
639           typename StateType = typename AAType::StateType>
640 bool getArgumentStateFromCallBaseContext(Attributor &A,
641                                          BaseType &QueryingAttribute,
642                                          IRPosition &Pos, StateType &State) {
643   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
644          "Expected an 'argument' position!");
645   const CallBase *CBContext = Pos.getCallBaseContext();
646   if (!CBContext)
647     return false;
648 
649   int ArgNo = Pos.getCallSiteArgNo();
650   assert(ArgNo >= 0 && "Invalid Arg No!");
651 
652   const auto &AA = A.getAAFor<AAType>(
653       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
654       DepClassTy::REQUIRED);
655   const StateType &CBArgumentState =
656       static_cast<const StateType &>(AA.getState());
657 
658   LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument "
659                     << "position: " << Pos
660                     << " CB Arg state: " << CBArgumentState << "\n");
661 
662   // NOTE: If we want to do call site grouping it should happen here.
663   State ^= CBArgumentState;
664   return true;
665 }
666 
667 /// Helper class for generic deduction: call site argument -> argument position.
668 template <typename AAType, typename BaseType,
669           typename StateType = typename AAType::StateType,
670           bool BridgeCallBaseContext = false>
671 struct AAArgumentFromCallSiteArguments : public BaseType {
672   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
673       : BaseType(IRP, A) {}
674 
675   /// See AbstractAttribute::updateImpl(...).
676   ChangeStatus updateImpl(Attributor &A) override {
677     StateType S = StateType::getBestState(this->getState());
678 
679     if (BridgeCallBaseContext) {
680       bool Success =
681           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
682               A, *this, this->getIRPosition(), S);
683       if (Success)
684         return clampStateAndIndicateChange<StateType>(this->getState(), S);
685     }
686     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
687 
688     // TODO: If we know we visited all incoming values, thus none are assumed
689     // dead, we can take the known information from the state T.
690     return clampStateAndIndicateChange<StateType>(this->getState(), S);
691   }
692 };
693 
694 /// Helper class for generic replication: function returned -> cs returned.
695 template <typename AAType, typename BaseType,
696           typename StateType = typename BaseType::StateType,
697           bool IntroduceCallBaseContext = false>
698 struct AACallSiteReturnedFromReturned : public BaseType {
699   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
700       : BaseType(IRP, A) {}
701 
702   /// See AbstractAttribute::updateImpl(...).
703   ChangeStatus updateImpl(Attributor &A) override {
704     assert(this->getIRPosition().getPositionKind() ==
705                IRPosition::IRP_CALL_SITE_RETURNED &&
706            "Can only wrap function returned positions for call site returned "
707            "positions!");
708     auto &S = this->getState();
709 
710     const Function *AssociatedFunction =
711         this->getIRPosition().getAssociatedFunction();
712     if (!AssociatedFunction)
713       return S.indicatePessimisticFixpoint();
714 
715     CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
716     if (IntroduceCallBaseContext)
717       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
718                         << CBContext << "\n");
719 
720     IRPosition FnPos = IRPosition::returned(
721         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
722     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
723     return clampStateAndIndicateChange(S, AA.getState());
724   }
725 };
726 
727 /// Helper function to accumulate uses.
728 template <class AAType, typename StateType = typename AAType::StateType>
729 static void followUsesInContext(AAType &AA, Attributor &A,
730                                 MustBeExecutedContextExplorer &Explorer,
731                                 const Instruction *CtxI,
732                                 SetVector<const Use *> &Uses,
733                                 StateType &State) {
734   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
735   for (unsigned u = 0; u < Uses.size(); ++u) {
736     const Use *U = Uses[u];
737     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
738       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
739       if (Found && AA.followUseInMBEC(A, U, UserI, State))
740         for (const Use &Us : UserI->uses())
741           Uses.insert(&Us);
742     }
743   }
744 }
745 
746 /// Use the must-be-executed-context around \p I to add information into \p S.
747 /// The AAType class is required to have a `followUseInMBEC` method with the
748 /// following signature and behaviour:
749 ///
750 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
751 ///                      StateType &State)
752 /// U - Underlying use. I - The user of \p U. State - The state to update.
753 /// Returns true if the value should be tracked transitively.
754 ///
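/// A conforming implementation could look like this (illustrative only):
/// \code
///   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                        StateType &State) {
///     // Inspect the user I of the use U, update State accordingly, and
///     // return whether the uses of I should be followed as well.
///     return true;
///   }
/// \endcode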
755 template <class AAType, typename StateType = typename AAType::StateType>
756 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
757                              Instruction &CtxI) {
758 
759   // Container for (transitive) uses of the associated value.
760   SetVector<const Use *> Uses;
761   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
762     Uses.insert(&U);
763 
764   MustBeExecutedContextExplorer &Explorer =
765       A.getInfoCache().getMustBeExecutedContextExplorer();
766 
767   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
768 
769   if (S.isAtFixpoint())
770     return;
771 
772   SmallVector<const BranchInst *, 4> BrInsts;
773   auto Pred = [&](const Instruction *I) {
774     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
775       if (Br->isConditional())
776         BrInsts.push_back(Br);
777     return true;
778   };
779 
780   // Here, accumulate conditional branch instructions in the context. We
781   // explore the child paths and collect the known states. The disjunction of
782   // those states can be merged to its own state. Let ParentState_i be a state
783   // to indicate the known information for an i-th branch instruction in the
784   // context. ChildStates are created for its successors respectively.
785   //
786   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
787   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
788   //      ...
789   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
790   //
791   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
792   //
793   // FIXME: Currently, recursive branches are not handled. For example, we
794   // can't deduce that ptr must be dereferenced in the function below.
795   //
796   //  void f(int a, int b, int *ptr) {
797   //    if(a)
798   //      if (b) {
799   //        *ptr = 0;
800   //      } else {
801   //        *ptr = 1;
802   //      }
803   //    else {
804   //      if (b) {
805   //        *ptr = 0;
806   //      } else {
807   //        *ptr = 1;
808   //      }
809   //    }
810   // }
811 
812   Explorer.checkForAllContext(&CtxI, Pred);
813   for (const BranchInst *Br : BrInsts) {
814     StateType ParentState;
815 
816     // The known state of the parent state is a conjunction of children's
817     // known states so it is initialized with a best state.
818     ParentState.indicateOptimisticFixpoint();
819 
820     for (const BasicBlock *BB : Br->successors()) {
821       StateType ChildState;
822 
823       size_t BeforeSize = Uses.size();
824       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
825 
826       // Erase uses which only appear in the child.
827       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
828         It = Uses.erase(It);
829 
830       ParentState &= ChildState;
831     }
832 
833     // Use only known state.
834     S += ParentState;
835   }
836 }
837 } // namespace
838 
839 /// ------------------------ PointerInfo ---------------------------------------
840 
841 namespace llvm {
842 namespace AA {
843 namespace PointerInfo {
844 
845 struct State;
846 
847 } // namespace PointerInfo
848 } // namespace AA
849 
850 /// Helper for AA::PointerInfo::Access DenseMap/Set usage.
851 template <>
852 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
853   using Access = AAPointerInfo::Access;
854   static inline Access getEmptyKey();
855   static inline Access getTombstoneKey();
856   static unsigned getHashValue(const Access &A);
857   static bool isEqual(const Access &LHS, const Access &RHS);
858 };
859 
860 /// Helper that allows OffsetAndSize as a key in a DenseMap.
861 template <>
862 struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
863     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
864 
865 /// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
866 /// but the instruction.
867 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
868   using Base = DenseMapInfo<Instruction *>;
869   using Access = AAPointerInfo::Access;
870   static inline Access getEmptyKey();
871   static inline Access getTombstoneKey();
872   static unsigned getHashValue(const Access &A);
873   static bool isEqual(const Access &LHS, const Access &RHS);
874 };
875 
876 } // namespace llvm
877 
878 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
879 struct AA::PointerInfo::State : public AbstractState {
880 
881   ~State() {
882     // We do not delete the Accesses objects but need to destroy them still.
883     for (auto &It : AccessBins)
884       It.second->~Accesses();
885   }
886 
887   /// Return the best possible representable state.
888   static State getBestState(const State &SIS) { return State(); }
889 
890   /// Return the worst possible representable state.
891   static State getWorstState(const State &SIS) {
892     State R;
893     R.indicatePessimisticFixpoint();
894     return R;
895   }
896 
897   State() = default;
898   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
899     SIS.AccessBins.clear();
900   }
901 
902   const State &getAssumed() const { return *this; }
903 
904   /// See AbstractState::isValidState().
905   bool isValidState() const override { return BS.isValidState(); }
906 
907   /// See AbstractState::isAtFixpoint().
908   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
909 
910   /// See AbstractState::indicateOptimisticFixpoint().
911   ChangeStatus indicateOptimisticFixpoint() override {
912     BS.indicateOptimisticFixpoint();
913     return ChangeStatus::UNCHANGED;
914   }
915 
916   /// See AbstractState::indicatePessimisticFixpoint().
917   ChangeStatus indicatePessimisticFixpoint() override {
918     BS.indicatePessimisticFixpoint();
919     return ChangeStatus::CHANGED;
920   }
921 
922   State &operator=(const State &R) {
923     if (this == &R)
924       return *this;
925     BS = R.BS;
926     AccessBins = R.AccessBins;
927     return *this;
928   }
929 
930   State &operator=(State &&R) {
931     if (this == &R)
932       return *this;
933     std::swap(BS, R.BS);
934     std::swap(AccessBins, R.AccessBins);
935     return *this;
936   }
937 
938   bool operator==(const State &R) const {
939     if (BS != R.BS)
940       return false;
941     if (AccessBins.size() != R.AccessBins.size())
942       return false;
943     auto It = begin(), RIt = R.begin(), E = end();
944     while (It != E) {
945       if (It->getFirst() != RIt->getFirst())
946         return false;
947       auto &Accs = It->getSecond();
948       auto &RAccs = RIt->getSecond();
949       if (Accs->size() != RAccs->size())
950         return false;
951       for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
952         if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
953           return false;
954       ++It;
955       ++RIt;
956     }
957     return true;
958   }
959   bool operator!=(const State &R) const { return !(*this == R); }
960 
961   /// We store accesses in a set with the instruction as key.
962   struct Accesses {
963     SmallVector<AAPointerInfo::Access, 4> Accesses;
964     DenseMap<const Instruction *, unsigned> Map;
965 
966     unsigned size() const { return Accesses.size(); }
967 
968     using vec_iterator = decltype(Accesses)::iterator;
969     vec_iterator begin() { return Accesses.begin(); }
970     vec_iterator end() { return Accesses.end(); }
971 
972     using iterator = decltype(Map)::const_iterator;
973     iterator find(AAPointerInfo::Access &Acc) {
974       return Map.find(Acc.getRemoteInst());
975     }
976     iterator find_end() { return Map.end(); }
977 
978     AAPointerInfo::Access &get(iterator &It) {
979       return Accesses[It->getSecond()];
980     }
981 
982     void insert(AAPointerInfo::Access &Acc) {
983       Map[Acc.getRemoteInst()] = Accesses.size();
984       Accesses.push_back(Acc);
985     }
986   };
987 
988   /// We store all accesses in bins denoted by their offset and size.
989   using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
990 
991   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
992   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
993 
994 protected:
995   /// The bins with all the accesses for the associated pointer.
996   AccessBinsTy AccessBins;
997 
998   /// Add a new access to the state at offset \p Offset and with size \p Size.
999   /// The access is associated with \p I, writes \p Content (if anything), and
1000   /// is of kind \p Kind.
1001   /// \returns CHANGED if the state changed, UNCHANGED otherwise.
1002   ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
1003                          Instruction &I, Optional<Value *> Content,
1004                          AAPointerInfo::AccessKind Kind, Type *Ty,
1005                          Instruction *RemoteI = nullptr,
1006                          Accesses *BinPtr = nullptr) {
1007     AAPointerInfo::OffsetAndSize Key{Offset, Size};
1008     Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
1009     if (!Bin)
1010       Bin = new (A.Allocator) Accesses;
1011     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1012     // Check if we have an access for this instruction in this bin, if not,
1013     // simply add it.
1014     auto It = Bin->find(Acc);
1015     if (It == Bin->find_end()) {
1016       Bin->insert(Acc);
1017       return ChangeStatus::CHANGED;
1018     }
1019     // If the existing access is the same as the new one, nothing changed.
1020     AAPointerInfo::Access &Current = Bin->get(It);
1021     AAPointerInfo::Access Before = Current;
1022     // The new one will be combined with the existing one.
1023     Current &= Acc;
1024     return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1025   }
1026 
1027   /// See AAPointerInfo::forallInterferingAccesses.
1028   bool forallInterferingAccesses(
1029       AAPointerInfo::OffsetAndSize OAS,
1030       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1031     if (!isValidState())
1032       return false;
1033 
1034     for (auto &It : AccessBins) {
1035       AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
1036       if (!OAS.mayOverlap(ItOAS))
1037         continue;
1038       bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1039       for (auto &Access : *It.getSecond())
1040         if (!CB(Access, IsExact))
1041           return false;
1042     }
1043     return true;
1044   }
1045 
1046   /// See AAPointerInfo::forallInterferingAccesses.
1047   bool forallInterferingAccesses(
1048       Instruction &I,
1049       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1050     if (!isValidState())
1051       return false;
1052 
1053     // First find the offset and size of I.
1054     AAPointerInfo::OffsetAndSize OAS(-1, -1);
1055     for (auto &It : AccessBins) {
1056       for (auto &Access : *It.getSecond()) {
1057         if (Access.getRemoteInst() == &I) {
1058           OAS = It.getFirst();
1059           break;
1060         }
1061       }
1062       if (OAS.getSize() != -1)
1063         break;
1064     }
1065     // No access for I was found, we are done.
1066     if (OAS.getSize() == -1)
1067       return true;
1068 
1069     // Now that we have an offset and size, find all overlapping ones and use
1070     // the callback on the accesses.
1071     return forallInterferingAccesses(OAS, CB);
1072   }
1073 
1074 private:
1075   /// State to track fixpoint and validity.
1076   BooleanState BS;
1077 };
1078 
1079 namespace {
1080 struct AAPointerInfoImpl
1081     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1082   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1083   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1084 
1085   /// See AbstractAttribute::initialize(...).
1086   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1087 
1088   /// See AbstractAttribute::getAsStr().
1089   const std::string getAsStr() const override {
1090     return std::string("PointerInfo ") +
1091            (isValidState() ? (std::string("#") +
1092                               std::to_string(AccessBins.size()) + " bins")
1093                            : "<invalid>");
1094   }
1095 
1096   /// See AbstractAttribute::manifest(...).
1097   ChangeStatus manifest(Attributor &A) override {
1098     return AAPointerInfo::manifest(A);
1099   }
1100 
1101   bool forallInterferingAccesses(
1102       OffsetAndSize OAS,
1103       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1104       const override {
1105     return State::forallInterferingAccesses(OAS, CB);
1106   }
1107   bool forallInterferingAccesses(
1108       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1109       function_ref<bool(const Access &, bool)> UserCB) const override {
1110     SmallPtrSet<const Access *, 8> DominatingWrites;
1111     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1112 
1113     Function &Scope = *I.getFunction();
1114     const auto &NoSyncAA = A.getAAFor<AANoSync>(
1115         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1116     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1117         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
1118     const bool NoSync = NoSyncAA.isAssumedNoSync();
1119 
1120     // Helper to determine if we need to consider threading, which we cannot
1121     // right now. However, if the function is (assumed) nosync or the
1122     // instruction is executed by the initial thread only, we can ignore
1123     // threading.
1124     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
1125       if (NoSync)
1126         return true;
1127       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
1128         return true;
1129       return false;
1130     };
1131 
1132     // Helper to determine if the access is executed by the same thread as the
1133     // load, for now it is sufficient to avoid any potential threading effects
1134     // as we cannot deal with them anyway.
1135     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
1136       return CanIgnoreThreading(*Acc.getLocalInst());
1137     };
1138 
1139     // TODO: Use inter-procedural reachability and dominance.
1140     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1141         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1142 
1143     const bool FindInterferingWrites = I.mayReadFromMemory();
1144     const bool FindInterferingReads = I.mayWriteToMemory();
1145     const bool UseDominanceReasoning = FindInterferingWrites;
1146     const bool CanUseCFGReasoning = CanIgnoreThreading(I);
1147     InformationCache &InfoCache = A.getInfoCache();
1148     const DominatorTree *DT =
1149         NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1150             ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1151                   Scope)
1152             : nullptr;
1153 
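    // Address space numbering shared by the AMDGPU and NVPTX targets; the
    // names follow the NVPTX convention (on AMDGPU, 3 is LDS, 4 is constant,
    // and 5 is private).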
1154     enum GPUAddressSpace : unsigned {
1155       Generic = 0,
1156       Global = 1,
1157       Shared = 3,
1158       Constant = 4,
1159       Local = 5,
1160     };
1161 
1162     // Helper to check if a value has "kernel lifetime", that is, it will not
1163     // outlive a GPU kernel. This is true for shared, constant, and local
1164     // globals on AMD and NVIDIA GPUs.
1165     auto HasKernelLifetime = [&](Value *V, Module &M) {
1166       Triple T(M.getTargetTriple());
1167       if (!(T.isAMDGPU() || T.isNVPTX()))
1168         return false;
1169       switch (V->getType()->getPointerAddressSpace()) {
1170       case GPUAddressSpace::Shared:
1171       case GPUAddressSpace::Constant:
1172       case GPUAddressSpace::Local:
1173         return true;
1174       default:
1175         return false;
1176       };
1177     };
1178 
1179     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1180     // to determine if we should look at reachability from the callee. For
1181     // certain pointers we know the lifetime and we do not have to step into the
1182     // callee to determine reachability as the pointer would be dead in the
1183     // callee. See the conditional initialization below.
1184     std::function<bool(const Function &)> IsLiveInCalleeCB;
1185 
1186     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1187       // If the alloca containing function is not recursive the alloca
1188       // must be dead in the callee.
1189       const Function *AIFn = AI->getFunction();
1190       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1191           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1192       if (NoRecurseAA.isAssumedNoRecurse()) {
1193         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1194       }
1195     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1196       // If the global has kernel lifetime we can stop if we reach a kernel
1197       // as it is "dead" in the (unknown) callees.
1198       if (HasKernelLifetime(GV, *GV->getParent()))
1199         IsLiveInCalleeCB = [](const Function &Fn) {
1200           return !Fn.hasFnAttribute("kernel");
1201         };
1202     }
1203 
1204     auto AccessCB = [&](const Access &Acc, bool Exact) {
1205       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1206           (!FindInterferingReads || !Acc.isRead()))
1207         return true;
1208 
1209       // For now we only filter accesses based on CFG reasoning which does not
1210       // work yet if we have threading effects, or the access is complicated.
1211       if (CanUseCFGReasoning) {
1212         if ((!Acc.isWrite() ||
1213              !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1214                                          IsLiveInCalleeCB)) &&
1215             (!Acc.isRead() ||
1216              !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1217                                          IsLiveInCalleeCB)))
1218           return true;
1219         if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1220             IsSameThreadAsLoad(Acc)) {
1221           if (DT->dominates(Acc.getLocalInst(), &I))
1222             DominatingWrites.insert(&Acc);
1223         }
1224       }
1225 
1226       InterferingAccesses.push_back({&Acc, Exact});
1227       return true;
1228     };
1229     if (!State::forallInterferingAccesses(I, AccessCB))
1230       return false;
1231 
1232     // If we cannot use CFG reasoning we only filter the non-write accesses
1233     // and are done here.
1234     if (!CanUseCFGReasoning) {
1235       for (auto &It : InterferingAccesses)
1236         if (!UserCB(*It.first, It.second))
1237           return false;
1238       return true;
1239     }
1240 
1241     // Helper to determine if we can skip a specific write access. This is in
1242     // the worst case quadratic as we are looking for another write that will
1243     // hide the effect of this one.
1244     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1245       if (!IsSameThreadAsLoad(Acc))
1246         return false;
1247       if (!DominatingWrites.count(&Acc))
1248         return false;
1249       for (const Access *DomAcc : DominatingWrites) {
1250         assert(Acc.getLocalInst()->getFunction() ==
1251                    DomAcc->getLocalInst()->getFunction() &&
1252                "Expected dominating writes to be in the same function!");
1253 
1254         if (DomAcc != &Acc &&
1255             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1256           return true;
1257         }
1258       }
1259       return false;
1260     };
1261 
1262     // Run the user callback on all accesses we cannot skip and return if that
1263     // succeeded for all or not.
1264     unsigned NumInterferingAccesses = InterferingAccesses.size();
1265     for (auto &It : InterferingAccesses) {
1266       if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1267           !CanSkipAccess(*It.first, It.second)) {
1268         if (!UserCB(*It.first, It.second))
1269           return false;
1270       }
1271     }
1272     return true;
1273   }
1274 
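  /// Translate the accesses known for the callee argument (\p CalleeAA) into
  /// accesses through the call site \p CB, shifting their offsets by
  /// \p CallArgOffset, and merge them into this state.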
1275   ChangeStatus translateAndAddCalleeState(Attributor &A,
1276                                           const AAPointerInfo &CalleeAA,
1277                                           int64_t CallArgOffset, CallBase &CB) {
1278     using namespace AA::PointerInfo;
1279     if (!CalleeAA.getState().isValidState() || !isValidState())
1280       return indicatePessimisticFixpoint();
1281 
1282     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1283     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1284 
1285     // Combine the accesses bin by bin.
1286     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1287     for (auto &It : CalleeImplAA.getState()) {
1288       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1289       if (CallArgOffset != OffsetAndSize::Unknown)
1290         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1291                             It.first.getSize());
1292       Accesses *Bin = AccessBins[OAS];
1293       for (const AAPointerInfo::Access &RAcc : *It.second) {
1294         if (IsByval && !RAcc.isRead())
1295           continue;
1296         bool UsedAssumedInformation = false;
1297         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1298             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1299         AccessKind AK =
1300             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1301                                                  : AccessKind::AK_READ_WRITE));
1302         Changed =
1303             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1304                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1305       }
1306     }
1307     return Changed;
1308   }
1309 
1310   /// Statistic tracking for all AAPointerInfo implementations.
1311   /// See AbstractAttribute::trackStatistics().
1312   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1313 };
1314 
1315 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1316   using AccessKind = AAPointerInfo::AccessKind;
1317   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1318       : AAPointerInfoImpl(IRP, A) {}
1319 
1320   /// See AbstractAttribute::initialize(...).
1321   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1322 
1323   /// Deal with an access and signal if it was handled successfully.
1324   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1325                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1326                     ChangeStatus &Changed, Type *Ty,
1327                     int64_t Size = OffsetAndSize::Unknown) {
1328     using namespace AA::PointerInfo;
1329     // No need to find a size if one is given or the offset is unknown.
1330     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1331         Ty) {
1332       const DataLayout &DL = A.getDataLayout();
1333       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1334       if (!AccessSize.isScalable())
1335         Size = AccessSize.getFixedSize();
1336     }
1337     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1338     return true;
1339   };
1340 
1341   /// Helper struct, will support ranges eventually.
1342   struct OffsetInfo {
1343     int64_t Offset = OffsetAndSize::Unknown;
1344 
1345     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1346   };
1347 
1348   /// See AbstractAttribute::updateImpl(...).
1349   ChangeStatus updateImpl(Attributor &A) override {
1350     using namespace AA::PointerInfo;
1351     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1352     Value &AssociatedValue = getAssociatedValue();
1353 
1354     const DataLayout &DL = A.getDataLayout();
1355     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1356     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1357 
1358     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1359                                      bool &Follow) {
1360       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1361       UsrOI = PtrOI;
1362       Follow = true;
1363       return true;
1364     };
1365 
1366     const auto *TLI = getAnchorScope()
1367                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1368                                 *getAnchorScope())
1369                           : nullptr;
1370     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1371       Value *CurPtr = U.get();
1372       User *Usr = U.getUser();
1373       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1374                         << *Usr << "\n");
1375       assert(OffsetInfoMap.count(CurPtr) &&
1376              "The current pointer offset should have been seeded!");
1377 
1378       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1379         if (CE->isCast())
1380           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1381         if (CE->isCompare())
1382           return true;
1383         if (!isa<GEPOperator>(CE)) {
1384           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1385                             << "\n");
1386           return false;
1387         }
1388       }
1389       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here: the Usr access might change the map, but CurPtr
        // is already in it.
1392         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1393         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1394         UsrOI = PtrOI;
1395 
1396         // TODO: Use range information.
1397         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1398             !GEP->hasAllConstantIndices()) {
1399           UsrOI.Offset = OffsetAndSize::Unknown;
1400           Follow = true;
1401           return true;
1402         }
1403 
1404         SmallVector<Value *, 8> Indices;
1405         for (Use &Idx : GEP->indices()) {
1406           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1407             Indices.push_back(CIdx);
1408             continue;
1409           }
1410 
1411           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1412                             << " : " << *Idx << "\n");
1413           return false;
1414         }
1415         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1416                                           GEP->getSourceElementType(), Indices);
1417         Follow = true;
1418         return true;
1419       }
1420       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1421         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1422 
1423       // For PHIs we need to take care of the recurrence explicitly as the value
1424       // might change while we iterate through a loop. For now, we give up if
1425       // the PHI is not invariant.
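      // E.g. (illustrative), a pointer induction PHI such as
      //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
      //   %p.next = getelementptr i8, i8* %p, i64 1
      // has a different offset on every iteration, so we fall back to an
      // unknown offset for it below.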
1426       if (isa<PHINode>(Usr)) {
        // Note the order here: the Usr access might change the map, but CurPtr
        // is already in it.
1429         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1430         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1431         // Check if the PHI is invariant (so far).
1432         if (UsrOI == PtrOI)
1433           return true;
1434 
        // Check if the PHI operand already has an unknown offset, as we cannot
        // improve on that anymore.
1437         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1438           UsrOI = PtrOI;
1439           Follow = true;
1440           return true;
1441         }
1442 
1443         // Check if the PHI operand is not dependent on the PHI itself.
1444         // TODO: This is not great as we look at the pointer type. However, it
1445         // is unclear where the Offset size comes from with typeless pointers.
1446         APInt Offset(
1447             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1448             0);
1449         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1450                                     DL, Offset, /* AllowNonInbounds */ true)) {
1451           if (Offset != PtrOI.Offset) {
1452             LLVM_DEBUG(dbgs()
1453                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1454                        << *CurPtr << " in " << *Usr << "\n");
1455             return false;
1456           }
1457           return HandlePassthroughUser(Usr, PtrOI, Follow);
1458         }
1459 
1460         // TODO: Approximate in case we know the direction of the recurrence.
1461         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1462                           << *CurPtr << " in " << *Usr << "\n");
1463         UsrOI = PtrOI;
1464         UsrOI.Offset = OffsetAndSize::Unknown;
1465         Follow = true;
1466         return true;
1467       }
1468 
1469       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1470         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1471                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1472                             Changed, LoadI->getType());
1473       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1474         if (StoreI->getValueOperand() == CurPtr) {
1475           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1476                             << *StoreI << "\n");
1477           return false;
1478         }
1479         bool UsedAssumedInformation = false;
1480         Optional<Value *> Content = A.getAssumedSimplified(
1481             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1482         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1483                             OffsetInfoMap[CurPtr].Offset, Changed,
1484                             StoreI->getValueOperand()->getType());
1485       }
1486       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1487         if (CB->isLifetimeStartOrEnd())
1488           return true;
1489         if (TLI && isFreeCall(CB, TLI))
1490           return true;
1491         if (CB->isArgOperand(&U)) {
1492           unsigned ArgNo = CB->getArgOperandNo(&U);
1493           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1494               *this, IRPosition::callsite_argument(*CB, ArgNo),
1495               DepClassTy::REQUIRED);
1496           Changed = translateAndAddCalleeState(
1497                         A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
1498                     Changed;
1499           return true;
1500         }
1501         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1502                           << "\n");
1503         // TODO: Allow some call uses
1504         return false;
1505       }
1506 
1507       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1508       return false;
1509     };
1510     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1511       if (OffsetInfoMap.count(NewU))
1512         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1513       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1514       return true;
1515     };
1516     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1517                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1518                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
1519       return indicatePessimisticFixpoint();
1520 
1521     LLVM_DEBUG({
1522       dbgs() << "Accesses by bin after update:\n";
1523       for (auto &It : AccessBins) {
1524         dbgs() << "[" << It.first.getOffset() << "-"
1525                << It.first.getOffset() + It.first.getSize()
1526                << "] : " << It.getSecond()->size() << "\n";
1527         for (auto &Acc : *It.getSecond()) {
1528           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1529                  << "\n";
1530           if (Acc.getLocalInst() != Acc.getRemoteInst())
1531             dbgs() << "     -->                         "
1532                    << *Acc.getRemoteInst() << "\n";
1533           if (!Acc.isWrittenValueYetUndetermined()) {
1534             if (Acc.getWrittenValue())
1535               dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
1536             else
1537               dbgs() << "       - c: <unknown>\n";
1538           }
1539         }
1540       }
1541     });
1542 
1543     return Changed;
1544   }
1545 
1546   /// See AbstractAttribute::trackStatistics()
1547   void trackStatistics() const override {
1548     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1549   }
1550 };
1551 
1552 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1553   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1554       : AAPointerInfoImpl(IRP, A) {}
1555 
1556   /// See AbstractAttribute::updateImpl(...).
1557   ChangeStatus updateImpl(Attributor &A) override {
1558     return indicatePessimisticFixpoint();
1559   }
1560 
1561   /// See AbstractAttribute::trackStatistics()
1562   void trackStatistics() const override {
1563     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1564   }
1565 };
1566 
1567 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1568   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1569       : AAPointerInfoFloating(IRP, A) {}
1570 
1571   /// See AbstractAttribute::initialize(...).
1572   void initialize(Attributor &A) override {
1573     AAPointerInfoFloating::initialize(A);
1574     if (getAnchorScope()->isDeclaration())
1575       indicatePessimisticFixpoint();
1576   }
1577 
1578   /// See AbstractAttribute::trackStatistics()
1579   void trackStatistics() const override {
1580     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1581   }
1582 };
1583 
1584 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1585   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1586       : AAPointerInfoFloating(IRP, A) {}
1587 
1588   /// See AbstractAttribute::updateImpl(...).
1589   ChangeStatus updateImpl(Attributor &A) override {
1590     using namespace AA::PointerInfo;
1591     // We handle memory intrinsics explicitly, at least the first (=
1592     // destination) and second (=source) arguments as we know how they are
1593     // accessed.
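    // E.g. (illustrative), for a call such as
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 32, i1 false)
    // the destination argument gets a write access over [0-32) and the source
    // argument a read access over [0-32); a non-constant length results in an
    // unknown access size.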
1594     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1595       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1596       int64_t LengthVal = OffsetAndSize::Unknown;
1597       if (Length)
1598         LengthVal = Length->getSExtValue();
1599       Value &Ptr = getAssociatedValue();
1600       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1601       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1602       if (ArgNo == 0) {
1603         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1604                      nullptr, LengthVal);
1605       } else if (ArgNo == 1) {
1606         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1607                      nullptr, LengthVal);
1608       } else {
1609         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1610                           << *MI << "\n");
1611         return indicatePessimisticFixpoint();
1612       }
1613       return Changed;
1614     }
1615 
1616     // TODO: Once we have call site specific value information we can provide
1617     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1619     //       redirecting requests to the callee argument.
1620     Argument *Arg = getAssociatedArgument();
1621     if (!Arg)
1622       return indicatePessimisticFixpoint();
1623     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1624     auto &ArgAA =
1625         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1626     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1627   }
1628 
1629   /// See AbstractAttribute::trackStatistics()
1630   void trackStatistics() const override {
1631     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1632   }
1633 };
1634 
1635 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1636   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1637       : AAPointerInfoFloating(IRP, A) {}
1638 
1639   /// See AbstractAttribute::trackStatistics()
1640   void trackStatistics() const override {
1641     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1642   }
1643 };
1644 } // namespace
1645 
1646 /// -----------------------NoUnwind Function Attribute--------------------------
1647 
1648 namespace {
1649 struct AANoUnwindImpl : AANoUnwind {
1650   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1651 
1652   const std::string getAsStr() const override {
1653     return getAssumed() ? "nounwind" : "may-unwind";
1654   }
1655 
1656   /// See AbstractAttribute::updateImpl(...).
1657   ChangeStatus updateImpl(Attributor &A) override {
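    // Opcodes of instructions through which an exception may leave the
    // function: call-like instructions and the exception-handling terminators.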
1658     auto Opcodes = {
1659         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1660         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1661         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1662 
1663     auto CheckForNoUnwind = [&](Instruction &I) {
1664       if (!I.mayThrow())
1665         return true;
1666 
1667       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1668         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1669             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1670         return NoUnwindAA.isAssumedNoUnwind();
1671       }
1672       return false;
1673     };
1674 
1675     bool UsedAssumedInformation = false;
1676     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1677                                    UsedAssumedInformation))
1678       return indicatePessimisticFixpoint();
1679 
1680     return ChangeStatus::UNCHANGED;
1681   }
1682 };
1683 
1684 struct AANoUnwindFunction final : public AANoUnwindImpl {
1685   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1686       : AANoUnwindImpl(IRP, A) {}
1687 
1688   /// See AbstractAttribute::trackStatistics()
1689   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1690 };
1691 
/// NoUnwind attribute deduction for a call site.
1693 struct AANoUnwindCallSite final : AANoUnwindImpl {
1694   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1695       : AANoUnwindImpl(IRP, A) {}
1696 
1697   /// See AbstractAttribute::initialize(...).
1698   void initialize(Attributor &A) override {
1699     AANoUnwindImpl::initialize(A);
1700     Function *F = getAssociatedFunction();
1701     if (!F || F->isDeclaration())
1702       indicatePessimisticFixpoint();
1703   }
1704 
1705   /// See AbstractAttribute::updateImpl(...).
1706   ChangeStatus updateImpl(Attributor &A) override {
1707     // TODO: Once we have call site specific value information we can provide
1708     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1710     //       redirecting requests to the callee argument.
1711     Function *F = getAssociatedFunction();
1712     const IRPosition &FnPos = IRPosition::function(*F);
1713     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1714     return clampStateAndIndicateChange(getState(), FnAA.getState());
1715   }
1716 
1717   /// See AbstractAttribute::trackStatistics()
1718   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1719 };
1720 } // namespace
1721 
1722 /// --------------------- Function Return Values -------------------------------
1723 
1724 namespace {
1725 /// "Attribute" that collects all potential returned values and the return
1726 /// instructions that they arise from.
1727 ///
1728 /// If there is a unique returned value R, the manifest method will:
1729 ///   - mark R with the "returned" attribute, if R is an argument.
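///
/// For example (illustrative only), for
///   define i8* @id(i8* %p) { ret i8* %p }
/// the unique returned value is the argument %p, so manifest would mark it:
///   define i8* @id(i8* returned %p) { ret i8* %p }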
1730 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1731 
1732   /// Mapping of values potentially returned by the associated function to the
1733   /// return instructions that might return them.
1734   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1735 
1736   /// State flags
1737   ///
1738   ///{
1739   bool IsFixed = false;
1740   bool IsValidState = true;
1741   ///}
1742 
1743 public:
1744   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1745       : AAReturnedValues(IRP, A) {}
1746 
1747   /// See AbstractAttribute::initialize(...).
1748   void initialize(Attributor &A) override {
1749     // Reset the state.
1750     IsFixed = false;
1751     IsValidState = true;
1752     ReturnedValues.clear();
1753 
1754     Function *F = getAssociatedFunction();
1755     if (!F || F->isDeclaration()) {
1756       indicatePessimisticFixpoint();
1757       return;
1758     }
1759     assert(!F->getReturnType()->isVoidTy() &&
1760            "Did not expect a void return type!");
1761 
1762     // The map from instruction opcodes to those instructions in the function.
1763     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1764 
1765     // Look through all arguments, if one is marked as returned we are done.
1766     for (Argument &Arg : F->args()) {
1767       if (Arg.hasReturnedAttr()) {
1768         auto &ReturnInstSet = ReturnedValues[&Arg];
1769         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1770           for (Instruction *RI : *Insts)
1771             ReturnInstSet.insert(cast<ReturnInst>(RI));
1772 
1773         indicateOptimisticFixpoint();
1774         return;
1775       }
1776     }
1777 
1778     if (!A.isFunctionIPOAmendable(*F))
1779       indicatePessimisticFixpoint();
1780   }
1781 
1782   /// See AbstractAttribute::manifest(...).
1783   ChangeStatus manifest(Attributor &A) override;
1784 
1785   /// See AbstractAttribute::getState(...).
1786   AbstractState &getState() override { return *this; }
1787 
1788   /// See AbstractAttribute::getState(...).
1789   const AbstractState &getState() const override { return *this; }
1790 
1791   /// See AbstractAttribute::updateImpl(Attributor &A).
1792   ChangeStatus updateImpl(Attributor &A) override;
1793 
1794   llvm::iterator_range<iterator> returned_values() override {
1795     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1796   }
1797 
1798   llvm::iterator_range<const_iterator> returned_values() const override {
1799     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1800   }
1801 
1802   /// Return the number of potential return values, -1 if unknown.
1803   size_t getNumReturnValues() const override {
1804     return isValidState() ? ReturnedValues.size() : -1;
1805   }
1806 
1807   /// Return an assumed unique return value if a single candidate is found. If
1808   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1809   /// Optional::NoneType.
1810   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1811 
1812   /// See AbstractState::checkForAllReturnedValues(...).
1813   bool checkForAllReturnedValuesAndReturnInsts(
1814       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1815       const override;
1816 
1817   /// Pretty print the attribute similar to the IR representation.
1818   const std::string getAsStr() const override;
1819 
1820   /// See AbstractState::isAtFixpoint().
1821   bool isAtFixpoint() const override { return IsFixed; }
1822 
1823   /// See AbstractState::isValidState().
1824   bool isValidState() const override { return IsValidState; }
1825 
1826   /// See AbstractState::indicateOptimisticFixpoint(...).
1827   ChangeStatus indicateOptimisticFixpoint() override {
1828     IsFixed = true;
1829     return ChangeStatus::UNCHANGED;
1830   }
1831 
1832   ChangeStatus indicatePessimisticFixpoint() override {
1833     IsFixed = true;
1834     IsValidState = false;
1835     return ChangeStatus::CHANGED;
1836   }
1837 };
1838 
1839 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1840   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1841 
1842   // Bookkeeping.
1843   assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");
1846 
1847   // Check if we have an assumed unique return value that we could manifest.
1848   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1849 
1850   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1851     return Changed;
1852 
1853   // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with a unique return value");
1856   // If the assumed unique return value is an argument, annotate it.
1857   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1858     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1859             getAssociatedFunction()->getReturnType())) {
1860       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1861       Changed = IRAttribute::manifest(A);
1862     }
1863   }
1864   return Changed;
1865 }
1866 
1867 const std::string AAReturnedValuesImpl::getAsStr() const {
1868   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1869          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1870 }
1871 
1872 Optional<Value *>
1873 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1874   // If checkForAllReturnedValues provides a unique value, ignoring potential
1875   // undef values that can also be present, it is assumed to be the actual
1876   // return value and forwarded to the caller of this method. If there are
1877   // multiple, a nullptr is returned indicating there cannot be a unique
1878   // returned value.
1879   Optional<Value *> UniqueRV;
1880   Type *Ty = getAssociatedFunction()->getReturnType();
1881 
1882   auto Pred = [&](Value &RV) -> bool {
1883     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1884     return UniqueRV != Optional<Value *>(nullptr);
1885   };
1886 
1887   if (!A.checkForAllReturnedValues(Pred, *this))
1888     UniqueRV = nullptr;
1889 
1890   return UniqueRV;
1891 }
1892 
1893 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1894     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1895     const {
1896   if (!isValidState())
1897     return false;
1898 
1899   // Check all returned values but ignore call sites as long as we have not
1900   // encountered an overdefined one during an update.
1901   for (auto &It : ReturnedValues) {
1902     Value *RV = It.first;
1903     if (!Pred(*RV, It.second))
1904       return false;
1905   }
1906 
1907   return true;
1908 }
1909 
1910 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1911   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1912 
1913   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1914                            bool) -> bool {
1915     assert(AA::isValidInScope(V, Ret.getFunction()) &&
1916            "Assumed returned value should be valid in function scope!");
1917     if (ReturnedValues[&V].insert(&Ret))
1918       Changed = ChangeStatus::CHANGED;
1919     return true;
1920   };
1921 
1922   bool UsedAssumedInformation = false;
1923   auto ReturnInstCB = [&](Instruction &I) {
1924     ReturnInst &Ret = cast<ReturnInst>(I);
1925     return genericValueTraversal<ReturnInst>(
1926         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1927         &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1928         /* MaxValues */ 16,
1929         /* StripCB */ nullptr, AA::Intraprocedural);
1930   };
1931 
1932   // Discover returned values from all live returned instructions in the
1933   // associated function.
1934   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1935                                  UsedAssumedInformation))
1936     return indicatePessimisticFixpoint();
1937   return Changed;
1938 }
1939 
1940 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1941   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1942       : AAReturnedValuesImpl(IRP, A) {}
1943 
1944   /// See AbstractAttribute::trackStatistics()
1945   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1946 };
1947 
/// Returned values information for a call site.
1949 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1950   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1951       : AAReturnedValuesImpl(IRP, A) {}
1952 
1953   /// See AbstractAttribute::initialize(...).
1954   void initialize(Attributor &A) override {
1955     // TODO: Once we have call site specific value information we can provide
1956     //       call site specific liveness information and then it makes
1957     //       sense to specialize attributes for call sites instead of
1958     //       redirecting requests to the callee.
1959     llvm_unreachable("Abstract attributes for returned values are not "
1960                      "supported for call sites yet!");
1961   }
1962 
1963   /// See AbstractAttribute::updateImpl(...).
1964   ChangeStatus updateImpl(Attributor &A) override {
1965     return indicatePessimisticFixpoint();
1966   }
1967 
1968   /// See AbstractAttribute::trackStatistics()
1969   void trackStatistics() const override {}
1970 };
1971 } // namespace
1972 
1973 /// ------------------------ NoSync Function Attribute -------------------------
1974 
1975 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1976   if (!I->isAtomic())
1977     return false;
1978 
1979   if (auto *FI = dyn_cast<FenceInst>(I))
1980     // All legal orderings for fence are stronger than monotonic.
1981     return FI->getSyncScopeID() != SyncScope::SingleThread;
1982   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1983     // Unordered is not a legal ordering for cmpxchg.
1984     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1985             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1986   }
1987 
1988   AtomicOrdering Ordering;
1989   switch (I->getOpcode()) {
1990   case Instruction::AtomicRMW:
1991     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1992     break;
1993   case Instruction::Store:
1994     Ordering = cast<StoreInst>(I)->getOrdering();
1995     break;
1996   case Instruction::Load:
1997     Ordering = cast<LoadInst>(I)->getOrdering();
1998     break;
1999   default:
2000     llvm_unreachable(
2001         "New atomic operations need to be known in the attributor.");
2002   }
2003 
2004   return (Ordering != AtomicOrdering::Unordered &&
2005           Ordering != AtomicOrdering::Monotonic);
2006 }
2007 
2008 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2009 /// which would be nosync except that they have a volatile flag.  All other
2010 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2011 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2012   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2013     return !MI->isVolatile();
2014   return false;
2015 }
2016 
2017 namespace {
2018 struct AANoSyncImpl : AANoSync {
2019   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2020 
2021   const std::string getAsStr() const override {
2022     return getAssumed() ? "nosync" : "may-sync";
2023   }
2024 
2025   /// See AbstractAttribute::updateImpl(...).
2026   ChangeStatus updateImpl(Attributor &A) override;
2027 };
2028 
2029 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2030 
2031   auto CheckRWInstForNoSync = [&](Instruction &I) {
2032     return AA::isNoSyncInst(A, I, *this);
2033   };
2034 
2035   auto CheckForNoSync = [&](Instruction &I) {
2036     // At this point we handled all read/write effects and they are all
2037     // nosync, so they can be skipped.
2038     if (I.mayReadOrWriteMemory())
2039       return true;
2040 
2041     // non-convergent and readnone imply nosync.
2042     return !cast<CallBase>(I).isConvergent();
2043   };
2044 
2045   bool UsedAssumedInformation = false;
2046   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2047                                           UsedAssumedInformation) ||
2048       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2049                                          UsedAssumedInformation))
2050     return indicatePessimisticFixpoint();
2051 
2052   return ChangeStatus::UNCHANGED;
2053 }
2054 
2055 struct AANoSyncFunction final : public AANoSyncImpl {
2056   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2057       : AANoSyncImpl(IRP, A) {}
2058 
2059   /// See AbstractAttribute::trackStatistics()
2060   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2061 };
2062 
/// NoSync attribute deduction for a call site.
2064 struct AANoSyncCallSite final : AANoSyncImpl {
2065   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2066       : AANoSyncImpl(IRP, A) {}
2067 
2068   /// See AbstractAttribute::initialize(...).
2069   void initialize(Attributor &A) override {
2070     AANoSyncImpl::initialize(A);
2071     Function *F = getAssociatedFunction();
2072     if (!F || F->isDeclaration())
2073       indicatePessimisticFixpoint();
2074   }
2075 
2076   /// See AbstractAttribute::updateImpl(...).
2077   ChangeStatus updateImpl(Attributor &A) override {
2078     // TODO: Once we have call site specific value information we can provide
2079     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2081     //       redirecting requests to the callee argument.
2082     Function *F = getAssociatedFunction();
2083     const IRPosition &FnPos = IRPosition::function(*F);
2084     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2085     return clampStateAndIndicateChange(getState(), FnAA.getState());
2086   }
2087 
2088   /// See AbstractAttribute::trackStatistics()
2089   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2090 };
2091 } // namespace
2092 
2093 /// ------------------------ No-Free Attributes ----------------------------
2094 
2095 namespace {
2096 struct AANoFreeImpl : public AANoFree {
2097   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2098 
2099   /// See AbstractAttribute::updateImpl(...).
2100   ChangeStatus updateImpl(Attributor &A) override {
2101     auto CheckForNoFree = [&](Instruction &I) {
2102       const auto &CB = cast<CallBase>(I);
2103       if (CB.hasFnAttr(Attribute::NoFree))
2104         return true;
2105 
2106       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2107           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2108       return NoFreeAA.isAssumedNoFree();
2109     };
2110 
2111     bool UsedAssumedInformation = false;
2112     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2113                                            UsedAssumedInformation))
2114       return indicatePessimisticFixpoint();
2115     return ChangeStatus::UNCHANGED;
2116   }
2117 
2118   /// See AbstractAttribute::getAsStr().
2119   const std::string getAsStr() const override {
2120     return getAssumed() ? "nofree" : "may-free";
2121   }
2122 };
2123 
2124 struct AANoFreeFunction final : public AANoFreeImpl {
2125   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2126       : AANoFreeImpl(IRP, A) {}
2127 
2128   /// See AbstractAttribute::trackStatistics()
2129   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2130 };
2131 
/// NoFree attribute deduction for a call site.
2133 struct AANoFreeCallSite final : AANoFreeImpl {
2134   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2135       : AANoFreeImpl(IRP, A) {}
2136 
2137   /// See AbstractAttribute::initialize(...).
2138   void initialize(Attributor &A) override {
2139     AANoFreeImpl::initialize(A);
2140     Function *F = getAssociatedFunction();
2141     if (!F || F->isDeclaration())
2142       indicatePessimisticFixpoint();
2143   }
2144 
2145   /// See AbstractAttribute::updateImpl(...).
2146   ChangeStatus updateImpl(Attributor &A) override {
2147     // TODO: Once we have call site specific value information we can provide
2148     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2150     //       redirecting requests to the callee argument.
2151     Function *F = getAssociatedFunction();
2152     const IRPosition &FnPos = IRPosition::function(*F);
2153     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2154     return clampStateAndIndicateChange(getState(), FnAA.getState());
2155   }
2156 
2157   /// See AbstractAttribute::trackStatistics()
2158   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2159 };
2160 
2161 /// NoFree attribute for floating values.
2162 struct AANoFreeFloating : AANoFreeImpl {
2163   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2164       : AANoFreeImpl(IRP, A) {}
2165 
2166   /// See AbstractAttribute::trackStatistics()
2167   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2168 
  /// See AbstractAttribute::updateImpl(...).
2170   ChangeStatus updateImpl(Attributor &A) override {
2171     const IRPosition &IRP = getIRPosition();
2172 
2173     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2174         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2175     if (NoFreeAA.isAssumedNoFree())
2176       return ChangeStatus::UNCHANGED;
2177 
2178     Value &AssociatedValue = getIRPosition().getAssociatedValue();
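    // Walk all (transitive) uses of the value: call arguments defer to the
    // callee argument's nofree information, simple pointer manipulations
    // (GEP, bitcast, PHI, select) are followed, and loads, stores, and returns
    // cannot free the value. Any other use is treated conservatively.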
2179     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2180       Instruction *UserI = cast<Instruction>(U.getUser());
2181       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2182         if (CB->isBundleOperand(&U))
2183           return false;
2184         if (!CB->isArgOperand(&U))
2185           return true;
2186         unsigned ArgNo = CB->getArgOperandNo(&U);
2187 
2188         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2189             *this, IRPosition::callsite_argument(*CB, ArgNo),
2190             DepClassTy::REQUIRED);
2191         return NoFreeArg.isAssumedNoFree();
2192       }
2193 
2194       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2195           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2196         Follow = true;
2197         return true;
2198       }
2199       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2200           isa<ReturnInst>(UserI))
2201         return true;
2202 
2203       // Unknown user.
2204       return false;
2205     };
2206     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2207       return indicatePessimisticFixpoint();
2208 
2209     return ChangeStatus::UNCHANGED;
2210   }
2211 };
2212 
2213 /// NoFree attribute for a call site argument.
2214 struct AANoFreeArgument final : AANoFreeFloating {
2215   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2216       : AANoFreeFloating(IRP, A) {}
2217 
2218   /// See AbstractAttribute::trackStatistics()
2219   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2220 };
2221 
2222 /// NoFree attribute for call site arguments.
2223 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2224   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2225       : AANoFreeFloating(IRP, A) {}
2226 
2227   /// See AbstractAttribute::updateImpl(...).
2228   ChangeStatus updateImpl(Attributor &A) override {
2229     // TODO: Once we have call site specific value information we can provide
2230     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2232     //       redirecting requests to the callee argument.
2233     Argument *Arg = getAssociatedArgument();
2234     if (!Arg)
2235       return indicatePessimisticFixpoint();
2236     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2237     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2238     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2239   }
2240 
2241   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}
2243 };
2244 
2245 /// NoFree attribute for function return value.
2246 struct AANoFreeReturned final : AANoFreeFloating {
2247   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2248       : AANoFreeFloating(IRP, A) {
2249     llvm_unreachable("NoFree is not applicable to function returns!");
2250   }
2251 
2252   /// See AbstractAttribute::initialize(...).
2253   void initialize(Attributor &A) override {
2254     llvm_unreachable("NoFree is not applicable to function returns!");
2255   }
2256 
2257   /// See AbstractAttribute::updateImpl(...).
2258   ChangeStatus updateImpl(Attributor &A) override {
2259     llvm_unreachable("NoFree is not applicable to function returns!");
2260   }
2261 
2262   /// See AbstractAttribute::trackStatistics()
2263   void trackStatistics() const override {}
2264 };
2265 
2266 /// NoFree attribute deduction for a call site return value.
2267 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2268   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2269       : AANoFreeFloating(IRP, A) {}
2270 
2271   ChangeStatus manifest(Attributor &A) override {
2272     return ChangeStatus::UNCHANGED;
2273   }
2274   /// See AbstractAttribute::trackStatistics()
2275   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2276 };
2277 } // namespace
2278 
2279 /// ------------------------ NonNull Argument Attribute ------------------------
2280 namespace {
2281 static int64_t getKnownNonNullAndDerefBytesForUse(
2282     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2283     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2284   TrackUse = false;
2285 
2286   const Value *UseV = U->get();
2287   if (!UseV->getType()->isPointerTy())
2288     return 0;
2289 
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We try to be smart and avoid looking through things we do not
  // like for now, e.g., non-inbounds GEPs.
2293   if (isa<CastInst>(I)) {
2294     TrackUse = true;
2295     return 0;
2296   }
2297 
2298   if (isa<GetElementPtrInst>(I)) {
2299     TrackUse = true;
2300     return 0;
2301   }
2302 
2303   Type *PtrTy = UseV->getType();
2304   const Function *F = I->getFunction();
2305   bool NullPointerIsDefined =
2306       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2307   const DataLayout &DL = A.getInfoCache().getDL();
2308   if (const auto *CB = dyn_cast<CallBase>(I)) {
2309     if (CB->isBundleOperand(U)) {
2310       if (RetainedKnowledge RK = getKnowledgeFromUse(
2311               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2312         IsNonNull |=
2313             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2314         return RK.ArgValue;
2315       }
2316       return 0;
2317     }
2318 
2319     if (CB->isCallee(U)) {
2320       IsNonNull |= !NullPointerIsDefined;
2321       return 0;
2322     }
2323 
2324     unsigned ArgNo = CB->getArgOperandNo(U);
2325     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2326     // As long as we only use known information there is no need to track
2327     // dependences here.
2328     auto &DerefAA =
2329         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2330     IsNonNull |= DerefAA.isKnownNonNull();
2331     return DerefAA.getKnownDereferenceableBytes();
2332   }
2333 
2334   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2335   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2336     return 0;
2337 
2338   int64_t Offset;
2339   const Value *Base =
2340       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
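  // E.g. (illustrative), an 8-byte access at offset 24 from the associated
  // value implies at least 32 dereferenceable bytes (and nonnull if null is
  // not a defined pointer in this address space).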
2341   if (Base && Base == &AssociatedValue) {
2342     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2343     IsNonNull |= !NullPointerIsDefined;
2344     return std::max(int64_t(0), DerefBytes);
2345   }
2346 
  // Corner case when the offset is 0.
2348   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2349                                           /*AllowNonInbounds*/ true);
2350   if (Base && Base == &AssociatedValue && Offset == 0) {
2351     int64_t DerefBytes = Loc->Size.getValue();
2352     IsNonNull |= !NullPointerIsDefined;
2353     return std::max(int64_t(0), DerefBytes);
2354   }
2355 
2356   return 0;
2357 }
2358 
2359 struct AANonNullImpl : AANonNull {
2360   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2361       : AANonNull(IRP, A),
2362         NullIsDefined(NullPointerIsDefined(
2363             getAnchorScope(),
2364             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2365 
2366   /// See AbstractAttribute::initialize(...).
2367   void initialize(Attributor &A) override {
2368     Value &V = *getAssociatedValue().stripPointerCasts();
2369     if (!NullIsDefined &&
2370         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2371                 /* IgnoreSubsumingPositions */ false, &A)) {
2372       indicateOptimisticFixpoint();
2373       return;
2374     }
2375 
2376     if (isa<ConstantPointerNull>(V)) {
2377       indicatePessimisticFixpoint();
2378       return;
2379     }
2380 
2381     AANonNull::initialize(A);
2382 
2383     bool CanBeNull, CanBeFreed;
2384     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2385                                          CanBeFreed)) {
2386       if (!CanBeNull) {
2387         indicateOptimisticFixpoint();
2388         return;
2389       }
2390     }
2391 
2392     if (isa<GlobalValue>(V)) {
2393       indicatePessimisticFixpoint();
2394       return;
2395     }
2396 
2397     if (Instruction *CtxI = getCtxI())
2398       followUsesInMBEC(*this, A, getState(), *CtxI);
2399   }
2400 
2401   /// See followUsesInMBEC
2402   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2403                        AANonNull::StateType &State) {
2404     bool IsNonNull = false;
2405     bool TrackUse = false;
2406     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2407                                        IsNonNull, TrackUse);
2408     State.setKnown(IsNonNull);
2409     return TrackUse;
2410   }
2411 
2412   /// See AbstractAttribute::getAsStr().
2413   const std::string getAsStr() const override {
2414     return getAssumed() ? "nonnull" : "may-null";
2415   }
2416 
2417   /// Flag to determine if the underlying value can be null and still allow
2418   /// valid accesses.
2419   const bool NullIsDefined;
2420 };
2421 
2422 /// NonNull attribute for a floating value.
2423 struct AANonNullFloating : public AANonNullImpl {
2424   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2425       : AANonNullImpl(IRP, A) {}
2426 
2427   /// See AbstractAttribute::updateImpl(...).
2428   ChangeStatus updateImpl(Attributor &A) override {
2429     const DataLayout &DL = A.getDataLayout();
2430 
2431     DominatorTree *DT = nullptr;
2432     AssumptionCache *AC = nullptr;
2433     InformationCache &InfoCache = A.getInfoCache();
2434     if (const Function *Fn = getAnchorScope()) {
2435       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2436       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2437     }
2438 
2439     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2440                             AANonNull::StateType &T, bool Stripped) -> bool {
2441       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2442                                              DepClassTy::REQUIRED);
2443       if (!Stripped && this == &AA) {
2444         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2445           T.indicatePessimisticFixpoint();
2446       } else {
2447         // Use abstract attribute information.
2448         const AANonNull::StateType &NS = AA.getState();
2449         T ^= NS;
2450       }
2451       return T.isValidState();
2452     };
2453 
2454     StateType T;
2455     bool UsedAssumedInformation = false;
2456     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2457                                           VisitValueCB, getCtxI(),
2458                                           UsedAssumedInformation))
2459       return indicatePessimisticFixpoint();
2460 
2461     return clampStateAndIndicateChange(getState(), T);
2462   }
2463 
2464   /// See AbstractAttribute::trackStatistics()
2465   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2466 };
2467 
2468 /// NonNull attribute for function return value.
2469 struct AANonNullReturned final
2470     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2471   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2472       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2473 
2474   /// See AbstractAttribute::getAsStr().
2475   const std::string getAsStr() const override {
2476     return getAssumed() ? "nonnull" : "may-null";
2477   }
2478 
2479   /// See AbstractAttribute::trackStatistics()
2480   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2481 };
2482 
2483 /// NonNull attribute for function argument.
2484 struct AANonNullArgument final
2485     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2486   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2487       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2488 
2489   /// See AbstractAttribute::trackStatistics()
2490   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2491 };
2492 
2493 struct AANonNullCallSiteArgument final : AANonNullFloating {
2494   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2495       : AANonNullFloating(IRP, A) {}
2496 
2497   /// See AbstractAttribute::trackStatistics()
2498   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2499 };
2500 
2501 /// NonNull attribute for a call site return position.
2502 struct AANonNullCallSiteReturned final
2503     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2504   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2505       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2506 
2507   /// See AbstractAttribute::trackStatistics()
2508   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2509 };
2510 } // namespace
2511 
2512 /// ------------------------ No-Recurse Attributes ----------------------------
2513 
2514 namespace {
2515 struct AANoRecurseImpl : public AANoRecurse {
2516   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2517 
2518   /// See AbstractAttribute::getAsStr()
2519   const std::string getAsStr() const override {
2520     return getAssumed() ? "norecurse" : "may-recurse";
2521   }
2522 };
2523 
2524 struct AANoRecurseFunction final : AANoRecurseImpl {
2525   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2526       : AANoRecurseImpl(IRP, A) {}
2527 
2528   /// See AbstractAttribute::updateImpl(...).
2529   ChangeStatus updateImpl(Attributor &A) override {
2530 
2531     // If all live call sites are known to be no-recurse, we are as well.
2532     auto CallSitePred = [&](AbstractCallSite ACS) {
2533       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2534           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2535           DepClassTy::NONE);
2536       return NoRecurseAA.isKnownNoRecurse();
2537     };
2538     bool UsedAssumedInformation = false;
2539     if (A.checkForAllCallSites(CallSitePred, *this, true,
2540                                UsedAssumedInformation)) {
2541       // If we know all call sites and all are known no-recurse, we are done.
2542       // If all known call sites, which might not be all that exist, are known
2543       // to be no-recurse, we are not done but we can continue to assume
2544       // no-recurse. If one of the call sites we have not visited will become
2545       // live, another update is triggered.
2546       if (!UsedAssumedInformation)
2547         indicateOptimisticFixpoint();
2548       return ChangeStatus::UNCHANGED;
2549     }
2550 
2551     const AAFunctionReachability &EdgeReachability =
2552         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2553                                            DepClassTy::REQUIRED);
2554     if (EdgeReachability.canReach(A, *getAnchorScope()))
2555       return indicatePessimisticFixpoint();
2556     return ChangeStatus::UNCHANGED;
2557   }
2558 
2559   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2560 };
2561 
/// NoRecurse attribute deduction for a call site.
2563 struct AANoRecurseCallSite final : AANoRecurseImpl {
2564   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2565       : AANoRecurseImpl(IRP, A) {}
2566 
2567   /// See AbstractAttribute::initialize(...).
2568   void initialize(Attributor &A) override {
2569     AANoRecurseImpl::initialize(A);
2570     Function *F = getAssociatedFunction();
2571     if (!F || F->isDeclaration())
2572       indicatePessimisticFixpoint();
2573   }
2574 
2575   /// See AbstractAttribute::updateImpl(...).
2576   ChangeStatus updateImpl(Attributor &A) override {
2577     // TODO: Once we have call site specific value information we can provide
2578     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2580     //       redirecting requests to the callee argument.
2581     Function *F = getAssociatedFunction();
2582     const IRPosition &FnPos = IRPosition::function(*F);
2583     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2584     return clampStateAndIndicateChange(getState(), FnAA.getState());
2585   }
2586 
2587   /// See AbstractAttribute::trackStatistics()
2588   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2589 };
2590 } // namespace
2591 
2592 /// -------------------- Undefined-Behavior Attributes ------------------------
2593 
2594 namespace {
2595 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2596   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2597       : AAUndefinedBehavior(IRP, A) {}
2598 
  /// See AbstractAttribute::updateImpl(...). This inspects not only memory
  /// accesses through a pointer but also branches, call sites, and returns.
2601   ChangeStatus updateImpl(Attributor &A) override {
2602     const size_t UBPrevSize = KnownUBInsts.size();
2603     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2604 
2605     auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef states that volatile stores are not UB, so skip them.
2607       if (I.isVolatile() && I.mayWriteToMemory())
2608         return true;
2609 
2610       // Skip instructions that are already saved.
2611       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2612         return true;
2613 
2614       // If we reach here, we know we have an instruction
2615       // that accesses memory through a pointer operand,
2616       // for which getPointerOperand() should give it to us.
2617       Value *PtrOp =
2618           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2619       assert(PtrOp &&
2620              "Expected pointer operand of memory accessing instruction");
2621 
2622       // Either we stopped and the appropriate action was taken,
2623       // or we got back a simplified value to continue.
2624       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2625       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2626         return true;
2627       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2628 
2629       // A memory access through a pointer is considered UB
      // only if the pointer has a constant null value.
2631       // TODO: Expand it to not only check constant values.
2632       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2633         AssumedNoUBInsts.insert(&I);
2634         return true;
2635       }
2636       const Type *PtrTy = PtrOpVal->getType();
2637 
2638       // Because we only consider instructions inside functions,
2639       // assume that a parent function exists.
2640       const Function *F = I.getFunction();
2641 
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
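      // E.g. (illustrative), `store i32 0, i32* null` in address space 0 is
      // known UB unless null is a valid pointer for the target.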
2644       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2645         AssumedNoUBInsts.insert(&I);
2646       else
2647         KnownUBInsts.insert(&I);
2648       return true;
2649     };
2650 
2651     auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an `undef`
      // condition.
2654 
2655       // Skip instructions that are already saved.
2656       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2657         return true;
2658 
2659       // We know we have a branch instruction.
2660       auto *BrInst = cast<BranchInst>(&I);
2661 
2662       // Unconditional branches are never considered UB.
2663       if (BrInst->isUnconditional())
2664         return true;
2665 
2666       // Either we stopped and the appropriate action was taken,
2667       // or we got back a simplified value to continue.
2668       Optional<Value *> SimplifiedCond =
2669           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2670       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2671         return true;
2672       AssumedNoUBInsts.insert(&I);
2673       return true;
2674     };
2675 
2676     auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB or not.
2678 
2679       // Skip instructions that are already saved.
2680       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2681         return true;
2682 
2683       // Check nonnull and noundef argument attribute violation for each
2684       // callsite.
2685       CallBase &CB = cast<CallBase>(I);
2686       Function *Callee = CB.getCalledFunction();
2687       if (!Callee)
2688         return true;
2689       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // call site is considered UB.
2695         if (idx >= Callee->arg_size())
2696           break;
2697         Value *ArgVal = CB.getArgOperand(idx);
2698         if (!ArgVal)
2699           continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (We can replace the value
        //       with undef.)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer at a position known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
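        // For example (illustrative IR), passing `null` to a parameter
        // declared `nonnull noundef` makes this call site known UB.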
2706         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2707         auto &NoUndefAA =
2708             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2709         if (!NoUndefAA.isKnownNoUndef())
2710           continue;
2711         bool UsedAssumedInformation = false;
2712         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2713             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2714         if (UsedAssumedInformation)
2715           continue;
2716         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2717           return true;
2718         if (!SimplifiedVal.hasValue() ||
2719             isa<UndefValue>(*SimplifiedVal.getValue())) {
2720           KnownUBInsts.insert(&I);
2721           continue;
2722         }
2723         if (!ArgVal->getType()->isPointerTy() ||
2724             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2725           continue;
2726         auto &NonNullAA =
2727             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2728         if (NonNullAA.isKnownNonNull())
2729           KnownUBInsts.insert(&I);
2730       }
2731       return true;
2732     };
2733 
2734     auto InspectReturnInstForUB = [&](Instruction &I) {
2735       auto &RI = cast<ReturnInst>(I);
2736       // Either we stopped and the appropriate action was taken,
2737       // or we got back a simplified return value to continue.
2738       Optional<Value *> SimplifiedRetValue =
2739           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2740       if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
2741         return true;
2742 
      // Check if a return instruction always causes UB or not.
      // Note: It is guaranteed that the returned position of the anchor
      //       scope has the noundef attribute when this is called.
2746       //       We also ensure the return position is not "assumed dead"
2747       //       because the returned value was then potentially simplified to
2748       //       `undef` in AAReturnedValues without removing the `noundef`
2749       //       attribute yet.
2750 
      // When the returned position has the noundef attribute, UB occurs in the
2752       // following cases.
2753       //   (1) Returned value is known to be undef.
2754       //   (2) The value is known to be a null pointer and the returned
2755       //       position has nonnull attribute (because the returned value is
2756       //       poison).
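      // For example (illustrative IR), `ret i8* null` from a function whose
      // return position is `noundef nonnull` is known UB.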
2757       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2758         auto &NonNullAA = A.getAAFor<AANonNull>(
2759             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2760         if (NonNullAA.isKnownNonNull())
2761           KnownUBInsts.insert(&I);
2762       }
2763 
2764       return true;
2765     };
2766 
2767     bool UsedAssumedInformation = false;
2768     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2769                               {Instruction::Load, Instruction::Store,
2770                                Instruction::AtomicCmpXchg,
2771                                Instruction::AtomicRMW},
2772                               UsedAssumedInformation,
2773                               /* CheckBBLivenessOnly */ true);
2774     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2775                               UsedAssumedInformation,
2776                               /* CheckBBLivenessOnly */ true);
2777     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2778                                       UsedAssumedInformation);
2779 
    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
2782     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2783       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2784       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2785         auto &RetPosNoUndefAA =
2786             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2787         if (RetPosNoUndefAA.isKnownNoUndef())
2788           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2789                                     {Instruction::Ret}, UsedAssumedInformation,
2790                                     /* CheckBBLivenessOnly */ true);
2791       }
2792     }
2793 
2794     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2795         UBPrevSize != KnownUBInsts.size())
2796       return ChangeStatus::CHANGED;
2797     return ChangeStatus::UNCHANGED;
2798   }
2799 
2800   bool isKnownToCauseUB(Instruction *I) const override {
2801     return KnownUBInsts.count(I);
2802   }
2803 
2804   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that includes
    // those in the KnownUBInsts set). The rest is boilerplate to ensure that
    // it is one of the instructions we test for UB.
2810 
2811     switch (I->getOpcode()) {
2812     case Instruction::Load:
2813     case Instruction::Store:
2814     case Instruction::AtomicCmpXchg:
2815     case Instruction::AtomicRMW:
2816       return !AssumedNoUBInsts.count(I);
2817     case Instruction::Br: {
2818       auto *BrInst = cast<BranchInst>(I);
2819       if (BrInst->isUnconditional())
2820         return false;
2821       return !AssumedNoUBInsts.count(I);
2822     } break;
2823     default:
2824       return false;
2825     }
2826     return false;
2827   }
2828 
2829   ChangeStatus manifest(Attributor &A) override {
2830     if (KnownUBInsts.empty())
2831       return ChangeStatus::UNCHANGED;
2832     for (Instruction *I : KnownUBInsts)
2833       A.changeToUnreachableAfterManifest(I);
2834     return ChangeStatus::CHANGED;
2835   }
2836 
2837   /// See AbstractAttribute::getAsStr()
2838   const std::string getAsStr() const override {
2839     return getAssumed() ? "undefined-behavior" : "no-ub";
2840   }
2841 
2842   /// Note: The correctness of this analysis depends on the fact that the
2843   /// following 2 sets will stop changing after some point.
2844   /// "Change" here means that their size changes.
2845   /// The size of each set is monotonically increasing
2846   /// (we only add items to them) and it is upper bounded by the number of
2847   /// instructions in the processed function (we can never save more
2848   /// elements in either set than this number). Hence, at some point,
2849   /// they will stop increasing.
2850   /// Consequently, at some point, both sets will have stopped
2851   /// changing, effectively making the analysis reach a fixpoint.
2852 
2853   /// Note: These 2 sets are disjoint and an instruction can be considered
2854   /// one of 3 things:
2855   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2856   ///    the KnownUBInsts set.
2857   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2858   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2860   ///    could not find a reason to assume or prove that it can cause UB,
2861   ///    hence it assumes it doesn't. We have a set for these instructions
2862   ///    so that we don't reprocess them in every update.
2863   ///    Note however that instructions in this set may cause UB.
2864 
2865 protected:
2866   /// A set of all live instructions _known_ to cause UB.
2867   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2868 
2869 private:
2870   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2871   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2872 
  // Should be called on updates in which, while processing an instruction
2874   // \p I that depends on a value \p V, one of the following has to happen:
2875   // - If the value is assumed, then stop.
2876   // - If the value is known but undef, then consider it UB.
2877   // - Otherwise, do specific processing with the simplified value.
2878   // We return None in the first 2 cases to signify that an appropriate
2879   // action was taken and the caller should stop.
2880   // Otherwise, we return the simplified value that the caller should
2881   // use for specific processing.
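  // For example, if \p V is known to simplify to `undef`, \p I is recorded in
  // KnownUBInsts and llvm::None is returned; if \p V simplifies to a concrete
  // value, that value is returned for further inspection.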
2882   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2883                                          Instruction *I) {
2884     bool UsedAssumedInformation = false;
2885     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2886         IRPosition::value(*V), *this, UsedAssumedInformation);
2887     if (!UsedAssumedInformation) {
2888       // Don't depend on assumed values.
2889       if (!SimplifiedV.hasValue()) {
2890         // If it is known (which we tested above) but it doesn't have a value,
2891         // then we can assume `undef` and hence the instruction is UB.
2892         KnownUBInsts.insert(I);
2893         return llvm::None;
2894       }
2895       if (!SimplifiedV.getValue())
2896         return nullptr;
2897       V = *SimplifiedV;
2898     }
2899     if (isa<UndefValue>(V)) {
2900       KnownUBInsts.insert(I);
2901       return llvm::None;
2902     }
2903     return V;
2904   }
2905 };
2906 
2907 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2908   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2909       : AAUndefinedBehaviorImpl(IRP, A) {}
2910 
2911   /// See AbstractAttribute::trackStatistics()
2912   void trackStatistics() const override {
2913     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2914                "Number of instructions known to have UB");
2915     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2916         KnownUBInsts.size();
2917   }
2918 };
2919 } // namespace
2920 
2921 /// ------------------------ Will-Return Attributes ----------------------------
2922 
2923 namespace {
// Helper function that checks whether a function has any cycle for which we
// don't know whether it is bounded.
// Loops with a maximum trip count are considered bounded; any other cycle is
// not.
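// For example, `for (int i = 0; i != 8; ++i)` has a constant maximum trip
// count and is bounded, whereas a loop with no SCEV-computable constant
// maximum trip count, or any irreducible control flow, is treated as
// potentially unbounded.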
2927 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2928   ScalarEvolution *SE =
2929       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2930   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we assume
  // any cycle to be an unbounded cycle.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2935   if (!SE || !LI) {
2936     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2937       if (SCCI.hasCycle())
2938         return true;
2939     return false;
2940   }
2941 
2942   // If there's irreducible control, the function may contain non-loop cycles.
2943   if (mayContainIrreducibleControl(F, LI))
2944     return true;
2945 
  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
2947   for (auto *L : LI->getLoopsInPreorder()) {
2948     if (!SE->getSmallConstantMaxTripCount(L))
2949       return true;
2950   }
2951   return false;
2952 }
2953 
2954 struct AAWillReturnImpl : public AAWillReturn {
2955   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2956       : AAWillReturn(IRP, A) {}
2957 
2958   /// See AbstractAttribute::initialize(...).
2959   void initialize(Attributor &A) override {
2960     AAWillReturn::initialize(A);
2961 
2962     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2963       indicateOptimisticFixpoint();
2964       return;
2965     }
2966   }
2967 
2968   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
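  /// The intuition: a `mustprogress` function that only reads memory cannot
  /// make observable progress through side effects, so the only way to honor
  /// the progress guarantee is to eventually return (or unwind).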
2969   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2970     // Check for `mustprogress` in the scope and the associated function which
2971     // might be different if this is a call site.
2972     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2973         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2974       return false;
2975 
2976     bool IsKnown;
2977     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2978       return IsKnown || !KnownOnly;
2979     return false;
2980   }
2981 
2982   /// See AbstractAttribute::updateImpl(...).
2983   ChangeStatus updateImpl(Attributor &A) override {
2984     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2985       return ChangeStatus::UNCHANGED;
2986 
2987     auto CheckForWillReturn = [&](Instruction &I) {
2988       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2989       const auto &WillReturnAA =
2990           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2991       if (WillReturnAA.isKnownWillReturn())
2992         return true;
2993       if (!WillReturnAA.isAssumedWillReturn())
2994         return false;
2995       const auto &NoRecurseAA =
2996           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2997       return NoRecurseAA.isAssumedNoRecurse();
2998     };
2999 
3000     bool UsedAssumedInformation = false;
3001     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3002                                            UsedAssumedInformation))
3003       return indicatePessimisticFixpoint();
3004 
3005     return ChangeStatus::UNCHANGED;
3006   }
3007 
3008   /// See AbstractAttribute::getAsStr()
3009   const std::string getAsStr() const override {
3010     return getAssumed() ? "willreturn" : "may-noreturn";
3011   }
3012 };
3013 
3014 struct AAWillReturnFunction final : AAWillReturnImpl {
3015   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3016       : AAWillReturnImpl(IRP, A) {}
3017 
3018   /// See AbstractAttribute::initialize(...).
3019   void initialize(Attributor &A) override {
3020     AAWillReturnImpl::initialize(A);
3021 
3022     Function *F = getAnchorScope();
3023     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3024       indicatePessimisticFixpoint();
3025   }
3026 
3027   /// See AbstractAttribute::trackStatistics()
3028   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3029 };
3030 
/// WillReturn attribute deduction for a call site.
3032 struct AAWillReturnCallSite final : AAWillReturnImpl {
3033   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3034       : AAWillReturnImpl(IRP, A) {}
3035 
3036   /// See AbstractAttribute::initialize(...).
3037   void initialize(Attributor &A) override {
3038     AAWillReturnImpl::initialize(A);
3039     Function *F = getAssociatedFunction();
3040     if (!F || !A.isFunctionIPOAmendable(*F))
3041       indicatePessimisticFixpoint();
3042   }
3043 
3044   /// See AbstractAttribute::updateImpl(...).
3045   ChangeStatus updateImpl(Attributor &A) override {
3046     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3047       return ChangeStatus::UNCHANGED;
3048 
3049     // TODO: Once we have call site specific value information we can provide
3050     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
3052     //       redirecting requests to the callee argument.
3053     Function *F = getAssociatedFunction();
3054     const IRPosition &FnPos = IRPosition::function(*F);
3055     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3056     return clampStateAndIndicateChange(getState(), FnAA.getState());
3057   }
3058 
3059   /// See AbstractAttribute::trackStatistics()
3060   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3061 };
3062 } // namespace
3063 
3064 /// -------------------AAReachability Attribute--------------------------
3065 
3066 namespace {
3067 struct AAReachabilityImpl : AAReachability {
3068   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3069       : AAReachability(IRP, A) {}
3070 
3071   const std::string getAsStr() const override {
3072     // TODO: Return the number of reachable queries.
3073     return "reachable";
3074   }
3075 
3076   /// See AbstractAttribute::updateImpl(...).
3077   ChangeStatus updateImpl(Attributor &A) override {
3078     return ChangeStatus::UNCHANGED;
3079   }
3080 };
3081 
3082 struct AAReachabilityFunction final : public AAReachabilityImpl {
3083   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3084       : AAReachabilityImpl(IRP, A) {}
3085 
3086   /// See AbstractAttribute::trackStatistics()
3087   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3088 };
3089 } // namespace
3090 
3091 /// ------------------------ NoAlias Argument Attribute ------------------------
3092 
3093 namespace {
3094 struct AANoAliasImpl : AANoAlias {
3095   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3096     assert(getAssociatedType()->isPointerTy() &&
3097            "Noalias is a pointer attribute");
3098   }
3099 
3100   const std::string getAsStr() const override {
3101     return getAssumed() ? "noalias" : "may-alias";
3102   }
3103 };
3104 
3105 /// NoAlias attribute for a floating value.
3106 struct AANoAliasFloating final : AANoAliasImpl {
3107   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3108       : AANoAliasImpl(IRP, A) {}
3109 
3110   /// See AbstractAttribute::initialize(...).
3111   void initialize(Attributor &A) override {
3112     AANoAliasImpl::initialize(A);
3113     Value *Val = &getAssociatedValue();
3114     do {
3115       CastInst *CI = dyn_cast<CastInst>(Val);
3116       if (!CI)
3117         break;
3118       Value *Base = CI->getOperand(0);
3119       if (!Base->hasOneUse())
3120         break;
3121       Val = Base;
3122     } while (true);
3123 
3124     if (!Val->getType()->isPointerTy()) {
3125       indicatePessimisticFixpoint();
3126       return;
3127     }
3128 
3129     if (isa<AllocaInst>(Val))
3130       indicateOptimisticFixpoint();
3131     else if (isa<ConstantPointerNull>(Val) &&
3132              !NullPointerIsDefined(getAnchorScope(),
3133                                    Val->getType()->getPointerAddressSpace()))
3134       indicateOptimisticFixpoint();
3135     else if (Val != &getAssociatedValue()) {
3136       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3137           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3138       if (ValNoAliasAA.isKnownNoAlias())
3139         indicateOptimisticFixpoint();
3140     }
3141   }
3142 
3143   /// See AbstractAttribute::updateImpl(...).
3144   ChangeStatus updateImpl(Attributor &A) override {
3145     // TODO: Implement this.
3146     return indicatePessimisticFixpoint();
3147   }
3148 
3149   /// See AbstractAttribute::trackStatistics()
3150   void trackStatistics() const override {
3151     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3152   }
3153 };
3154 
3155 /// NoAlias attribute for an argument.
3156 struct AANoAliasArgument final
3157     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3158   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3159   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3160 
3161   /// See AbstractAttribute::initialize(...).
3162   void initialize(Attributor &A) override {
3163     Base::initialize(A);
3164     // See callsite argument attribute and callee argument attribute.
3165     if (hasAttr({Attribute::ByVal}))
3166       indicateOptimisticFixpoint();
3167   }
3168 
3169   /// See AbstractAttribute::update(...).
3170   ChangeStatus updateImpl(Attributor &A) override {
3171     // We have to make sure no-alias on the argument does not break
3172     // synchronization when this is a callback argument, see also [1] below.
3173     // If synchronization cannot be affected, we delegate to the base updateImpl
3174     // function, otherwise we give up for now.
3175 
3176     // If the function is no-sync, no-alias cannot break synchronization.
3177     const auto &NoSyncAA =
3178         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3179                              DepClassTy::OPTIONAL);
3180     if (NoSyncAA.isAssumedNoSync())
3181       return Base::updateImpl(A);
3182 
3183     // If the argument is read-only, no-alias cannot break synchronization.
3184     bool IsKnown;
3185     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3186       return Base::updateImpl(A);
3187 
3188     // If the argument is never passed through callbacks, no-alias cannot break
3189     // synchronization.
3190     bool UsedAssumedInformation = false;
3191     if (A.checkForAllCallSites(
3192             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3193             true, UsedAssumedInformation))
3194       return Base::updateImpl(A);
3195 
3196     // TODO: add no-alias but make sure it doesn't break synchronization by
3197     // introducing fake uses. See:
3198     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3199     //     International Workshop on OpenMP 2018,
3200     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3201 
3202     return indicatePessimisticFixpoint();
3203   }
3204 
3205   /// See AbstractAttribute::trackStatistics()
3206   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3207 };
3208 
3209 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3210   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3211       : AANoAliasImpl(IRP, A) {}
3212 
3213   /// See AbstractAttribute::initialize(...).
3214   void initialize(Attributor &A) override {
3215     // See callsite argument attribute and callee argument attribute.
3216     const auto &CB = cast<CallBase>(getAnchorValue());
3217     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3218       indicateOptimisticFixpoint();
3219     Value &Val = getAssociatedValue();
3220     if (isa<ConstantPointerNull>(Val) &&
3221         !NullPointerIsDefined(getAnchorScope(),
3222                               Val.getType()->getPointerAddressSpace()))
3223       indicateOptimisticFixpoint();
3224   }
3225 
3226   /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
3228   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3229                             const AAMemoryBehavior &MemBehaviorAA,
3230                             const CallBase &CB, unsigned OtherArgNo) {
3231     // We do not need to worry about aliasing with the underlying IRP.
3232     if (this->getCalleeArgNo() == (int)OtherArgNo)
3233       return false;
3234 
    // If it is not a pointer or a pointer vector, there is no aliasing.
3236     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3237     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3238       return false;
3239 
3240     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3241         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3242 
3243     // If the argument is readnone, there is no read-write aliasing.
3244     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3245       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3246       return false;
3247     }
3248 
3249     // If the argument is readonly and the underlying value is readonly, there
3250     // is no read-write aliasing.
3251     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3252     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3253       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3254       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3255       return false;
3256     }
3257 
3258     // We have to utilize actual alias analysis queries so we need the object.
3259     if (!AAR)
3260       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3261 
3262     // Try to rule it out at the call site.
3263     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3264     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3265                          "callsite arguments: "
3266                       << getAssociatedValue() << " " << *ArgOp << " => "
3267                       << (IsAliasing ? "" : "no-") << "alias \n");
3268 
3269     return IsAliasing;
3270   }
3271 
3272   bool
3273   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3274                                          const AAMemoryBehavior &MemBehaviorAA,
3275                                          const AANoAlias &NoAliasAA) {
3276     // We can deduce "noalias" if the following conditions hold.
3277     // (i)   Associated value is assumed to be noalias in the definition.
3278     // (ii)  Associated value is assumed to be no-capture in all the uses
3279     //       possibly executed before this callsite.
3280     // (iii) There is no other pointer argument which could alias with the
3281     //       value.
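    // For example, a pointer returned by a `noalias` call (e.g., malloc) that
    // is not captured before this call site and cannot alias any other pointer
    // argument of the call can be reported as `noalias` here.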
3282 
3283     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3284     if (!AssociatedValueIsNoAliasAtDef) {
3285       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3286                         << " is not no-alias at the definition\n");
3287       return false;
3288     }
3289 
3290     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3291       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3292           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3293       return DerefAA.getAssumedDereferenceableBytes();
3294     };
3295 
3296     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3297 
3298     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3299     const Function *ScopeFn = VIRP.getAnchorScope();
3300     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3301     // Check whether the value is captured in the scope using AANoCapture.
3302     // Look at CFG and check only uses possibly executed before this
3303     // callsite.
3304     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3305       Instruction *UserI = cast<Instruction>(U.getUser());
3306 
      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI, we allow the use.
3309       // TODO: We should inspect the operands and allow those that cannot alias
3310       //       with the value.
3311       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3312         return true;
3313 
3314       if (ScopeFn) {
3315         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3316           if (CB->isArgOperand(&U)) {
3317 
3318             unsigned ArgNo = CB->getArgOperandNo(&U);
3319 
3320             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3321                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3322                 DepClassTy::OPTIONAL);
3323 
3324             if (NoCaptureAA.isAssumedNoCapture())
3325               return true;
3326           }
3327         }
3328 
3329         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3330           return true;
3331       }
3332 
3333       // TODO: We should track the capturing uses in AANoCapture but the problem
3334       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3335       //       a value in the module slice.
3336       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3337       case UseCaptureKind::NO_CAPTURE:
3338         return true;
3339       case UseCaptureKind::MAY_CAPTURE:
3340         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3341                           << "\n");
3342         return false;
3343       case UseCaptureKind::PASSTHROUGH:
3344         Follow = true;
3345         return true;
3346       }
3347       llvm_unreachable("unknown UseCaptureKind");
3348     };
3349 
3350     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3351       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3352         LLVM_DEBUG(
3353             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3354                    << " cannot be noalias as it is potentially captured\n");
3355         return false;
3356       }
3357     }
3358     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3359 
3360     // Check there is no other pointer argument which could alias with the
3361     // value passed at this call site.
3362     // TODO: AbstractCallSite
3363     const auto &CB = cast<CallBase>(getAnchorValue());
3364     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3365       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3366         return false;
3367 
3368     return true;
3369   }
3370 
3371   /// See AbstractAttribute::updateImpl(...).
3372   ChangeStatus updateImpl(Attributor &A) override {
3373     // If the argument is readnone we are done as there are no accesses via the
3374     // argument.
3375     auto &MemBehaviorAA =
3376         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3377     if (MemBehaviorAA.isAssumedReadNone()) {
3378       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3379       return ChangeStatus::UNCHANGED;
3380     }
3381 
3382     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3383     const auto &NoAliasAA =
3384         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3385 
3386     AAResults *AAR = nullptr;
3387     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3388                                                NoAliasAA)) {
3389       LLVM_DEBUG(
3390           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3391       return ChangeStatus::UNCHANGED;
3392     }
3393 
3394     return indicatePessimisticFixpoint();
3395   }
3396 
3397   /// See AbstractAttribute::trackStatistics()
3398   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3399 };
3400 
3401 /// NoAlias attribute for function return value.
3402 struct AANoAliasReturned final : AANoAliasImpl {
3403   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3404       : AANoAliasImpl(IRP, A) {}
3405 
3406   /// See AbstractAttribute::initialize(...).
3407   void initialize(Attributor &A) override {
3408     AANoAliasImpl::initialize(A);
3409     Function *F = getAssociatedFunction();
3410     if (!F || F->isDeclaration())
3411       indicatePessimisticFixpoint();
3412   }
3413 
3414   /// See AbstractAttribute::updateImpl(...).
3415   virtual ChangeStatus updateImpl(Attributor &A) override {
3416 
3417     auto CheckReturnValue = [&](Value &RV) -> bool {
3418       if (Constant *C = dyn_cast<Constant>(&RV))
3419         if (C->isNullValue() || isa<UndefValue>(C))
3420           return true;
3421 
3422       /// For now, we can only deduce noalias if we have call sites.
3423       /// FIXME: add more support.
3424       if (!isa<CallBase>(&RV))
3425         return false;
3426 
3427       const IRPosition &RVPos = IRPosition::value(RV);
3428       const auto &NoAliasAA =
3429           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3430       if (!NoAliasAA.isAssumedNoAlias())
3431         return false;
3432 
3433       const auto &NoCaptureAA =
3434           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3435       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3436     };
3437 
3438     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3439       return indicatePessimisticFixpoint();
3440 
3441     return ChangeStatus::UNCHANGED;
3442   }
3443 
3444   /// See AbstractAttribute::trackStatistics()
3445   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3446 };
3447 
3448 /// NoAlias attribute deduction for a call site return value.
3449 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3450   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3451       : AANoAliasImpl(IRP, A) {}
3452 
3453   /// See AbstractAttribute::initialize(...).
3454   void initialize(Attributor &A) override {
3455     AANoAliasImpl::initialize(A);
3456     Function *F = getAssociatedFunction();
3457     if (!F || F->isDeclaration())
3458       indicatePessimisticFixpoint();
3459   }
3460 
3461   /// See AbstractAttribute::updateImpl(...).
3462   ChangeStatus updateImpl(Attributor &A) override {
3463     // TODO: Once we have call site specific value information we can provide
3464     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
3466     //       redirecting requests to the callee argument.
3467     Function *F = getAssociatedFunction();
3468     const IRPosition &FnPos = IRPosition::returned(*F);
3469     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3470     return clampStateAndIndicateChange(getState(), FnAA.getState());
3471   }
3472 
3473   /// See AbstractAttribute::trackStatistics()
3474   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3475 };
3476 } // namespace
3477 
3478 /// -------------------AAIsDead Function Attribute-----------------------
3479 
3480 namespace {
3481 struct AAIsDeadValueImpl : public AAIsDead {
3482   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3483 
3484   /// See AbstractAttribute::initialize(...).
3485   void initialize(Attributor &A) override {
3486     if (auto *Scope = getAnchorScope())
3487       if (!A.isRunOn(*Scope))
3488         indicatePessimisticFixpoint();
3489   }
3490 
3491   /// See AAIsDead::isAssumedDead().
3492   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3493 
3494   /// See AAIsDead::isKnownDead().
3495   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3496 
3497   /// See AAIsDead::isAssumedDead(BasicBlock *).
3498   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3499 
3500   /// See AAIsDead::isKnownDead(BasicBlock *).
3501   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3502 
3503   /// See AAIsDead::isAssumedDead(Instruction *I).
3504   bool isAssumedDead(const Instruction *I) const override {
3505     return I == getCtxI() && isAssumedDead();
3506   }
3507 
3508   /// See AAIsDead::isKnownDead(Instruction *I).
3509   bool isKnownDead(const Instruction *I) const override {
3510     return isAssumedDead(I) && isKnownDead();
3511   }
3512 
3513   /// See AbstractAttribute::getAsStr().
3514   virtual const std::string getAsStr() const override {
3515     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3516   }
3517 
3518   /// Check if all uses are assumed dead.
3519   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
    // Callers might not check the type; void has no uses.
3521     if (V.getType()->isVoidTy() || V.use_empty())
3522       return true;
3523 
3524     // If we replace a value with a constant there are no uses left afterwards.
3525     if (!isa<Constant>(V)) {
3526       if (auto *I = dyn_cast<Instruction>(&V))
3527         if (!A.isRunOn(*I->getFunction()))
3528           return false;
3529       bool UsedAssumedInformation = false;
3530       Optional<Constant *> C =
3531           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3532       if (!C.hasValue() || *C)
3533         return true;
3534     }
3535 
3536     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3537     // Explicitly set the dependence class to required because we want a long
3538     // chain of N dependent instructions to be considered live as soon as one is
3539     // without going through N update cycles. This is not required for
3540     // correctness.
3541     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3542                              DepClassTy::REQUIRED,
3543                              /* IgnoreDroppableUses */ false);
3544   }
3545 
3546   /// Determine if \p I is assumed to be side-effect free.
3547   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3548     if (!I || wouldInstructionBeTriviallyDead(I))
3549       return true;
3550 
3551     auto *CB = dyn_cast<CallBase>(I);
3552     if (!CB || isa<IntrinsicInst>(CB))
3553       return false;
3554 
3555     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3556     const auto &NoUnwindAA =
3557         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3558     if (!NoUnwindAA.isAssumedNoUnwind())
3559       return false;
3560     if (!NoUnwindAA.isKnownNoUnwind())
3561       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3562 
3563     bool IsKnown;
3564     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3565   }
3566 };
3567 
3568 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3569   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3570       : AAIsDeadValueImpl(IRP, A) {}
3571 
3572   /// See AbstractAttribute::initialize(...).
3573   void initialize(Attributor &A) override {
3574     AAIsDeadValueImpl::initialize(A);
3575 
3576     if (isa<UndefValue>(getAssociatedValue())) {
3577       indicatePessimisticFixpoint();
3578       return;
3579     }
3580 
3581     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3582     if (!isAssumedSideEffectFree(A, I)) {
3583       if (!isa_and_nonnull<StoreInst>(I))
3584         indicatePessimisticFixpoint();
3585       else
3586         removeAssumedBits(HAS_NO_EFFECT);
3587     }
3588   }
3589 
3590   bool isDeadStore(Attributor &A, StoreInst &SI) {
    // The LangRef now states that a volatile store is neither UB nor dead, so
    // let's skip them.
3592     if (SI.isVolatile())
3593       return false;
3594 
3595     bool UsedAssumedInformation = false;
3596     SmallSetVector<Value *, 4> PotentialCopies;
3597     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3598                                              UsedAssumedInformation))
3599       return false;
3600     return llvm::all_of(PotentialCopies, [&](Value *V) {
3601       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3602                              UsedAssumedInformation);
3603     });
3604   }
3605 
3606   /// See AbstractAttribute::getAsStr().
3607   const std::string getAsStr() const override {
3608     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3609     if (isa_and_nonnull<StoreInst>(I))
3610       if (isValidState())
3611         return "assumed-dead-store";
3612     return AAIsDeadValueImpl::getAsStr();
3613   }
3614 
3615   /// See AbstractAttribute::updateImpl(...).
3616   ChangeStatus updateImpl(Attributor &A) override {
3617     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3618     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3619       if (!isDeadStore(A, *SI))
3620         return indicatePessimisticFixpoint();
3621     } else {
3622       if (!isAssumedSideEffectFree(A, I))
3623         return indicatePessimisticFixpoint();
3624       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3625         return indicatePessimisticFixpoint();
3626     }
3627     return ChangeStatus::UNCHANGED;
3628   }
3629 
3630   bool isRemovableStore() const override {
3631     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3632   }
3633 
3634   /// See AbstractAttribute::manifest(...).
3635   ChangeStatus manifest(Attributor &A) override {
3636     Value &V = getAssociatedValue();
3637     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check
      // whether isAssumedSideEffectFree returns true here again because it
      // might not be the case: only the users may be dead while the
      // instruction (= call) is still needed.
3642       if (isa<StoreInst>(I) ||
3643           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3644         A.deleteAfterManifest(*I);
3645         return ChangeStatus::CHANGED;
3646       }
3647     }
3648     return ChangeStatus::UNCHANGED;
3649   }
3650 
3651   /// See AbstractAttribute::trackStatistics()
3652   void trackStatistics() const override {
3653     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3654   }
3655 };
3656 
3657 struct AAIsDeadArgument : public AAIsDeadFloating {
3658   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3659       : AAIsDeadFloating(IRP, A) {}
3660 
3661   /// See AbstractAttribute::initialize(...).
3662   void initialize(Attributor &A) override {
3663     AAIsDeadFloating::initialize(A);
3664     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3665       indicatePessimisticFixpoint();
3666   }
3667 
3668   /// See AbstractAttribute::manifest(...).
3669   ChangeStatus manifest(Attributor &A) override {
3670     Argument &Arg = *getAssociatedArgument();
3671     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3672       if (A.registerFunctionSignatureRewrite(
3673               Arg, /* ReplacementTypes */ {},
3674               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3675               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3676         return ChangeStatus::CHANGED;
3677       }
3678     return ChangeStatus::UNCHANGED;
3679   }
3680 
3681   /// See AbstractAttribute::trackStatistics()
3682   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3683 };
3684 
3685 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3686   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3687       : AAIsDeadValueImpl(IRP, A) {}
3688 
3689   /// See AbstractAttribute::initialize(...).
3690   void initialize(Attributor &A) override {
3691     AAIsDeadValueImpl::initialize(A);
3692     if (isa<UndefValue>(getAssociatedValue()))
3693       indicatePessimisticFixpoint();
3694   }
3695 
3696   /// See AbstractAttribute::updateImpl(...).
3697   ChangeStatus updateImpl(Attributor &A) override {
3698     // TODO: Once we have call site specific value information we can provide
3699     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
3701     //       redirecting requests to the callee argument.
3702     Argument *Arg = getAssociatedArgument();
3703     if (!Arg)
3704       return indicatePessimisticFixpoint();
3705     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3706     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3707     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3708   }
3709 
3710   /// See AbstractAttribute::manifest(...).
3711   ChangeStatus manifest(Attributor &A) override {
3712     CallBase &CB = cast<CallBase>(getAnchorValue());
3713     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3714     assert(!isa<UndefValue>(U.get()) &&
3715            "Expected undef values to be filtered out!");
3716     UndefValue &UV = *UndefValue::get(U->getType());
3717     if (A.changeUseAfterManifest(U, UV))
3718       return ChangeStatus::CHANGED;
3719     return ChangeStatus::UNCHANGED;
3720   }
3721 
3722   /// See AbstractAttribute::trackStatistics()
3723   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3724 };
3725 
3726 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3727   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3728       : AAIsDeadFloating(IRP, A) {}
3729 
3730   /// See AAIsDead::isAssumedDead().
3731   bool isAssumedDead() const override {
3732     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3733   }
3734 
3735   /// See AbstractAttribute::initialize(...).
3736   void initialize(Attributor &A) override {
3737     AAIsDeadFloating::initialize(A);
3738     if (isa<UndefValue>(getAssociatedValue())) {
3739       indicatePessimisticFixpoint();
3740       return;
3741     }
3742 
3743     // We track this separately as a secondary state.
3744     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3745   }
3746 
3747   /// See AbstractAttribute::updateImpl(...).
3748   ChangeStatus updateImpl(Attributor &A) override {
3749     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3750     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3751       IsAssumedSideEffectFree = false;
3752       Changed = ChangeStatus::CHANGED;
3753     }
3754     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3755       return indicatePessimisticFixpoint();
3756     return Changed;
3757   }
3758 
3759   /// See AbstractAttribute::trackStatistics()
3760   void trackStatistics() const override {
3761     if (IsAssumedSideEffectFree)
3762       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3763     else
3764       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3765   }
3766 
3767   /// See AbstractAttribute::getAsStr().
3768   const std::string getAsStr() const override {
3769     return isAssumedDead()
3770                ? "assumed-dead"
3771                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3772   }
3773 
3774 private:
3775   bool IsAssumedSideEffectFree = true;
3776 };
3777 
3778 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3779   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3780       : AAIsDeadValueImpl(IRP, A) {}
3781 
3782   /// See AbstractAttribute::updateImpl(...).
3783   ChangeStatus updateImpl(Attributor &A) override {
3784 
3785     bool UsedAssumedInformation = false;
3786     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3787                               {Instruction::Ret}, UsedAssumedInformation);
3788 
3789     auto PredForCallSite = [&](AbstractCallSite ACS) {
3790       if (ACS.isCallbackCall() || !ACS.getInstruction())
3791         return false;
3792       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3793     };
3794 
3795     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3796                                 UsedAssumedInformation))
3797       return indicatePessimisticFixpoint();
3798 
3799     return ChangeStatus::UNCHANGED;
3800   }
3801 
3802   /// See AbstractAttribute::manifest(...).
3803   ChangeStatus manifest(Attributor &A) override {
3804     // TODO: Rewrite the signature to return void?
3805     bool AnyChange = false;
3806     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3807     auto RetInstPred = [&](Instruction &I) {
3808       ReturnInst &RI = cast<ReturnInst>(I);
3809       if (!isa<UndefValue>(RI.getReturnValue()))
3810         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3811       return true;
3812     };
3813     bool UsedAssumedInformation = false;
3814     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3815                               UsedAssumedInformation);
3816     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3817   }
3818 
3819   /// See AbstractAttribute::trackStatistics()
3820   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3821 };
3822 
3823 struct AAIsDeadFunction : public AAIsDead {
3824   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3825 
3826   /// See AbstractAttribute::initialize(...).
3827   void initialize(Attributor &A) override {
3828     Function *F = getAnchorScope();
3829     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3830       indicatePessimisticFixpoint();
3831       return;
3832     }
3833     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3834     assumeLive(A, F->getEntryBlock());
3835   }
3836 
3837   /// See AbstractAttribute::getAsStr().
3838   const std::string getAsStr() const override {
3839     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3840            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3841            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3842            std::to_string(KnownDeadEnds.size()) + "]";
3843   }
3844 
3845   /// See AbstractAttribute::manifest(...).
3846   ChangeStatus manifest(Attributor &A) override {
3847     assert(getState().isValidState() &&
3848            "Attempted to manifest an invalid state!");
3849 
3850     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3851     Function &F = *getAnchorScope();
3852 
3853     if (AssumedLiveBlocks.empty()) {
3854       A.deleteAfterManifest(F);
3855       return ChangeStatus::CHANGED;
3856     }
3857 
3858     // Flag to determine if we can change an invoke to a call assuming the
3859     // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
3861     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3862 
3863     KnownDeadEnds.set_union(ToBeExploredFrom);
3864     for (const Instruction *DeadEndI : KnownDeadEnds) {
3865       auto *CB = dyn_cast<CallBase>(DeadEndI);
3866       if (!CB)
3867         continue;
3868       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3869           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3870       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3871       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3872         continue;
3873 
3874       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3875         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3876       else
3877         A.changeToUnreachableAfterManifest(
3878             const_cast<Instruction *>(DeadEndI->getNextNode()));
3879       HasChanged = ChangeStatus::CHANGED;
3880     }
3881 
3882     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3883     for (BasicBlock &BB : F)
3884       if (!AssumedLiveBlocks.count(&BB)) {
3885         A.deleteAfterManifest(BB);
3886         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3887         HasChanged = ChangeStatus::CHANGED;
3888       }
3889 
3890     return HasChanged;
3891   }
3892 
3893   /// See AbstractAttribute::updateImpl(...).
3894   ChangeStatus updateImpl(Attributor &A) override;
3895 
3896   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3897     assert(From->getParent() == getAnchorScope() &&
3898            To->getParent() == getAnchorScope() &&
3899            "Used AAIsDead of the wrong function");
3900     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3901   }
3902 
3903   /// See AbstractAttribute::trackStatistics()
3904   void trackStatistics() const override {}
3905 
3906   /// Returns true if the function is assumed dead.
3907   bool isAssumedDead() const override { return false; }
3908 
3909   /// See AAIsDead::isKnownDead().
3910   bool isKnownDead() const override { return false; }
3911 
3912   /// See AAIsDead::isAssumedDead(BasicBlock *).
3913   bool isAssumedDead(const BasicBlock *BB) const override {
3914     assert(BB->getParent() == getAnchorScope() &&
3915            "BB must be in the same anchor scope function.");
3916 
3917     if (!getAssumed())
3918       return false;
3919     return !AssumedLiveBlocks.count(BB);
3920   }
3921 
3922   /// See AAIsDead::isKnownDead(BasicBlock *).
3923   bool isKnownDead(const BasicBlock *BB) const override {
3924     return getKnown() && isAssumedDead(BB);
3925   }
3926 
  /// See AAIsDead::isAssumedDead(Instruction *I).
3928   bool isAssumedDead(const Instruction *I) const override {
3929     assert(I->getParent()->getParent() == getAnchorScope() &&
3930            "Instruction must be in the same anchor scope function.");
3931 
3932     if (!getAssumed())
3933       return false;
3934 
    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
3937     if (!AssumedLiveBlocks.count(I->getParent()))
3938       return true;
3939 
3940     // If it is not after a liveness barrier it is live.
3941     const Instruction *PrevI = I->getPrevNode();
3942     while (PrevI) {
3943       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3944         return true;
3945       PrevI = PrevI->getPrevNode();
3946     }
3947     return false;
3948   }
3949 
3950   /// See AAIsDead::isKnownDead(Instruction *I).
3951   bool isKnownDead(const Instruction *I) const override {
3952     return getKnown() && isAssumedDead(I);
3953   }
3954 
3955   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
3957   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3958     if (!AssumedLiveBlocks.insert(&BB).second)
3959       return false;
3960 
3961     // We assume that all of BB is (probably) live now and if there are calls to
3962     // internal functions we will assume that those are now live as well. This
3963     // is a performance optimization for blocks with calls to a lot of internal
3964     // functions. It can however cause dead functions to be treated as live.
3965     for (const Instruction &I : BB)
3966       if (const auto *CB = dyn_cast<CallBase>(&I))
3967         if (const Function *F = CB->getCalledFunction())
3968           if (F->hasLocalLinkage())
3969             A.markLiveInternalFunction(*F);
3970     return true;
3971   }
3972 
3973   /// Collection of instructions that need to be explored again, e.g., we
3974   /// did assume they do not transfer control to (one of their) successors.
3975   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3976 
3977   /// Collection of instructions that are known to not transfer control.
3978   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3979 
  /// Collection of all assumed live edges.
3981   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3982 
3983   /// Collection of all assumed live BasicBlocks.
3984   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3985 };
3986 
3987 static bool
3988 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3989                         AbstractAttribute &AA,
3990                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3991   const IRPosition &IPos = IRPosition::callsite_function(CB);
3992 
3993   const auto &NoReturnAA =
3994       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3995   if (NoReturnAA.isAssumedNoReturn())
3996     return !NoReturnAA.isKnownNoReturn();
3997   if (CB.isTerminator())
3998     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3999   else
4000     AliveSuccessors.push_back(CB.getNextNode());
4001   return false;
4002 }
4003 
4004 static bool
4005 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4006                         AbstractAttribute &AA,
4007                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4008   bool UsedAssumedInformation =
4009       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4010 
4011   // First, determine if we can change an invoke to a call assuming the
4012   // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
4014   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4015     AliveSuccessors.push_back(&II.getUnwindDest()->front());
4016   } else {
4017     const IRPosition &IPos = IRPosition::callsite_function(II);
4018     const auto &AANoUnw =
4019         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4020     if (AANoUnw.isAssumedNoUnwind()) {
4021       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4022     } else {
4023       AliveSuccessors.push_back(&II.getUnwindDest()->front());
4024     }
4025   }
4026   return UsedAssumedInformation;
4027 }
4028 
4029 static bool
4030 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4031                         AbstractAttribute &AA,
4032                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4033   bool UsedAssumedInformation = false;
4034   if (BI.getNumSuccessors() == 1) {
4035     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4036   } else {
4037     Optional<Constant *> C =
4038         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4039     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4040       // No value yet, assume both edges are dead.
4041     } else if (isa_and_nonnull<ConstantInt>(*C)) {
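      // A branch on a constant condition takes successor 0 for true (1) and
      // successor 1 for false (0), hence the `1 - value` indexing below.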
4042       const BasicBlock *SuccBB =
4043           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4044       AliveSuccessors.push_back(&SuccBB->front());
4045     } else {
4046       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4047       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4048       UsedAssumedInformation = false;
4049     }
4050   }
4051   return UsedAssumedInformation;
4052 }
4053 
4054 static bool
4055 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4056                         AbstractAttribute &AA,
4057                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4058   bool UsedAssumedInformation = false;
4059   Optional<Constant *> C =
4060       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4061   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4062     // No value yet, assume all edges are dead.
4063   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
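    // ConstantInt values are uniqued, so comparing the case value pointer
    // against the simplified condition constant is sufficient here.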
4064     for (auto &CaseIt : SI.cases()) {
4065       if (CaseIt.getCaseValue() == C.getValue()) {
4066         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4067         return UsedAssumedInformation;
4068       }
4069     }
4070     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4071     return UsedAssumedInformation;
4072   } else {
4073     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4074       AliveSuccessors.push_back(&SuccBB->front());
4075   }
4076   return UsedAssumedInformation;
4077 }
4078 
4079 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4080   ChangeStatus Change = ChangeStatus::UNCHANGED;
4081 
4082   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4083                     << getAnchorScope()->size() << "] BBs and "
4084                     << ToBeExploredFrom.size() << " exploration points and "
4085                     << KnownDeadEnds.size() << " known dead ends\n");
4086 
4087   // Copy and clear the list of instructions we need to explore from. It is
4088   // refilled with instructions the next update has to look at.
4089   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4090                                                ToBeExploredFrom.end());
4091   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4092 
4093   SmallVector<const Instruction *, 8> AliveSuccessors;
4094   while (!Worklist.empty()) {
4095     const Instruction *I = Worklist.pop_back_val();
4096     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4097 
    // Fast forward over uninteresting instructions. We could look for UB here
    // though.
4100     while (!I->isTerminator() && !isa<CallBase>(I))
4101       I = I->getNextNode();
4102 
4103     AliveSuccessors.clear();
4104 
4105     bool UsedAssumedInformation = false;
4106     switch (I->getOpcode()) {
4107     // TODO: look for (assumed) UB to backwards propagate "deadness".
4108     default:
4109       assert(I->isTerminator() &&
4110              "Expected non-terminators to be handled already!");
4111       for (const BasicBlock *SuccBB : successors(I->getParent()))
4112         AliveSuccessors.push_back(&SuccBB->front());
4113       break;
4114     case Instruction::Call:
4115       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4116                                                        *this, AliveSuccessors);
4117       break;
4118     case Instruction::Invoke:
4119       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4120                                                        *this, AliveSuccessors);
4121       break;
4122     case Instruction::Br:
4123       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4124                                                        *this, AliveSuccessors);
4125       break;
4126     case Instruction::Switch:
4127       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4128                                                        *this, AliveSuccessors);
4129       break;
4130     }
4131 
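    // Instructions for which we relied on assumed information have to be
    // revisited; otherwise, if no successor is alive or a terminator lost at
    // least one successor, remember the instruction as a known dead end.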
4132     if (UsedAssumedInformation) {
4133       NewToBeExploredFrom.insert(I);
4134     } else if (AliveSuccessors.empty() ||
4135                (I->isTerminator() &&
4136                 AliveSuccessors.size() < I->getNumSuccessors())) {
4137       if (KnownDeadEnds.insert(I))
4138         Change = ChangeStatus::CHANGED;
4139     }
4140 
4141     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4142                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4143                       << UsedAssumedInformation << "\n");
4144 
4145     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4146       if (!I->isTerminator()) {
4147         assert(AliveSuccessors.size() == 1 &&
4148                "Non-terminator expected to have a single successor!");
4149         Worklist.push_back(AliveSuccessor);
4150       } else {
        // Record the assumed live edge.
4152         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4153         if (AssumedLiveEdges.insert(Edge).second)
4154           Change = ChangeStatus::CHANGED;
4155         if (assumeLive(A, *AliveSuccessor->getParent()))
4156           Worklist.push_back(AliveSuccessor);
4157       }
4158     }
4159   }
4160 
4161   // Check if the content of ToBeExploredFrom changed, ignore the order.
4162   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4163       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4164         return !ToBeExploredFrom.count(I);
4165       })) {
4166     Change = ChangeStatus::CHANGED;
4167     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4168   }
4169 
  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled any unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
4176   if (ToBeExploredFrom.empty() &&
4177       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4178       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4179         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4180       }))
4181     return indicatePessimisticFixpoint();
4182   return Change;
4183 }
4184 
/// Liveness information for a call site.
4186 struct AAIsDeadCallSite final : AAIsDeadFunction {
4187   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4188       : AAIsDeadFunction(IRP, A) {}
4189 
4190   /// See AbstractAttribute::initialize(...).
4191   void initialize(Attributor &A) override {
4192     // TODO: Once we have call site specific value information we can provide
4193     //       call site specific liveness information and then it makes
4194     //       sense to specialize attributes for call sites instead of
4195     //       redirecting requests to the callee.
4196     llvm_unreachable("Abstract attributes for liveness are not "
4197                      "supported for call sites yet!");
4198   }
4199 
4200   /// See AbstractAttribute::updateImpl(...).
4201   ChangeStatus updateImpl(Attributor &A) override {
4202     return indicatePessimisticFixpoint();
4203   }
4204 
4205   /// See AbstractAttribute::trackStatistics()
4206   void trackStatistics() const override {}
4207 };
4208 } // namespace
4209 
4210 /// -------------------- Dereferenceable Argument Attribute --------------------
4211 
4212 namespace {
4213 struct AADereferenceableImpl : AADereferenceable {
4214   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4215       : AADereferenceable(IRP, A) {}
4216   using StateType = DerefState;
4217 
4218   /// See AbstractAttribute::initialize(...).
4219   void initialize(Attributor &A) override {
4220     Value &V = *getAssociatedValue().stripPointerCasts();
4221     SmallVector<Attribute, 4> Attrs;
4222     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4223              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4224     for (const Attribute &Attr : Attrs)
4225       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4226 
4227     const IRPosition &IRP = this->getIRPosition();
4228     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4229 
4230     bool CanBeNull, CanBeFreed;
4231     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4232         A.getDataLayout(), CanBeNull, CanBeFreed));
4233 
4234     bool IsFnInterface = IRP.isFnInterfaceKind();
4235     Function *FnScope = IRP.getAnchorScope();
4236     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4237       indicatePessimisticFixpoint();
4238       return;
4239     }
4240 
4241     if (Instruction *CtxI = getCtxI())
4242       followUsesInMBEC(*this, A, getState(), *CtxI);
4243   }
4244 
4245   /// See AbstractAttribute::getState()
4246   /// {
4247   StateType &getState() override { return *this; }
4248   const StateType &getState() const override { return *this; }
4249   /// }
4250 
4251   /// Helper function for collecting accessed bytes in must-be-executed-context
4252   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4253                               DerefState &State) {
4254     const Value *UseV = U->get();
4255     if (!UseV->getType()->isPointerTy())
4256       return;
4257 
4258     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4259     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4260       return;
4261 
4262     int64_t Offset;
4263     const Value *Base = GetPointerBaseWithConstantOffset(
4264         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4265     if (Base && Base == &getAssociatedValue())
4266       State.addAccessedBytes(Offset, Loc->Size.getValue());
4267   }
4268 
4269   /// See followUsesInMBEC
4270   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4271                        AADereferenceable::StateType &State) {
4272     bool IsNonNull = false;
4273     bool TrackUse = false;
4274     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4275         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4276     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4277                       << " for instruction " << *I << "\n");
4278 
4279     addAccessedBytesForUse(A, U, I, State);
4280     State.takeKnownDerefBytesMaximum(DerefBytes);
4281     return TrackUse;
4282   }
4283 
4284   /// See AbstractAttribute::manifest(...).
4285   ChangeStatus manifest(Attributor &A) override {
4286     ChangeStatus Change = AADereferenceable::manifest(A);
4287     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4288       removeAttrs({Attribute::DereferenceableOrNull});
4289       return ChangeStatus::CHANGED;
4290     }
4291     return Change;
4292   }
4293 
4294   void getDeducedAttributes(LLVMContext &Ctx,
4295                             SmallVectorImpl<Attribute> &Attrs) const override {
4296     // TODO: Add *_globally support
4297     if (isAssumedNonNull())
4298       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4299           Ctx, getAssumedDereferenceableBytes()));
4300     else
4301       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4302           Ctx, getAssumedDereferenceableBytes()));
4303   }
4304 
4305   /// See AbstractAttribute::getAsStr().
4306   const std::string getAsStr() const override {
4307     if (!getAssumedDereferenceableBytes())
4308       return "unknown-dereferenceable";
4309     return std::string("dereferenceable") +
4310            (isAssumedNonNull() ? "" : "_or_null") +
4311            (isAssumedGlobal() ? "_globally" : "") + "<" +
4312            std::to_string(getKnownDereferenceableBytes()) + "-" +
4313            std::to_string(getAssumedDereferenceableBytes()) + ">";
4314   }
4315 };
4316 
4317 /// Dereferenceable attribute for a floating value.
4318 struct AADereferenceableFloating : AADereferenceableImpl {
4319   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4320       : AADereferenceableImpl(IRP, A) {}
4321 
4322   /// See AbstractAttribute::updateImpl(...).
4323   ChangeStatus updateImpl(Attributor &A) override {
4324     const DataLayout &DL = A.getDataLayout();
4325 
4326     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4327                             bool Stripped) -> bool {
4328       unsigned IdxWidth =
4329           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4330       APInt Offset(IdxWidth, 0);
4331       const Value *Base = stripAndAccumulateOffsets(
4332           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4333           /* AllowNonInbounds */ true);
4334 
4335       const auto &AA = A.getAAFor<AADereferenceable>(
4336           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4337       int64_t DerefBytes = 0;
4338       if (!Stripped && this == &AA) {
4339         // Use IR information if we did not strip anything.
4340         // TODO: track globally.
4341         bool CanBeNull, CanBeFreed;
4342         DerefBytes =
4343             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4344         T.GlobalState.indicatePessimisticFixpoint();
4345       } else {
4346         const DerefState &DS = AA.getState();
4347         DerefBytes = DS.DerefBytesState.getAssumed();
4348         T.GlobalState &= DS.GlobalState;
4349       }
4350 
      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
4354       int64_t OffsetSExt = Offset.getSExtValue();
4355       if (OffsetSExt < 0)
4356         OffsetSExt = 0;
4357 
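      // E.g., a base known dereferenceable(16) accessed at constant offset 8
      // still provides 16 - 8 = 8 assumed dereferenceable bytes here.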
4358       T.takeAssumedDerefBytesMinimum(
4359           std::max(int64_t(0), DerefBytes - OffsetSExt));
4360 
4361       if (this == &AA) {
4362         if (!Stripped) {
          // If nothing was stripped, the IR information is all we have.
4364           T.takeKnownDerefBytesMaximum(
4365               std::max(int64_t(0), DerefBytes - OffsetSExt));
4366           T.indicatePessimisticFixpoint();
4367         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning, we look
          // at the offset. If it is positive, we basically decrease the
          // dereferenceable bytes in a circular loop now, which would drive
          // them down to the known value only very slowly; indicating a
          // pessimistic fixpoint accelerates this.
4373           T.indicatePessimisticFixpoint();
4374         }
4375       }
4376 
4377       return T.isValidState();
4378     };
4379 
4380     DerefState T;
4381     bool UsedAssumedInformation = false;
4382     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4383                                            VisitValueCB, getCtxI(),
4384                                            UsedAssumedInformation))
4385       return indicatePessimisticFixpoint();
4386 
4387     return clampStateAndIndicateChange(getState(), T);
4388   }
4389 
4390   /// See AbstractAttribute::trackStatistics()
4391   void trackStatistics() const override {
4392     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4393   }
4394 };
4395 
4396 /// Dereferenceable attribute for a return value.
4397 struct AADereferenceableReturned final
4398     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4399   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4400       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4401             IRP, A) {}
4402 
4403   /// See AbstractAttribute::trackStatistics()
4404   void trackStatistics() const override {
4405     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4406   }
4407 };
4408 
4409 /// Dereferenceable attribute for an argument
4410 struct AADereferenceableArgument final
4411     : AAArgumentFromCallSiteArguments<AADereferenceable,
4412                                       AADereferenceableImpl> {
4413   using Base =
4414       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4415   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4416       : Base(IRP, A) {}
4417 
4418   /// See AbstractAttribute::trackStatistics()
4419   void trackStatistics() const override {
4420     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4421   }
4422 };
4423 
4424 /// Dereferenceable attribute for a call site argument.
4425 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4426   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4427       : AADereferenceableFloating(IRP, A) {}
4428 
4429   /// See AbstractAttribute::trackStatistics()
4430   void trackStatistics() const override {
4431     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4432   }
4433 };
4434 
4435 /// Dereferenceable attribute deduction for a call site return value.
4436 struct AADereferenceableCallSiteReturned final
4437     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4438   using Base =
4439       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4440   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4441       : Base(IRP, A) {}
4442 
4443   /// See AbstractAttribute::trackStatistics()
4444   void trackStatistics() const override {
4445     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4446   }
4447 };
4448 } // namespace
4449 
4450 // ------------------------ Align Argument Attribute ------------------------
4451 
4452 namespace {
4453 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4454                                     Value &AssociatedValue, const Use *U,
4455                                     const Instruction *I, bool &TrackUse) {
4456   // We need to follow common pointer manipulation uses to the accesses they
4457   // feed into.
4458   if (isa<CastInst>(I)) {
4459     // Follow all but ptr2int casts.
4460     TrackUse = !isa<PtrToIntInst>(I);
4461     return 0;
4462   }
4463   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4464     if (GEP->hasAllConstantIndices())
4465       TrackUse = true;
4466     return 0;
4467   }
4468 
4469   MaybeAlign MA;
4470   if (const auto *CB = dyn_cast<CallBase>(I)) {
4471     if (CB->isBundleOperand(U) || CB->isCallee(U))
4472       return 0;
4473 
4474     unsigned ArgNo = CB->getArgOperandNo(U);
4475     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4476     // As long as we only use known information there is no need to track
4477     // dependences here.
4478     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4479     MA = MaybeAlign(AlignAA.getKnownAlign());
4480   }
4481 
4482   const DataLayout &DL = A.getDataLayout();
4483   const Value *UseV = U->get();
4484   if (auto *SI = dyn_cast<StoreInst>(I)) {
4485     if (SI->getPointerOperand() == UseV)
4486       MA = SI->getAlign();
4487   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4488     if (LI->getPointerOperand() == UseV)
4489       MA = LI->getAlign();
4490   }
4491 
4492   if (!MA || *MA <= QueryingAA.getKnownAlign())
4493     return 0;
4494 
4495   unsigned Alignment = MA->value();
4496   int64_t Offset;
4497 
4498   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4499     if (Base == &AssociatedValue) {
4500       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4501       // So we can say that the maximum power of two which is a divisor of
4502       // gcd(Offset, Alignment) is an alignment.
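      // E.g., an access with alignment 16 at a constant offset of 4 from the
      // associated value only justifies an alignment of 4 for the value.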
4503 
4504       uint32_t gcd =
4505           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4506       Alignment = llvm::PowerOf2Floor(gcd);
4507     }
4508   }
4509 
4510   return Alignment;
4511 }
4512 
4513 struct AAAlignImpl : AAAlign {
4514   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4515 
4516   /// See AbstractAttribute::initialize(...).
4517   void initialize(Attributor &A) override {
4518     SmallVector<Attribute, 4> Attrs;
4519     getAttrs({Attribute::Alignment}, Attrs);
4520     for (const Attribute &Attr : Attrs)
4521       takeKnownMaximum(Attr.getValueAsInt());
4522 
4523     Value &V = *getAssociatedValue().stripPointerCasts();
4524     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4525 
4526     if (getIRPosition().isFnInterfaceKind() &&
4527         (!getAnchorScope() ||
4528          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4529       indicatePessimisticFixpoint();
4530       return;
4531     }
4532 
4533     if (Instruction *CtxI = getCtxI())
4534       followUsesInMBEC(*this, A, getState(), *CtxI);
4535   }
4536 
4537   /// See AbstractAttribute::manifest(...).
4538   ChangeStatus manifest(Attributor &A) override {
4539     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4540 
4541     // Check for users that allow alignment annotations.
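    // Loads and stores through the associated value can be annotated with the
    // improved alignment directly, independent of whether the alignment
    // attribute itself is manifested below.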
4542     Value &AssociatedValue = getAssociatedValue();
4543     for (const Use &U : AssociatedValue.uses()) {
4544       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4545         if (SI->getPointerOperand() == &AssociatedValue)
4546           if (SI->getAlignment() < getAssumedAlign()) {
4547             STATS_DECLTRACK(AAAlign, Store,
4548                             "Number of times alignment added to a store");
4549             SI->setAlignment(Align(getAssumedAlign()));
4550             LoadStoreChanged = ChangeStatus::CHANGED;
4551           }
4552       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4553         if (LI->getPointerOperand() == &AssociatedValue)
4554           if (LI->getAlignment() < getAssumedAlign()) {
4555             LI->setAlignment(Align(getAssumedAlign()));
4556             STATS_DECLTRACK(AAAlign, Load,
4557                             "Number of times alignment added to a load");
4558             LoadStoreChanged = ChangeStatus::CHANGED;
4559           }
4560       }
4561     }
4562 
4563     ChangeStatus Changed = AAAlign::manifest(A);
4564 
4565     Align InheritAlign =
4566         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4567     if (InheritAlign >= getAssumedAlign())
4568       return LoadStoreChanged;
4569     return Changed | LoadStoreChanged;
4570   }
4571 
  // TODO: Provide a helper to determine the implied ABI alignment and check
  //       that value in the existing manifest method and a new one for
  //       AAAlignImpl, to avoid making the alignment explicit if it did not
  //       improve.
4575 
4576   /// See AbstractAttribute::getDeducedAttributes
4577   virtual void
4578   getDeducedAttributes(LLVMContext &Ctx,
4579                        SmallVectorImpl<Attribute> &Attrs) const override {
4580     if (getAssumedAlign() > 1)
4581       Attrs.emplace_back(
4582           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4583   }
4584 
4585   /// See followUsesInMBEC
4586   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4587                        AAAlign::StateType &State) {
4588     bool TrackUse = false;
4589 
4590     unsigned int KnownAlign =
4591         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4592     State.takeKnownMaximum(KnownAlign);
4593 
4594     return TrackUse;
4595   }
4596 
4597   /// See AbstractAttribute::getAsStr().
4598   const std::string getAsStr() const override {
4599     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4600                                 "-" + std::to_string(getAssumedAlign()) + ">")
4601                              : "unknown-align";
4602   }
4603 };
4604 
4605 /// Align attribute for a floating value.
4606 struct AAAlignFloating : AAAlignImpl {
4607   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4608 
4609   /// See AbstractAttribute::updateImpl(...).
4610   ChangeStatus updateImpl(Attributor &A) override {
4611     const DataLayout &DL = A.getDataLayout();
4612 
4613     auto VisitValueCB = [&](Value &V, const Instruction *,
4614                             AAAlign::StateType &T, bool Stripped) -> bool {
4615       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4616         return true;
4617       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4618                                            DepClassTy::REQUIRED);
4619       if (!Stripped && this == &AA) {
4620         int64_t Offset;
4621         unsigned Alignment = 1;
4622         if (const Value *Base =
4623                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4624           // TODO: Use AAAlign for the base too.
4625           Align PA = Base->getPointerAlignment(DL);
4626           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4627           // So we can say that the maximum power of two which is a divisor of
4628           // gcd(Offset, Alignment) is an alignment.
4629 
4630           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4631                                                uint32_t(PA.value()));
4632           Alignment = llvm::PowerOf2Floor(gcd);
4633         } else {
4634           Alignment = V.getPointerAlignment(DL).value();
4635         }
4636         // Use only IR information if we did not strip anything.
4637         T.takeKnownMaximum(Alignment);
4638         T.indicatePessimisticFixpoint();
4639       } else {
4640         // Use abstract attribute information.
4641         const AAAlign::StateType &DS = AA.getState();
4642         T ^= DS;
4643       }
4644       return T.isValidState();
4645     };
4646 
4647     StateType T;
4648     bool UsedAssumedInformation = false;
4649     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4650                                           VisitValueCB, getCtxI(),
4651                                           UsedAssumedInformation))
4652       return indicatePessimisticFixpoint();
4653 
    // TODO: If we know we visited all incoming values, and thus none are
    //       assumed dead, we can take the known information from the state T.
4656     return clampStateAndIndicateChange(getState(), T);
4657   }
4658 
4659   /// See AbstractAttribute::trackStatistics()
4660   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4661 };
4662 
4663 /// Align attribute for function return value.
4664 struct AAAlignReturned final
4665     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4666   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4667   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4668 
4669   /// See AbstractAttribute::initialize(...).
4670   void initialize(Attributor &A) override {
4671     Base::initialize(A);
4672     Function *F = getAssociatedFunction();
4673     if (!F || F->isDeclaration())
4674       indicatePessimisticFixpoint();
4675   }
4676 
4677   /// See AbstractAttribute::trackStatistics()
4678   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4679 };
4680 
4681 /// Align attribute for function argument.
4682 struct AAAlignArgument final
4683     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4684   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4685   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4686 
4687   /// See AbstractAttribute::manifest(...).
4688   ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in-sync. It just does not seem worth the trouble right now.
4692     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4693       return ChangeStatus::UNCHANGED;
4694     return Base::manifest(A);
4695   }
4696 
4697   /// See AbstractAttribute::trackStatistics()
4698   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4699 };
4700 
4701 struct AAAlignCallSiteArgument final : AAAlignFloating {
4702   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4703       : AAAlignFloating(IRP, A) {}
4704 
4705   /// See AbstractAttribute::manifest(...).
4706   ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in-sync. It just does not seem worth the trouble right now.
4710     if (Argument *Arg = getAssociatedArgument())
4711       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4712         return ChangeStatus::UNCHANGED;
4713     ChangeStatus Changed = AAAlignImpl::manifest(A);
4714     Align InheritAlign =
4715         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4716     if (InheritAlign >= getAssumedAlign())
4717       Changed = ChangeStatus::UNCHANGED;
4718     return Changed;
4719   }
4720 
4721   /// See AbstractAttribute::updateImpl(Attributor &A).
4722   ChangeStatus updateImpl(Attributor &A) override {
4723     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4724     if (Argument *Arg = getAssociatedArgument()) {
4725       // We only take known information from the argument
4726       // so we do not need to track a dependence.
4727       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4728           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4729       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4730     }
4731     return Changed;
4732   }
4733 
4734   /// See AbstractAttribute::trackStatistics()
4735   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4736 };
4737 
4738 /// Align attribute deduction for a call site return value.
4739 struct AAAlignCallSiteReturned final
4740     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4741   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4742   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4743       : Base(IRP, A) {}
4744 
4745   /// See AbstractAttribute::initialize(...).
4746   void initialize(Attributor &A) override {
4747     Base::initialize(A);
4748     Function *F = getAssociatedFunction();
4749     if (!F || F->isDeclaration())
4750       indicatePessimisticFixpoint();
4751   }
4752 
4753   /// See AbstractAttribute::trackStatistics()
4754   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4755 };
4756 } // namespace
4757 
4758 /// ------------------ Function No-Return Attribute ----------------------------
4759 namespace {
4760 struct AANoReturnImpl : public AANoReturn {
4761   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4762 
4763   /// See AbstractAttribute::initialize(...).
4764   void initialize(Attributor &A) override {
4765     AANoReturn::initialize(A);
4766     Function *F = getAssociatedFunction();
4767     if (!F || F->isDeclaration())
4768       indicatePessimisticFixpoint();
4769   }
4770 
4771   /// See AbstractAttribute::getAsStr().
4772   const std::string getAsStr() const override {
4773     return getAssumed() ? "noreturn" : "may-return";
4774   }
4775 
4776   /// See AbstractAttribute::updateImpl(Attributor &A).
4777   virtual ChangeStatus updateImpl(Attributor &A) override {
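    // The predicate returns false for every (live) return instruction, so the
    // check only succeeds for functions without any live returns.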
4778     auto CheckForNoReturn = [](Instruction &) { return false; };
4779     bool UsedAssumedInformation = false;
4780     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4781                                    {(unsigned)Instruction::Ret},
4782                                    UsedAssumedInformation))
4783       return indicatePessimisticFixpoint();
4784     return ChangeStatus::UNCHANGED;
4785   }
4786 };
4787 
4788 struct AANoReturnFunction final : AANoReturnImpl {
4789   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4790       : AANoReturnImpl(IRP, A) {}
4791 
4792   /// See AbstractAttribute::trackStatistics()
4793   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4794 };
4795 
/// NoReturn attribute deduction for a call site.
4797 struct AANoReturnCallSite final : AANoReturnImpl {
4798   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4799       : AANoReturnImpl(IRP, A) {}
4800 
4801   /// See AbstractAttribute::initialize(...).
4802   void initialize(Attributor &A) override {
4803     AANoReturnImpl::initialize(A);
4804     if (Function *F = getAssociatedFunction()) {
4805       const IRPosition &FnPos = IRPosition::function(*F);
4806       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4807       if (!FnAA.isAssumedNoReturn())
4808         indicatePessimisticFixpoint();
4809     }
4810   }
4811 
4812   /// See AbstractAttribute::updateImpl(...).
4813   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
4818     Function *F = getAssociatedFunction();
4819     const IRPosition &FnPos = IRPosition::function(*F);
4820     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4821     return clampStateAndIndicateChange(getState(), FnAA.getState());
4822   }
4823 
4824   /// See AbstractAttribute::trackStatistics()
4825   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4826 };
4827 } // namespace
4828 
4829 /// ----------------------- Instance Info ---------------------------------
4830 
4831 namespace {
/// A class to hold the state of instance info attributes.
4833 struct AAInstanceInfoImpl : public AAInstanceInfo {
4834   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4835       : AAInstanceInfo(IRP, A) {}
4836 
4837   /// See AbstractAttribute::initialize(...).
4838   void initialize(Attributor &A) override {
4839     Value &V = getAssociatedValue();
4840     if (auto *C = dyn_cast<Constant>(&V)) {
4841       if (C->isThreadDependent())
4842         indicatePessimisticFixpoint();
4843       else
4844         indicateOptimisticFixpoint();
4845       return;
4846     }
4847     if (auto *CB = dyn_cast<CallBase>(&V))
4848       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4849           !CB->mayReadFromMemory()) {
4850         indicateOptimisticFixpoint();
4851         return;
4852       }
4853   }
4854 
4855   /// See AbstractAttribute::updateImpl(...).
4856   ChangeStatus updateImpl(Attributor &A) override {
4857     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4858 
4859     Value &V = getAssociatedValue();
4860     const Function *Scope = nullptr;
4861     if (auto *I = dyn_cast<Instruction>(&V))
4862       Scope = I->getFunction();
4863     if (auto *A = dyn_cast<Argument>(&V)) {
4864       Scope = A->getParent();
4865       if (!Scope->hasLocalLinkage())
4866         return Changed;
4867     }
4868     if (!Scope)
4869       return indicateOptimisticFixpoint();
4870 
4871     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4872         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4873     if (NoRecurseAA.isAssumedNoRecurse())
4874       return Changed;
4875 
4876     auto UsePred = [&](const Use &U, bool &Follow) {
4877       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4878       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4879           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4880         Follow = true;
4881         return true;
4882       }
4883       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4884           (isa<StoreInst>(UserI) &&
4885            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4886         return true;
4887       if (auto *CB = dyn_cast<CallBase>(UserI)) {
        // This check does not guarantee uniqueness, but it ensures (for now)
        // that we cannot end up with two versions of \p U while thinking it
        // was one.
4890         if (!CB->getCalledFunction() ||
4891             !CB->getCalledFunction()->hasLocalLinkage())
4892           return true;
4893         if (!CB->isArgOperand(&U))
4894           return false;
4895         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4896             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4897             DepClassTy::OPTIONAL);
4898         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4899           return false;
4900         // If this call base might reach the scope again we might forward the
4901         // argument back here. This is very conservative.
4902         if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4903           return false;
4904         return true;
4905       }
4906       return false;
4907     };
4908 
4909     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4910       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4911         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4912         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4913           return true;
4914         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4915             *SI->getFunction());
4916         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4917           return true;
4918       }
4919       return false;
4920     };
4921 
4922     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4923                            DepClassTy::OPTIONAL,
4924                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4925       return indicatePessimisticFixpoint();
4926 
4927     return Changed;
4928   }
4929 
  /// See AbstractAttribute::getAsStr().
4931   const std::string getAsStr() const override {
4932     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4933   }
4934 
4935   /// See AbstractAttribute::trackStatistics()
4936   void trackStatistics() const override {}
4937 };
4938 
4939 /// InstanceInfo attribute for floating values.
4940 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4941   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4942       : AAInstanceInfoImpl(IRP, A) {}
4943 };
4944 
/// InstanceInfo attribute for function arguments.
4946 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4947   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4948       : AAInstanceInfoFloating(IRP, A) {}
4949 };
4950 
4951 /// InstanceInfo attribute for call site arguments.
4952 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4953   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4954       : AAInstanceInfoImpl(IRP, A) {}
4955 
4956   /// See AbstractAttribute::updateImpl(...).
4957   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
4962     Argument *Arg = getAssociatedArgument();
4963     if (!Arg)
4964       return indicatePessimisticFixpoint();
4965     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4966     auto &ArgAA =
4967         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4968     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4969   }
4970 };
4971 
4972 /// InstanceInfo attribute for function return value.
4973 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4974   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4975       : AAInstanceInfoImpl(IRP, A) {
4976     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4977   }
4978 
4979   /// See AbstractAttribute::initialize(...).
4980   void initialize(Attributor &A) override {
4981     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4982   }
4983 
4984   /// See AbstractAttribute::updateImpl(...).
4985   ChangeStatus updateImpl(Attributor &A) override {
4986     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4987   }
4988 };
4989 
4990 /// InstanceInfo attribute deduction for a call site return value.
4991 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4992   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4993       : AAInstanceInfoFloating(IRP, A) {}
4994 };
4995 } // namespace
4996 
4997 /// ----------------------- Variable Capturing ---------------------------------
4998 
4999 namespace {
/// A class to hold the state for no-capture attributes.
5001 struct AANoCaptureImpl : public AANoCapture {
5002   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5003 
5004   /// See AbstractAttribute::initialize(...).
5005   void initialize(Attributor &A) override {
5006     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
5007       indicateOptimisticFixpoint();
5008       return;
5009     }
5010     Function *AnchorScope = getAnchorScope();
5011     if (isFnInterfaceKind() &&
5012         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
5013       indicatePessimisticFixpoint();
5014       return;
5015     }
5016 
5017     // You cannot "capture" null in the default address space.
5018     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5019         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5020       indicateOptimisticFixpoint();
5021       return;
5022     }
5023 
5024     const Function *F =
5025         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5026 
5027     // Check what state the associated function can actually capture.
5028     if (F)
5029       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5030     else
5031       indicatePessimisticFixpoint();
5032   }
5033 
5034   /// See AbstractAttribute::updateImpl(...).
5035   ChangeStatus updateImpl(Attributor &A) override;
5036 
  /// See AbstractAttribute::getDeducedAttributes(...).
5038   virtual void
5039   getDeducedAttributes(LLVMContext &Ctx,
5040                        SmallVectorImpl<Attribute> &Attrs) const override {
5041     if (!isAssumedNoCaptureMaybeReturned())
5042       return;
5043 
5044     if (isArgumentPosition()) {
5045       if (isAssumedNoCapture())
5046         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5047       else if (ManifestInternal)
5048         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5049     }
5050   }
5051 
  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
  /// depending on the ability of the function associated with \p IRP to
  /// capture state in memory and through "returning/throwing", respectively.
5055   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5056                                                    const Function &F,
5057                                                    BitIntegerState &State) {
5058     // TODO: Once we have memory behavior attributes we should use them here.
5059 
5060     // If we know we cannot communicate or write to memory, we do not care about
5061     // ptr2int anymore.
5062     if (F.onlyReadsMemory() && F.doesNotThrow() &&
5063         F.getReturnType()->isVoidTy()) {
5064       State.addKnownBits(NO_CAPTURE);
5065       return;
5066     }
5067 
    // A function cannot capture state in memory if it only reads memory; it
    // can, however, return/throw state, and that state might be influenced by
    // the pointer value, e.g., loading from a returned pointer might reveal a
    // bit.
5071     if (F.onlyReadsMemory())
5072       State.addKnownBits(NOT_CAPTURED_IN_MEM);
5073 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
5076     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5077       State.addKnownBits(NOT_CAPTURED_IN_RET);
5078 
5079     // Check existing "returned" attributes.
5080     int ArgNo = IRP.getCalleeArgNo();
5081     if (F.doesNotThrow() && ArgNo >= 0) {
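      // If the `returned` argument is the one this position refers to, it may
      // escape through the return value. If a different argument is returned,
      // this one is known not to escape that way; for readonly functions it
      // cannot escape at all.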
5082       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5083         if (F.hasParamAttribute(u, Attribute::Returned)) {
5084           if (u == unsigned(ArgNo))
5085             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5086           else if (F.onlyReadsMemory())
5087             State.addKnownBits(NO_CAPTURE);
5088           else
5089             State.addKnownBits(NOT_CAPTURED_IN_RET);
5090           break;
5091         }
5092     }
5093   }
5094 
  /// See AbstractAttribute::getAsStr().
5096   const std::string getAsStr() const override {
5097     if (isKnownNoCapture())
5098       return "known not-captured";
5099     if (isAssumedNoCapture())
5100       return "assumed not-captured";
5101     if (isKnownNoCaptureMaybeReturned())
5102       return "known not-captured-maybe-returned";
5103     if (isAssumedNoCaptureMaybeReturned())
5104       return "assumed not-captured-maybe-returned";
5105     return "assumed-captured";
5106   }
5107 
5108   /// Check the use \p U and update \p State accordingly. Return true if we
5109   /// should continue to update the state.
5110   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5111                 bool &Follow) {
5112     Instruction *UInst = cast<Instruction>(U.getUser());
5113     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5114                       << *UInst << "\n");
5115 
5116     // Deal with ptr2int by following uses.
5117     if (isa<PtrToIntInst>(UInst)) {
5118       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5119       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5120                           /* Return */ true);
5121     }
5122 
    // For stores we already checked if we can follow them; if they make it
    // here we give up.
5125     if (isa<StoreInst>(UInst))
5126       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5127                           /* Return */ false);
5128 
5129     // Explicitly catch return instructions.
5130     if (isa<ReturnInst>(UInst)) {
5131       if (UInst->getFunction() == getAnchorScope())
5132         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5133                             /* Return */ true);
5134       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5135                           /* Return */ true);
5136     }
5137 
5138     // For now we only use special logic for call sites. However, the tracker
5139     // itself knows about a lot of other non-capturing cases already.
5140     auto *CB = dyn_cast<CallBase>(UInst);
5141     if (!CB || !CB->isArgOperand(&U))
5142       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5143                           /* Return */ true);
5144 
5145     unsigned ArgNo = CB->getArgOperandNo(&U);
5146     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
5149     auto &ArgNoCaptureAA =
5150         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5151     if (ArgNoCaptureAA.isAssumedNoCapture())
5152       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5153                           /* Return */ false);
5154     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5155       Follow = true;
5156       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5157                           /* Return */ false);
5158     }
5159 
    // Lastly, we could not find a reason why no-capture can be assumed, so we
    // don't.
5161     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5162                         /* Return */ true);
5163   }
5164 
5165   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5166   /// \p CapturedInRet, then return true if we should continue updating the
5167   /// state.
5168   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5169                            bool CapturedInInt, bool CapturedInRet) {
5170     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5171                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5172     if (CapturedInMem)
5173       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5174     if (CapturedInInt)
5175       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5176     if (CapturedInRet)
5177       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5178     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5179   }
5180 };
5181 
5182 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5183   const IRPosition &IRP = getIRPosition();
5184   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5185                                   : &IRP.getAssociatedValue();
5186   if (!V)
5187     return indicatePessimisticFixpoint();
5188 
5189   const Function *F =
5190       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5191   assert(F && "Expected a function!");
5192   const IRPosition &FnPos = IRPosition::function(*F);
5193 
5194   AANoCapture::StateType T;
5195 
5196   // Readonly means we cannot capture through memory.
5197   bool IsKnown;
5198   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5199     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5200     if (IsKnown)
5201       addKnownBits(NOT_CAPTURED_IN_MEM);
5202   }
5203 
  // Make sure all returned values are different from the underlying value.
5205   // TODO: we could do this in a more sophisticated way inside
5206   //       AAReturnedValues, e.g., track all values that escape through returns
5207   //       directly somehow.
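  // Returning at most one constant, or arguments other than the one in
  // question, does not let the associated value escape through the return.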
5208   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5209     if (!RVAA.getState().isValidState())
5210       return false;
5211     bool SeenConstant = false;
5212     for (auto &It : RVAA.returned_values()) {
5213       if (isa<Constant>(It.first)) {
5214         if (SeenConstant)
5215           return false;
5216         SeenConstant = true;
5217       } else if (!isa<Argument>(It.first) ||
5218                  It.first == getAssociatedArgument())
5219         return false;
5220     }
5221     return true;
5222   };
5223 
5224   const auto &NoUnwindAA =
5225       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5226   if (NoUnwindAA.isAssumedNoUnwind()) {
5227     bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
5233     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5234       T.addKnownBits(NOT_CAPTURED_IN_RET);
5235       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5236         return ChangeStatus::UNCHANGED;
5237       if (NoUnwindAA.isKnownNoUnwind() &&
5238           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5239         addKnownBits(NOT_CAPTURED_IN_RET);
5240         if (isKnown(NOT_CAPTURED_IN_MEM))
5241           return indicateOptimisticFixpoint();
5242       }
5243     }
5244   }
5245 
5246   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5247     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5248         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5249     return DerefAA.getAssumedDereferenceableBytes();
5250   };
5251 
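  // Classify each use with the generic capture tracking logic first; only uses
  // that may capture need the attributor-specific checkUse logic.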
5252   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5253     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5254     case UseCaptureKind::NO_CAPTURE:
5255       return true;
5256     case UseCaptureKind::MAY_CAPTURE:
5257       return checkUse(A, T, U, Follow);
5258     case UseCaptureKind::PASSTHROUGH:
5259       Follow = true;
5260       return true;
5261     }
5262     llvm_unreachable("Unexpected use capture kind!");
5263   };
5264 
5265   if (!A.checkForAllUses(UseCheck, *this, *V))
5266     return indicatePessimisticFixpoint();
5267 
5268   AANoCapture::StateType &S = getState();
5269   auto Assumed = S.getAssumed();
5270   S.intersectAssumedBits(T.getAssumed());
5271   if (!isAssumedNoCaptureMaybeReturned())
5272     return indicatePessimisticFixpoint();
5273   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5274                                    : ChangeStatus::CHANGED;
5275 }
5276 
5277 /// NoCapture attribute for function arguments.
5278 struct AANoCaptureArgument final : AANoCaptureImpl {
5279   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5280       : AANoCaptureImpl(IRP, A) {}
5281 
5282   /// See AbstractAttribute::trackStatistics()
5283   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5284 };
5285 
5286 /// NoCapture attribute for call site arguments.
5287 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5288   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5289       : AANoCaptureImpl(IRP, A) {}
5290 
5291   /// See AbstractAttribute::initialize(...).
5292   void initialize(Attributor &A) override {
5293     if (Argument *Arg = getAssociatedArgument())
5294       if (Arg->hasByValAttr())
5295         indicateOptimisticFixpoint();
5296     AANoCaptureImpl::initialize(A);
5297   }
5298 
5299   /// See AbstractAttribute::updateImpl(...).
5300   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
5305     Argument *Arg = getAssociatedArgument();
5306     if (!Arg)
5307       return indicatePessimisticFixpoint();
5308     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5309     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5310     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5311   }
5312 
5313   /// See AbstractAttribute::trackStatistics()
5314   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5315 };
5316 
5317 /// NoCapture attribute for floating values.
5318 struct AANoCaptureFloating final : AANoCaptureImpl {
5319   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5320       : AANoCaptureImpl(IRP, A) {}
5321 
5322   /// See AbstractAttribute::trackStatistics()
5323   void trackStatistics() const override {
5324     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5325   }
5326 };
5327 
5328 /// NoCapture attribute for function return value.
5329 struct AANoCaptureReturned final : AANoCaptureImpl {
5330   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5331       : AANoCaptureImpl(IRP, A) {
5332     llvm_unreachable("NoCapture is not applicable to function returns!");
5333   }
5334 
5335   /// See AbstractAttribute::initialize(...).
5336   void initialize(Attributor &A) override {
5337     llvm_unreachable("NoCapture is not applicable to function returns!");
5338   }
5339 
5340   /// See AbstractAttribute::updateImpl(...).
5341   ChangeStatus updateImpl(Attributor &A) override {
5342     llvm_unreachable("NoCapture is not applicable to function returns!");
5343   }
5344 
5345   /// See AbstractAttribute::trackStatistics()
5346   void trackStatistics() const override {}
5347 };
5348 
5349 /// NoCapture attribute deduction for a call site return value.
5350 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5351   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5352       : AANoCaptureImpl(IRP, A) {}
5353 
5354   /// See AbstractAttribute::initialize(...).
5355   void initialize(Attributor &A) override {
5356     const Function *F = getAnchorScope();
5357     // Check what state the associated function can actually capture.
5358     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5359   }
5360 
5361   /// See AbstractAttribute::trackStatistics()
5362   void trackStatistics() const override {
5363     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5364   }
5365 };
5366 } // namespace
5367 
5368 /// ------------------ Value Simplify Attribute ----------------------------
5369 
5370 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
  // FIXME: Add typecast support.
5372   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5373       SimplifiedAssociatedValue, Other, Ty);
5374   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5375     return false;
5376 
5377   LLVM_DEBUG({
5378     if (SimplifiedAssociatedValue.hasValue())
5379       dbgs() << "[ValueSimplify] is assumed to be "
5380              << **SimplifiedAssociatedValue << "\n";
5381     else
5382       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5383   });
5384   return true;
5385 }
5386 
5387 namespace {
5388 struct AAValueSimplifyImpl : AAValueSimplify {
5389   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5390       : AAValueSimplify(IRP, A) {}
5391 
5392   /// See AbstractAttribute::initialize(...).
5393   void initialize(Attributor &A) override {
5394     if (getAssociatedValue().getType()->isVoidTy())
5395       indicatePessimisticFixpoint();
5396     if (A.hasSimplificationCallback(getIRPosition()))
5397       indicatePessimisticFixpoint();
5398   }
5399 
5400   /// See AbstractAttribute::getAsStr().
5401   const std::string getAsStr() const override {
5402     LLVM_DEBUG({
5403       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5404       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5405         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5406     });
5407     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5408                           : "not-simple";
5409   }
5410 
5411   /// See AbstractAttribute::trackStatistics()
5412   void trackStatistics() const override {}
5413 
5414   /// See AAValueSimplify::getAssumedSimplifiedValue()
5415   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5416     return SimplifiedAssociatedValue;
5417   }
5418 
  /// Ensure the return value is \p V with type \p Ty; if that is not possible,
  /// return nullptr. If \p Check is true we will only verify such an operation
  /// would succeed and return a non-nullptr value if that is the case. No IR
  /// is generated or modified.
5423   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5424                            bool Check) {
5425     if (auto *TypedV = AA::getWithType(V, Ty))
5426       return TypedV;
5427     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5428       return Check ? &V
5429                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5430                                                                       "", CtxI);
5431     return nullptr;
5432   }
5433 
  /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
  /// If \p Check is true we will only verify such an operation would succeed
  /// and return a non-nullptr value if that is the case. No IR is generated or
  /// modified.
5438   static Value *reproduceInst(Attributor &A,
5439                               const AbstractAttribute &QueryingAA,
5440                               Instruction &I, Type &Ty, Instruction *CtxI,
5441                               bool Check, ValueToValueMapTy &VMap) {
5442     assert(CtxI && "Cannot reproduce an instruction without context!");
5443     if (Check && (I.mayReadFromMemory() ||
5444                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5445                                                 /* TLI */ nullptr)))
5446       return nullptr;
5447     for (Value *Op : I.operands()) {
5448       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5449       if (!NewOp) {
5450         assert(Check && "Manifest of new value unexpectedly failed!");
5451         return nullptr;
5452       }
5453       if (!Check)
5454         VMap[Op] = NewOp;
5455     }
5456     if (Check)
5457       return &I;
5458 
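    // In the non-check mode, clone the instruction, remember the mapping, and
    // insert the clone at the context location. Its operands are rewritten to
    // the reproduced values collected in VMap via RemapInstruction below.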
5459     Instruction *CloneI = I.clone();
5460     // TODO: Try to salvage debug information here.
5461     CloneI->setDebugLoc(DebugLoc());
5462     VMap[&I] = CloneI;
5463     CloneI->insertBefore(CtxI);
5464     RemapInstruction(CloneI, VMap);
5465     return CloneI;
5466   }
5467 
  /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
  /// If \p Check is true we will only verify such an operation would succeed
  /// and return a non-nullptr value if that is the case. No IR is generated or
  /// modified.
5472   static Value *reproduceValue(Attributor &A,
5473                                const AbstractAttribute &QueryingAA, Value &V,
5474                                Type &Ty, Instruction *CtxI, bool Check,
5475                                ValueToValueMapTy &VMap) {
5476     if (const auto &NewV = VMap.lookup(&V))
5477       return NewV;
5478     bool UsedAssumedInformation = false;
5479     Optional<Value *> SimpleV =
5480         A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
5481     if (!SimpleV.hasValue())
5482       return PoisonValue::get(&Ty);
5483     Value *EffectiveV = &V;
5484     if (SimpleV.getValue())
5485       EffectiveV = SimpleV.getValue();
5486     if (auto *C = dyn_cast<Constant>(EffectiveV))
5487       if (!C->canTrap())
5488         return C;
5489     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5490                                       A.getInfoCache()))
5491       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5492     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5493       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5494         return ensureType(A, *NewV, Ty, CtxI, Check);
5495     return nullptr;
5496   }
5497 
5498   /// Return a value we can use as replacement for the associated one, or
5499   /// nullptr if we don't have one that makes sense.
5500   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5501     Value *NewV = SimplifiedAssociatedValue.hasValue()
5502                       ? SimplifiedAssociatedValue.getValue()
5503                       : UndefValue::get(getAssociatedType());
5504     if (NewV && NewV != &getAssociatedValue()) {
5505       ValueToValueMapTy VMap;
      // First verify we can reproduce the value with the required type at the
5507       // context location before we actually start modifying the IR.
5508       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5509                          /* CheckOnly */ true, VMap))
5510         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5511                               /* CheckOnly */ false, VMap);
5512     }
5513     return nullptr;
5514   }
5515 
  /// Helper function for querying AAValueSimplify and updating the candidate.
  /// \param IRP The value position we are trying to unify with SimplifiedValue.
5518   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5519                       const IRPosition &IRP, bool Simplify = true) {
5520     bool UsedAssumedInformation = false;
5521     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5522     if (Simplify)
5523       QueryingValueSimplified =
5524           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5525     return unionAssumed(QueryingValueSimplified);
5526   }
5527 
  /// Return true if a simplification candidate was found, false otherwise.
5529   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5530     if (!getAssociatedValue().getType()->isIntegerTy())
5531       return false;
5532 
5533     // This will also pass the call base context.
5534     const auto &AA =
5535         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5536 
5537     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5538 
5539     if (!COpt.hasValue()) {
5540       SimplifiedAssociatedValue = llvm::None;
5541       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5542       return true;
5543     }
5544     if (auto *C = COpt.getValue()) {
5545       SimplifiedAssociatedValue = C;
5546       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5547       return true;
5548     }
5549     return false;
5550   }
5551 
5552   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5553     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5554       return true;
5555     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5556       return true;
5557     return false;
5558   }
5559 
5560   /// See AbstractAttribute::manifest(...).
5561   ChangeStatus manifest(Attributor &A) override {
5562     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5563     for (auto &U : getAssociatedValue().uses()) {
5564       // Check if we need to adjust the insertion point to make sure the IR is
5565       // valid.
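      // For a PHI user the replacement has to be available in the incoming
      // block of the use, so we materialize it before that block's terminator.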
5566       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5567       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5568         IP = PHI->getIncomingBlock(U)->getTerminator();
5569       if (auto *NewV = manifestReplacementValue(A, IP)) {
5570         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5571                           << " -> " << *NewV << " :: " << *this << "\n");
5572         if (A.changeUseAfterManifest(U, *NewV))
5573           Changed = ChangeStatus::CHANGED;
5574       }
5575     }
5576 
5577     return Changed | AAValueSimplify::manifest(A);
5578   }
5579 
5580   /// See AbstractState::indicatePessimisticFixpoint(...).
5581   ChangeStatus indicatePessimisticFixpoint() override {
5582     SimplifiedAssociatedValue = &getAssociatedValue();
5583     return AAValueSimplify::indicatePessimisticFixpoint();
5584   }
5585 };
5586 
5587 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5588   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5589       : AAValueSimplifyImpl(IRP, A) {}
5590 
5591   void initialize(Attributor &A) override {
5592     AAValueSimplifyImpl::initialize(A);
5593     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5594       indicatePessimisticFixpoint();
5595     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5596                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5597                 /* IgnoreSubsumingPositions */ true))
5598       indicatePessimisticFixpoint();
5599   }
5600 
5601   /// See AbstractAttribute::updateImpl(...).
5602   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly; otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
5605     Argument *Arg = getAssociatedArgument();
5606     if (Arg->hasByValAttr()) {
5607       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5608       //       there is no race by not copying a constant byval.
5609       bool IsKnown;
5610       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5611         return indicatePessimisticFixpoint();
5612     }
5613 
5614     auto Before = SimplifiedAssociatedValue;
5615 
5616     auto PredForCallSite = [&](AbstractCallSite ACS) {
5617       const IRPosition &ACSArgPos =
5618           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is one not
5620       // associated (which can happen for callback calls).
5621       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5622         return false;
5623 
5624       // Simplify the argument operand explicitly and check if the result is
      // valid in the current scope. This avoids referring to simplified values
      // in other functions, e.g., we don't want to say an argument in a
5627       // static function is actually an argument in a different function.
5628       bool UsedAssumedInformation = false;
5629       Optional<Constant *> SimpleArgOp =
5630           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5631       if (!SimpleArgOp.hasValue())
5632         return true;
5633       if (!SimpleArgOp.getValue())
5634         return false;
5635       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5636         return false;
5637       return unionAssumed(*SimpleArgOp);
5638     };
5639 
    // Generate an answer specific to a call site context.
5641     bool Success;
5642     bool UsedAssumedInformation = false;
5643     if (hasCallBaseContext() &&
5644         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5645       Success = PredForCallSite(
5646           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5647     else
5648       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5649                                        UsedAssumedInformation);
5650 
5651     if (!Success)
5652       if (!askSimplifiedValueForOtherAAs(A))
5653         return indicatePessimisticFixpoint();
5654 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
5658   }
5659 
5660   /// See AbstractAttribute::trackStatistics()
5661   void trackStatistics() const override {
5662     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5663   }
5664 };
5665 
5666 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5667   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5668       : AAValueSimplifyImpl(IRP, A) {}
5669 
5670   /// See AAValueSimplify::getAssumedSimplifiedValue()
5671   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5672     if (!isValidState())
5673       return nullptr;
5674     return SimplifiedAssociatedValue;
5675   }
5676 
5677   /// See AbstractAttribute::updateImpl(...).
5678   ChangeStatus updateImpl(Attributor &A) override {
5679     auto Before = SimplifiedAssociatedValue;
5680 
5681     auto ReturnInstCB = [&](Instruction &I) {
5682       auto &RI = cast<ReturnInst>(I);
5683       return checkAndUpdate(
5684           A, *this,
5685           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5686     };
5687 
5688     bool UsedAssumedInformation = false;
5689     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5690                                    UsedAssumedInformation))
5691       if (!askSimplifiedValueForOtherAAs(A))
5692         return indicatePessimisticFixpoint();
5693 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
5697   }
5698 
5699   ChangeStatus manifest(Attributor &A) override {
5700     // We queried AAValueSimplify for the returned values so they will be
5701     // replaced if a simplified form was found. Nothing to do here.
5702     return ChangeStatus::UNCHANGED;
5703   }
5704 
5705   /// See AbstractAttribute::trackStatistics()
5706   void trackStatistics() const override {
5707     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5708   }
5709 };
5710 
5711 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5712   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5713       : AAValueSimplifyImpl(IRP, A) {}
5714 
5715   /// See AbstractAttribute::initialize(...).
5716   void initialize(Attributor &A) override {
5717     AAValueSimplifyImpl::initialize(A);
5718     Value &V = getAnchorValue();
5719 
    // TODO: Add other cases.
5721     if (isa<Constant>(V))
5722       indicatePessimisticFixpoint();
5723   }
5724 
  /// Check if \p Cmp is a comparison we can simplify.
  ///
  /// We handle multiple cases, one of which is a comparison in which at least
  /// one operand is an (assumed) nullptr. If so, try to simplify it using
  /// AANonNull on the other operand. Return true if successful; in that case
  /// SimplifiedAssociatedValue will be updated.
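  ///
  /// Example (hypothetical IR): if %p is assumed non-null via AANonNull, then
  ///   %c = icmp eq i8* %p, null
  /// is assumed to simplify to `i1 false` (and the `icmp ne` form to `i1
  /// true`).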
5731   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5732     auto Union = [&](Value &V) {
5733       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5734           SimplifiedAssociatedValue, &V, V.getType());
5735       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5736     };
5737 
5738     Value *LHS = Cmp.getOperand(0);
5739     Value *RHS = Cmp.getOperand(1);
5740 
5741     // Simplify the operands first.
5742     bool UsedAssumedInformation = false;
5743     const auto &SimplifiedLHS =
5744         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5745                                *this, UsedAssumedInformation);
5746     if (!SimplifiedLHS.hasValue())
5747       return true;
5748     if (!SimplifiedLHS.getValue())
5749       return false;
5750     LHS = *SimplifiedLHS;
5751 
5752     const auto &SimplifiedRHS =
5753         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5754                                *this, UsedAssumedInformation);
5755     if (!SimplifiedRHS.hasValue())
5756       return true;
5757     if (!SimplifiedRHS.getValue())
5758       return false;
5759     RHS = *SimplifiedRHS;
5760 
5761     LLVMContext &Ctx = Cmp.getContext();
5762     // Handle the trivial case first in which we don't even need to think about
5763     // null or non-null.
5764     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5765       Constant *NewVal =
5766           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5767       if (!Union(*NewVal))
5768         return false;
5769       if (!UsedAssumedInformation)
5770         indicateOptimisticFixpoint();
5771       return true;
5772     }
5773 
5774     // From now on we only handle equalities (==, !=).
5775     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5776     if (!ICmp || !ICmp->isEquality())
5777       return false;
5778 
5779     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5780     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5781     if (!LHSIsNull && !RHSIsNull)
5782       return false;
5783 
    // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
    // on the non-nullptr operand and if we assume it's non-null we can
    // conclude the result of the comparison.
5787     assert((LHSIsNull || RHSIsNull) &&
5788            "Expected nullptr versus non-nullptr comparison at this point");
5789 
    // The index of the operand we assume is not null.
5791     unsigned PtrIdx = LHSIsNull;
5792     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5793         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5794         DepClassTy::REQUIRED);
5795     if (!PtrNonNullAA.isAssumedNonNull())
5796       return false;
5797     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5798 
5799     // The new value depends on the predicate, true for != and false for ==.
5800     Constant *NewVal = ConstantInt::get(
5801         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5802     if (!Union(*NewVal))
5803       return false;
5804 
5805     if (!UsedAssumedInformation)
5806       indicateOptimisticFixpoint();
5807 
5808     return true;
5809   }
5810 
  /// Use the generic, non-optimistic InstSimplify functionality if we managed
  /// to simplify any operand of the instruction \p I. Return true if
  /// successful; in that case SimplifiedAssociatedValue will be updated.
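  ///
  /// Example (hypothetical IR): if the operand %y of
  ///   %r = add i32 %x, %y
  /// is assumed to simplify to 0, InstSimplify is queried with the operand
  /// list {%x, 0} and %r is assumed to simplify to %x.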
5814   bool handleGenericInst(Attributor &A, Instruction &I) {
5815     bool SomeSimplified = false;
5816     bool UsedAssumedInformation = false;
5817 
5818     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5819     int Idx = 0;
5820     for (Value *Op : I.operands()) {
5821       const auto &SimplifiedOp =
5822           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5823                                  *this, UsedAssumedInformation);
      // If we are not sure about any operand, we are not sure about the entire
      // instruction, so we'll wait.
5826       if (!SimplifiedOp.hasValue())
5827         return true;
5828 
5829       if (SimplifiedOp.getValue())
5830         NewOps[Idx] = SimplifiedOp.getValue();
5831       else
5832         NewOps[Idx] = Op;
5833 
5834       SomeSimplified |= (NewOps[Idx] != Op);
5835       ++Idx;
5836     }
5837 
5838     // We won't bother with the InstSimplify interface if we didn't simplify any
5839     // operand ourselves.
5840     if (!SomeSimplified)
5841       return false;
5842 
5843     InformationCache &InfoCache = A.getInfoCache();
5844     Function *F = I.getFunction();
5845     const auto *DT =
5846         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5847     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5848     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5849     OptimizationRemarkEmitter *ORE = nullptr;
5850 
5851     const DataLayout &DL = I.getModule()->getDataLayout();
5852     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5853     if (Value *SimplifiedI =
5854             simplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5855       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5856           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5857       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5858     }
5859     return false;
5860   }
5861 
5862   /// See AbstractAttribute::updateImpl(...).
5863   ChangeStatus updateImpl(Attributor &A) override {
5864     auto Before = SimplifiedAssociatedValue;
5865 
5866     // Do not simplify loads that are only used in llvm.assume if we cannot also
5867     // remove all stores that may feed into the load. The reason is that the
5868     // assume is probably worth something as long as the stores are around.
5869     if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
5870       InformationCache &InfoCache = A.getInfoCache();
5871       if (InfoCache.isOnlyUsedByAssume(*LI)) {
5872         SmallSetVector<Value *, 4> PotentialCopies;
5873         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
5874         bool UsedAssumedInformation = false;
5875         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
5876                                            PotentialValueOrigins, *this,
5877                                            UsedAssumedInformation,
5878                                            /* OnlyExact */ true)) {
5879           if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
5880                 if (!I)
5881                   return true;
5882                 if (auto *SI = dyn_cast<StoreInst>(I))
5883                   return A.isAssumedDead(SI->getOperandUse(0), this,
5884                                          /* LivenessAA */ nullptr,
5885                                          UsedAssumedInformation,
5886                                          /* CheckBBLivenessOnly */ false);
5887                 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
5888                                        UsedAssumedInformation,
5889                                        /* CheckBBLivenessOnly */ false);
5890               }))
5891             return indicatePessimisticFixpoint();
5892         }
5893       }
5894     }
5895 
5896     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5897                             bool Stripped) -> bool {
5898       auto &AA = A.getAAFor<AAValueSimplify>(
5899           *this, IRPosition::value(V, getCallBaseContext()),
5900           DepClassTy::REQUIRED);
5901       if (!Stripped && this == &AA) {
5902 
5903         if (auto *I = dyn_cast<Instruction>(&V)) {
5904           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5905             if (handleCmp(A, *Cmp))
5906               return true;
5907           if (handleGenericInst(A, *I))
5908             return true;
5909         }
        // TODO: Look at the instruction and check recursively.
5911 
5912         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5913                           << "\n");
5914         return false;
5915       }
5916       return checkAndUpdate(A, *this,
5917                             IRPosition::value(V, getCallBaseContext()));
5918     };
5919 
5920     bool Dummy = false;
5921     bool UsedAssumedInformation = false;
5922     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5923                                      VisitValueCB, getCtxI(),
5924                                      UsedAssumedInformation,
5925                                      /* UseValueSimplify */ false))
5926       if (!askSimplifiedValueForOtherAAs(A))
5927         return indicatePessimisticFixpoint();
5928 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
5932   }
5933 
5934   /// See AbstractAttribute::trackStatistics()
5935   void trackStatistics() const override {
5936     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5937   }
5938 };
5939 
5940 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5941   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5942       : AAValueSimplifyImpl(IRP, A) {}
5943 
5944   /// See AbstractAttribute::initialize(...).
5945   void initialize(Attributor &A) override {
5946     SimplifiedAssociatedValue = nullptr;
5947     indicateOptimisticFixpoint();
5948   }
  /// See AbstractAttribute::updateImpl(...).
5950   ChangeStatus updateImpl(Attributor &A) override {
5951     llvm_unreachable(
5952         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5953   }
5954   /// See AbstractAttribute::trackStatistics()
5955   void trackStatistics() const override {
5956     STATS_DECLTRACK_FN_ATTR(value_simplify)
5957   }
5958 };
5959 
5960 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5961   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5962       : AAValueSimplifyFunction(IRP, A) {}
5963   /// See AbstractAttribute::trackStatistics()
5964   void trackStatistics() const override {
5965     STATS_DECLTRACK_CS_ATTR(value_simplify)
5966   }
5967 };
5968 
5969 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5970   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5971       : AAValueSimplifyImpl(IRP, A) {}
5972 
5973   void initialize(Attributor &A) override {
5974     AAValueSimplifyImpl::initialize(A);
5975     Function *Fn = getAssociatedFunction();
5976     if (!Fn) {
5977       indicatePessimisticFixpoint();
5978       return;
5979     }
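    // If an argument carries the `returned` attribute, the call is known to
    // return that argument; try to simplify this position to the corresponding
    // call site operand.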
5980     for (Argument &Arg : Fn->args()) {
5981       if (Arg.hasReturnedAttr()) {
5982         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5983                                                  Arg.getArgNo());
5984         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5985             checkAndUpdate(A, *this, IRP))
5986           indicateOptimisticFixpoint();
5987         else
5988           indicatePessimisticFixpoint();
5989         return;
5990       }
5991     }
5992   }
5993 
5994   /// See AbstractAttribute::updateImpl(...).
5995   ChangeStatus updateImpl(Attributor &A) override {
5996     auto Before = SimplifiedAssociatedValue;
5997     auto &RetAA = A.getAAFor<AAReturnedValues>(
5998         *this, IRPosition::function(*getAssociatedFunction()),
5999         DepClassTy::REQUIRED);
6000     auto PredForReturned =
6001         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
6002           bool UsedAssumedInformation = false;
6003           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
6004               &RetVal, *cast<CallBase>(getCtxI()), *this,
6005               UsedAssumedInformation);
6006           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6007               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
6008           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
6009         };
6010     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
6011       if (!askSimplifiedValueForOtherAAs(A))
6012         return indicatePessimisticFixpoint();
6013     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
6015   }
6016 
6017   void trackStatistics() const override {
6018     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6019   }
6020 };
6021 
6022 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6023   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6024       : AAValueSimplifyFloating(IRP, A) {}
6025 
6026   /// See AbstractAttribute::manifest(...).
6027   ChangeStatus manifest(Attributor &A) override {
6028     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6029     // TODO: We should avoid simplification duplication to begin with.
6030     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6031         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6032     if (FloatAA && FloatAA->getState().isValidState())
6033       return Changed;
6034 
6035     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6036       Use &U = cast<CallBase>(&getAnchorValue())
6037                    ->getArgOperandUse(getCallSiteArgNo());
6038       if (A.changeUseAfterManifest(U, *NewV))
6039         Changed = ChangeStatus::CHANGED;
6040     }
6041 
6042     return Changed | AAValueSimplify::manifest(A);
6043   }
6044 
6045   void trackStatistics() const override {
6046     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6047   }
6048 };
6049 } // namespace
6050 
6051 /// ----------------------- Heap-To-Stack Conversion ---------------------------
6052 namespace {
6053 struct AAHeapToStackFunction final : public AAHeapToStack {
6054 
6055   struct AllocationInfo {
6056     /// The call that allocates the memory.
6057     CallBase *const CB;
6058 
6059     /// The library function id for the allocation.
6060     LibFunc LibraryFunctionId = NotLibFunc;
6061 
6062     /// The status wrt. a rewrite.
6063     enum {
6064       STACK_DUE_TO_USE,
6065       STACK_DUE_TO_FREE,
6066       INVALID,
6067     } Status = STACK_DUE_TO_USE;
6068 
6069     /// Flag to indicate if we encountered a use that might free this allocation
6070     /// but which is not in the deallocation infos.
6071     bool HasPotentiallyFreeingUnknownUses = false;
6072 
6073     /// The set of free calls that use this allocation.
6074     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6075   };
6076 
6077   struct DeallocationInfo {
6078     /// The call that deallocates the memory.
6079     CallBase *const CB;
6080 
6081     /// Flag to indicate if we don't know all objects this deallocation might
6082     /// free.
6083     bool MightFreeUnknownObjects = false;
6084 
6085     /// The set of allocation calls that are potentially freed.
6086     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6087   };
6088 
6089   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6090       : AAHeapToStack(IRP, A) {}
6091 
6092   ~AAHeapToStackFunction() {
6093     // Ensure we call the destructor so we release any memory allocated in the
6094     // sets.
6095     for (auto &It : AllocationInfos)
6096       It.second->~AllocationInfo();
6097     for (auto &It : DeallocationInfos)
6098       It.second->~DeallocationInfo();
6099   }
6100 
6101   void initialize(Attributor &A) override {
6102     AAHeapToStack::initialize(A);
6103 
6104     const Function *F = getAnchorScope();
6105     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6106 
6107     auto AllocationIdentifierCB = [&](Instruction &I) {
6108       CallBase *CB = dyn_cast<CallBase>(&I);
6109       if (!CB)
6110         return true;
6111       if (isFreeCall(CB, TLI)) {
6112         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
6113         return true;
6114       }
6115       // To do heap to stack, we need to know that the allocation itself is
6116       // removable once uses are rewritten, and that we can initialize the
6117       // alloca to the same pattern as the original allocation result.
6118       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
6119         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6120         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6121           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6122           AllocationInfos[CB] = AI;
6123           if (TLI)
6124             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6125         }
6126       }
6127       return true;
6128     };
6129 
6130     bool UsedAssumedInformation = false;
6131     bool Success = A.checkForAllCallLikeInstructions(
6132         AllocationIdentifierCB, *this, UsedAssumedInformation,
6133         /* CheckBBLivenessOnly */ false,
6134         /* CheckPotentiallyDead */ true);
6135     (void)Success;
6136     assert(Success && "Did not expect the call base visit callback to fail!");
6137 
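    // Register simplification callbacks for all allocation and deallocation
    // calls that keep these call site return values as they are; this prevents
    // them from being simplified away while we reason about the conversion.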
6138     Attributor::SimplifictionCallbackTy SCB =
6139         [](const IRPosition &, const AbstractAttribute *,
6140            bool &) -> Optional<Value *> { return nullptr; };
6141     for (const auto &It : AllocationInfos)
6142       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6143                                        SCB);
6144     for (const auto &It : DeallocationInfos)
6145       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6146                                        SCB);
6147   }
6148 
6149   const std::string getAsStr() const override {
6150     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6151     for (const auto &It : AllocationInfos) {
6152       if (It.second->Status == AllocationInfo::INVALID)
6153         ++NumInvalidMallocs;
6154       else
6155         ++NumH2SMallocs;
6156     }
6157     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6158            std::to_string(NumInvalidMallocs);
6159   }
6160 
6161   /// See AbstractAttribute::trackStatistics().
6162   void trackStatistics() const override {
6163     STATS_DECL(
6164         MallocCalls, Function,
6165         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6166     for (auto &It : AllocationInfos)
6167       if (It.second->Status != AllocationInfo::INVALID)
6168         ++BUILD_STAT_NAME(MallocCalls, Function);
6169   }
6170 
6171   bool isAssumedHeapToStack(const CallBase &CB) const override {
6172     if (isValidState())
6173       if (AllocationInfo *AI =
6174               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6175         return AI->Status != AllocationInfo::INVALID;
6176     return false;
6177   }
6178 
6179   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6180     if (!isValidState())
6181       return false;
6182 
6183     for (auto &It : AllocationInfos) {
6184       AllocationInfo &AI = *It.second;
6185       if (AI.Status == AllocationInfo::INVALID)
6186         continue;
6187 
6188       if (AI.PotentialFreeCalls.count(&CB))
6189         return true;
6190     }
6191 
6192     return false;
6193   }
6194 
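  /// Rewrite all allocations that are still assumed convertible. As a sketch
  /// of the effect (hypothetical IR), a call like
  ///   %p = call i8* @malloc(i64 32)
  /// becomes an `alloca i8, i64 32` (bitcast if the types differ), its known
  /// free calls are deleted, and the alloca is initialized via memset unless
  /// the allocation's initial value is undef.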
6195   ChangeStatus manifest(Attributor &A) override {
6196     assert(getState().isValidState() &&
6197            "Attempted to manifest an invalid state!");
6198 
6199     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6200     Function *F = getAnchorScope();
6201     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6202 
6203     for (auto &It : AllocationInfos) {
6204       AllocationInfo &AI = *It.second;
6205       if (AI.Status == AllocationInfo::INVALID)
6206         continue;
6207 
6208       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6209         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6210         A.deleteAfterManifest(*FreeCall);
6211         HasChanged = ChangeStatus::CHANGED;
6212       }
6213 
6214       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6215                         << "\n");
6216 
6217       auto Remark = [&](OptimizationRemark OR) {
6218         LibFunc IsAllocShared;
6219         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6220           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6221             return OR << "Moving globalized variable to the stack.";
6222         return OR << "Moving memory allocation from the heap to the stack.";
6223       };
6224       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6225         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6226       else
6227         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6228 
6229       const DataLayout &DL = A.getInfoCache().getDL();
6230       Value *Size;
6231       Optional<APInt> SizeAPI = getSize(A, *this, AI);
6232       if (SizeAPI.hasValue()) {
6233         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6234       } else {
6235         LLVMContext &Ctx = AI.CB->getContext();
6236         ObjectSizeOpts Opts;
6237         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6238         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6239         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6240                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6241         Size = SizeOffsetPair.first;
6242       }
6243 
6244       Align Alignment(1);
6245       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6246         Alignment = max(Alignment, RetAlign);
6247       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6248         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6249         assert(AlignmentAPI.hasValue() &&
6250                "Expected an alignment during manifest!");
6251         Alignment =
6252             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
6253       }
6254 
6255       // TODO: Hoist the alloca towards the function entry.
6256       unsigned AS = DL.getAllocaAddrSpace();
6257       Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
6258                                            Size, Alignment, "", AI.CB);
6259 
6260       if (Alloca->getType() != AI.CB->getType())
6261         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6262             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6263 
6264       auto *I8Ty = Type::getInt8Ty(F->getContext());
6265       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6266       assert(InitVal &&
6267              "Must be able to materialize initial memory state of allocation");
6268 
6269       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6270 
6271       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6272         auto *NBB = II->getNormalDest();
6273         BranchInst::Create(NBB, AI.CB->getParent());
6274         A.deleteAfterManifest(*AI.CB);
6275       } else {
6276         A.deleteAfterManifest(*AI.CB);
6277       }
6278 
6279       // Initialize the alloca with the same value as used by the allocation
      // function. We can skip undef as the initial value of an alloca is
6281       // undef, and the memset would simply end up being DSEd.
6282       if (!isa<UndefValue>(InitVal)) {
6283         IRBuilder<> Builder(Alloca->getNextNode());
6284         // TODO: Use alignment above if align!=1
6285         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6286       }
6287       HasChanged = ChangeStatus::CHANGED;
6288     }
6289 
6290     return HasChanged;
6291   }
6292 
6293   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6294                            Value &V) {
6295     bool UsedAssumedInformation = false;
6296     Optional<Constant *> SimpleV =
6297         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6298     if (!SimpleV.hasValue())
6299       return APInt(64, 0);
6300     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6301       return CI->getValue();
6302     return llvm::None;
6303   }
6304 
6305   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6306                           AllocationInfo &AI) {
6307     auto Mapper = [&](const Value *V) -> const Value * {
6308       bool UsedAssumedInformation = false;
6309       if (Optional<Constant *> SimpleV =
6310               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6311         if (*SimpleV)
6312           return *SimpleV;
6313       return V;
6314     };
6315 
6316     const Function *F = getAnchorScope();
6317     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6318     return getAllocSize(AI.CB, TLI, Mapper);
6319   }
6320 
6321   /// Collection of all malloc-like calls in a function with associated
6322   /// information.
6323   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6324 
6325   /// Collection of all free-like calls in a function with associated
6326   /// information.
6327   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6328 
6329   ChangeStatus updateImpl(Attributor &A) override;
6330 };
6331 
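// For each tracked allocation we first check that all transitive uses are
// compatible with a stack allocation (UsesCheck below); if that fails, we fall
// back to requiring a unique known free call that is executed whenever the
// allocation is reached (FreeCheck below). Allocations that fail both checks,
// have an unsuitable alignment, or exceed the heap-to-stack size limit are
// marked INVALID.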
6332 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6333   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6334   const Function *F = getAnchorScope();
6335   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6336 
6337   const auto &LivenessAA =
6338       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6339 
6340   MustBeExecutedContextExplorer &Explorer =
6341       A.getInfoCache().getMustBeExecutedContextExplorer();
6342 
6343   bool StackIsAccessibleByOtherThreads =
6344       A.getInfoCache().stackIsAccessibleByOtherThreads();
6345 
6346   // Flag to ensure we update our deallocation information at most once per
6347   // updateImpl call and only if we use the free check reasoning.
6348   bool HasUpdatedFrees = false;
6349 
6350   auto UpdateFrees = [&]() {
6351     HasUpdatedFrees = true;
6352 
6353     for (auto &It : DeallocationInfos) {
6354       DeallocationInfo &DI = *It.second;
6355       // For now we cannot use deallocations that have unknown inputs, skip
6356       // them.
6357       if (DI.MightFreeUnknownObjects)
6358         continue;
6359 
6360       // No need to analyze dead calls, ignore them instead.
6361       bool UsedAssumedInformation = false;
6362       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6363                           /* CheckBBLivenessOnly */ true))
6364         continue;
6365 
6366       // Use the optimistic version to get the freed objects, ignoring dead
6367       // branches etc.
6368       SmallVector<Value *, 8> Objects;
6369       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6370                                            *this, DI.CB,
6371                                            UsedAssumedInformation)) {
6372         LLVM_DEBUG(
6373             dbgs()
6374             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6375         DI.MightFreeUnknownObjects = true;
6376         continue;
6377       }
6378 
6379       // Check each object explicitly.
6380       for (auto *Obj : Objects) {
6381         // Free of null and undef can be ignored as no-ops (or UB in the latter
6382         // case).
6383         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6384           continue;
6385 
6386         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6387         if (!ObjCB) {
6388           LLVM_DEBUG(dbgs()
6389                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6390           DI.MightFreeUnknownObjects = true;
6391           continue;
6392         }
6393 
6394         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6395         if (!AI) {
6396           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6397                             << "\n");
6398           DI.MightFreeUnknownObjects = true;
6399           continue;
6400         }
6401 
6402         DI.PotentialAllocationCalls.insert(ObjCB);
6403       }
6404     }
6405   };
6406 
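  // FreeCheck: the allocation is convertible if it is freed by exactly one
  // known deallocation call, that call frees only this allocation, and it is
  // executed whenever the allocation is reached (checked via the
  // must-be-executed-context explorer).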
6407   auto FreeCheck = [&](AllocationInfo &AI) {
6408     // If the stack is not accessible by other threads, the "must-free" logic
    // doesn't apply as the pointer could be shared and needs to be placed in
6410     // "shareable" memory.
6411     if (!StackIsAccessibleByOtherThreads) {
6412       auto &NoSyncAA =
6413           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6414       if (!NoSyncAA.isAssumedNoSync()) {
6415         LLVM_DEBUG(
6416             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6417                       "other threads and function is not nosync:\n");
6418         return false;
6419       }
6420     }
6421     if (!HasUpdatedFrees)
6422       UpdateFrees();
6423 
    // TODO: Allow multi-exit functions that have different free calls.
6425     if (AI.PotentialFreeCalls.size() != 1) {
6426       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6427                         << AI.PotentialFreeCalls.size() << "\n");
6428       return false;
6429     }
6430     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6431     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6432     if (!DI) {
6433       LLVM_DEBUG(
6434           dbgs() << "[H2S] unique free call was not known as deallocation call "
6435                  << *UniqueFree << "\n");
6436       return false;
6437     }
6438     if (DI->MightFreeUnknownObjects) {
6439       LLVM_DEBUG(
6440           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6441       return false;
6442     }
6443     if (DI->PotentialAllocationCalls.empty())
6444       return true;
6445     if (DI->PotentialAllocationCalls.size() > 1) {
6446       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6447                         << DI->PotentialAllocationCalls.size()
6448                         << " different allocations\n");
6449       return false;
6450     }
6451     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6452       LLVM_DEBUG(
6453           dbgs()
6454           << "[H2S] unique free call not known to free this allocation but "
6455           << **DI->PotentialAllocationCalls.begin() << "\n");
6456       return false;
6457     }
6458     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6459     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6460       LLVM_DEBUG(
6461           dbgs()
6462           << "[H2S] unique free call might not be executed with the allocation "
6463           << *UniqueFree << "\n");
6464       return false;
6465     }
6466     return true;
6467   };
6468 
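  // UsesCheck: conservatively scan all transitive uses of the allocation.
  // Loads and stores into the memory are fine, known free calls are recorded,
  // and other call site uses must be nocapture (and nofree unless this is a
  // __kmpc_alloc_shared allocation). GEPs, bitcasts, PHIs, and selects are
  // followed; any other use invalidates the conversion.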
6469   auto UsesCheck = [&](AllocationInfo &AI) {
6470     bool ValidUsesOnly = true;
6471 
6472     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6473       Instruction *UserI = cast<Instruction>(U.getUser());
6474       if (isa<LoadInst>(UserI))
6475         return true;
6476       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6477         if (SI->getValueOperand() == U.get()) {
6478           LLVM_DEBUG(dbgs()
6479                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6480           ValidUsesOnly = false;
6481         } else {
6482           // A store into the malloc'ed memory is fine.
6483         }
6484         return true;
6485       }
6486       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6487         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6488           return true;
6489         if (DeallocationInfos.count(CB)) {
6490           AI.PotentialFreeCalls.insert(CB);
6491           return true;
6492         }
6493 
6494         unsigned ArgNo = CB->getArgOperandNo(&U);
6495 
6496         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6497             *this, IRPosition::callsite_argument(*CB, ArgNo),
6498             DepClassTy::OPTIONAL);
6499 
6500         // If a call site argument use is nofree, we are fine.
6501         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6502             *this, IRPosition::callsite_argument(*CB, ArgNo),
6503             DepClassTy::OPTIONAL);
6504 
6505         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6506         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6507         if (MaybeCaptured ||
6508             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6509              MaybeFreed)) {
6510           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6511 
6512           // Emit a missed remark if this is missed OpenMP globalization.
6513           auto Remark = [&](OptimizationRemarkMissed ORM) {
6514             return ORM
6515                    << "Could not move globalized variable to the stack. "
6516                       "Variable is potentially captured in call. Mark "
6517                       "parameter as `__attribute__((noescape))` to override.";
6518           };
6519 
6520           if (ValidUsesOnly &&
6521               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6522             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6523 
6524           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6525           ValidUsesOnly = false;
6526         }
6527         return true;
6528       }
6529 
6530       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6531           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6532         Follow = true;
6533         return true;
6534       }
      // Unknown user for which we cannot track uses further (in a way that
6536       // makes sense).
6537       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6538       ValidUsesOnly = false;
6539       return true;
6540     };
6541     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6542       return false;
6543     return ValidUsesOnly;
6544   };
6545 
6546   // The actual update starts here. We look at all allocations and depending on
6547   // their status perform the appropriate check(s).
6548   for (auto &It : AllocationInfos) {
6549     AllocationInfo &AI = *It.second;
6550     if (AI.Status == AllocationInfo::INVALID)
6551       continue;
6552 
6553     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6554       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6555       if (!APAlign) {
6556         // Can't generate an alloca which respects the required alignment
6557         // on the allocation.
6558         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6559                           << "\n");
6560         AI.Status = AllocationInfo::INVALID;
6561         Changed = ChangeStatus::CHANGED;
6562         continue;
6563       } else {
6564         if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6565             !APAlign->isPowerOf2()) {
6566           LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6567                             << "\n");
6568           AI.Status = AllocationInfo::INVALID;
6569           Changed = ChangeStatus::CHANGED;
6570           continue;
6571         }
6572       }
6573     }
6574 
6575     if (MaxHeapToStackSize != -1) {
6576       Optional<APInt> Size = getSize(A, *this, AI);
6577       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6578         LLVM_DEBUG({
6579           if (!Size.hasValue())
6580             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6581           else
6582             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6583                    << MaxHeapToStackSize << "\n";
6584         });
6585 
6586         AI.Status = AllocationInfo::INVALID;
6587         Changed = ChangeStatus::CHANGED;
6588         continue;
6589       }
6590     }
6591 
6592     switch (AI.Status) {
6593     case AllocationInfo::STACK_DUE_TO_USE:
6594       if (UsesCheck(AI))
6595         continue;
6596       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6597       LLVM_FALLTHROUGH;
6598     case AllocationInfo::STACK_DUE_TO_FREE:
6599       if (FreeCheck(AI))
6600         continue;
6601       AI.Status = AllocationInfo::INVALID;
6602       Changed = ChangeStatus::CHANGED;
6603       continue;
6604     case AllocationInfo::INVALID:
6605       llvm_unreachable("Invalid allocations should never reach this point!");
6606     };
6607   }
6608 
6609   return Changed;
6610 }
6611 } // namespace
6612 
6613 /// ----------------------- Privatizable Pointers ------------------------------
6614 namespace {
6615 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6616   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6617       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6618 
6619   ChangeStatus indicatePessimisticFixpoint() override {
6620     AAPrivatizablePtr::indicatePessimisticFixpoint();
6621     PrivatizableType = nullptr;
6622     return ChangeStatus::CHANGED;
6623   }
6624 
  /// Identify the type we can choose for a private copy of the underlying
6626   /// argument. None means it is not clear yet, nullptr means there is none.
6627   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6628 
6629   /// Return a privatizable type that encloses both T0 and T1.
6630   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6631   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6632     if (!T0.hasValue())
6633       return T1;
6634     if (!T1.hasValue())
6635       return T0;
6636     if (T0 == T1)
6637       return T0;
6638     return nullptr;
6639   }
6640 
6641   Optional<Type *> getPrivatizableType() const override {
6642     return PrivatizableType;
6643   }
6644 
6645   const std::string getAsStr() const override {
6646     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6647   }
6648 
6649 protected:
6650   Optional<Type *> PrivatizableType;
6651 };
6652 
6653 // TODO: Do this for call site arguments (probably also other values) as well.
6654 
6655 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6656   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6657       : AAPrivatizablePtrImpl(IRP, A) {}
6658 
6659   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6660   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6661     // If this is a byval argument and we know all the call sites (so we can
6662     // rewrite them), there is no need to check them explicitly.
6663     bool UsedAssumedInformation = false;
6664     SmallVector<Attribute, 1> Attrs;
6665     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6666     if (!Attrs.empty() &&
6667         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6668                                true, UsedAssumedInformation))
6669       return Attrs[0].getValueAsType();
6670 
6671     Optional<Type *> Ty;
6672     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6673 
    // Make sure the associated call site argument has the same type at all
    // call sites and that it is an allocation we know is safe to privatize;
    // for now that means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow-up patch.
6680     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6681       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
6683       // associated (which can happen for callback calls).
6684       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6685         return false;
6686 
6687       // Check that all call sites agree on a type.
6688       auto &PrivCSArgAA =
6689           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6690       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6691 
6692       LLVM_DEBUG({
6693         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6694         if (CSTy.hasValue() && CSTy.getValue())
6695           CSTy.getValue()->print(dbgs());
6696         else if (CSTy.hasValue())
6697           dbgs() << "<nullptr>";
6698         else
6699           dbgs() << "<none>";
6700       });
6701 
6702       Ty = combineTypes(Ty, CSTy);
6703 
6704       LLVM_DEBUG({
6705         dbgs() << " : New Type: ";
6706         if (Ty.hasValue() && Ty.getValue())
6707           Ty.getValue()->print(dbgs());
6708         else if (Ty.hasValue())
6709           dbgs() << "<nullptr>";
6710         else
6711           dbgs() << "<none>";
6712         dbgs() << "\n";
6713       });
6714 
6715       return !Ty.hasValue() || Ty.getValue();
6716     };
6717 
6718     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6719                                 UsedAssumedInformation))
6720       return nullptr;
6721     return Ty;
6722   }
6723 
6724   /// See AbstractAttribute::updateImpl(...).
6725   ChangeStatus updateImpl(Attributor &A) override {
6726     PrivatizableType = identifyPrivatizableType(A);
6727     if (!PrivatizableType.hasValue())
6728       return ChangeStatus::UNCHANGED;
6729     if (!PrivatizableType.getValue())
6730       return indicatePessimisticFixpoint();
6731 
6732     // The dependence is optional so we don't give up once we give up on the
6733     // alignment.
6734     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6735                         DepClassTy::OPTIONAL);
6736 
6737     // Avoid arguments with padding for now.
6738     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6739         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6740                                                 A.getInfoCache().getDL())) {
6741       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6742       return indicatePessimisticFixpoint();
6743     }
6744 
6745     // Collect the types that will replace the privatizable type in the function
6746     // signature.
6747     SmallVector<Type *, 16> ReplacementTypes;
6748     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6749 
6750     // Verify callee and caller agree on how the promoted argument would be
6751     // passed.
6752     Function &Fn = *getIRPosition().getAnchorScope();
6753     const auto *TTI =
6754         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6755     if (!TTI) {
6756       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6757                         << Fn.getName() << "\n");
6758       return indicatePessimisticFixpoint();
6759     }
6760 
6761     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6762       CallBase *CB = ACS.getInstruction();
6763       return TTI->areTypesABICompatible(
6764           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6765     };
6766     bool UsedAssumedInformation = false;
6767     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6768                                 UsedAssumedInformation)) {
6769       LLVM_DEBUG(
6770           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6771                  << Fn.getName() << "\n");
6772       return indicatePessimisticFixpoint();
6773     }
6774 
6775     // Register a rewrite of the argument.
6776     Argument *Arg = getAssociatedArgument();
6777     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6778       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6779       return indicatePessimisticFixpoint();
6780     }
6781 
6782     unsigned ArgNo = Arg->getArgNo();
6783 
    // Helper to check whether, for the given call site, the associated
    // argument is passed to a callback where the privatization would be
    // different.
6786     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6787       SmallVector<const Use *, 4> CallbackUses;
6788       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6789       for (const Use *U : CallbackUses) {
6790         AbstractCallSite CBACS(U);
6791         assert(CBACS && CBACS.isCallbackCall());
6792         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6793           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6794 
6795           LLVM_DEBUG({
6796             dbgs()
6797                 << "[AAPrivatizablePtr] Argument " << *Arg
6798                 << "check if can be privatized in the context of its parent ("
6799                 << Arg->getParent()->getName()
6800                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6801                    "callback ("
6802                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6803                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6804                 << CBACS.getCallArgOperand(CBArg) << " vs "
6805                 << CB.getArgOperand(ArgNo) << "\n"
6806                 << "[AAPrivatizablePtr] " << CBArg << " : "
6807                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6808           });
6809 
6810           if (CBArgNo != int(ArgNo))
6811             continue;
6812           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6813               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6814           if (CBArgPrivAA.isValidState()) {
6815             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6816             if (!CBArgPrivTy.hasValue())
6817               continue;
6818             if (CBArgPrivTy.getValue() == PrivatizableType)
6819               continue;
6820           }
6821 
6822           LLVM_DEBUG({
6823             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6824                    << " cannot be privatized in the context of its parent ("
6825                    << Arg->getParent()->getName()
6826                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6827                       "callback ("
6828                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6829                    << ").\n[AAPrivatizablePtr] for which the argument "
6830                       "privatization is not compatible.\n";
6831           });
6832           return false;
6833         }
6834       }
6835       return true;
6836     };
6837 
    // Helper to check whether, for the given call site, the associated
    // argument is passed to a direct call where the privatization would be
    // different.
6840     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6841       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6842       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6843       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6844              "Expected a direct call operand for callback call operand");
6845 
6846       LLVM_DEBUG({
6847         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6848                << " check if be privatized in the context of its parent ("
6849                << Arg->getParent()->getName()
6850                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6851                   "direct call of ("
6852                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6853                << ").\n";
6854       });
6855 
6856       Function *DCCallee = DC->getCalledFunction();
6857       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6858         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6859             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6860             DepClassTy::REQUIRED);
6861         if (DCArgPrivAA.isValidState()) {
6862           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6863           if (!DCArgPrivTy.hasValue())
6864             return true;
6865           if (DCArgPrivTy.getValue() == PrivatizableType)
6866             return true;
6867         }
6868       }
6869 
6870       LLVM_DEBUG({
6871         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6872                << " cannot be privatized in the context of its parent ("
6873                << Arg->getParent()->getName()
6874                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6875                   "direct call of ("
6876                << ACS.getInstruction()->getCalledFunction()->getName()
6877                << ").\n[AAPrivatizablePtr] for which the argument "
6878                   "privatization is not compatible.\n";
6879       });
6880       return false;
6881     };
6882 
6883     // Helper to check if the associated argument is used at the given abstract
6884     // call site in a way that is incompatible with the privatization assumed
6885     // here.
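    // Note the deliberately crossed dispatch below: a direct call site is
    // checked against the callbacks it brokers, while a callback call site is
    // checked against the underlying direct call.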
6886     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6887       if (ACS.isDirectCall())
6888         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6889       if (ACS.isCallbackCall())
6890         return IsCompatiblePrivArgOfDirectCS(ACS);
6891       return false;
6892     };
6893 
6894     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6895                                 UsedAssumedInformation))
6896       return indicatePessimisticFixpoint();
6897 
6898     return ChangeStatus::UNCHANGED;
6899   }
6900 
  /// Given a type to privatize, \p PrivType, collect the constituent types
  /// (which are used) in \p ReplacementTypes.
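  /// E.g., a struct {i32, i64} contributes {i32, i64}, an array [4 x float]
  /// contributes four float entries, and any other type is passed through
  /// unchanged.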
6903   static void
6904   identifyReplacementTypes(Type *PrivType,
6905                            SmallVectorImpl<Type *> &ReplacementTypes) {
6906     // TODO: For now we expand the privatization type to the fullest which can
6907     //       lead to dead arguments that need to be removed later.
6908     assert(PrivType && "Expected privatizable type!");
6909 
    // Traverse the type, extract constituent types on the outermost level.
6911     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6912       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6913         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6914     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6915       ReplacementTypes.append(PrivArrayType->getNumElements(),
6916                               PrivArrayType->getElementType());
6917     } else {
6918       ReplacementTypes.push_back(PrivType);
6919     }
6920   }
6921 
6922   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6923   /// The values needed are taken from the arguments of \p F starting at
6924   /// position \p ArgNo.
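  /// For struct and array types one store per element is emitted through a
  /// pointer constructed at the element's offset; for any other type a single
  /// store of the argument into \p Base is created.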
6925   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6926                                    unsigned ArgNo, Instruction &IP) {
6927     assert(PrivType && "Expected privatizable type!");
6928 
6929     IRBuilder<NoFolder> IRB(&IP);
6930     const DataLayout &DL = F.getParent()->getDataLayout();
6931 
6932     // Traverse the type, build GEPs and stores.
6933     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6934       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6935       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6936         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6937         Value *Ptr =
6938             constructPointer(PointeeTy, PrivType, &Base,
6939                              PrivStructLayout->getElementOffset(u), IRB, DL);
6940         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6941       }
6942     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6943       Type *PointeeTy = PrivArrayType->getElementType();
6944       Type *PointeePtrTy = PointeeTy->getPointerTo();
6945       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6946       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6947         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6948                                       u * PointeeTySize, IRB, DL);
6949         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6950       }
6951     } else {
6952       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6953     }
6954   }
6955 
6956   /// Extract values from \p Base according to the type \p PrivType at the
6957   /// call position \p ACS. The values are appended to \p ReplacementValues.
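  /// This mirrors createInitialization but emits loads: one per struct or
  /// array element, or a single load of \p PrivType otherwise, each with the
  /// given \p Alignment.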
6958   void createReplacementValues(Align Alignment, Type *PrivType,
6959                                AbstractCallSite ACS, Value *Base,
6960                                SmallVectorImpl<Value *> &ReplacementValues) {
6961     assert(Base && "Expected base value!");
6962     assert(PrivType && "Expected privatizable type!");
6963     Instruction *IP = ACS.getInstruction();
6964 
6965     IRBuilder<NoFolder> IRB(IP);
6966     const DataLayout &DL = IP->getModule()->getDataLayout();
6967 
6968     Type *PrivPtrType = PrivType->getPointerTo();
6969     if (Base->getType() != PrivPtrType)
6970       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6971           Base, PrivPtrType, "", ACS.getInstruction());
6972 
6973     // Traverse the type, build GEPs and loads.
6974     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6975       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6976       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6977         Type *PointeeTy = PrivStructType->getElementType(u);
6978         Value *Ptr =
6979             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6980                              PrivStructLayout->getElementOffset(u), IRB, DL);
6981         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6982         L->setAlignment(Alignment);
6983         ReplacementValues.push_back(L);
6984       }
6985     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6986       Type *PointeeTy = PrivArrayType->getElementType();
6987       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6988       Type *PointeePtrTy = PointeeTy->getPointerTo();
6989       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6990         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6991                                       u * PointeeTySize, IRB, DL);
6992         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6993         L->setAlignment(Alignment);
6994         ReplacementValues.push_back(L);
6995       }
6996     } else {
6997       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6998       L->setAlignment(Alignment);
6999       ReplacementValues.push_back(L);
7000     }
7001   }
7002 
7003   /// See AbstractAttribute::manifest(...)
7004   ChangeStatus manifest(Attributor &A) override {
7005     if (!PrivatizableType.hasValue())
7006       return ChangeStatus::UNCHANGED;
7007     assert(PrivatizableType.getValue() && "Expected privatizable type!");
7008 
7009     // Collect all tail calls in the function as we cannot allow new allocas to
7010     // escape into tail recursion.
7011     // TODO: Be smarter about new allocas escaping into tail calls.
7012     SmallVector<CallInst *, 16> TailCalls;
7013     bool UsedAssumedInformation = false;
7014     if (!A.checkForAllInstructions(
7015             [&](Instruction &I) {
7016               CallInst &CI = cast<CallInst>(I);
7017               if (CI.isTailCall())
7018                 TailCalls.push_back(&CI);
7019               return true;
7020             },
7021             *this, {Instruction::Call}, UsedAssumedInformation))
7022       return ChangeStatus::UNCHANGED;
7023 
7024     Argument *Arg = getAssociatedArgument();
7025     // Query AAAlign attribute for alignment of associated argument to
7026     // determine the best alignment of loads.
7027     const auto &AlignAA =
7028         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7029 
7030     // Callback to repair the associated function. A new alloca is placed at the
7031     // beginning and initialized with the values passed through arguments. The
7032     // new alloca replaces the use of the old pointer argument.
7033     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7034         [=](const Attributor::ArgumentReplacementInfo &ARI,
7035             Function &ReplacementFn, Function::arg_iterator ArgIt) {
7036           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7037           Instruction *IP = &*EntryBB.getFirstInsertionPt();
7038           const DataLayout &DL = IP->getModule()->getDataLayout();
7039           unsigned AS = DL.getAllocaAddrSpace();
7040           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
7041                                            Arg->getName() + ".priv", IP);
7042           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
7043                                ArgIt->getArgNo(), *IP);
7044 
7045           if (AI->getType() != Arg->getType())
7046             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7047                 AI, Arg->getType(), "", IP);
7048           Arg->replaceAllUsesWith(AI);
7049 
7050           for (CallInst *CI : TailCalls)
7051             CI->setTailCall(false);
7052         };
7053 
7054     // Callback to repair a call site of the associated function. The elements
7055     // of the privatizable type are loaded prior to the call and passed to the
7056     // new function version.
7057     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7058         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
7059                       AbstractCallSite ACS,
7060                       SmallVectorImpl<Value *> &NewArgOperands) {
7061           // When no alignment is specified for the load instruction,
7062           // natural alignment is assumed.
7063           createReplacementValues(
7064               assumeAligned(AlignAA.getAssumedAlign()),
7065               PrivatizableType.getValue(), ACS,
7066               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7067               NewArgOperands);
7068         };
7069 
7070     // Collect the types that will replace the privatizable type in the function
7071     // signature.
7072     SmallVector<Type *, 16> ReplacementTypes;
7073     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
7074 
7075     // Register a rewrite of the argument.
7076     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7077                                            std::move(FnRepairCB),
7078                                            std::move(ACSRepairCB)))
7079       return ChangeStatus::CHANGED;
7080     return ChangeStatus::UNCHANGED;
7081   }
7082 
7083   /// See AbstractAttribute::trackStatistics()
7084   void trackStatistics() const override {
7085     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7086   }
7087 };
7088 
7089 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7090   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7091       : AAPrivatizablePtrImpl(IRP, A) {}
7092 
7093   /// See AbstractAttribute::initialize(...).
7094   virtual void initialize(Attributor &A) override {
7095     // TODO: We can privatize more than arguments.
7096     indicatePessimisticFixpoint();
7097   }
7098 
7099   ChangeStatus updateImpl(Attributor &A) override {
7100     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7101                      "updateImpl will not be called");
7102   }
7103 
7104   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7105   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
7106     Value *Obj = getUnderlyingObject(&getAssociatedValue());
7107     if (!Obj) {
7108       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7109       return nullptr;
7110     }
7111 
7112     if (auto *AI = dyn_cast<AllocaInst>(Obj))
7113       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7114         if (CI->isOne())
7115           return AI->getAllocatedType();
7116     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7117       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7118           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7119       if (PrivArgAA.isAssumedPrivatizablePtr())
7120         return PrivArgAA.getPrivatizableType();
7121     }
7122 
7123     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7124                          "alloca nor privatizable argument: "
7125                       << *Obj << "!\n");
7126     return nullptr;
7127   }
7128 
7129   /// See AbstractAttribute::trackStatistics()
7130   void trackStatistics() const override {
7131     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7132   }
7133 };
7134 
7135 struct AAPrivatizablePtrCallSiteArgument final
7136     : public AAPrivatizablePtrFloating {
7137   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7138       : AAPrivatizablePtrFloating(IRP, A) {}
7139 
7140   /// See AbstractAttribute::initialize(...).
7141   void initialize(Attributor &A) override {
7142     if (getIRPosition().hasAttr(Attribute::ByVal))
7143       indicateOptimisticFixpoint();
7144   }
7145 
7146   /// See AbstractAttribute::updateImpl(...).
7147   ChangeStatus updateImpl(Attributor &A) override {
7148     PrivatizableType = identifyPrivatizableType(A);
7149     if (!PrivatizableType.hasValue())
7150       return ChangeStatus::UNCHANGED;
7151     if (!PrivatizableType.getValue())
7152       return indicatePessimisticFixpoint();
7153 
7154     const IRPosition &IRP = getIRPosition();
7155     auto &NoCaptureAA =
7156         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
7157     if (!NoCaptureAA.isAssumedNoCapture()) {
7158       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7159       return indicatePessimisticFixpoint();
7160     }
7161 
7162     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
7163     if (!NoAliasAA.isAssumedNoAlias()) {
7164       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7165       return indicatePessimisticFixpoint();
7166     }
7167 
7168     bool IsKnown;
7169     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7170       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7171       return indicatePessimisticFixpoint();
7172     }
7173 
7174     return ChangeStatus::UNCHANGED;
7175   }
7176 
7177   /// See AbstractAttribute::trackStatistics()
7178   void trackStatistics() const override {
7179     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7180   }
7181 };
7182 
7183 struct AAPrivatizablePtrCallSiteReturned final
7184     : public AAPrivatizablePtrFloating {
7185   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7186       : AAPrivatizablePtrFloating(IRP, A) {}
7187 
7188   /// See AbstractAttribute::initialize(...).
7189   void initialize(Attributor &A) override {
7190     // TODO: We can privatize more than arguments.
7191     indicatePessimisticFixpoint();
7192   }
7193 
7194   /// See AbstractAttribute::trackStatistics()
7195   void trackStatistics() const override {
7196     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7197   }
7198 };
7199 
7200 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7201   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7202       : AAPrivatizablePtrFloating(IRP, A) {}
7203 
7204   /// See AbstractAttribute::initialize(...).
7205   void initialize(Attributor &A) override {
7206     // TODO: We can privatize more than arguments.
7207     indicatePessimisticFixpoint();
7208   }
7209 
7210   /// See AbstractAttribute::trackStatistics()
7211   void trackStatistics() const override {
7212     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7213   }
7214 };
7215 } // namespace
7216 
7217 /// -------------------- Memory Behavior Attributes ----------------------------
7218 /// Includes read-none, read-only, and write-only.
7219 /// ----------------------------------------------------------------------------
7220 namespace {
7221 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7222   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7223       : AAMemoryBehavior(IRP, A) {}
7224 
7225   /// See AbstractAttribute::initialize(...).
7226   void initialize(Attributor &A) override {
7227     intersectAssumedBits(BEST_STATE);
7228     getKnownStateFromValue(getIRPosition(), getState());
7229     AAMemoryBehavior::initialize(A);
7230   }
7231 
7232   /// Return the memory behavior information encoded in the IR for \p IRP.
7233   static void getKnownStateFromValue(const IRPosition &IRP,
7234                                      BitIntegerState &State,
7235                                      bool IgnoreSubsumingPositions = false) {
7236     SmallVector<Attribute, 2> Attrs;
7237     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7238     for (const Attribute &Attr : Attrs) {
7239       switch (Attr.getKindAsEnum()) {
7240       case Attribute::ReadNone:
7241         State.addKnownBits(NO_ACCESSES);
7242         break;
7243       case Attribute::ReadOnly:
7244         State.addKnownBits(NO_WRITES);
7245         break;
7246       case Attribute::WriteOnly:
7247         State.addKnownBits(NO_READS);
7248         break;
7249       default:
7250         llvm_unreachable("Unexpected attribute!");
7251       }
7252     }
7253 
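    // For instruction positions we can additionally use the IR-level
    // may-read/may-write information of the instruction itself.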
7254     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7255       if (!I->mayReadFromMemory())
7256         State.addKnownBits(NO_READS);
7257       if (!I->mayWriteToMemory())
7258         State.addKnownBits(NO_WRITES);
7259     }
7260   }
7261 
7262   /// See AbstractAttribute::getDeducedAttributes(...).
7263   void getDeducedAttributes(LLVMContext &Ctx,
7264                             SmallVectorImpl<Attribute> &Attrs) const override {
7265     assert(Attrs.size() == 0);
7266     if (isAssumedReadNone())
7267       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7268     else if (isAssumedReadOnly())
7269       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7270     else if (isAssumedWriteOnly())
7271       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7272     assert(Attrs.size() <= 1);
7273   }
7274 
7275   /// See AbstractAttribute::manifest(...).
7276   ChangeStatus manifest(Attributor &A) override {
7277     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7278       return ChangeStatus::UNCHANGED;
7279 
7280     const IRPosition &IRP = getIRPosition();
7281 
7282     // Check if we would improve the existing attributes first.
7283     SmallVector<Attribute, 4> DeducedAttrs;
7284     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7285     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7286           return IRP.hasAttr(Attr.getKindAsEnum(),
7287                              /* IgnoreSubsumingPositions */ true);
7288         }))
7289       return ChangeStatus::UNCHANGED;
7290 
7291     // Clear existing attributes.
7292     IRP.removeAttrs(AttrKinds);
7293 
7294     // Use the generic manifest method.
7295     return IRAttribute::manifest(A);
7296   }
7297 
7298   /// See AbstractState::getAsStr().
7299   const std::string getAsStr() const override {
7300     if (isAssumedReadNone())
7301       return "readnone";
7302     if (isAssumedReadOnly())
7303       return "readonly";
7304     if (isAssumedWriteOnly())
7305       return "writeonly";
7306     return "may-read/write";
7307   }
7308 
7309   /// The set of IR attributes AAMemoryBehavior deals with.
7310   static const Attribute::AttrKind AttrKinds[3];
7311 };
7312 
7313 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7314     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7315 
7316 /// Memory behavior attribute for a floating value.
7317 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7318   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7319       : AAMemoryBehaviorImpl(IRP, A) {}
7320 
7321   /// See AbstractAttribute::updateImpl(...).
7322   ChangeStatus updateImpl(Attributor &A) override;
7323 
7324   /// See AbstractAttribute::trackStatistics()
7325   void trackStatistics() const override {
7326     if (isAssumedReadNone())
7327       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7328     else if (isAssumedReadOnly())
7329       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7330     else if (isAssumedWriteOnly())
7331       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7332   }
7333 
7334 private:
7335   /// Return true if users of \p UserI might access the underlying
7336   /// variable/location described by \p U and should therefore be analyzed.
7337   bool followUsersOfUseIn(Attributor &A, const Use &U,
7338                           const Instruction *UserI);
7339 
7340   /// Update the state according to the effect of use \p U in \p UserI.
7341   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7342 };
7343 
7344 /// Memory behavior attribute for function argument.
7345 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7346   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7347       : AAMemoryBehaviorFloating(IRP, A) {}
7348 
7349   /// See AbstractAttribute::initialize(...).
7350   void initialize(Attributor &A) override {
7351     intersectAssumedBits(BEST_STATE);
7352     const IRPosition &IRP = getIRPosition();
7353     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7354     // can query it when we use has/getAttr. That would allow us to reuse the
7355     // initialize of the base class here.
7356     bool HasByVal =
7357         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7358     getKnownStateFromValue(IRP, getState(),
7359                            /* IgnoreSubsumingPositions */ HasByVal);
7360 
7361     // Initialize the use vector with all direct uses of the associated value.
7362     Argument *Arg = getAssociatedArgument();
7363     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7364       indicatePessimisticFixpoint();
7365   }
7366 
7367   ChangeStatus manifest(Attributor &A) override {
7368     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7369     if (!getAssociatedValue().getType()->isPointerTy())
7370       return ChangeStatus::UNCHANGED;
7371 
7372     // TODO: From readattrs.ll: "inalloca parameters are always
7373     //                           considered written"
7374     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7375       removeKnownBits(NO_WRITES);
7376       removeAssumedBits(NO_WRITES);
7377     }
7378     return AAMemoryBehaviorFloating::manifest(A);
7379   }
7380 
7381   /// See AbstractAttribute::trackStatistics()
7382   void trackStatistics() const override {
7383     if (isAssumedReadNone())
7384       STATS_DECLTRACK_ARG_ATTR(readnone)
7385     else if (isAssumedReadOnly())
7386       STATS_DECLTRACK_ARG_ATTR(readonly)
7387     else if (isAssumedWriteOnly())
7388       STATS_DECLTRACK_ARG_ATTR(writeonly)
7389   }
7390 };
7391 
7392 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7393   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7394       : AAMemoryBehaviorArgument(IRP, A) {}
7395 
7396   /// See AbstractAttribute::initialize(...).
7397   void initialize(Attributor &A) override {
    // If we don't have an associated argument this is either a variadic call
    // or an indirect call; either way, nothing to do here.
7400     Argument *Arg = getAssociatedArgument();
7401     if (!Arg) {
7402       indicatePessimisticFixpoint();
7403       return;
7404     }
7405     if (Arg->hasByValAttr()) {
7406       addKnownBits(NO_WRITES);
7407       removeKnownBits(NO_READS);
7408       removeAssumedBits(NO_READS);
7409     }
7410     AAMemoryBehaviorArgument::initialize(A);
7411     if (getAssociatedFunction()->isDeclaration())
7412       indicatePessimisticFixpoint();
7413   }
7414 
7415   /// See AbstractAttribute::updateImpl(...).
7416   ChangeStatus updateImpl(Attributor &A) override {
7417     // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
7419     //       sense to specialize attributes for call sites arguments instead of
7420     //       redirecting requests to the callee argument.
7421     Argument *Arg = getAssociatedArgument();
7422     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7423     auto &ArgAA =
7424         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7425     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7426   }
7427 
7428   /// See AbstractAttribute::trackStatistics()
7429   void trackStatistics() const override {
7430     if (isAssumedReadNone())
7431       STATS_DECLTRACK_CSARG_ATTR(readnone)
7432     else if (isAssumedReadOnly())
7433       STATS_DECLTRACK_CSARG_ATTR(readonly)
7434     else if (isAssumedWriteOnly())
7435       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7436   }
7437 };
7438 
7439 /// Memory behavior attribute for a call site return position.
7440 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7441   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7442       : AAMemoryBehaviorFloating(IRP, A) {}
7443 
7444   /// See AbstractAttribute::initialize(...).
7445   void initialize(Attributor &A) override {
7446     AAMemoryBehaviorImpl::initialize(A);
7447     Function *F = getAssociatedFunction();
7448     if (!F || F->isDeclaration())
7449       indicatePessimisticFixpoint();
7450   }
7451 
7452   /// See AbstractAttribute::manifest(...).
7453   ChangeStatus manifest(Attributor &A) override {
7454     // We do not annotate returned values.
7455     return ChangeStatus::UNCHANGED;
7456   }
7457 
7458   /// See AbstractAttribute::trackStatistics()
7459   void trackStatistics() const override {}
7460 };
7461 
7462 /// An AA to represent the memory behavior function attributes.
7463 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7464   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7465       : AAMemoryBehaviorImpl(IRP, A) {}
7466 
7467   /// See AbstractAttribute::updateImpl(Attributor &A).
7468   virtual ChangeStatus updateImpl(Attributor &A) override;
7469 
7470   /// See AbstractAttribute::manifest(...).
7471   ChangeStatus manifest(Attributor &A) override {
7472     Function &F = cast<Function>(getAnchorValue());
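    // readnone subsumes the location attributes; drop them so they do not
    // contradict the manifested readnone.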
7473     if (isAssumedReadNone()) {
7474       F.removeFnAttr(Attribute::ArgMemOnly);
7475       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7476       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7477     }
7478     return AAMemoryBehaviorImpl::manifest(A);
7479   }
7480 
7481   /// See AbstractAttribute::trackStatistics()
7482   void trackStatistics() const override {
7483     if (isAssumedReadNone())
7484       STATS_DECLTRACK_FN_ATTR(readnone)
7485     else if (isAssumedReadOnly())
7486       STATS_DECLTRACK_FN_ATTR(readonly)
7487     else if (isAssumedWriteOnly())
7488       STATS_DECLTRACK_FN_ATTR(writeonly)
7489   }
7490 };
7491 
7492 /// AAMemoryBehavior attribute for call sites.
7493 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7494   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7495       : AAMemoryBehaviorImpl(IRP, A) {}
7496 
7497   /// See AbstractAttribute::initialize(...).
7498   void initialize(Attributor &A) override {
7499     AAMemoryBehaviorImpl::initialize(A);
7500     Function *F = getAssociatedFunction();
7501     if (!F || F->isDeclaration())
7502       indicatePessimisticFixpoint();
7503   }
7504 
7505   /// See AbstractAttribute::updateImpl(...).
7506   ChangeStatus updateImpl(Attributor &A) override {
7507     // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
7509     //       sense to specialize attributes for call sites arguments instead of
7510     //       redirecting requests to the callee argument.
7511     Function *F = getAssociatedFunction();
7512     const IRPosition &FnPos = IRPosition::function(*F);
7513     auto &FnAA =
7514         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7515     return clampStateAndIndicateChange(getState(), FnAA.getState());
7516   }
7517 
7518   /// See AbstractAttribute::trackStatistics()
7519   void trackStatistics() const override {
7520     if (isAssumedReadNone())
7521       STATS_DECLTRACK_CS_ATTR(readnone)
7522     else if (isAssumedReadOnly())
7523       STATS_DECLTRACK_CS_ATTR(readonly)
7524     else if (isAssumedWriteOnly())
7525       STATS_DECLTRACK_CS_ATTR(writeonly)
7526   }
7527 };
7528 
7529 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7530 
7531   // The current assumed state used to determine a change.
7532   auto AssumedState = getAssumed();
7533 
7534   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
7536     // the local state. No further analysis is required as the other memory
7537     // state is as optimistic as it gets.
7538     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7539       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7540           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7541       intersectAssumedBits(MemBehaviorAA.getAssumed());
7542       return !isAtFixpoint();
7543     }
7544 
7545     // Remove access kind modifiers if necessary.
7546     if (I.mayReadFromMemory())
7547       removeAssumedBits(NO_READS);
7548     if (I.mayWriteToMemory())
7549       removeAssumedBits(NO_WRITES);
7550     return !isAtFixpoint();
7551   };
7552 
7553   bool UsedAssumedInformation = false;
7554   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7555                                           UsedAssumedInformation))
7556     return indicatePessimisticFixpoint();
7557 
7558   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7559                                         : ChangeStatus::UNCHANGED;
7560 }
7561 
7562 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7563 
7564   const IRPosition &IRP = getIRPosition();
7565   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7566   AAMemoryBehavior::StateType &S = getState();
7567 
7568   // First, check the function scope. We take the known information and we avoid
7569   // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
7571   Argument *Arg = IRP.getAssociatedArgument();
7572   AAMemoryBehavior::base_t FnMemAssumedState =
7573       AAMemoryBehavior::StateType::getWorstState();
7574   if (!Arg || !Arg->hasByValAttr()) {
7575     const auto &FnMemAA =
7576         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7577     FnMemAssumedState = FnMemAA.getAssumed();
7578     S.addKnownBits(FnMemAA.getKnown());
7579     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7580       return ChangeStatus::UNCHANGED;
7581   }
7582 
7583   // The current assumed state used to determine a change.
7584   auto AssumedState = S.getAssumed();
7585 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
7590   const auto &ArgNoCaptureAA =
7591       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7592   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7593     S.intersectAssumedBits(FnMemAssumedState);
7594     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7595                                           : ChangeStatus::UNCHANGED;
7596   }
7597 
7598   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7599   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7600     Instruction *UserI = cast<Instruction>(U.getUser());
7601     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7602                       << " \n");
7603 
7604     // Droppable users, e.g., llvm::assume does not actually perform any action.
7605     if (UserI->isDroppable())
7606       return true;
7607 
7608     // Check if the users of UserI should also be visited.
7609     Follow = followUsersOfUseIn(A, U, UserI);
7610 
7611     // If UserI might touch memory we analyze the use in detail.
7612     if (UserI->mayReadOrWriteMemory())
7613       analyzeUseIn(A, U, UserI);
7614 
7615     return !isAtFixpoint();
7616   };
7617 
7618   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7619     return indicatePessimisticFixpoint();
7620 
7621   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7622                                         : ChangeStatus::UNCHANGED;
7623 }
7624 
7625 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7626                                                   const Instruction *UserI) {
7627   // The loaded value is unrelated to the pointer argument, no need to
7628   // follow the users of the load.
7629   if (isa<LoadInst>(UserI))
7630     return false;
7631 
  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
7634   const auto *CB = dyn_cast<CallBase>(UserI);
7635   if (!CB || !CB->isArgOperand(&U))
7636     return true;
7637 
7638   // If the use is a call argument known not to be captured, the users of
7639   // the call do not need to be visited because they have to be unrelated to
7640   // the input. Note that this check is not trivial even though we disallow
7641   // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
7644   if (U.get()->getType()->isPointerTy()) {
7645     unsigned ArgNo = CB->getArgOperandNo(&U);
7646     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7647         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7648     return !ArgNoCaptureAA.isAssumedNoCapture();
7649   }
7650 
7651   return true;
7652 }
7653 
7654 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7655                                             const Instruction *UserI) {
7656   assert(UserI->mayReadOrWriteMemory());
7657 
7658   switch (UserI->getOpcode()) {
7659   default:
7660     // TODO: Handle all atomics and other side-effect operations we know of.
7661     break;
7662   case Instruction::Load:
7663     // Loads cause the NO_READS property to disappear.
7664     removeAssumedBits(NO_READS);
7665     return;
7666 
7667   case Instruction::Store:
7668     // Stores cause the NO_WRITES property to disappear if the use is the
    // pointer operand. Note that while capturing was taken care of elsewhere,
    // we still need to deal with stores of the value, which are not looked
    // through.
7671     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7672       removeAssumedBits(NO_WRITES);
7673     else
7674       indicatePessimisticFixpoint();
7675     return;
7676 
7677   case Instruction::Call:
7678   case Instruction::CallBr:
7679   case Instruction::Invoke: {
7680     // For call sites we look at the argument memory behavior attribute (this
7681     // could be recursive!) in order to restrict our own state.
7682     const auto *CB = cast<CallBase>(UserI);
7683 
7684     // Give up on operand bundles.
7685     if (CB->isBundleOperand(&U)) {
7686       indicatePessimisticFixpoint();
7687       return;
7688     }
7689 
    // Calling a function does read the function pointer, and may even write it
    // if the function is self-modifying.
7692     if (CB->isCallee(&U)) {
7693       removeAssumedBits(NO_READS);
7694       break;
7695     }
7696 
7697     // Adjust the possible access behavior based on the information on the
7698     // argument.
7699     IRPosition Pos;
7700     if (U.get()->getType()->isPointerTy())
7701       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7702     else
7703       Pos = IRPosition::callsite_function(*CB);
7704     const auto &MemBehaviorAA =
7705         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7706     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7707     // and at least "known".
7708     intersectAssumedBits(MemBehaviorAA.getAssumed());
7709     return;
7710   }
7711   };
7712 
7713   // Generally, look at the "may-properties" and adjust the assumed state if we
7714   // did not trigger special handling before.
7715   if (UserI->mayReadFromMemory())
7716     removeAssumedBits(NO_READS);
7717   if (UserI->mayWriteToMemory())
7718     removeAssumedBits(NO_WRITES);
7719 }
7720 } // namespace
7721 
7722 /// -------------------- Memory Locations Attributes ---------------------------
7723 /// Includes read-none, argmemonly, inaccessiblememonly,
7724 /// inaccessiblememorargmemonly
7725 /// ----------------------------------------------------------------------------
7726 
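// Note: the string lists the locations that may still be accessed, e.g., a
// kind that rules out everything except argument and inaccessible memory is
// printed as "memory:argument,inaccessible".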
7727 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7728     AAMemoryLocation::MemoryLocationsKind MLK) {
7729   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7730     return "all memory";
7731   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7732     return "no memory";
7733   std::string S = "memory:";
7734   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7735     S += "stack,";
7736   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7737     S += "constant,";
7738   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7739     S += "internal global,";
7740   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7741     S += "external global,";
7742   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7743     S += "argument,";
7744   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7745     S += "inaccessible,";
7746   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7747     S += "malloced,";
7748   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7749     S += "unknown,";
7750   S.pop_back();
7751   return S;
7752 }
7753 
7754 namespace {
7755 struct AAMemoryLocationImpl : public AAMemoryLocation {
7756 
7757   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7758       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7759     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7760       AccessKind2Accesses[u] = nullptr;
7761   }
7762 
7763   ~AAMemoryLocationImpl() {
7764     // The AccessSets are allocated via a BumpPtrAllocator, we call
7765     // the destructor manually.
7766     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7767       if (AccessKind2Accesses[u])
7768         AccessKind2Accesses[u]->~AccessSet();
7769   }
7770 
7771   /// See AbstractAttribute::initialize(...).
7772   void initialize(Attributor &A) override {
7773     intersectAssumedBits(BEST_STATE);
7774     getKnownStateFromValue(A, getIRPosition(), getState());
7775     AAMemoryLocation::initialize(A);
7776   }
7777 
7778   /// Return the memory behavior information encoded in the IR for \p IRP.
7779   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7780                                      BitIntegerState &State,
7781                                      bool IgnoreSubsumingPositions = false) {
7782     // For internal functions we ignore `argmemonly` and
    // `inaccessiblememorargmemonly` as we might break them via interprocedural
7784     // constant propagation. It is unclear if this is the best way but it is
7785     // unlikely this will cause real performance problems. If we are deriving
7786     // attributes for the anchor function we even remove the attribute in
7787     // addition to ignoring it.
7788     bool UseArgMemOnly = true;
7789     Function *AnchorFn = IRP.getAnchorScope();
7790     if (AnchorFn && A.isRunOn(*AnchorFn))
7791       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7792 
7793     SmallVector<Attribute, 2> Attrs;
7794     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7795     for (const Attribute &Attr : Attrs) {
7796       switch (Attr.getKindAsEnum()) {
7797       case Attribute::ReadNone:
7798         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7799         break;
7800       case Attribute::InaccessibleMemOnly:
7801         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7802         break;
7803       case Attribute::ArgMemOnly:
7804         if (UseArgMemOnly)
7805           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7806         else
7807           IRP.removeAttrs({Attribute::ArgMemOnly});
7808         break;
7809       case Attribute::InaccessibleMemOrArgMemOnly:
7810         if (UseArgMemOnly)
7811           State.addKnownBits(inverseLocation(
7812               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7813         else
7814           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7815         break;
7816       default:
7817         llvm_unreachable("Unexpected attribute!");
7818       }
7819     }
7820   }
7821 
7822   /// See AbstractAttribute::getDeducedAttributes(...).
7823   void getDeducedAttributes(LLVMContext &Ctx,
7824                             SmallVectorImpl<Attribute> &Attrs) const override {
7825     assert(Attrs.size() == 0);
7826     if (isAssumedReadNone()) {
7827       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7828     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7829       if (isAssumedInaccessibleMemOnly())
7830         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7831       else if (isAssumedArgMemOnly())
7832         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7833       else if (isAssumedInaccessibleOrArgMemOnly())
7834         Attrs.push_back(
7835             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7836     }
7837     assert(Attrs.size() <= 1);
7838   }
7839 
7840   /// See AbstractAttribute::manifest(...).
7841   ChangeStatus manifest(Attributor &A) override {
7842     const IRPosition &IRP = getIRPosition();
7843 
7844     // Check if we would improve the existing attributes first.
7845     SmallVector<Attribute, 4> DeducedAttrs;
7846     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7847     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7848           return IRP.hasAttr(Attr.getKindAsEnum(),
7849                              /* IgnoreSubsumingPositions */ true);
7850         }))
7851       return ChangeStatus::UNCHANGED;
7852 
7853     // Clear existing attributes.
7854     IRP.removeAttrs(AttrKinds);
7855     if (isAssumedReadNone())
7856       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7857 
7858     // Use the generic manifest method.
7859     return IRAttribute::manifest(A);
7860   }
7861 
7862   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7863   bool checkForAllAccessesToMemoryKind(
7864       function_ref<bool(const Instruction *, const Value *, AccessKind,
7865                         MemoryLocationsKind)>
7866           Pred,
7867       MemoryLocationsKind RequestedMLK) const override {
7868     if (!isValidState())
7869       return false;
7870 
7871     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7872     if (AssumedMLK == NO_LOCATIONS)
7873       return true;
7874 
7875     unsigned Idx = 0;
7876     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7877          CurMLK *= 2, ++Idx) {
7878       if (CurMLK & RequestedMLK)
7879         continue;
7880 
7881       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7882         for (const AccessInfo &AI : *Accesses)
7883           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7884             return false;
7885     }
7886 
7887     return true;
7888   }
7889 
7890   ChangeStatus indicatePessimisticFixpoint() override {
7891     // If we give up and indicate a pessimistic fixpoint this instruction will
7892     // become an access for all potential access kinds:
7893     // TODO: Add pointers for argmemonly and globals to improve the results of
7894     //       checkForAllAccessesToMemoryKind.
7895     bool Changed = false;
7896     MemoryLocationsKind KnownMLK = getKnown();
7897     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7898     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7899       if (!(CurMLK & KnownMLK))
7900         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7901                                   getAccessKindFromInst(I));
7902     return AAMemoryLocation::indicatePessimisticFixpoint();
7903   }
7904 
7905 protected:
7906   /// Helper struct to tie together an instruction that has a read or write
7907   /// effect with the pointer it accesses (if any).
7908   struct AccessInfo {
7909 
7910     /// The instruction that caused the access.
7911     const Instruction *I;
7912 
7913     /// The base pointer that is accessed, or null if unknown.
7914     const Value *Ptr;
7915 
7916     /// The kind of access (read/write/read+write).
7917     AccessKind Kind;
7918 
7919     bool operator==(const AccessInfo &RHS) const {
7920       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7921     }
7922     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7923       if (LHS.I != RHS.I)
7924         return LHS.I < RHS.I;
7925       if (LHS.Ptr != RHS.Ptr)
7926         return LHS.Ptr < RHS.Ptr;
7927       if (LHS.Kind != RHS.Kind)
7928         return LHS.Kind < RHS.Kind;
7929       return false;
7930     }
7931   };
7932 
7933   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7934   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7935   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7936   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7937 
7938   /// Categorize the pointer arguments of CB that might access memory in
7939   /// AccessedLoc and update the state and access map accordingly.
7940   void
7941   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7942                                      AAMemoryLocation::StateType &AccessedLocs,
7943                                      bool &Changed);
7944 
7945   /// Return the kind(s) of location that may be accessed by \p V.
7946   AAMemoryLocation::MemoryLocationsKind
7947   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7948 
7949   /// Return the access kind as determined by \p I.
7950   AccessKind getAccessKindFromInst(const Instruction *I) {
7951     AccessKind AK = READ_WRITE;
7952     if (I) {
7953       AK = I->mayReadFromMemory() ? READ : NONE;
7954       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7955     }
7956     return AK;
7957   }
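
  // Illustrative sketch (not part of the pass; the instruction names below are
  // hypothetical): a plain load yields READ, a plain store yields WRITE, and an
  // instruction that both reads and writes memory, e.g., a call to memcpy,
  // yields READ_WRITE. A null instruction is treated conservatively:
  //
  //   getAccessKindFromInst(LoadI);   // READ
  //   getAccessKindFromInst(StoreI);  // WRITE
  //   getAccessKindFromInst(MemCpyI); // READ_WRITE
  //   getAccessKindFromInst(nullptr); // READ_WRITE (conservative default)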
7958 
7959   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7960   /// an access of kind \p AK to a \p MLK memory location with the access
7961   /// pointer \p Ptr.
7962   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7963                                  MemoryLocationsKind MLK, const Instruction *I,
7964                                  const Value *Ptr, bool &Changed,
7965                                  AccessKind AK = READ_WRITE) {
7966 
7967     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7968     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7969     if (!Accesses)
7970       Accesses = new (Allocator) AccessSet();
7971     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7972     State.removeAssumedBits(MLK);
7973   }
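
  // Minimal usage sketch (variable names are assumed, for illustration only):
  // recording a store through an alloca-derived pointer as a local-memory
  // access inserts an AccessInfo into the NO_LOCAL_MEM bucket and clears the
  // corresponding assumed bit in the state:
  //
  //   updateStateAndAccessesMap(getState(), NO_LOCAL_MEM, &StoreI, AllocaPtr,
  //                             Changed, /* AK */ WRITE);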
7974 
7975   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7976   /// arguments, and update the state and access map accordingly.
7977   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7978                           AAMemoryLocation::StateType &State, bool &Changed);
7979 
7980   /// Used to allocate access sets.
7981   BumpPtrAllocator &Allocator;
7982 
7983   /// The set of IR attributes AAMemoryLocation deals with.
7984   static const Attribute::AttrKind AttrKinds[4];
7985 };
7986 
7987 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7988     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7989     Attribute::InaccessibleMemOrArgMemOnly};
7990 
7991 void AAMemoryLocationImpl::categorizePtrValue(
7992     Attributor &A, const Instruction &I, const Value &Ptr,
7993     AAMemoryLocation::StateType &State, bool &Changed) {
7994   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7995                     << Ptr << " ["
7996                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7997 
7998   SmallVector<Value *, 8> Objects;
7999   bool UsedAssumedInformation = false;
8000   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
8001                                        UsedAssumedInformation,
8002                                        AA::Intraprocedural)) {
8003     LLVM_DEBUG(
8004         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8005     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8006                               getAccessKindFromInst(&I));
8007     return;
8008   }
8009 
8010   for (Value *Obj : Objects) {
8011     // TODO: recognize the TBAA used for constant accesses.
8012     MemoryLocationsKind MLK = NO_LOCATIONS;
8013     if (isa<UndefValue>(Obj))
8014       continue;
8015     if (isa<Argument>(Obj)) {
8016       // TODO: For now we do not treat byval arguments as local copies performed
8017       // on the call edge, though, we should. To make that happen we need to
8018       // teach various passes, e.g., DSE, about the copy effect of a byval. That
8019       // would also allow us to mark functions only accessing byval arguments as
8020       // readnone again, arguably their accesses have no effect outside of the
8021       // function, like accesses to allocas.
8022       MLK = NO_ARGUMENT_MEM;
8023     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
8024       // Reading constant memory is not treated as a read "effect" by the
8025       // function attr pass, so we won't either. Constants defined by TBAA are
8026       // similar. (We know we do not write it because it is constant.)
8027       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8028         if (GVar->isConstant())
8029           continue;
8030 
8031       if (GV->hasLocalLinkage())
8032         MLK = NO_GLOBAL_INTERNAL_MEM;
8033       else
8034         MLK = NO_GLOBAL_EXTERNAL_MEM;
8035     } else if (isa<ConstantPointerNull>(Obj) &&
8036                !NullPointerIsDefined(getAssociatedFunction(),
8037                                      Ptr.getType()->getPointerAddressSpace())) {
8038       continue;
8039     } else if (isa<AllocaInst>(Obj)) {
8040       MLK = NO_LOCAL_MEM;
8041     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
8042       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
8043           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
8044       if (NoAliasAA.isAssumedNoAlias())
8045         MLK = NO_MALLOCED_MEM;
8046       else
8047         MLK = NO_UNKOWN_MEM;
8048     } else {
8049       MLK = NO_UNKOWN_MEM;
8050     }
8051 
8052     assert(MLK != NO_LOCATIONS && "No location specified!");
8053     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8054                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
8055                       << "\n");
8056     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
8057                               getAccessKindFromInst(&I));
8058   }
8059 
8060   LLVM_DEBUG(
8061       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8062              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8063 }
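
// Illustrative example of the categorization above, on hypothetical IR:
//
//   @G = internal global i32 0
//   define void @f(i32* %arg) {
//     %a = alloca i32
//     store i32 1, i32* %a    ; base is an alloca   -> NO_LOCAL_MEM
//     store i32 2, i32* @G    ; internal global     -> NO_GLOBAL_INTERNAL_MEM
//     store i32 3, i32* %arg  ; argument pointer    -> NO_ARGUMENT_MEM
//     ret void
//   }
//
// Each store's underlying object is mapped to a single location kind and the
// matching assumed bit is removed from the state.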
8064 
8065 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8066     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8067     bool &Changed) {
8068   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8069 
8070     // Skip non-pointer arguments.
8071     const Value *ArgOp = CB.getArgOperand(ArgNo);
8072     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8073       continue;
8074 
8075     // Skip readnone arguments.
8076     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8077     const auto &ArgOpMemLocationAA =
8078         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8079 
8080     if (ArgOpMemLocationAA.isAssumedReadNone())
8081       continue;
8082 
8083     // Categorize potentially accessed pointer arguments as if there was an
8084     // access instruction with them as pointer.
8085     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8086   }
8087 }
8088 
8089 AAMemoryLocation::MemoryLocationsKind
8090 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8091                                                   bool &Changed) {
8092   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8093                     << I << "\n");
8094 
8095   AAMemoryLocation::StateType AccessedLocs;
8096   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8097 
8098   if (auto *CB = dyn_cast<CallBase>(&I)) {
8099 
8100     // First check if we assume any accessed memory is visible.
8101     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8102         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8103     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8104                       << " [" << CBMemLocationAA << "]\n");
8105 
8106     if (CBMemLocationAA.isAssumedReadNone())
8107       return NO_LOCATIONS;
8108 
8109     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
8110       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8111                                 Changed, getAccessKindFromInst(&I));
8112       return AccessedLocs.getAssumed();
8113     }
8114 
8115     uint32_t CBAssumedNotAccessedLocs =
8116         CBMemLocationAA.getAssumedNotAccessedLocation();
8117 
8118     // Set the argmemonly and global bits as we handle them separately below.
8119     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8120         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8121 
8122     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8123       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8124         continue;
8125       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8126                                 getAccessKindFromInst(&I));
8127     }
8128 
8129     // Now handle global memory if it might be accessed. This is slightly tricky
8130     // as NO_GLOBAL_MEM has multiple bits set.
8131     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8132     if (HasGlobalAccesses) {
8133       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8134                             AccessKind Kind, MemoryLocationsKind MLK) {
8135         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8136                                   getAccessKindFromInst(&I));
8137         return true;
8138       };
8139       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8140               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8141         return AccessedLocs.getWorstState();
8142     }
8143 
8144     LLVM_DEBUG(
8145         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8146                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8147 
8148     // Now handle argument memory if it might be accessed.
8149     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8150     if (HasArgAccesses)
8151       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8152 
8153     LLVM_DEBUG(
8154         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8155                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8156 
8157     return AccessedLocs.getAssumed();
8158   }
8159 
8160   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8161     LLVM_DEBUG(
8162         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8163                << I << " [" << *Ptr << "]\n");
8164     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8165     return AccessedLocs.getAssumed();
8166   }
8167 
8168   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8169                     << I << "\n");
8170   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8171                             getAccessKindFromInst(&I));
8172   return AccessedLocs.getAssumed();
8173 }
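
// Usage sketch (illustrative; mirrors the function-level update below): every
// read/write instruction is categorized and the locations it might touch are
// removed from the assumed "not accessed" set:
//
//   bool Changed = false;
//   MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
//   removeAssumedBits(inverseLocation(MLK, false, false));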
8174 
8175 /// An AA to represent the memory behavior function attributes.
8176 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8177   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8178       : AAMemoryLocationImpl(IRP, A) {}
8179 
8180   /// See AbstractAttribute::updateImpl(Attributor &A).
8181   ChangeStatus updateImpl(Attributor &A) override {
8182 
8183     const auto &MemBehaviorAA =
8184         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8185     if (MemBehaviorAA.isAssumedReadNone()) {
8186       if (MemBehaviorAA.isKnownReadNone())
8187         return indicateOptimisticFixpoint();
8188       assert(isAssumedReadNone() &&
8189              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8190       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8191       return ChangeStatus::UNCHANGED;
8192     }
8193 
8194     // The current assumed state used to determine a change.
8195     auto AssumedState = getAssumed();
8196     bool Changed = false;
8197 
8198     auto CheckRWInst = [&](Instruction &I) {
8199       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8200       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8201                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8202       removeAssumedBits(inverseLocation(MLK, false, false));
8203       // Stop once only the valid bit is set in the *not assumed location*, that
8204       // is, once we don't actually exclude any memory locations in the state.
8205       return getAssumedNotAccessedLocation() != VALID_STATE;
8206     };
8207 
8208     bool UsedAssumedInformation = false;
8209     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8210                                             UsedAssumedInformation))
8211       return indicatePessimisticFixpoint();
8212 
8213     Changed |= AssumedState != getAssumed();
8214     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8215   }
8216 
8217   /// See AbstractAttribute::trackStatistics()
8218   void trackStatistics() const override {
8219     if (isAssumedReadNone())
8220       STATS_DECLTRACK_FN_ATTR(readnone)
8221     else if (isAssumedArgMemOnly())
8222       STATS_DECLTRACK_FN_ATTR(argmemonly)
8223     else if (isAssumedInaccessibleMemOnly())
8224       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8225     else if (isAssumedInaccessibleOrArgMemOnly())
8226       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8227   }
8228 };
8229 
8230 /// AAMemoryLocation attribute for call sites.
8231 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8232   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8233       : AAMemoryLocationImpl(IRP, A) {}
8234 
8235   /// See AbstractAttribute::initialize(...).
8236   void initialize(Attributor &A) override {
8237     AAMemoryLocationImpl::initialize(A);
8238     Function *F = getAssociatedFunction();
8239     if (!F || F->isDeclaration())
8240       indicatePessimisticFixpoint();
8241   }
8242 
8243   /// See AbstractAttribute::updateImpl(...).
8244   ChangeStatus updateImpl(Attributor &A) override {
8245     // TODO: Once we have call site specific value information we can provide
8246     //       call site specific liveness information and then it makes
8247     //       sense to specialize attributes for call site arguments instead of
8248     //       redirecting requests to the callee argument.
8249     Function *F = getAssociatedFunction();
8250     const IRPosition &FnPos = IRPosition::function(*F);
8251     auto &FnAA =
8252         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8253     bool Changed = false;
8254     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8255                           AccessKind Kind, MemoryLocationsKind MLK) {
8256       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8257                                 getAccessKindFromInst(I));
8258       return true;
8259     };
8260     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8261       return indicatePessimisticFixpoint();
8262     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8263   }
8264 
8265   /// See AbstractAttribute::trackStatistics()
8266   void trackStatistics() const override {
8267     if (isAssumedReadNone())
8268       STATS_DECLTRACK_CS_ATTR(readnone)
8269   }
8270 };
8271 } // namespace
8272 
8273 /// ------------------ Value Constant Range Attribute -------------------------
8274 
8275 namespace {
8276 struct AAValueConstantRangeImpl : AAValueConstantRange {
8277   using StateType = IntegerRangeState;
8278   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8279       : AAValueConstantRange(IRP, A) {}
8280 
8281   /// See AbstractAttribute::initialize(..).
8282   void initialize(Attributor &A) override {
8283     if (A.hasSimplificationCallback(getIRPosition())) {
8284       indicatePessimisticFixpoint();
8285       return;
8286     }
8287 
8288     // Intersect a range given by SCEV.
8289     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8290 
8291     // Intersect a range given by LVI.
8292     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8293   }
8294 
8295   /// See AbstractAttribute::getAsStr().
8296   const std::string getAsStr() const override {
8297     std::string Str;
8298     llvm::raw_string_ostream OS(Str);
8299     OS << "range(" << getBitWidth() << ")<";
8300     getKnown().print(OS);
8301     OS << " / ";
8302     getAssumed().print(OS);
8303     OS << ">";
8304     return OS.str();
8305   }
8306 
8307   /// Helper function to get a SCEV expr for the associated value at program
8308   /// point \p I.
8309   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8310     if (!getAnchorScope())
8311       return nullptr;
8312 
8313     ScalarEvolution *SE =
8314         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8315             *getAnchorScope());
8316 
8317     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8318         *getAnchorScope());
8319 
8320     if (!SE || !LI)
8321       return nullptr;
8322 
8323     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8324     if (!I)
8325       return S;
8326 
8327     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8328   }
8329 
8330   /// Helper function to get a range from SCEV for the associated value at
8331   /// program point \p I.
8332   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8333                                          const Instruction *I = nullptr) const {
8334     if (!getAnchorScope())
8335       return getWorstState(getBitWidth());
8336 
8337     ScalarEvolution *SE =
8338         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8339             *getAnchorScope());
8340 
8341     const SCEV *S = getSCEV(A, I);
8342     if (!SE || !S)
8343       return getWorstState(getBitWidth());
8344 
8345     return SE->getUnsignedRange(S);
8346   }
8347 
8348   /// Helper function to get a range from LVI for the associated value at
8349   /// program point \p I.
8350   ConstantRange
8351   getConstantRangeFromLVI(Attributor &A,
8352                           const Instruction *CtxI = nullptr) const {
8353     if (!getAnchorScope())
8354       return getWorstState(getBitWidth());
8355 
8356     LazyValueInfo *LVI =
8357         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8358             *getAnchorScope());
8359 
8360     if (!LVI || !CtxI)
8361       return getWorstState(getBitWidth());
8362     return LVI->getConstantRange(&getAssociatedValue(),
8363                                  const_cast<Instruction *>(CtxI));
8364   }
8365 
8366   /// Return true if \p CtxI is valid for querying outside analyses.
8367   /// This basically makes sure we do not ask intra-procedural analysis
8368   /// about a context in the wrong function or a context that violates
8369   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8370   /// if the original context of this AA is OK or should be considered invalid.
8371   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8372                                                const Instruction *CtxI,
8373                                                bool AllowAACtxI) const {
8374     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8375       return false;
8376 
8377     // Our context might be in a different function, which neither
8378     // intra-procedural analysis (ScalarEvolution nor LazyValueInfo) can handle.
8379     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8380       return false;
8381 
8382     // If the context is not dominated by the value there are paths to the
8383     // context that do not define the value. This cannot be handled by
8384     // LazyValueInfo so we need to bail.
8385     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8386       InformationCache &InfoCache = A.getInfoCache();
8387       const DominatorTree *DT =
8388           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8389               *I->getFunction());
8390       return DT && DT->dominates(I, CtxI);
8391     }
8392 
8393     return true;
8394   }
8395 
8396   /// See AAValueConstantRange::getKnownConstantRange(..).
8397   ConstantRange
8398   getKnownConstantRange(Attributor &A,
8399                         const Instruction *CtxI = nullptr) const override {
8400     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8401                                                  /* AllowAACtxI */ false))
8402       return getKnown();
8403 
8404     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8405     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8406     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8407   }
8408 
8409   /// See AAValueConstantRange::getAssumedConstantRange(..).
8410   ConstantRange
8411   getAssumedConstantRange(Attributor &A,
8412                           const Instruction *CtxI = nullptr) const override {
8413     // TODO: Make SCEV use Attributor assumption.
8414     //       We may be able to bound a variable range via assumptions in
8415     //       Attributor, e.g., if x is assumed to be in [1, 3] and y is known to
8416     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8417     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8418                                                  /* AllowAACtxI */ false))
8419       return getAssumed();
8420 
8421     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8422     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8423     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8424   }
8425 
8426   /// Helper function to create MDNode for range metadata.
8427   static MDNode *
8428   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8429                             const ConstantRange &AssumedConstantRange) {
8430     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8431                                   Ty, AssumedConstantRange.getLower())),
8432                               ConstantAsMetadata::get(ConstantInt::get(
8433                                   Ty, AssumedConstantRange.getUpper()))};
8434     return MDNode::get(Ctx, LowAndHigh);
8435   }
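
  // For example (illustrative): a ConstantRange [0, 10) on an i32 value is
  // encoded as the two-operand metadata node
  //
  //   !range !{i32 0, i32 10}
  //
  // i.e., the half-open interval [Lower, Upper) used by LLVM range metadata.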
8436 
8437   /// Return true if \p Assumed is included in \p KnownRanges.
8438   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8439 
8440     if (Assumed.isFullSet())
8441       return false;
8442 
8443     if (!KnownRanges)
8444       return true;
8445 
8446     // If multiple ranges are annotated in the IR, we give up on annotating the
8447     // assumed range for now.
8448 
8449     // TODO: If there exists a known range which contains the assumed range, we
8450     // can say the assumed range is better.
8451     if (KnownRanges->getNumOperands() > 2)
8452       return false;
8453 
8454     ConstantInt *Lower =
8455         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8456     ConstantInt *Upper =
8457         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8458 
8459     ConstantRange Known(Lower->getValue(), Upper->getValue());
8460     return Known.contains(Assumed) && Known != Assumed;
8461   }
8462 
8463   /// Helper function to set range metadata.
8464   static bool
8465   setRangeMetadataIfisBetterRange(Instruction *I,
8466                                   const ConstantRange &AssumedConstantRange) {
8467     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8468     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8469       if (!AssumedConstantRange.isEmptySet()) {
8470         I->setMetadata(LLVMContext::MD_range,
8471                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8472                                                  AssumedConstantRange));
8473         return true;
8474       }
8475     }
8476     return false;
8477   }
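
  // Illustrative before/after on hypothetical IR: if the assumed range for a
  // load is [1, 5) and the load carries no (or only a strictly wider) !range
  // annotation, the helper above attaches the tighter metadata:
  //
  //   %v = load i32, i32* %p              ; before
  //   %v = load i32, i32* %p, !range !0   ; after
  //   !0 = !{i32 1, i32 5}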
8478 
8479   /// See AbstractAttribute::manifest()
8480   ChangeStatus manifest(Attributor &A) override {
8481     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8482     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8483     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8484 
8485     auto &V = getAssociatedValue();
8486     if (!AssumedConstantRange.isEmptySet() &&
8487         !AssumedConstantRange.isSingleElement()) {
8488       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8489         assert(I == getCtxI() && "Should not annotate an instruction which is "
8490                                  "not the context instruction");
8491         if (isa<CallInst>(I) || isa<LoadInst>(I))
8492           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8493             Changed = ChangeStatus::CHANGED;
8494       }
8495     }
8496 
8497     return Changed;
8498   }
8499 };
8500 
8501 struct AAValueConstantRangeArgument final
8502     : AAArgumentFromCallSiteArguments<
8503           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8504           true /* BridgeCallBaseContext */> {
8505   using Base = AAArgumentFromCallSiteArguments<
8506       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8507       true /* BridgeCallBaseContext */>;
8508   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8509       : Base(IRP, A) {}
8510 
8511   /// See AbstractAttribute::initialize(..).
8512   void initialize(Attributor &A) override {
8513     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8514       indicatePessimisticFixpoint();
8515     } else {
8516       Base::initialize(A);
8517     }
8518   }
8519 
8520   /// See AbstractAttribute::trackStatistics()
8521   void trackStatistics() const override {
8522     STATS_DECLTRACK_ARG_ATTR(value_range)
8523   }
8524 };
8525 
8526 struct AAValueConstantRangeReturned
8527     : AAReturnedFromReturnedValues<AAValueConstantRange,
8528                                    AAValueConstantRangeImpl,
8529                                    AAValueConstantRangeImpl::StateType,
8530                                    /* PropagateCallBaseContext */ true> {
8531   using Base =
8532       AAReturnedFromReturnedValues<AAValueConstantRange,
8533                                    AAValueConstantRangeImpl,
8534                                    AAValueConstantRangeImpl::StateType,
8535                                    /* PropagateCallBaseContext */ true>;
8536   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8537       : Base(IRP, A) {}
8538 
8539   /// See AbstractAttribute::initialize(...).
8540   void initialize(Attributor &A) override {}
8541 
8542   /// See AbstractAttribute::trackStatistics()
8543   void trackStatistics() const override {
8544     STATS_DECLTRACK_FNRET_ATTR(value_range)
8545   }
8546 };
8547 
8548 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8549   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8550       : AAValueConstantRangeImpl(IRP, A) {}
8551 
8552   /// See AbstractAttribute::initialize(...).
8553   void initialize(Attributor &A) override {
8554     AAValueConstantRangeImpl::initialize(A);
8555     if (isAtFixpoint())
8556       return;
8557 
8558     Value &V = getAssociatedValue();
8559 
8560     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8561       unionAssumed(ConstantRange(C->getValue()));
8562       indicateOptimisticFixpoint();
8563       return;
8564     }
8565 
8566     if (isa<UndefValue>(&V)) {
8567       // Collapse the undef state to 0.
8568       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8569       indicateOptimisticFixpoint();
8570       return;
8571     }
8572 
8573     if (isa<CallBase>(&V))
8574       return;
8575 
8576     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8577       return;
8578 
8579     // If it is a load instruction with range metadata, use it.
8580     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8581       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8582         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8583         return;
8584       }
8585 
8586     // We can work with PHI and select instructions as we traverse their operands
8587     // during update.
8588     if (isa<SelectInst>(V) || isa<PHINode>(V))
8589       return;
8590 
8591     // Otherwise we give up.
8592     indicatePessimisticFixpoint();
8593 
8594     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8595                       << getAssociatedValue() << "\n");
8596   }
8597 
8598   bool calculateBinaryOperator(
8599       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8600       const Instruction *CtxI,
8601       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8602     Value *LHS = BinOp->getOperand(0);
8603     Value *RHS = BinOp->getOperand(1);
8604 
8605     // Simplify the operands first.
8606     bool UsedAssumedInformation = false;
8607     const auto &SimplifiedLHS =
8608         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8609                                *this, UsedAssumedInformation);
8610     if (!SimplifiedLHS.hasValue())
8611       return true;
8612     if (!SimplifiedLHS.getValue())
8613       return false;
8614     LHS = *SimplifiedLHS;
8615 
8616     const auto &SimplifiedRHS =
8617         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8618                                *this, UsedAssumedInformation);
8619     if (!SimplifiedRHS.hasValue())
8620       return true;
8621     if (!SimplifiedRHS.getValue())
8622       return false;
8623     RHS = *SimplifiedRHS;
8624 
8625     // TODO: Allow non integers as well.
8626     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8627       return false;
8628 
8629     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8630         *this, IRPosition::value(*LHS, getCallBaseContext()),
8631         DepClassTy::REQUIRED);
8632     QuerriedAAs.push_back(&LHSAA);
8633     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8634 
8635     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8636         *this, IRPosition::value(*RHS, getCallBaseContext()),
8637         DepClassTy::REQUIRED);
8638     QuerriedAAs.push_back(&RHSAA);
8639     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8640 
8641     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8642 
8643     T.unionAssumed(AssumedRange);
8644 
8645     // TODO: Track a known state too.
8646 
8647     return T.isValidState();
8648   }
8649 
8650   bool calculateCastInst(
8651       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8652       const Instruction *CtxI,
8653       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8654     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8655     // TODO: Allow non integers as well.
8656     Value *OpV = CastI->getOperand(0);
8657 
8658     // Simplify the operand first.
8659     bool UsedAssumedInformation = false;
8660     const auto &SimplifiedOpV =
8661         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8662                                *this, UsedAssumedInformation);
8663     if (!SimplifiedOpV.hasValue())
8664       return true;
8665     if (!SimplifiedOpV.getValue())
8666       return false;
8667     OpV = *SimplifiedOpV;
8668 
8669     if (!OpV->getType()->isIntegerTy())
8670       return false;
8671 
8672     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8673         *this, IRPosition::value(*OpV, getCallBaseContext()),
8674         DepClassTy::REQUIRED);
8675     QuerriedAAs.push_back(&OpAA);
8676     T.unionAssumed(
8677         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8678     return T.isValidState();
8679   }
8680 
8681   bool
8682   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8683                    const Instruction *CtxI,
8684                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8685     Value *LHS = CmpI->getOperand(0);
8686     Value *RHS = CmpI->getOperand(1);
8687 
8688     // Simplify the operands first.
8689     bool UsedAssumedInformation = false;
8690     const auto &SimplifiedLHS =
8691         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8692                                *this, UsedAssumedInformation);
8693     if (!SimplifiedLHS.hasValue())
8694       return true;
8695     if (!SimplifiedLHS.getValue())
8696       return false;
8697     LHS = *SimplifiedLHS;
8698 
8699     const auto &SimplifiedRHS =
8700         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8701                                *this, UsedAssumedInformation);
8702     if (!SimplifiedRHS.hasValue())
8703       return true;
8704     if (!SimplifiedRHS.getValue())
8705       return false;
8706     RHS = *SimplifiedRHS;
8707 
8708     // TODO: Allow non integers as well.
8709     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8710       return false;
8711 
8712     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8713         *this, IRPosition::value(*LHS, getCallBaseContext()),
8714         DepClassTy::REQUIRED);
8715     QuerriedAAs.push_back(&LHSAA);
8716     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8717         *this, IRPosition::value(*RHS, getCallBaseContext()),
8718         DepClassTy::REQUIRED);
8719     QuerriedAAs.push_back(&RHSAA);
8720     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8721     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8722 
8723     // If one of them is empty set, we can't decide.
8724     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8725       return true;
8726 
8727     bool MustTrue = false, MustFalse = false;
8728 
8729     auto AllowedRegion =
8730         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8731 
8732     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8733       MustFalse = true;
8734 
8735     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8736       MustTrue = true;
8737 
8738     assert((!MustTrue || !MustFalse) &&
8739            "Either MustTrue or MustFalse should be false!");
8740 
8741     if (MustTrue)
8742       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8743     else if (MustFalse)
8744       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8745     else
8746       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8747 
8748     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8749                       << " " << RHSAA << "\n");
8750 
8751     // TODO: Track a known state too.
8752     return T.isValidState();
8753   }
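
  // Worked example (illustrative): with LHS range [0, 5) and RHS range
  // [10, 20), `icmp ult` holds for every value pair, so MustTrue is set and the
  // i1 result collapses to the single value 1. With `icmp ugt` the allowed
  // region does not intersect [0, 5), MustFalse is set and the result collapses
  // to 0. If neither case applies, the full i1 range is assumed.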
8754 
8755   /// See AbstractAttribute::updateImpl(...).
8756   ChangeStatus updateImpl(Attributor &A) override {
8757     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8758                             IntegerRangeState &T, bool Stripped) -> bool {
8759       Instruction *I = dyn_cast<Instruction>(&V);
8760       if (!I || isa<CallBase>(I)) {
8761 
8762         // Simplify the operand first.
8763         bool UsedAssumedInformation = false;
8764         const auto &SimplifiedOpV =
8765             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8766                                    *this, UsedAssumedInformation);
8767         if (!SimplifiedOpV.hasValue())
8768           return true;
8769         if (!SimplifiedOpV.getValue())
8770           return false;
8771         Value *VPtr = *SimplifiedOpV;
8772 
8773         // If the value is not an instruction, we query the AA via the Attributor.
8774         const auto &AA = A.getAAFor<AAValueConstantRange>(
8775             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8776             DepClassTy::REQUIRED);
8777 
8778         // We do not clamp here so that the program point CtxI can be utilized.
8779         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8780 
8781         return T.isValidState();
8782       }
8783 
8784       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8785       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8786         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8787           return false;
8788       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8789         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8790           return false;
8791       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8792         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8793           return false;
8794       } else {
8795         // Give up with other instructions.
8796         // TODO: Add other instructions
8797 
8798         T.indicatePessimisticFixpoint();
8799         return false;
8800       }
8801 
8802       // Catch circular reasoning in a pessimistic way for now.
8803       // TODO: Check how the range evolves and if we stripped anything, see also
8804       //       AADereferenceable or AAAlign for similar situations.
8805       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8806         if (QueriedAA != this)
8807           continue;
8808         // If we are in a steady state we do not need to worry.
8809         if (T.getAssumed() == getState().getAssumed())
8810           continue;
8811         T.indicatePessimisticFixpoint();
8812       }
8813 
8814       return T.isValidState();
8815     };
8816 
8817     IntegerRangeState T(getBitWidth());
8818 
8819     bool UsedAssumedInformation = false;
8820     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8821                                                   VisitValueCB, getCtxI(),
8822                                                   UsedAssumedInformation,
8823                                                   /* UseValueSimplify */ false))
8824       return indicatePessimisticFixpoint();
8825 
8826     // Ensure that long def-use chains can't cause circular reasoning either by
8827     // introducing a cutoff below.
8828     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8829       return ChangeStatus::UNCHANGED;
8830     if (++NumChanges > MaxNumChanges) {
8831       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8832                         << " changes but only " << MaxNumChanges
8833                         << " are allowed to avoid cyclic reasoning.\n");
8834       return indicatePessimisticFixpoint();
8835     }
8836     return ChangeStatus::CHANGED;
8837   }
8838 
8839   /// See AbstractAttribute::trackStatistics()
8840   void trackStatistics() const override {
8841     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8842   }
8843 
8844   /// Tracker to bail after too many widening steps of the constant range.
8845   int NumChanges = 0;
8846 
8847   /// Upper bound for the number of allowed changes (=widening steps) for the
8848   /// constant range before we give up.
8849   static constexpr int MaxNumChanges = 5;
8850 };
8851 
8852 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8853   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8854       : AAValueConstantRangeImpl(IRP, A) {}
8855 
8856   /// See AbstractAttribute::updateImpl(...).
8857   ChangeStatus updateImpl(Attributor &A) override {
8858     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8859                      "not be called");
8860   }
8861 
8862   /// See AbstractAttribute::trackStatistics()
8863   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8864 };
8865 
8866 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8867   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8868       : AAValueConstantRangeFunction(IRP, A) {}
8869 
8870   /// See AbstractAttribute::trackStatistics()
8871   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8872 };
8873 
8874 struct AAValueConstantRangeCallSiteReturned
8875     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8876                                      AAValueConstantRangeImpl,
8877                                      AAValueConstantRangeImpl::StateType,
8878                                      /* IntroduceCallBaseContext */ true> {
8879   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8880       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8881                                        AAValueConstantRangeImpl,
8882                                        AAValueConstantRangeImpl::StateType,
8883                                        /* IntroduceCallBaseContext */ true>(IRP,
8884                                                                             A) {
8885   }
8886 
8887   /// See AbstractAttribute::initialize(...).
8888   void initialize(Attributor &A) override {
8889     // If it is a call instruction with range metadata, use the metadata.
8890     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8891       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8892         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8893 
8894     AAValueConstantRangeImpl::initialize(A);
8895   }
8896 
8897   /// See AbstractAttribute::trackStatistics()
8898   void trackStatistics() const override {
8899     STATS_DECLTRACK_CSRET_ATTR(value_range)
8900   }
8901 };
8902 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8903   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8904       : AAValueConstantRangeFloating(IRP, A) {}
8905 
8906   /// See AbstractAttribute::manifest()
8907   ChangeStatus manifest(Attributor &A) override {
8908     return ChangeStatus::UNCHANGED;
8909   }
8910 
8911   /// See AbstractAttribute::trackStatistics()
8912   void trackStatistics() const override {
8913     STATS_DECLTRACK_CSARG_ATTR(value_range)
8914   }
8915 };
8916 } // namespace
8917 
8918 /// ------------------ Potential Values Attribute -------------------------
8919 
8920 namespace {
8921 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8922   using StateType = PotentialConstantIntValuesState;
8923 
8924   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8925       : AAPotentialConstantValues(IRP, A) {}
8926 
8927   /// See AbstractAttribute::initialize(..).
8928   void initialize(Attributor &A) override {
8929     if (A.hasSimplificationCallback(getIRPosition()))
8930       indicatePessimisticFixpoint();
8931     else
8932       AAPotentialConstantValues::initialize(A);
8933   }
8934 
8935   /// See AbstractAttribute::getAsStr().
8936   const std::string getAsStr() const override {
8937     std::string Str;
8938     llvm::raw_string_ostream OS(Str);
8939     OS << getState();
8940     return OS.str();
8941   }
8942 
8943   /// See AbstractAttribute::updateImpl(...).
8944   ChangeStatus updateImpl(Attributor &A) override {
8945     return indicatePessimisticFixpoint();
8946   }
8947 };
8948 
8949 struct AAPotentialConstantValuesArgument final
8950     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8951                                       AAPotentialConstantValuesImpl,
8952                                       PotentialConstantIntValuesState> {
8953   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8954                                                AAPotentialConstantValuesImpl,
8955                                                PotentialConstantIntValuesState>;
8956   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
8957       : Base(IRP, A) {}
8958 
8959   /// See AbstractAttribute::initialize(..).
8960   void initialize(Attributor &A) override {
8961     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8962       indicatePessimisticFixpoint();
8963     } else {
8964       Base::initialize(A);
8965     }
8966   }
8967 
8968   /// See AbstractAttribute::trackStatistics()
8969   void trackStatistics() const override {
8970     STATS_DECLTRACK_ARG_ATTR(potential_values)
8971   }
8972 };
8973 
8974 struct AAPotentialConstantValuesReturned
8975     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
8976                                    AAPotentialConstantValuesImpl> {
8977   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
8978                                             AAPotentialConstantValuesImpl>;
8979   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
8980       : Base(IRP, A) {}
8981 
8982   /// See AbstractAttribute::trackStatistics()
8983   void trackStatistics() const override {
8984     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8985   }
8986 };
8987 
8988 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
8989   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
8990       : AAPotentialConstantValuesImpl(IRP, A) {}
8991 
8992   /// See AbstractAttribute::initialize(..).
8993   void initialize(Attributor &A) override {
8994     AAPotentialConstantValuesImpl::initialize(A);
8995     if (isAtFixpoint())
8996       return;
8997 
8998     Value &V = getAssociatedValue();
8999 
9000     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9001       unionAssumed(C->getValue());
9002       indicateOptimisticFixpoint();
9003       return;
9004     }
9005 
9006     if (isa<UndefValue>(&V)) {
9007       unionAssumedWithUndef();
9008       indicateOptimisticFixpoint();
9009       return;
9010     }
9011 
9012     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9013       return;
9014 
9015     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9016       return;
9017 
9018     indicatePessimisticFixpoint();
9019 
9020     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9021                       << getAssociatedValue() << "\n");
9022   }
9023 
9024   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9025                                 const APInt &RHS) {
9026     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9027   }
9028 
9029   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9030                                  uint32_t ResultBitWidth) {
9031     Instruction::CastOps CastOp = CI->getOpcode();
9032     switch (CastOp) {
9033     default:
9034       llvm_unreachable("unsupported or not integer cast");
9035     case Instruction::Trunc:
9036       return Src.trunc(ResultBitWidth);
9037     case Instruction::SExt:
9038       return Src.sext(ResultBitWidth);
9039     case Instruction::ZExt:
9040       return Src.zext(ResultBitWidth);
9041     case Instruction::BitCast:
9042       return Src;
9043     }
9044   }
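
  // For example (illustrative; TruncI and SExtI are hypothetical CastInst
  // pointers): truncating the 32-bit constant 258 to i8 yields 2 (258 = 0x102,
  // low byte 0x02), and sign-extending the 8-bit value -1 to i32 yields -1:
  //
  //   calculateCastInst(TruncI, APInt(32, 258), /* ResultBitWidth */ 8);  // 2
  //   calculateCastInst(SExtI, APInt(8, -1, /* isSigned */ true),
  //                     /* ResultBitWidth */ 32);                         // -1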
9045 
9046   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9047                                        const APInt &LHS, const APInt &RHS,
9048                                        bool &SkipOperation, bool &Unsupported) {
9049     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9050     // Unsupported is set to true when the binary operator is not supported.
9051     // SkipOperation is set to true when UB occurs with the given operand pair
9052     // (LHS, RHS).
9053     // TODO: we should look at nsw and nuw keywords to handle operations
9054     //       that create poison or undef value.
9055     switch (BinOpcode) {
9056     default:
9057       Unsupported = true;
9058       return LHS;
9059     case Instruction::Add:
9060       return LHS + RHS;
9061     case Instruction::Sub:
9062       return LHS - RHS;
9063     case Instruction::Mul:
9064       return LHS * RHS;
9065     case Instruction::UDiv:
9066       if (RHS.isZero()) {
9067         SkipOperation = true;
9068         return LHS;
9069       }
9070       return LHS.udiv(RHS);
9071     case Instruction::SDiv:
9072       if (RHS.isZero()) {
9073         SkipOperation = true;
9074         return LHS;
9075       }
9076       return LHS.sdiv(RHS);
9077     case Instruction::URem:
9078       if (RHS.isZero()) {
9079         SkipOperation = true;
9080         return LHS;
9081       }
9082       return LHS.urem(RHS);
9083     case Instruction::SRem:
9084       if (RHS.isZero()) {
9085         SkipOperation = true;
9086         return LHS;
9087       }
9088       return LHS.srem(RHS);
9089     case Instruction::Shl:
9090       return LHS.shl(RHS);
9091     case Instruction::LShr:
9092       return LHS.lshr(RHS);
9093     case Instruction::AShr:
9094       return LHS.ashr(RHS);
9095     case Instruction::And:
9096       return LHS & RHS;
9097     case Instruction::Or:
9098       return LHS | RHS;
9099     case Instruction::Xor:
9100       return LHS ^ RHS;
9101     }
9102   }
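
  // Illustrative behavior (AddI and UDivI are hypothetical BinaryOperator
  // pointers): an `add` of 3 and 4 folds to 7, a `udiv` by zero sets
  // SkipOperation so the pair is dropped instead of producing UB, and an
  // unhandled opcode (e.g., a floating-point `fadd`) sets Unsupported:
  //
  //   bool Skip = false, Unsup = false;
  //   calculateBinaryOperator(AddI, APInt(32, 3), APInt(32, 4), Skip, Unsup);
  //   // returns 7, Skip == false, Unsup == false
  //   calculateBinaryOperator(UDivI, APInt(32, 3), APInt(32, 0), Skip, Unsup);
  //   // Skip == true; the returned value is ignored by the caller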
9103 
9104   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9105                                            const APInt &LHS, const APInt &RHS) {
9106     bool SkipOperation = false;
9107     bool Unsupported = false;
9108     APInt Result =
9109         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9110     if (Unsupported)
9111       return false;
9112     // If SkipOperation is true, we can ignore this operand pair (L, R).
9113     if (!SkipOperation)
9114       unionAssumed(Result);
9115     return isValidState();
9116   }
9117 
9118   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9119     auto AssumedBefore = getAssumed();
9120     Value *LHS = ICI->getOperand(0);
9121     Value *RHS = ICI->getOperand(1);
9122 
9123     // Simplify the operands first.
9124     bool UsedAssumedInformation = false;
9125     const auto &SimplifiedLHS =
9126         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9127                                *this, UsedAssumedInformation);
9128     if (!SimplifiedLHS.hasValue())
9129       return ChangeStatus::UNCHANGED;
9130     if (!SimplifiedLHS.getValue())
9131       return indicatePessimisticFixpoint();
9132     LHS = *SimplifiedLHS;
9133 
9134     const auto &SimplifiedRHS =
9135         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9136                                *this, UsedAssumedInformation);
9137     if (!SimplifiedRHS.hasValue())
9138       return ChangeStatus::UNCHANGED;
9139     if (!SimplifiedRHS.getValue())
9140       return indicatePessimisticFixpoint();
9141     RHS = *SimplifiedRHS;
9142 
9143     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9144       return indicatePessimisticFixpoint();
9145 
9146     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9147         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9148     if (!LHSAA.isValidState())
9149       return indicatePessimisticFixpoint();
9150 
9151     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9152         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9153     if (!RHSAA.isValidState())
9154       return indicatePessimisticFixpoint();
9155 
9156     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9157     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9158 
9159     // TODO: make use of undef flag to limit potential values aggressively.
9160     bool MaybeTrue = false, MaybeFalse = false;
9161     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9162     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9163       // The result of any comparison between undefs can be soundly replaced
9164       // with undef.
9165       unionAssumedWithUndef();
9166     } else if (LHSAA.undefIsContained()) {
9167       for (const APInt &R : RHSAAPVS) {
9168         bool CmpResult = calculateICmpInst(ICI, Zero, R);
9169         MaybeTrue |= CmpResult;
9170         MaybeFalse |= !CmpResult;
9171         if (MaybeTrue & MaybeFalse)
9172           return indicatePessimisticFixpoint();
9173       }
9174     } else if (RHSAA.undefIsContained()) {
9175       for (const APInt &L : LHSAAPVS) {
9176         bool CmpResult = calculateICmpInst(ICI, L, Zero);
9177         MaybeTrue |= CmpResult;
9178         MaybeFalse |= !CmpResult;
9179         if (MaybeTrue & MaybeFalse)
9180           return indicatePessimisticFixpoint();
9181       }
9182     } else {
9183       for (const APInt &L : LHSAAPVS) {
9184         for (const APInt &R : RHSAAPVS) {
9185           bool CmpResult = calculateICmpInst(ICI, L, R);
9186           MaybeTrue |= CmpResult;
9187           MaybeFalse |= !CmpResult;
9188           if (MaybeTrue & MaybeFalse)
9189             return indicatePessimisticFixpoint();
9190         }
9191       }
9192     }
9193     if (MaybeTrue)
9194       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9195     if (MaybeFalse)
9196       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9197     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9198                                          : ChangeStatus::CHANGED;
9199   }
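
  // Worked example (illustrative): with assumed sets LHS = {1, 2} and
  // RHS = {3} for `icmp slt`, every pair compares true, so only MaybeTrue is
  // set and the assumed i1 set becomes {1}. Had the sets been {1, 4} and {3},
  // both outcomes would occur and the pessimistic fixpoint is indicated.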
9200 
9201   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9202     auto AssumedBefore = getAssumed();
9203     Value *LHS = SI->getTrueValue();
9204     Value *RHS = SI->getFalseValue();
9205 
9206     // Simplify the operands first.
9207     bool UsedAssumedInformation = false;
9208     const auto &SimplifiedLHS =
9209         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9210                                *this, UsedAssumedInformation);
9211     if (!SimplifiedLHS.hasValue())
9212       return ChangeStatus::UNCHANGED;
9213     if (!SimplifiedLHS.getValue())
9214       return indicatePessimisticFixpoint();
9215     LHS = *SimplifiedLHS;
9216 
9217     const auto &SimplifiedRHS =
9218         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9219                                *this, UsedAssumedInformation);
9220     if (!SimplifiedRHS.hasValue())
9221       return ChangeStatus::UNCHANGED;
9222     if (!SimplifiedRHS.getValue())
9223       return indicatePessimisticFixpoint();
9224     RHS = *SimplifiedRHS;
9225 
9226     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9227       return indicatePessimisticFixpoint();
9228 
9229     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9230                                                   UsedAssumedInformation);
9231 
9232     // Check if we only need one operand.
9233     bool OnlyLeft = false, OnlyRight = false;
9234     if (C.hasValue() && *C && (*C)->isOneValue())
9235       OnlyLeft = true;
9236     else if (C.hasValue() && *C && (*C)->isZeroValue())
9237       OnlyRight = true;
9238 
9239     const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
9240     if (!OnlyRight) {
9241       LHSAA = &A.getAAFor<AAPotentialConstantValues>(
9242           *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9243       if (!LHSAA->isValidState())
9244         return indicatePessimisticFixpoint();
9245     }
9246     if (!OnlyLeft) {
9247       RHSAA = &A.getAAFor<AAPotentialConstantValues>(
9248           *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9249       if (!RHSAA->isValidState())
9250         return indicatePessimisticFixpoint();
9251     }
9252 
9253     if (!LHSAA || !RHSAA) {
9254       // select (true/false), lhs, rhs
9255       auto *OpAA = LHSAA ? LHSAA : RHSAA;
9256 
9257       if (OpAA->undefIsContained())
9258         unionAssumedWithUndef();
9259       else
9260         unionAssumed(*OpAA);
9261 
9262     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9263       // select i1 *, undef, undef => undef
9264       unionAssumedWithUndef();
9265     } else {
9266       unionAssumed(*LHSAA);
9267       unionAssumed(*RHSAA);
9268     }
9269     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9270                                          : ChangeStatus::CHANGED;
9271   }
9272 
9273   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9274     auto AssumedBefore = getAssumed();
9275     if (!CI->isIntegerCast())
9276       return indicatePessimisticFixpoint();
9277     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9278     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9279     Value *Src = CI->getOperand(0);
9280 
9281     // Simplify the operand first.
9282     bool UsedAssumedInformation = false;
9283     const auto &SimplifiedSrc =
9284         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9285                                *this, UsedAssumedInformation);
9286     if (!SimplifiedSrc.hasValue())
9287       return ChangeStatus::UNCHANGED;
9288     if (!SimplifiedSrc.getValue())
9289       return indicatePessimisticFixpoint();
9290     Src = *SimplifiedSrc;
9291 
9292     auto &SrcAA = A.getAAFor<AAPotentialConstantValues>(
9293         *this, IRPosition::value(*Src), DepClassTy::REQUIRED);
9294     if (!SrcAA.isValidState())
9295       return indicatePessimisticFixpoint();
9296     const SetTy &SrcAAPVS = SrcAA.getAssumedSet();
9297     if (SrcAA.undefIsContained())
9298       unionAssumedWithUndef();
9299     else {
9300       for (const APInt &S : SrcAAPVS) {
9301         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9302         unionAssumed(T);
9303       }
9304     }
9305     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9306                                          : ChangeStatus::CHANGED;
9307   }
9308 
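  /// Propagate potential constant values through a binary operator by
  /// evaluating it on the potential values of both operands; a contained
  /// undef is evaluated as zero.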
9309   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9310     auto AssumedBefore = getAssumed();
9311     Value *LHS = BinOp->getOperand(0);
9312     Value *RHS = BinOp->getOperand(1);
9313 
9314     // Simplify the operands first.
9315     bool UsedAssumedInformation = false;
9316     const auto &SimplifiedLHS =
9317         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9318                                *this, UsedAssumedInformation);
9319     if (!SimplifiedLHS.hasValue())
9320       return ChangeStatus::UNCHANGED;
9321     if (!SimplifiedLHS.getValue())
9322       return indicatePessimisticFixpoint();
9323     LHS = *SimplifiedLHS;
9324 
9325     const auto &SimplifiedRHS =
9326         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9327                                *this, UsedAssumedInformation);
9328     if (!SimplifiedRHS.hasValue())
9329       return ChangeStatus::UNCHANGED;
9330     if (!SimplifiedRHS.getValue())
9331       return indicatePessimisticFixpoint();
9332     RHS = *SimplifiedRHS;
9333 
9334     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9335       return indicatePessimisticFixpoint();
9336 
9337     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9338         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9339     if (!LHSAA.isValidState())
9340       return indicatePessimisticFixpoint();
9341 
9342     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9343         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9344     if (!RHSAA.isValidState())
9345       return indicatePessimisticFixpoint();
9346 
9347     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9348     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9349     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9350 
9351     // TODO: make use of undef flag to limit potential values aggressively.
9352     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9353       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9354         return indicatePessimisticFixpoint();
9355     } else if (LHSAA.undefIsContained()) {
9356       for (const APInt &R : RHSAAPVS) {
9357         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9358           return indicatePessimisticFixpoint();
9359       }
9360     } else if (RHSAA.undefIsContained()) {
9361       for (const APInt &L : LHSAAPVS) {
9362         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9363           return indicatePessimisticFixpoint();
9364       }
9365     } else {
9366       for (const APInt &L : LHSAAPVS) {
9367         for (const APInt &R : RHSAAPVS) {
9368           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9369             return indicatePessimisticFixpoint();
9370         }
9371       }
9372     }
9373     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9374                                          : ChangeStatus::CHANGED;
9375   }
9376 
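  /// Merge the potential constant values of all (simplified) incoming values
  /// of a PHI node into the assumed set of this position.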
9377   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9378     auto AssumedBefore = getAssumed();
9379     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9380       Value *IncomingValue = PHI->getIncomingValue(u);
9381 
9382       // Simplify the operand first.
9383       bool UsedAssumedInformation = false;
9384       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9385           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9386           UsedAssumedInformation);
9387       if (!SimplifiedIncomingValue.hasValue())
9388         continue;
9389       if (!SimplifiedIncomingValue.getValue())
9390         return indicatePessimisticFixpoint();
9391       IncomingValue = *SimplifiedIncomingValue;
9392 
9393       auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9394           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9395       if (!PotentialValuesAA.isValidState())
9396         return indicatePessimisticFixpoint();
9397       if (PotentialValuesAA.undefIsContained())
9398         unionAssumedWithUndef();
9399       else
9400         unionAssumed(PotentialValuesAA.getAssumed());
9401     }
9402     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9403                                          : ChangeStatus::CHANGED;
9404   }
9405 
9406   /// See AbstractAttribute::updateImpl(...).
9407   ChangeStatus updateImpl(Attributor &A) override {
9408     Value &V = getAssociatedValue();
9409     Instruction *I = dyn_cast<Instruction>(&V);
9410 
9411     if (auto *ICI = dyn_cast<ICmpInst>(I))
9412       return updateWithICmpInst(A, ICI);
9413 
9414     if (auto *SI = dyn_cast<SelectInst>(I))
9415       return updateWithSelectInst(A, SI);
9416 
9417     if (auto *CI = dyn_cast<CastInst>(I))
9418       return updateWithCastInst(A, CI);
9419 
9420     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9421       return updateWithBinaryOperator(A, BinOp);
9422 
9423     if (auto *PHI = dyn_cast<PHINode>(I))
9424       return updateWithPHINode(A, PHI);
9425 
9426     return indicatePessimisticFixpoint();
9427   }
9428 
9429   /// See AbstractAttribute::trackStatistics()
9430   void trackStatistics() const override {
9431     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9432   }
9433 };
9434 
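/// Potential constant values are not tracked for function positions, hence
/// updateImpl must never be called here (see the llvm_unreachable below).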
9435 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9436   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9437       : AAPotentialConstantValuesImpl(IRP, A) {}
9438 
9439   /// See AbstractAttribute::updateImpl(...).
9440   ChangeStatus updateImpl(Attributor &A) override {
9441     llvm_unreachable(
9442         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9443         "not be called");
9444   }
9445 
9446   /// See AbstractAttribute::trackStatistics()
9447   void trackStatistics() const override {
9448     STATS_DECLTRACK_FN_ATTR(potential_values)
9449   }
9450 };
9451 
9452 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9453   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9454       : AAPotentialConstantValuesFunction(IRP, A) {}
9455 
9456   /// See AbstractAttribute::trackStatistics()
9457   void trackStatistics() const override {
9458     STATS_DECLTRACK_CS_ATTR(potential_values)
9459   }
9460 };
9461 
9462 struct AAPotentialConstantValuesCallSiteReturned
9463     : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9464                                      AAPotentialConstantValuesImpl> {
9465   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9466                                             Attributor &A)
9467       : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9468                                        AAPotentialConstantValuesImpl>(IRP, A) {}
9469 
9470   /// See AbstractAttribute::trackStatistics()
9471   void trackStatistics() const override {
9472     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9473   }
9474 };
9475 
9476 struct AAPotentialConstantValuesCallSiteArgument
9477     : AAPotentialConstantValuesFloating {
9478   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9479                                             Attributor &A)
9480       : AAPotentialConstantValuesFloating(IRP, A) {}
9481 
9482   /// See AbstractAttribute::initialize(...).
9483   void initialize(Attributor &A) override {
9484     AAPotentialConstantValuesImpl::initialize(A);
9485     if (isAtFixpoint())
9486       return;
9487 
9488     Value &V = getAssociatedValue();
9489 
9490     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9491       unionAssumed(C->getValue());
9492       indicateOptimisticFixpoint();
9493       return;
9494     }
9495 
9496     if (isa<UndefValue>(&V)) {
9497       unionAssumedWithUndef();
9498       indicateOptimisticFixpoint();
9499       return;
9500     }
9501   }
9502 
9503   /// See AbstractAttribute::updateImpl(...).
9504   ChangeStatus updateImpl(Attributor &A) override {
9505     Value &V = getAssociatedValue();
9506     auto AssumedBefore = getAssumed();
9507     auto &AA = A.getAAFor<AAPotentialConstantValues>(
9508         *this, IRPosition::value(V), DepClassTy::REQUIRED);
9509     const auto &S = AA.getAssumed();
9510     unionAssumed(S);
9511     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9512                                          : ChangeStatus::CHANGED;
9513   }
9514 
9515   /// See AbstractAttribute::trackStatistics()
9516   void trackStatistics() const override {
9517     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9518   }
9519 };
9520 
9521 /// ------------------------ NoUndef Attribute ---------------------------------
9522 struct AANoUndefImpl : AANoUndef {
9523   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9524 
9525   /// See AbstractAttribute::initialize(...).
9526   void initialize(Attributor &A) override {
9527     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9528       indicateOptimisticFixpoint();
9529       return;
9530     }
9531     Value &V = getAssociatedValue();
9532     if (isa<UndefValue>(V))
9533       indicatePessimisticFixpoint();
9534     else if (isa<FreezeInst>(V))
9535       indicateOptimisticFixpoint();
9536     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9537              isGuaranteedNotToBeUndefOrPoison(&V))
9538       indicateOptimisticFixpoint();
9539     else
9540       AANoUndef::initialize(A);
9541   }
9542 
9543   /// See followUsesInMBEC
9544   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9545                        AANoUndef::StateType &State) {
9546     const Value *UseV = U->get();
9547     const DominatorTree *DT = nullptr;
9548     AssumptionCache *AC = nullptr;
9549     InformationCache &InfoCache = A.getInfoCache();
9550     if (Function *F = getAnchorScope()) {
9551       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9552       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9553     }
9554     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9555     bool TrackUse = false;
9556     // Track use for instructions which must produce undef or poison bits when
9557     // at least one operand contains such bits.
9558     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9559       TrackUse = true;
9560     return TrackUse;
9561   }
9562 
9563   /// See AbstractAttribute::getAsStr().
9564   const std::string getAsStr() const override {
9565     return getAssumed() ? "noundef" : "may-undef-or-poison";
9566   }
9567 
9568   ChangeStatus manifest(Attributor &A) override {
9569     // We don't manifest the noundef attribute for dead positions because the
9570     // values associated with dead positions would be replaced with undef
9571     // values.
9572     bool UsedAssumedInformation = false;
9573     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9574                         UsedAssumedInformation))
9575       return ChangeStatus::UNCHANGED;
9576     // A position whose simplified value does not have any value is
9577     // considered to be dead. We don't manifest noundef in such positions for
9578     // the same reason as above.
9579     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9580              .hasValue())
9581       return ChangeStatus::UNCHANGED;
9582     return AANoUndef::manifest(A);
9583   }
9584 };
9585 
9586 struct AANoUndefFloating : public AANoUndefImpl {
9587   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9588       : AANoUndefImpl(IRP, A) {}
9589 
9590   /// See AbstractAttribute::initialize(...).
9591   void initialize(Attributor &A) override {
9592     AANoUndefImpl::initialize(A);
9593     if (!getState().isAtFixpoint())
9594       if (Instruction *CtxI = getCtxI())
9595         followUsesInMBEC(*this, A, getState(), *CtxI);
9596   }
9597 
9598   /// See AbstractAttribute::updateImpl(...).
9599   ChangeStatus updateImpl(Attributor &A) override {
9600     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9601                             AANoUndef::StateType &T, bool Stripped) -> bool {
9602       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9603                                              DepClassTy::REQUIRED);
9604       if (!Stripped && this == &AA) {
9605         T.indicatePessimisticFixpoint();
9606       } else {
9607         const AANoUndef::StateType &S =
9608             static_cast<const AANoUndef::StateType &>(AA.getState());
9609         T ^= S;
9610       }
9611       return T.isValidState();
9612     };
9613 
9614     StateType T;
9615     bool UsedAssumedInformation = false;
9616     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9617                                           VisitValueCB, getCtxI(),
9618                                           UsedAssumedInformation))
9619       return indicatePessimisticFixpoint();
9620 
9621     return clampStateAndIndicateChange(getState(), T);
9622   }
9623 
9624   /// See AbstractAttribute::trackStatistics()
9625   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(noundef) }
9626 };
9627 
9628 struct AANoUndefReturned final
9629     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9630   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9631       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9632 
9633   /// See AbstractAttribute::trackStatistics()
9634   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9635 };
9636 
9637 struct AANoUndefArgument final
9638     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9639   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9640       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9641 
9642   /// See AbstractAttribute::trackStatistics()
9643   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9644 };
9645 
9646 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9647   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9648       : AANoUndefFloating(IRP, A) {}
9649 
9650   /// See AbstractAttribute::trackStatistics()
9651   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9652 };
9653 
9654 struct AANoUndefCallSiteReturned final
9655     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9656   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9657       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9658 
9659   /// See AbstractAttribute::trackStatistics()
9660   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9661 };
9662 
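/// Common base for call-edge attributes. It maintains the optimistic set of
/// potentially called functions and records whether a call with an unknown
/// callee (optionally excluding inline asm) was encountered.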
9663 struct AACallEdgesImpl : public AACallEdges {
9664   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9665 
9666   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9667     return CalledFunctions;
9668   }
9669 
9670   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9671 
9672   virtual bool hasNonAsmUnknownCallee() const override {
9673     return HasUnknownCalleeNonAsm;
9674   }
9675 
9676   const std::string getAsStr() const override {
9677     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9678            std::to_string(CalledFunctions.size()) + "]";
9679   }
9680 
9681   void trackStatistics() const override {}
9682 
9683 protected:
9684   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9685     if (CalledFunctions.insert(Fn)) {
9686       Change = ChangeStatus::CHANGED;
9687       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9688                         << "\n");
9689     }
9690   }
9691 
9692   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9693     if (!HasUnknownCallee)
9694       Change = ChangeStatus::CHANGED;
9695     if (NonAsm && !HasUnknownCalleeNonAsm)
9696       Change = ChangeStatus::CHANGED;
9697     HasUnknownCalleeNonAsm |= NonAsm;
9698     HasUnknownCallee = true;
9699   }
9700 
9701 private:
9702   /// Optimistic set of functions that might be called by this position.
9703   SetVector<Function *> CalledFunctions;
9704 
9705   /// Is there any call with an unknown callee.
9706   bool HasUnknownCallee = false;
9707 
9708   /// Is there any call with an unknown callee, excluding any inline asm.
9709   bool HasUnknownCalleeNonAsm = false;
9710 };
9711 
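/// Call edges of a single call site. The called operand (and the callees of
/// any callback uses) is resolved via generic value traversal; !callees
/// metadata and inline asm are handled as special cases.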
9712 struct AACallEdgesCallSite : public AACallEdgesImpl {
9713   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9714       : AACallEdgesImpl(IRP, A) {}
9715   /// See AbstractAttribute::updateImpl(...).
9716   ChangeStatus updateImpl(Attributor &A) override {
9717     ChangeStatus Change = ChangeStatus::UNCHANGED;
9718 
9719     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9720                           bool Stripped) -> bool {
9721       if (Function *Fn = dyn_cast<Function>(&V)) {
9722         addCalledFunction(Fn, Change);
9723       } else {
9724         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9725         setHasUnknownCallee(true, Change);
9726       }
9727 
9728       // Explore all values.
9729       return true;
9730     };
9731 
9732     // Process any value that we might call.
9733     auto ProcessCalledOperand = [&](Value *V) {
9734       bool DummyValue = false;
9735       bool UsedAssumedInformation = false;
9736       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9737                                        DummyValue, VisitValue, nullptr,
9738                                        UsedAssumedInformation, false)) {
9739         // If we haven't gone through all values, assume that there are unknown
9740         // callees.
9741         setHasUnknownCallee(true, Change);
9742       }
9743     };
9744 
9745     CallBase *CB = cast<CallBase>(getCtxI());
9746 
9747     if (CB->isInlineAsm()) {
9748       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9749           !hasAssumption(*CB, "ompx_no_call_asm"))
9750         setHasUnknownCallee(false, Change);
9751       return Change;
9752     }
9753 
9754     // Process callee metadata if available.
9755     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9756       for (auto &Op : MD->operands()) {
9757         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9758         if (Callee)
9759           addCalledFunction(Callee, Change);
9760       }
9761       return Change;
9762     }
9763 
9764     // The simplest case: process the called operand directly.
9765     ProcessCalledOperand(CB->getCalledOperand());
9766 
9767     // Process callback functions.
9768     SmallVector<const Use *, 4u> CallbackUses;
9769     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9770     for (const Use *U : CallbackUses)
9771       ProcessCalledOperand(U->get());
9772 
9773     return Change;
9774   }
9775 };
9776 
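/// Call edges of a whole function, the union of the call-site edges of all
/// call-like instructions in the function.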
9777 struct AACallEdgesFunction : public AACallEdgesImpl {
9778   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9779       : AACallEdgesImpl(IRP, A) {}
9780 
9781   /// See AbstractAttribute::updateImpl(...).
9782   ChangeStatus updateImpl(Attributor &A) override {
9783     ChangeStatus Change = ChangeStatus::UNCHANGED;
9784 
9785     auto ProcessCallInst = [&](Instruction &Inst) {
9786       CallBase &CB = cast<CallBase>(Inst);
9787 
9788       auto &CBEdges = A.getAAFor<AACallEdges>(
9789           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9790       if (CBEdges.hasNonAsmUnknownCallee())
9791         setHasUnknownCallee(true, Change);
9792       if (CBEdges.hasUnknownCallee())
9793         setHasUnknownCallee(false, Change);
9794 
9795       for (Function *F : CBEdges.getOptimisticEdges())
9796         addCalledFunction(F, Change);
9797 
9798       return true;
9799     };
9800 
9801     // Visit all callable instructions.
9802     bool UsedAssumedInformation = false;
9803     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9804                                            UsedAssumedInformation,
9805                                            /* CheckBBLivenessOnly */ true)) {
9806       // If we haven't looked at all call like instructions, assume that there
9807       // are unknown callees.
9808       setHasUnknownCallee(true, Change);
9809     }
9810 
9811     return Change;
9812   }
9813 };
9814 
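/// Function reachability: answers whether this function, a call base inside
/// it, or an instruction inside it can transitively reach a given function.
/// Results are derived from AACallEdges and cached in per-query resolvers.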
9815 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9816 private:
9817   struct QuerySet {
9818     void markReachable(const Function &Fn) {
9819       Reachable.insert(&Fn);
9820       Unreachable.erase(&Fn);
9821     }
9822 
9823     /// If there is no information about the function, None is returned.
9824     Optional<bool> isCachedReachable(const Function &Fn) {
9825       // Assume that we can reach the function.
9826       // TODO: Be more specific with the unknown callee.
9827       if (CanReachUnknownCallee)
9828         return true;
9829 
9830       if (Reachable.count(&Fn))
9831         return true;
9832 
9833       if (Unreachable.count(&Fn))
9834         return false;
9835 
9836       return llvm::None;
9837     }
9838 
9839     /// Set of functions that we know for sure are reachable.
9840     DenseSet<const Function *> Reachable;
9841 
9842     /// Set of functions that are unreachable, but might become reachable.
9843     DenseSet<const Function *> Unreachable;
9844 
9845     /// If we can reach a function with a call to an unknown function we assume
9846     /// that we can reach any function.
9847     bool CanReachUnknownCallee = false;
9848   };
9849 
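  /// A QuerySet that can update its cached results from a list of call-edge
  /// attributes and resolve new reachability queries.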
9850   struct QueryResolver : public QuerySet {
9851     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9852                         ArrayRef<const AACallEdges *> AAEdgesList) {
9853       ChangeStatus Change = ChangeStatus::UNCHANGED;
9854 
9855       for (auto *AAEdges : AAEdgesList) {
9856         if (AAEdges->hasUnknownCallee()) {
9857           if (!CanReachUnknownCallee)
9858             Change = ChangeStatus::CHANGED;
9859           CanReachUnknownCallee = true;
9860           return Change;
9861         }
9862       }
9863 
9864       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9865         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9866           Change = ChangeStatus::CHANGED;
9867           markReachable(*Fn);
9868         }
9869       }
9870       return Change;
9871     }
9872 
9873     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9874                      ArrayRef<const AACallEdges *> AAEdgesList,
9875                      const Function &Fn) {
9876       Optional<bool> Cached = isCachedReachable(Fn);
9877       if (Cached.hasValue())
9878         return Cached.getValue();
9879 
9880       // The query was not cached, thus it is new. We need to request an update
9881       // explicitly to make sure the information is properly run to a
9882       // fixpoint.
9883       A.registerForUpdate(AA);
9884 
9885       // We need to assume that this function can't reach Fn to prevent
9886       // an infinite loop if this function is recursive.
9887       Unreachable.insert(&Fn);
9888 
9889       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9890       if (Result)
9891         markReachable(Fn);
9892       return Result;
9893     }
9894 
9895     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9896                           ArrayRef<const AACallEdges *> AAEdgesList,
9897                           const Function &Fn) const {
9898 
9899       // Handle the most trivial case first.
9900       for (auto *AAEdges : AAEdgesList) {
9901         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9902 
9903         if (Edges.count(const_cast<Function *>(&Fn)))
9904           return true;
9905       }
9906 
9907       SmallVector<const AAFunctionReachability *, 8> Deps;
9908       for (auto &AAEdges : AAEdgesList) {
9909         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9910 
9911         for (Function *Edge : Edges) {
9912           // Functions that do not call back into the module can be ignored.
9913           if (Edge->hasFnAttribute(Attribute::NoCallback))
9914             continue;
9915 
9916           // We don't need a dependency if the result is reachable.
9917           const AAFunctionReachability &EdgeReachability =
9918               A.getAAFor<AAFunctionReachability>(
9919                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9920           Deps.push_back(&EdgeReachability);
9921 
9922           if (EdgeReachability.canReach(A, Fn))
9923             return true;
9924         }
9925       }
9926 
9927       // The result is false for now, set dependencies and leave.
9928       for (auto *Dep : Deps)
9929         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9930 
9931       return false;
9932     }
9933   };
9934 
9935   /// Get call edges that can be reached by this instruction.
9936   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9937                              const Instruction &Inst,
9938                              SmallVector<const AACallEdges *> &Result) const {
9939     // Determine the call-like instructions that we can reach from Inst.
9940     auto CheckCallBase = [&](Instruction &CBInst) {
9941       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9942         return true;
9943 
9944       auto &CB = cast<CallBase>(CBInst);
9945       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9946           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9947 
9948       Result.push_back(&AAEdges);
9949       return true;
9950     };
9951 
9952     bool UsedAssumedInformation = false;
9953     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9954                                              UsedAssumedInformation,
9955                                              /* CheckBBLivenessOnly */ true);
9956   }
9957 
9958 public:
9959   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9960       : AAFunctionReachability(IRP, A) {}
9961 
9962   bool canReach(Attributor &A, const Function &Fn) const override {
9963     if (!isValidState())
9964       return true;
9965 
9966     const AACallEdges &AAEdges =
9967         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9968 
9969     // Attributor returns attributes as const, so this function has to be
9970     // const for users of this attribute to use it without having to do
9971     // a const_cast.
9972     // This is a hack for us to be able to cache queries.
9973     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9974     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9975                                                           {&AAEdges}, Fn);
9976 
9977     return Result;
9978   }
9979 
9980   /// Can \p CB reach \p Fn
9981   bool canReach(Attributor &A, CallBase &CB,
9982                 const Function &Fn) const override {
9983     if (!isValidState())
9984       return true;
9985 
9986     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9987         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9988 
9989     // Attributor returns attributes as const, so this function has to be
9990     // const for users of this attribute to use it without having to do
9991     // a const_cast.
9992     // This is a hack for us to be able to cache queries.
9993     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9994     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9995 
9996     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9997 
9998     return Result;
9999   }
10000 
10001   bool instructionCanReach(Attributor &A, const Instruction &Inst,
10002                            const Function &Fn,
10003                            bool UseBackwards) const override {
10004     if (!isValidState())
10005       return true;
10006 
10007     if (UseBackwards)
10008       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
10009 
10010     const auto &Reachability = A.getAAFor<AAReachability>(
10011         *this, IRPosition::function(*getAssociatedFunction()),
10012         DepClassTy::REQUIRED);
10013 
10014     SmallVector<const AACallEdges *> CallEdges;
10015     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
10016     // Attributor returns attributes as const, so this function has to be
10017     // const for users of this attribute to use it without having to do
10018     // a const_cast.
10019     // This is a hack for us to be able to cache queries.
10020     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10021     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
10022     if (!AllKnown)
10023       InstQSet.CanReachUnknownCallee = true;
10024 
10025     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
10026   }
10027 
10028   /// See AbstractAttribute::updateImpl(...).
10029   ChangeStatus updateImpl(Attributor &A) override {
10030     const AACallEdges &AAEdges =
10031         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10032     ChangeStatus Change = ChangeStatus::UNCHANGED;
10033 
10034     Change |= WholeFunction.update(A, *this, {&AAEdges});
10035 
10036     for (auto &CBPair : CBQueries) {
10037       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10038           *this, IRPosition::callsite_function(*CBPair.first),
10039           DepClassTy::REQUIRED);
10040 
10041       Change |= CBPair.second.update(A, *this, {&AAEdges});
10042     }
10043 
10044     // Update the Instruction queries.
10045     if (!InstQueries.empty()) {
10046       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
10047           *this, IRPosition::function(*getAssociatedFunction()),
10048           DepClassTy::REQUIRED);
10049 
10050       // Check for local callbases first.
10051       for (auto &InstPair : InstQueries) {
10052         SmallVector<const AACallEdges *> CallEdges;
10053         bool AllKnown =
10054             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
10055         // The update will return CHANGED if this affects any queries.
10056         if (!AllKnown)
10057           InstPair.second.CanReachUnknownCallee = true;
10058         Change |= InstPair.second.update(A, *this, CallEdges);
10059       }
10060     }
10061 
10062     return Change;
10063   }
10064 
10065   const std::string getAsStr() const override {
10066     size_t QueryCount =
10067         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
10068 
10069     return "FunctionReachability [" +
10070            std::to_string(WholeFunction.Reachable.size()) + "," +
10071            std::to_string(QueryCount) + "]";
10072   }
10073 
10074   void trackStatistics() const override {}
10075 
10076 private:
10077   bool canReachUnknownCallee() const override {
10078     return WholeFunction.CanReachUnknownCallee;
10079   }
10080 
10081   /// Used to answer if the whole function can reach a specific function.
10082   QueryResolver WholeFunction;
10083 
10084   /// Used to answer if a call base inside this function can reach a specific
10085   /// function.
10086   MapVector<const CallBase *, QueryResolver> CBQueries;
10087 
10088   /// This is for instruction queries that scan "forward".
10089   MapVector<const Instruction *, QueryResolver> InstQueries;
10090 };
10091 } // namespace
10092 
10093 /// ---------------------- Assumption Propagation ------------------------------
10094 namespace {
10095 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10096   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10097                        const DenseSet<StringRef> &Known)
10098       : AAAssumptionInfo(IRP, A, Known) {}
10099 
10100   bool hasAssumption(const StringRef Assumption) const override {
10101     return isValidState() && setContains(Assumption);
10102   }
10103 
10104   /// See AbstractAttribute::getAsStr()
10105   const std::string getAsStr() const override {
10106     const SetContents &Known = getKnown();
10107     const SetContents &Assumed = getAssumed();
10108 
10109     const std::string KnownStr =
10110         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10111     const std::string AssumedStr =
10112         (Assumed.isUniversal())
10113             ? "Universal"
10114             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10115 
10116     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10117   }
10118 };
10119 
10120 /// Propagates assumption information from parent functions to all of their
10121 /// successors. An assumption can be propagated if the containing function
10122 /// dominates the called function.
10123 ///
10124 /// We start with a "known" set of assumptions already valid for the associated
10125 /// function and an "assumed" set that initially contains all possible
10126 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10127 /// contents as concrete values are known. The concrete values are seeded by the
10128 /// first nodes that are either entries into the call graph, or contain no
10129 /// assumptions. Each node is updated as the intersection of the assumed state
10130 /// with all of its predecessors.
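/// For example, if this function is called from two sites whose assumption
/// sets are {"A","B"} and {"A","C"}, its assumed set narrows to {"A"}, which
/// is what can safely be propagated onward.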
10131 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10132   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10133       : AAAssumptionInfoImpl(IRP, A,
10134                              getAssumptions(*IRP.getAssociatedFunction())) {}
10135 
10136   /// See AbstractAttribute::manifest(...).
10137   ChangeStatus manifest(Attributor &A) override {
10138     const auto &Assumptions = getKnown();
10139 
10140     // Don't manifest a universal set if it somehow made it here.
10141     if (Assumptions.isUniversal())
10142       return ChangeStatus::UNCHANGED;
10143 
10144     Function *AssociatedFunction = getAssociatedFunction();
10145 
10146     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10147 
10148     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10149   }
10150 
10151   /// See AbstractAttribute::updateImpl(...).
10152   ChangeStatus updateImpl(Attributor &A) override {
10153     bool Changed = false;
10154 
10155     auto CallSitePred = [&](AbstractCallSite ACS) {
10156       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10157           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10158           DepClassTy::REQUIRED);
10159       // Get the set of assumptions shared by all of this function's callers.
10160       Changed |= getIntersection(AssumptionAA.getAssumed());
10161       return !getAssumed().empty() || !getKnown().empty();
10162     };
10163 
10164     bool UsedAssumedInformation = false;
10165     // Get the intersection of all assumptions held by this node's predecessors.
10166     // If we don't know all the call sites then this is either an entry into the
10167     // call graph or an empty node. This node is known to only contain its own
10168     // assumptions and can be propagated to its successors.
10169     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10170                                 UsedAssumedInformation))
10171       return indicatePessimisticFixpoint();
10172 
10173     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10174   }
10175 
10176   void trackStatistics() const override {}
10177 };
10178 
10179 /// Assumption Info defined for call sites.
10180 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10181 
10182   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10183       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10184 
10185   /// See AbstractAttribute::initialize(...).
10186   void initialize(Attributor &A) override {
10187     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10188     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10189   }
10190 
10191   /// See AbstractAttribute::manifest(...).
10192   ChangeStatus manifest(Attributor &A) override {
10193     // Don't manifest a universal set if it somehow made it here.
10194     if (getKnown().isUniversal())
10195       return ChangeStatus::UNCHANGED;
10196 
10197     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10198     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10199 
10200     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10201   }
10202 
10203   /// See AbstractAttribute::updateImpl(...).
10204   ChangeStatus updateImpl(Attributor &A) override {
10205     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10206     auto &AssumptionAA =
10207         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10208     bool Changed = getIntersection(AssumptionAA.getAssumed());
10209     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10210   }
10211 
10212   /// See AbstractAttribute::trackStatistics()
10213   void trackStatistics() const override {}
10214 
10215 private:
10216   /// Helper to initialize the known set to all the assumptions this call and
10217   /// the callee contain.
10218   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10219     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10220     auto Assumptions = getAssumptions(CB);
10221     if (Function *F = IRP.getAssociatedFunction())
10222       set_union(Assumptions, getAssumptions(*F));
10225     return Assumptions;
10226   }
10227 };
10228 } // namespace
10229 
10230 AACallGraphNode *AACallEdgeIterator::operator*() const {
10231   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10232       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10233 }
10234 
10235 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10236 
10237 const char AAReturnedValues::ID = 0;
10238 const char AANoUnwind::ID = 0;
10239 const char AANoSync::ID = 0;
10240 const char AANoFree::ID = 0;
10241 const char AANonNull::ID = 0;
10242 const char AANoRecurse::ID = 0;
10243 const char AAWillReturn::ID = 0;
10244 const char AAUndefinedBehavior::ID = 0;
10245 const char AANoAlias::ID = 0;
10246 const char AAReachability::ID = 0;
10247 const char AANoReturn::ID = 0;
10248 const char AAIsDead::ID = 0;
10249 const char AADereferenceable::ID = 0;
10250 const char AAAlign::ID = 0;
10251 const char AAInstanceInfo::ID = 0;
10252 const char AANoCapture::ID = 0;
10253 const char AAValueSimplify::ID = 0;
10254 const char AAHeapToStack::ID = 0;
10255 const char AAPrivatizablePtr::ID = 0;
10256 const char AAMemoryBehavior::ID = 0;
10257 const char AAMemoryLocation::ID = 0;
10258 const char AAValueConstantRange::ID = 0;
10259 const char AAPotentialConstantValues::ID = 0;
10260 const char AANoUndef::ID = 0;
10261 const char AACallEdges::ID = 0;
10262 const char AAFunctionReachability::ID = 0;
10263 const char AAPointerInfo::ID = 0;
10264 const char AAAssumptionInfo::ID = 0;
10265 
10266 // Macro magic to create the static generator function for attributes that
10267 // follow the naming scheme.
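//
// For example, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// defines AANoUnwind::createForPosition, which switches on the position kind,
// instantiates AANoUnwindFunction or AANoUnwindCallSite for the valid kinds,
// and aborts via llvm_unreachable for all others.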
10268 
10269 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10270   case IRPosition::PK:                                                         \
10271     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10272 
10273 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10274   case IRPosition::PK:                                                         \
10275     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10276     ++NumAAs;                                                                  \
10277     break;
10278 
10279 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10280   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10281     CLASS *AA = nullptr;                                                       \
10282     switch (IRP.getPositionKind()) {                                           \
10283       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10284       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10285       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10286       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10287       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10288       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10289       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10290       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10291     }                                                                          \
10292     return *AA;                                                                \
10293   }
10294 
10295 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10296   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10297     CLASS *AA = nullptr;                                                       \
10298     switch (IRP.getPositionKind()) {                                           \
10299       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10300       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10301       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10302       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10303       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10304       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10305       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10306       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10307     }                                                                          \
10308     return *AA;                                                                \
10309   }
10310 
10311 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10312   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10313     CLASS *AA = nullptr;                                                       \
10314     switch (IRP.getPositionKind()) {                                           \
10315       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10316       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10317       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10318       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10319       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10320       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10321       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10322       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10323     }                                                                          \
10324     return *AA;                                                                \
10325   }
10326 
10327 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10328   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10329     CLASS *AA = nullptr;                                                       \
10330     switch (IRP.getPositionKind()) {                                           \
10331       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10332       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10333       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10334       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10335       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10336       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10337       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10338       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10339     }                                                                          \
10340     return *AA;                                                                \
10341   }
10342 
10343 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10344   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10345     CLASS *AA = nullptr;                                                       \
10346     switch (IRP.getPositionKind()) {                                           \
10347       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10348       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10349       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10350       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10351       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10352       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10353       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10354       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10355     }                                                                          \
10356     return *AA;                                                                \
10357   }
10358 
10359 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10360 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10361 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10362 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10363 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10364 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10365 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10366 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10367 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10368 
10369 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10370 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10371 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10372 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10373 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10374 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10375 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10376 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10377 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10378 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10379 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10380 
10381 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10382 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10383 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10384 
10385 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10386 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10387 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10388 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10389 
10390 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10391 
10392 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10393 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10394 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10395 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10396 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10397 #undef SWITCH_PK_CREATE
10398 #undef SWITCH_PK_INV
10399