1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetOperations.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/LazyValueInfo.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
32 #include "llvm/Analysis/ScalarEvolution.h"
33 #include "llvm/Analysis/TargetTransformInfo.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Assumptions.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/IR/GlobalValue.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/NoFolder.h"
47 #include "llvm/IR/Value.h"
48 #include "llvm/IR/ValueHandle.h"
49 #include "llvm/Support/Alignment.h"
50 #include "llvm/Support/Casting.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/GraphWriter.h"
54 #include "llvm/Support/MathExtras.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include "llvm/Transforms/Utils/Local.h"
57 #include "llvm/Transforms/Utils/ValueMapper.h"
58 #include <cassert>
59 
60 using namespace llvm;
61 
62 #define DEBUG_TYPE "attributor"
63 
64 static cl::opt<bool> ManifestInternal(
65     "attributor-manifest-internal", cl::Hidden,
66     cl::desc("Manifest Attributor internal string attributes."),
67     cl::init(false));
68 
69 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
70                                        cl::Hidden);
71 
72 template <>
73 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
74 
75 static cl::opt<unsigned, true> MaxPotentialValues(
76     "attributor-max-potential-values", cl::Hidden,
77     cl::desc("Maximum number of potential values to be "
78              "tracked for each position."),
79     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
80     cl::init(7));
81 
82 static cl::opt<unsigned> MaxInterferingAccesses(
83     "attributor-max-interfering-accesses", cl::Hidden,
84     cl::desc("Maximum number of interfering accesses to "
85              "check before assuming all might interfere."),
86     cl::init(6));
87 
88 STATISTIC(NumAAs, "Number of abstract attributes created");
89 
90 // Some helper macros to deal with statistics tracking.
91 //
92 // Usage:
93 // For simple IR attribute tracking overload trackStatistics in the abstract
94 // attribute and choose the right STATS_DECLTRACK_********* macro,
95 // e.g.,:
96 //  void trackStatistics() const override {
97 //    STATS_DECLTRACK_ARG_ATTR(returned)
98 //  }
99 // If there is a single "increment" side one can use the macro
100 // STATS_DECLTRACK with a custom message. If there are multiple increment
101 // sides, STATS_DECL and STATS_TRACK can also be used separately.
102 //
103 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
104   ("Number of " #TYPE " marked '" #NAME "'")
105 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
106 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
107 #define STATS_DECL(NAME, TYPE, MSG)                                            \
108   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
109 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
110 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
111   {                                                                            \
112     STATS_DECL(NAME, TYPE, MSG)                                                \
113     STATS_TRACK(NAME, TYPE)                                                    \
114   }
115 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
116   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
117 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
118   STATS_DECLTRACK(NAME, CSArguments,                                           \
119                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
120 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
121   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
122 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
123   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
124 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
125   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
126                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
127 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
128   STATS_DECLTRACK(NAME, CSReturn,                                              \
129                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
130 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
131   STATS_DECLTRACK(NAME, Floating,                                              \
132                   ("Number of floating values known to be '" #NAME "'"))
133 
134 // Specialization of the operator<< for abstract attributes subclasses. This
135 // disambiguates situations where multiple operators are applicable.
136 namespace llvm {
137 #define PIPE_OPERATOR(CLASS)                                                   \
138   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
139     return OS << static_cast<const AbstractAttribute &>(AA);                   \
140   }
141 
142 PIPE_OPERATOR(AAIsDead)
143 PIPE_OPERATOR(AANoUnwind)
144 PIPE_OPERATOR(AANoSync)
145 PIPE_OPERATOR(AANoRecurse)
146 PIPE_OPERATOR(AAWillReturn)
147 PIPE_OPERATOR(AANoReturn)
148 PIPE_OPERATOR(AAReturnedValues)
149 PIPE_OPERATOR(AANonNull)
150 PIPE_OPERATOR(AANoAlias)
151 PIPE_OPERATOR(AADereferenceable)
152 PIPE_OPERATOR(AAAlign)
153 PIPE_OPERATOR(AAInstanceInfo)
154 PIPE_OPERATOR(AANoCapture)
155 PIPE_OPERATOR(AAValueSimplify)
156 PIPE_OPERATOR(AANoFree)
157 PIPE_OPERATOR(AAHeapToStack)
158 PIPE_OPERATOR(AAReachability)
159 PIPE_OPERATOR(AAMemoryBehavior)
160 PIPE_OPERATOR(AAMemoryLocation)
161 PIPE_OPERATOR(AAValueConstantRange)
162 PIPE_OPERATOR(AAPrivatizablePtr)
163 PIPE_OPERATOR(AAUndefinedBehavior)
164 PIPE_OPERATOR(AAPotentialConstantValues)
165 PIPE_OPERATOR(AANoUndef)
166 PIPE_OPERATOR(AACallEdges)
167 PIPE_OPERATOR(AAFunctionReachability)
168 PIPE_OPERATOR(AAPointerInfo)
169 PIPE_OPERATOR(AAAssumptionInfo)
170 
171 #undef PIPE_OPERATOR
172 
173 template <>
174 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
175                                                      const DerefState &R) {
176   ChangeStatus CS0 =
177       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
178   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
179   return CS0 | CS1;
180 }
181 
182 } // namespace llvm
183 
184 /// Checks if a type could have padding bytes.
185 static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
186   // There is no size information, so be conservative.
187   if (!Ty->isSized())
188     return false;
189 
190   // If the alloc size is not equal to the storage size, then there are padding
191   // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
192   if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
193     return false;
194 
195   // FIXME: This isn't the right way to check for padding in vectors with
196   // non-byte-size elements.
197   if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
198     return isDenselyPacked(SeqTy->getElementType(), DL);
199 
200   // For array types, check for padding within members.
201   if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
202     return isDenselyPacked(SeqTy->getElementType(), DL);
203 
204   if (!isa<StructType>(Ty))
205     return true;
206 
207   // Check for padding within and between elements of a struct.
208   StructType *StructTy = cast<StructType>(Ty);
209   const StructLayout *Layout = DL.getStructLayout(StructTy);
210   uint64_t StartPos = 0;
211   for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
212     Type *ElTy = StructTy->getElementType(I);
213     if (!isDenselyPacked(ElTy, DL))
214       return false;
215     if (StartPos != Layout->getElementOffsetInBits(I))
216       return false;
217     StartPos += DL.getTypeAllocSizeInBits(ElTy);
218   }
219 
220   return true;
221 }
222 
223 /// Get pointer operand of memory accessing instruction. If \p I is
224 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
225 /// is set to false and the instruction is volatile, return nullptr.
226 static const Value *getPointerOperand(const Instruction *I,
227                                       bool AllowVolatile) {
228   if (!AllowVolatile && I->isVolatile())
229     return nullptr;
230 
231   if (auto *LI = dyn_cast<LoadInst>(I)) {
232     return LI->getPointerOperand();
233   }
234 
235   if (auto *SI = dyn_cast<StoreInst>(I)) {
236     return SI->getPointerOperand();
237   }
238 
239   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
240     return CXI->getPointerOperand();
241   }
242 
243   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
244     return RMWI->getPointerOperand();
245   }
246 
247   return nullptr;
248 }
249 
250 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
251 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
252 /// getelement pointer instructions that traverse the natural type of \p Ptr if
253 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
254 /// through a cast to i8*.
255 ///
256 /// TODO: This could probably live somewhere more prominantly if it doesn't
257 ///       already exist.
258 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
259                                int64_t Offset, IRBuilder<NoFolder> &IRB,
260                                const DataLayout &DL) {
261   assert(Offset >= 0 && "Negative offset not supported yet!");
262   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
263                     << "-bytes as " << *ResTy << "\n");
264 
265   if (Offset) {
266     Type *Ty = PtrElemTy;
267     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
268     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
269 
270     SmallVector<Value *, 4> ValIndices;
271     std::string GEPName = Ptr->getName().str();
272     for (const APInt &Index : IntIndices) {
273       ValIndices.push_back(IRB.getInt(Index));
274       GEPName += "." + std::to_string(Index.getZExtValue());
275     }
276 
277     // Create a GEP for the indices collected above.
278     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
279 
280     // If an offset is left we use byte-wise adjustment.
281     if (IntOffset != 0) {
282       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
283       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
284                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
285     }
286   }
287 
288   // Ensure the result has the requested type.
289   Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
290                                                 Ptr->getName() + ".cast");
291 
292   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
293   return Ptr;
294 }
295 
296 /// Recursively visit all values that might become \p IRP at some point. This
297 /// will be done by looking through cast instructions, selects, phis, and calls
298 /// with the "returned" attribute. Once we cannot look through the value any
299 /// further, the callback \p VisitValueCB is invoked and passed the current
300 /// value, the \p State, and a flag to indicate if we stripped anything.
301 /// Stripped means that we unpacked the value associated with \p IRP at least
302 /// once. Note that the value used for the callback may still be the value
303 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
304 /// we will never visit more values than specified by \p MaxValues.
305 /// If \p VS does not contain the Interprocedural bit, only values valid in the
306 /// scope of \p CtxI will be visited and simplification into other scopes is
307 /// prevented.
308 template <typename StateTy>
309 static bool genericValueTraversal(
310     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
311     StateTy &State,
312     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
313         VisitValueCB,
314     const Instruction *CtxI, bool &UsedAssumedInformation,
315     bool UseValueSimplify = true, int MaxValues = 16,
316     function_ref<Value *(Value *)> StripCB = nullptr,
317     AA::ValueScope VS = AA::Interprocedural) {
318 
319   struct LivenessInfo {
320     const AAIsDead *LivenessAA = nullptr;
321     bool AnyDead = false;
322   };
323   SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
324   auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
325     LivenessInfo &LI = LivenessAAs[&F];
326     if (!LI.LivenessAA)
327       LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
328                                             DepClassTy::NONE);
329     return LI;
330   };
331 
332   Value *InitialV = &IRP.getAssociatedValue();
333   using Item = std::pair<Value *, const Instruction *>;
334   SmallSet<Item, 16> Visited;
335   SmallVector<Item, 16> Worklist;
336   Worklist.push_back({InitialV, CtxI});
337 
338   int Iteration = 0;
339   do {
340     Item I = Worklist.pop_back_val();
341     Value *V = I.first;
342     CtxI = I.second;
343     if (StripCB)
344       V = StripCB(V);
345 
346     // Check if we should process the current value. To prevent endless
347     // recursion keep a record of the values we followed!
348     if (!Visited.insert(I).second)
349       continue;
350 
351     // Make sure we limit the compile time for complex expressions.
352     if (Iteration++ >= MaxValues) {
353       LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
354                         << Iteration << "!\n");
355       return false;
356     }
357 
358     // Explicitly look through calls with a "returned" attribute if we do
359     // not have a pointer as stripPointerCasts only works on them.
360     Value *NewV = nullptr;
361     if (V->getType()->isPointerTy()) {
362       NewV = V->stripPointerCasts();
363     } else {
364       auto *CB = dyn_cast<CallBase>(V);
365       if (CB && CB->getCalledFunction()) {
366         for (Argument &Arg : CB->getCalledFunction()->args())
367           if (Arg.hasReturnedAttr()) {
368             NewV = CB->getArgOperand(Arg.getArgNo());
369             break;
370           }
371       }
372     }
373     if (NewV && NewV != V) {
374       Worklist.push_back({NewV, CtxI});
375       continue;
376     }
377 
378     // Look through select instructions, visit assumed potential values.
379     if (auto *SI = dyn_cast<SelectInst>(V)) {
380       Optional<Constant *> C = A.getAssumedConstant(
381           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
382       bool NoValueYet = !C;
383       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
384         continue;
385       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
386         if (CI->isZero())
387           Worklist.push_back({SI->getFalseValue(), CtxI});
388         else
389           Worklist.push_back({SI->getTrueValue(), CtxI});
390         continue;
391       }
392       // We could not simplify the condition, assume both values.(
393       Worklist.push_back({SI->getTrueValue(), CtxI});
394       Worklist.push_back({SI->getFalseValue(), CtxI});
395       continue;
396     }
397 
398     // Look through phi nodes, visit all live operands.
399     if (auto *PHI = dyn_cast<PHINode>(V)) {
400       LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
401       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
402         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
403         if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
404           LI.AnyDead = true;
405           UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
406           continue;
407         }
408         Worklist.push_back(
409             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
410       }
411       continue;
412     }
413 
414     if (auto *Arg = dyn_cast<Argument>(V)) {
415       if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
416         SmallVector<Item> CallSiteValues;
417         bool UsedAssumedInformation = false;
418         if (A.checkForAllCallSites(
419                 [&](AbstractCallSite ACS) {
420                   // Callbacks might not have a corresponding call site operand,
421                   // stick with the argument in that case.
422                   Value *CSOp = ACS.getCallArgOperand(*Arg);
423                   if (!CSOp)
424                     return false;
425                   CallSiteValues.push_back({CSOp, ACS.getInstruction()});
426                   return true;
427                 },
428                 *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
429           Worklist.append(CallSiteValues);
430           continue;
431         }
432       }
433     }
434 
435     if (UseValueSimplify && !isa<Constant>(V)) {
436       Optional<Value *> SimpleV =
437           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
438       if (!SimpleV)
439         continue;
440       Value *NewV = SimpleV.value();
441       if (NewV && NewV != V) {
442         if ((VS & AA::Interprocedural) || !CtxI ||
443             AA::isValidInScope(*NewV, CtxI->getFunction())) {
444           Worklist.push_back({NewV, CtxI});
445           continue;
446         }
447       }
448     }
449 
450     if (auto *LI = dyn_cast<LoadInst>(V)) {
451       bool UsedAssumedInformation = false;
452       // If we ask for the potentially loaded values from the initial pointer we
453       // will simply end up here again. The load is as far as we can make it.
454       if (LI->getPointerOperand() != InitialV) {
455         SmallSetVector<Value *, 4> PotentialCopies;
456         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
457         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
458                                            PotentialValueOrigins, QueryingAA,
459                                            UsedAssumedInformation,
460                                            /* OnlyExact */ true)) {
461           // Values have to be dynamically unique or we loose the fact that a
462           // single llvm::Value might represent two runtime values (e.g., stack
463           // locations in different recursive calls).
464           bool DynamicallyUnique =
465               llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
466                 return AA::isDynamicallyUnique(A, QueryingAA, *PC);
467               });
468           if (DynamicallyUnique &&
469               ((VS & AA::Interprocedural) || !CtxI ||
470                llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
471                  return AA::isValidInScope(*PC, CtxI->getFunction());
472                }))) {
473             for (auto *PotentialCopy : PotentialCopies)
474               Worklist.push_back({PotentialCopy, CtxI});
475             continue;
476           }
477         }
478       }
479     }
480 
481     // Once a leaf is reached we inform the user through the callback.
482     if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
483       LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
484                         << *V << "!\n");
485       return false;
486     }
487   } while (!Worklist.empty());
488 
489   // If we actually used liveness information so we have to record a dependence.
490   for (auto &It : LivenessAAs)
491     if (It.second.AnyDead)
492       A.recordDependence(*It.second.LivenessAA, QueryingAA,
493                          DepClassTy::OPTIONAL);
494 
495   // All values have been visited.
496   return true;
497 }
498 
499 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
500                                      SmallVectorImpl<Value *> &Objects,
501                                      const AbstractAttribute &QueryingAA,
502                                      const Instruction *CtxI,
503                                      bool &UsedAssumedInformation,
504                                      AA::ValueScope VS) {
505   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
506   SmallPtrSet<Value *, 8> SeenObjects;
507   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
508                                      SmallVectorImpl<Value *> &Objects,
509                                      bool) -> bool {
510     if (SeenObjects.insert(&Val).second)
511       Objects.push_back(&Val);
512     return true;
513   };
514   if (!genericValueTraversal<decltype(Objects)>(
515           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
516           UsedAssumedInformation, true, 32, StripCB, VS))
517     return false;
518   return true;
519 }
520 
521 static const Value *
522 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
523                           const Value *Val, const DataLayout &DL, APInt &Offset,
524                           bool GetMinOffset, bool AllowNonInbounds,
525                           bool UseAssumed = false) {
526 
527   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
528     const IRPosition &Pos = IRPosition::value(V);
529     // Only track dependence if we are going to use the assumed info.
530     const AAValueConstantRange &ValueConstantRangeAA =
531         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
532                                          UseAssumed ? DepClassTy::OPTIONAL
533                                                     : DepClassTy::NONE);
534     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
535                                      : ValueConstantRangeAA.getKnown();
536     if (Range.isFullSet())
537       return false;
538 
539     // We can only use the lower part of the range because the upper part can
540     // be higher than what the value can really be.
541     if (GetMinOffset)
542       ROffset = Range.getSignedMin();
543     else
544       ROffset = Range.getSignedMax();
545     return true;
546   };
547 
548   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
549                                                 /* AllowInvariant */ true,
550                                                 AttributorAnalysis);
551 }
552 
553 static const Value *
554 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
555                         const Value *Ptr, int64_t &BytesOffset,
556                         const DataLayout &DL, bool AllowNonInbounds = false) {
557   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
558   const Value *Base =
559       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
560                                 /* GetMinOffset */ true, AllowNonInbounds);
561 
562   BytesOffset = OffsetAPInt.getSExtValue();
563   return Base;
564 }
565 
566 /// Clamp the information known for all returned values of a function
567 /// (identified by \p QueryingAA) into \p S.
568 template <typename AAType, typename StateType = typename AAType::StateType>
569 static void clampReturnedValueStates(
570     Attributor &A, const AAType &QueryingAA, StateType &S,
571     const IRPosition::CallBaseContext *CBContext = nullptr) {
572   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
573                     << QueryingAA << " into " << S << "\n");
574 
575   assert((QueryingAA.getIRPosition().getPositionKind() ==
576               IRPosition::IRP_RETURNED ||
577           QueryingAA.getIRPosition().getPositionKind() ==
578               IRPosition::IRP_CALL_SITE_RETURNED) &&
579          "Can only clamp returned value states for a function returned or call "
580          "site returned position!");
581 
582   // Use an optional state as there might not be any return values and we want
583   // to join (IntegerState::operator&) the state of all there are.
584   Optional<StateType> T;
585 
586   // Callback for each possibly returned value.
587   auto CheckReturnValue = [&](Value &RV) -> bool {
588     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
589     const AAType &AA =
590         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
591     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
592                       << " @ " << RVPos << "\n");
593     const StateType &AAS = AA.getState();
594     if (!T)
595       T = StateType::getBestState(AAS);
596     *T &= AAS;
597     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
598                       << "\n");
599     return T->isValidState();
600   };
601 
602   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
603     S.indicatePessimisticFixpoint();
604   else if (T)
605     S ^= *T;
606 }
607 
608 namespace {
609 /// Helper class for generic deduction: return value -> returned position.
610 template <typename AAType, typename BaseType,
611           typename StateType = typename BaseType::StateType,
612           bool PropagateCallBaseContext = false>
613 struct AAReturnedFromReturnedValues : public BaseType {
614   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
615       : BaseType(IRP, A) {}
616 
617   /// See AbstractAttribute::updateImpl(...).
618   ChangeStatus updateImpl(Attributor &A) override {
619     StateType S(StateType::getBestState(this->getState()));
620     clampReturnedValueStates<AAType, StateType>(
621         A, *this, S,
622         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
623     // TODO: If we know we visited all returned values, thus no are assumed
624     // dead, we can take the known information from the state T.
625     return clampStateAndIndicateChange<StateType>(this->getState(), S);
626   }
627 };
628 
629 /// Clamp the information known at all call sites for a given argument
630 /// (identified by \p QueryingAA) into \p S.
631 template <typename AAType, typename StateType = typename AAType::StateType>
632 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
633                                         StateType &S) {
634   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
635                     << QueryingAA << " into " << S << "\n");
636 
637   assert(QueryingAA.getIRPosition().getPositionKind() ==
638              IRPosition::IRP_ARGUMENT &&
639          "Can only clamp call site argument states for an argument position!");
640 
641   // Use an optional state as there might not be any return values and we want
642   // to join (IntegerState::operator&) the state of all there are.
643   Optional<StateType> T;
644 
645   // The argument number which is also the call site argument number.
646   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
647 
648   auto CallSiteCheck = [&](AbstractCallSite ACS) {
649     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
650     // Check if a coresponding argument was found or if it is on not associated
651     // (which can happen for callback calls).
652     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
653       return false;
654 
655     const AAType &AA =
656         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
657     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
658                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
659     const StateType &AAS = AA.getState();
660     if (!T)
661       T = StateType::getBestState(AAS);
662     *T &= AAS;
663     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
664                       << "\n");
665     return T->isValidState();
666   };
667 
668   bool UsedAssumedInformation = false;
669   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
670                               UsedAssumedInformation))
671     S.indicatePessimisticFixpoint();
672   else if (T)
673     S ^= *T;
674 }
675 
676 /// This function is the bridge between argument position and the call base
677 /// context.
678 template <typename AAType, typename BaseType,
679           typename StateType = typename AAType::StateType>
680 bool getArgumentStateFromCallBaseContext(Attributor &A,
681                                          BaseType &QueryingAttribute,
682                                          IRPosition &Pos, StateType &State) {
683   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
684          "Expected an 'argument' position !");
685   const CallBase *CBContext = Pos.getCallBaseContext();
686   if (!CBContext)
687     return false;
688 
689   int ArgNo = Pos.getCallSiteArgNo();
690   assert(ArgNo >= 0 && "Invalid Arg No!");
691 
692   const auto &AA = A.getAAFor<AAType>(
693       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
694       DepClassTy::REQUIRED);
695   const StateType &CBArgumentState =
696       static_cast<const StateType &>(AA.getState());
697 
698   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
699                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
700                     << "\n");
701 
702   // NOTE: If we want to do call site grouping it should happen here.
703   State ^= CBArgumentState;
704   return true;
705 }
706 
707 /// Helper class for generic deduction: call site argument -> argument position.
708 template <typename AAType, typename BaseType,
709           typename StateType = typename AAType::StateType,
710           bool BridgeCallBaseContext = false>
711 struct AAArgumentFromCallSiteArguments : public BaseType {
712   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
713       : BaseType(IRP, A) {}
714 
715   /// See AbstractAttribute::updateImpl(...).
716   ChangeStatus updateImpl(Attributor &A) override {
717     StateType S = StateType::getBestState(this->getState());
718 
719     if (BridgeCallBaseContext) {
720       bool Success =
721           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
722               A, *this, this->getIRPosition(), S);
723       if (Success)
724         return clampStateAndIndicateChange<StateType>(this->getState(), S);
725     }
726     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
727 
728     // TODO: If we know we visited all incoming values, thus no are assumed
729     // dead, we can take the known information from the state T.
730     return clampStateAndIndicateChange<StateType>(this->getState(), S);
731   }
732 };
733 
734 /// Helper class for generic replication: function returned -> cs returned.
735 template <typename AAType, typename BaseType,
736           typename StateType = typename BaseType::StateType,
737           bool IntroduceCallBaseContext = false>
738 struct AACallSiteReturnedFromReturned : public BaseType {
739   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
740       : BaseType(IRP, A) {}
741 
742   /// See AbstractAttribute::updateImpl(...).
743   ChangeStatus updateImpl(Attributor &A) override {
744     assert(this->getIRPosition().getPositionKind() ==
745                IRPosition::IRP_CALL_SITE_RETURNED &&
746            "Can only wrap function returned positions for call site returned "
747            "positions!");
748     auto &S = this->getState();
749 
750     const Function *AssociatedFunction =
751         this->getIRPosition().getAssociatedFunction();
752     if (!AssociatedFunction)
753       return S.indicatePessimisticFixpoint();
754 
755     CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
756     if (IntroduceCallBaseContext)
757       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
758                         << CBContext << "\n");
759 
760     IRPosition FnPos = IRPosition::returned(
761         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
762     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
763     return clampStateAndIndicateChange(S, AA.getState());
764   }
765 };
766 
767 /// Helper function to accumulate uses.
768 template <class AAType, typename StateType = typename AAType::StateType>
769 static void followUsesInContext(AAType &AA, Attributor &A,
770                                 MustBeExecutedContextExplorer &Explorer,
771                                 const Instruction *CtxI,
772                                 SetVector<const Use *> &Uses,
773                                 StateType &State) {
774   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
775   for (unsigned u = 0; u < Uses.size(); ++u) {
776     const Use *U = Uses[u];
777     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
778       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
779       if (Found && AA.followUseInMBEC(A, U, UserI, State))
780         for (const Use &Us : UserI->uses())
781           Uses.insert(&Us);
782     }
783   }
784 }
785 
786 /// Use the must-be-executed-context around \p I to add information into \p S.
787 /// The AAType class is required to have `followUseInMBEC` method with the
788 /// following signature and behaviour:
789 ///
790 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
791 /// U - Underlying use.
792 /// I - The user of the \p U.
793 /// Returns true if the value should be tracked transitively.
794 ///
795 template <class AAType, typename StateType = typename AAType::StateType>
796 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
797                              Instruction &CtxI) {
798 
799   // Container for (transitive) uses of the associated value.
800   SetVector<const Use *> Uses;
801   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
802     Uses.insert(&U);
803 
804   MustBeExecutedContextExplorer &Explorer =
805       A.getInfoCache().getMustBeExecutedContextExplorer();
806 
807   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
808 
809   if (S.isAtFixpoint())
810     return;
811 
812   SmallVector<const BranchInst *, 4> BrInsts;
813   auto Pred = [&](const Instruction *I) {
814     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
815       if (Br->isConditional())
816         BrInsts.push_back(Br);
817     return true;
818   };
819 
820   // Here, accumulate conditional branch instructions in the context. We
821   // explore the child paths and collect the known states. The disjunction of
822   // those states can be merged to its own state. Let ParentState_i be a state
823   // to indicate the known information for an i-th branch instruction in the
824   // context. ChildStates are created for its successors respectively.
825   //
826   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
827   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
828   //      ...
829   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
830   //
831   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
832   //
833   // FIXME: Currently, recursive branches are not handled. For example, we
834   // can't deduce that ptr must be dereferenced in below function.
835   //
836   // void f(int a, int c, int *ptr) {
837   //    if(a)
838   //      if (b) {
839   //        *ptr = 0;
840   //      } else {
841   //        *ptr = 1;
842   //      }
843   //    else {
844   //      if (b) {
845   //        *ptr = 0;
846   //      } else {
847   //        *ptr = 1;
848   //      }
849   //    }
850   // }
851 
852   Explorer.checkForAllContext(&CtxI, Pred);
853   for (const BranchInst *Br : BrInsts) {
854     StateType ParentState;
855 
856     // The known state of the parent state is a conjunction of children's
857     // known states so it is initialized with a best state.
858     ParentState.indicateOptimisticFixpoint();
859 
860     for (const BasicBlock *BB : Br->successors()) {
861       StateType ChildState;
862 
863       size_t BeforeSize = Uses.size();
864       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
865 
866       // Erase uses which only appear in the child.
867       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
868         It = Uses.erase(It);
869 
870       ParentState &= ChildState;
871     }
872 
873     // Use only known state.
874     S += ParentState;
875   }
876 }
877 } // namespace
878 
879 /// ------------------------ PointerInfo ---------------------------------------
880 
881 namespace llvm {
882 namespace AA {
883 namespace PointerInfo {
884 
885 struct State;
886 
887 } // namespace PointerInfo
888 } // namespace AA
889 
890 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
891 template <>
892 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
893   using Access = AAPointerInfo::Access;
894   static inline Access getEmptyKey();
895   static inline Access getTombstoneKey();
896   static unsigned getHashValue(const Access &A);
897   static bool isEqual(const Access &LHS, const Access &RHS);
898 };
899 
900 /// Helper that allows OffsetAndSize as a key in a DenseMap.
901 template <>
902 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
903     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
904 
905 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
906 /// but the instruction
907 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
908   using Base = DenseMapInfo<Instruction *>;
909   using Access = AAPointerInfo::Access;
910   static inline Access getEmptyKey();
911   static inline Access getTombstoneKey();
912   static unsigned getHashValue(const Access &A);
913   static bool isEqual(const Access &LHS, const Access &RHS);
914 };
915 
916 } // namespace llvm
917 
918 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
919 struct AA::PointerInfo::State : public AbstractState {
920 
921   ~State() {
922     // We do not delete the Accesses objects but need to destroy them still.
923     for (auto &It : AccessBins)
924       It.second->~Accesses();
925   }
926 
927   /// Return the best possible representable state.
928   static State getBestState(const State &SIS) { return State(); }
929 
930   /// Return the worst possible representable state.
931   static State getWorstState(const State &SIS) {
932     State R;
933     R.indicatePessimisticFixpoint();
934     return R;
935   }
936 
937   State() = default;
938   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
939     SIS.AccessBins.clear();
940   }
941 
942   const State &getAssumed() const { return *this; }
943 
944   /// See AbstractState::isValidState().
945   bool isValidState() const override { return BS.isValidState(); }
946 
947   /// See AbstractState::isAtFixpoint().
948   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
949 
950   /// See AbstractState::indicateOptimisticFixpoint().
951   ChangeStatus indicateOptimisticFixpoint() override {
952     BS.indicateOptimisticFixpoint();
953     return ChangeStatus::UNCHANGED;
954   }
955 
956   /// See AbstractState::indicatePessimisticFixpoint().
957   ChangeStatus indicatePessimisticFixpoint() override {
958     BS.indicatePessimisticFixpoint();
959     return ChangeStatus::CHANGED;
960   }
961 
962   State &operator=(const State &R) {
963     if (this == &R)
964       return *this;
965     BS = R.BS;
966     AccessBins = R.AccessBins;
967     return *this;
968   }
969 
970   State &operator=(State &&R) {
971     if (this == &R)
972       return *this;
973     std::swap(BS, R.BS);
974     std::swap(AccessBins, R.AccessBins);
975     return *this;
976   }
977 
978   bool operator==(const State &R) const {
979     if (BS != R.BS)
980       return false;
981     if (AccessBins.size() != R.AccessBins.size())
982       return false;
983     auto It = begin(), RIt = R.begin(), E = end();
984     while (It != E) {
985       if (It->getFirst() != RIt->getFirst())
986         return false;
987       auto &Accs = It->getSecond();
988       auto &RAccs = RIt->getSecond();
989       if (Accs->size() != RAccs->size())
990         return false;
991       for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
992         if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
993           return false;
994       ++It;
995       ++RIt;
996     }
997     return true;
998   }
999   bool operator!=(const State &R) const { return !(*this == R); }
1000 
1001   /// We store accesses in a set with the instruction as key.
1002   struct Accesses {
1003     SmallVector<AAPointerInfo::Access, 4> Accesses;
1004     DenseMap<const Instruction *, unsigned> Map;
1005 
1006     unsigned size() const { return Accesses.size(); }
1007 
1008     using vec_iterator = decltype(Accesses)::iterator;
1009     vec_iterator begin() { return Accesses.begin(); }
1010     vec_iterator end() { return Accesses.end(); }
1011 
1012     using iterator = decltype(Map)::const_iterator;
1013     iterator find(AAPointerInfo::Access &Acc) {
1014       return Map.find(Acc.getRemoteInst());
1015     }
1016     iterator find_end() { return Map.end(); }
1017 
1018     AAPointerInfo::Access &get(iterator &It) {
1019       return Accesses[It->getSecond()];
1020     }
1021 
1022     void insert(AAPointerInfo::Access &Acc) {
1023       Map[Acc.getRemoteInst()] = Accesses.size();
1024       Accesses.push_back(Acc);
1025     }
1026   };
1027 
1028   /// We store all accesses in bins denoted by their offset and size.
1029   using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
1030 
1031   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
1032   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
1033 
1034 protected:
1035   /// The bins with all the accesses for the associated pointer.
1036   AccessBinsTy AccessBins;
1037 
1038   /// Add a new access to the state at offset \p Offset and with size \p Size.
1039   /// The access is associated with \p I, writes \p Content (if anything), and
1040   /// is of kind \p Kind.
1041   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
1042   ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
1043                          Instruction &I, Optional<Value *> Content,
1044                          AAPointerInfo::AccessKind Kind, Type *Ty,
1045                          Instruction *RemoteI = nullptr,
1046                          Accesses *BinPtr = nullptr) {
1047     AAPointerInfo::OffsetAndSize Key{Offset, Size};
1048     Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
1049     if (!Bin)
1050       Bin = new (A.Allocator) Accesses;
1051     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1052     // Check if we have an access for this instruction in this bin, if not,
1053     // simply add it.
1054     auto It = Bin->find(Acc);
1055     if (It == Bin->find_end()) {
1056       Bin->insert(Acc);
1057       return ChangeStatus::CHANGED;
1058     }
1059     // If the existing access is the same as then new one, nothing changed.
1060     AAPointerInfo::Access &Current = Bin->get(It);
1061     AAPointerInfo::Access Before = Current;
1062     // The new one will be combined with the existing one.
1063     Current &= Acc;
1064     return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1065   }
1066 
1067   /// See AAPointerInfo::forallInterferingAccesses.
1068   bool forallInterferingAccesses(
1069       AAPointerInfo::OffsetAndSize OAS,
1070       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1071     if (!isValidState())
1072       return false;
1073 
1074     for (auto &It : AccessBins) {
1075       AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
1076       if (!OAS.mayOverlap(ItOAS))
1077         continue;
1078       bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1079       for (auto &Access : *It.getSecond())
1080         if (!CB(Access, IsExact))
1081           return false;
1082     }
1083     return true;
1084   }
1085 
1086   /// See AAPointerInfo::forallInterferingAccesses.
1087   bool forallInterferingAccesses(
1088       Instruction &I,
1089       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1090     if (!isValidState())
1091       return false;
1092 
1093     // First find the offset and size of I.
1094     AAPointerInfo::OffsetAndSize OAS(-1, -1);
1095     for (auto &It : AccessBins) {
1096       for (auto &Access : *It.getSecond()) {
1097         if (Access.getRemoteInst() == &I) {
1098           OAS = It.getFirst();
1099           break;
1100         }
1101       }
1102       if (OAS.getSize() != -1)
1103         break;
1104     }
1105     // No access for I was found, we are done.
1106     if (OAS.getSize() == -1)
1107       return true;
1108 
1109     // Now that we have an offset and size, find all overlapping ones and use
1110     // the callback on the accesses.
1111     return forallInterferingAccesses(OAS, CB);
1112   }
1113 
1114 private:
1115   /// State to track fixpoint and validity.
1116   BooleanState BS;
1117 };
1118 
1119 namespace {
1120 struct AAPointerInfoImpl
1121     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1122   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1123   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1124 
1125   /// See AbstractAttribute::initialize(...).
1126   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1127 
1128   /// See AbstractAttribute::getAsStr().
1129   const std::string getAsStr() const override {
1130     return std::string("PointerInfo ") +
1131            (isValidState() ? (std::string("#") +
1132                               std::to_string(AccessBins.size()) + " bins")
1133                            : "<invalid>");
1134   }
1135 
1136   /// See AbstractAttribute::manifest(...).
1137   ChangeStatus manifest(Attributor &A) override {
1138     return AAPointerInfo::manifest(A);
1139   }
1140 
1141   bool forallInterferingAccesses(
1142       OffsetAndSize OAS,
1143       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1144       const override {
1145     return State::forallInterferingAccesses(OAS, CB);
1146   }
1147   bool forallInterferingAccesses(
1148       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1149       function_ref<bool(const Access &, bool)> UserCB) const override {
1150     SmallPtrSet<const Access *, 8> DominatingWrites;
1151     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1152 
1153     Function &Scope = *I.getFunction();
1154     const auto &NoSyncAA = A.getAAFor<AANoSync>(
1155         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1156     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1157         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
1158     const bool NoSync = NoSyncAA.isAssumedNoSync();
1159 
1160     // Helper to determine if we need to consider threading, which we cannot
1161     // right now. However, if the function is (assumed) nosync or the thread
1162     // executing all instructions is the main thread only we can ignore
1163     // threading.
1164     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
1165       if (NoSync)
1166         return true;
1167       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
1168         return true;
1169       return false;
1170     };
1171 
1172     // Helper to determine if the access is executed by the same thread as the
1173     // load, for now it is sufficient to avoid any potential threading effects
1174     // as we cannot deal with them anyway.
1175     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
1176       return CanIgnoreThreading(*Acc.getLocalInst());
1177     };
1178 
1179     // TODO: Use inter-procedural reachability and dominance.
1180     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1181         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1182 
1183     const bool FindInterferingWrites = I.mayReadFromMemory();
1184     const bool FindInterferingReads = I.mayWriteToMemory();
1185     const bool UseDominanceReasoning = FindInterferingWrites;
1186     const bool CanUseCFGResoning = CanIgnoreThreading(I);
1187     InformationCache &InfoCache = A.getInfoCache();
1188     const DominatorTree *DT =
1189         NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1190             ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1191                   Scope)
1192             : nullptr;
1193 
1194     enum GPUAddressSpace : unsigned {
1195       Generic = 0,
1196       Global = 1,
1197       Shared = 3,
1198       Constant = 4,
1199       Local = 5,
1200     };
1201 
1202     // Helper to check if a value has "kernel lifetime", that is it will not
1203     // outlive a GPU kernel. This is true for shared, constant, and local
1204     // globals on AMD and NVIDIA GPUs.
1205     auto HasKernelLifetime = [&](Value *V, Module &M) {
1206       Triple T(M.getTargetTriple());
1207       if (!(T.isAMDGPU() || T.isNVPTX()))
1208         return false;
1209       switch (V->getType()->getPointerAddressSpace()) {
1210       case GPUAddressSpace::Shared:
1211       case GPUAddressSpace::Constant:
1212       case GPUAddressSpace::Local:
1213         return true;
1214       default:
1215         return false;
1216       };
1217     };
1218 
1219     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1220     // to determine if we should look at reachability from the callee. For
1221     // certain pointers we know the lifetime and we do not have to step into the
1222     // callee to determine reachability as the pointer would be dead in the
1223     // callee. See the conditional initialization below.
1224     std::function<bool(const Function &)> IsLiveInCalleeCB;
1225 
1226     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1227       // If the alloca containing function is not recursive the alloca
1228       // must be dead in the callee.
1229       const Function *AIFn = AI->getFunction();
1230       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1231           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1232       if (NoRecurseAA.isAssumedNoRecurse()) {
1233         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1234       }
1235     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1236       // If the global has kernel lifetime we can stop if we reach a kernel
1237       // as it is "dead" in the (unknown) callees.
1238       if (HasKernelLifetime(GV, *GV->getParent()))
1239         IsLiveInCalleeCB = [](const Function &Fn) {
1240           return !Fn.hasFnAttribute("kernel");
1241         };
1242     }
1243 
1244     auto AccessCB = [&](const Access &Acc, bool Exact) {
1245       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1246           (!FindInterferingReads || !Acc.isRead()))
1247         return true;
1248 
1249       // For now we only filter accesses based on CFG reasoning which does not
1250       // work yet if we have threading effects, or the access is complicated.
1251       if (CanUseCFGResoning) {
1252         if ((!Acc.isWrite() ||
1253              !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1254                                          IsLiveInCalleeCB)) &&
1255             (!Acc.isRead() ||
1256              !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1257                                          IsLiveInCalleeCB)))
1258           return true;
1259         if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1260             IsSameThreadAsLoad(Acc)) {
1261           if (DT->dominates(Acc.getLocalInst(), &I))
1262             DominatingWrites.insert(&Acc);
1263         }
1264       }
1265 
1266       InterferingAccesses.push_back({&Acc, Exact});
1267       return true;
1268     };
1269     if (!State::forallInterferingAccesses(I, AccessCB))
1270       return false;
1271 
1272     // If we cannot use CFG reasoning we only filter the non-write accesses
1273     // and are done here.
1274     if (!CanUseCFGResoning) {
1275       for (auto &It : InterferingAccesses)
1276         if (!UserCB(*It.first, It.second))
1277           return false;
1278       return true;
1279     }
1280 
1281     // Helper to determine if we can skip a specific write access. This is in
1282     // the worst case quadratic as we are looking for another write that will
1283     // hide the effect of this one.
1284     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1285       if (!IsSameThreadAsLoad(Acc))
1286         return false;
1287       if (!DominatingWrites.count(&Acc))
1288         return false;
1289       for (const Access *DomAcc : DominatingWrites) {
1290         assert(Acc.getLocalInst()->getFunction() ==
1291                    DomAcc->getLocalInst()->getFunction() &&
1292                "Expected dominating writes to be in the same function!");
1293 
1294         if (DomAcc != &Acc &&
1295             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1296           return true;
1297         }
1298       }
1299       return false;
1300     };
1301 
1302     // Run the user callback on all accesses we cannot skip and return if that
1303     // succeeded for all or not.
1304     unsigned NumInterferingAccesses = InterferingAccesses.size();
1305     for (auto &It : InterferingAccesses) {
1306       if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1307           !CanSkipAccess(*It.first, It.second)) {
1308         if (!UserCB(*It.first, It.second))
1309           return false;
1310       }
1311     }
1312     return true;
1313   }
1314 
1315   ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1316                                     int64_t Offset, CallBase &CB,
1317                                     bool FromCallee = false) {
1318     using namespace AA::PointerInfo;
1319     if (!OtherAA.getState().isValidState() || !isValidState())
1320       return indicatePessimisticFixpoint();
1321 
1322     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1323     bool IsByval =
1324         FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1325 
1326     // Combine the accesses bin by bin.
1327     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1328     for (auto &It : OtherAAImpl.getState()) {
1329       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1330       if (Offset != OffsetAndSize::Unknown)
1331         OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
1332       Accesses *Bin = AccessBins.lookup(OAS);
1333       for (const AAPointerInfo::Access &RAcc : *It.second) {
1334         if (IsByval && !RAcc.isRead())
1335           continue;
1336         bool UsedAssumedInformation = false;
1337         AccessKind AK = RAcc.getKind();
1338         Optional<Value *> Content = RAcc.getContent();
1339         if (FromCallee) {
1340           Content = A.translateArgumentToCallSiteContent(
1341               RAcc.getContent(), CB, *this, UsedAssumedInformation);
1342           AK = AccessKind(
1343               AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
1344         }
1345         Changed =
1346             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1347                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1348       }
1349     }
1350     return Changed;
1351   }
1352 
1353   /// Statistic tracking for all AAPointerInfo implementations.
1354   /// See AbstractAttribute::trackStatistics().
1355   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1356 };
1357 
1358 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1359   using AccessKind = AAPointerInfo::AccessKind;
1360   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1361       : AAPointerInfoImpl(IRP, A) {}
1362 
1363   /// See AbstractAttribute::initialize(...).
1364   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1365 
1366   /// Deal with an access and signal if it was handled successfully.
1367   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1368                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1369                     ChangeStatus &Changed, Type *Ty,
1370                     int64_t Size = OffsetAndSize::Unknown) {
1371     using namespace AA::PointerInfo;
1372     // No need to find a size if one is given or the offset is unknown.
1373     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1374         Ty) {
1375       const DataLayout &DL = A.getDataLayout();
1376       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1377       if (!AccessSize.isScalable())
1378         Size = AccessSize.getFixedSize();
1379     }
1380     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1381     return true;
1382   };
1383 
1384   /// Helper struct, will support ranges eventually.
1385   struct OffsetInfo {
1386     int64_t Offset = OffsetAndSize::Unknown;
1387 
1388     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1389   };
1390 
1391   /// See AbstractAttribute::updateImpl(...).
1392   ChangeStatus updateImpl(Attributor &A) override {
1393     using namespace AA::PointerInfo;
1394     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1395     Value &AssociatedValue = getAssociatedValue();
1396 
1397     const DataLayout &DL = A.getDataLayout();
1398     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1399     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1400 
1401     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1402                                      bool &Follow) {
1403       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1404       UsrOI = PtrOI;
1405       Follow = true;
1406       return true;
1407     };
1408 
1409     const auto *TLI = getAnchorScope()
1410                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1411                                 *getAnchorScope())
1412                           : nullptr;
1413     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1414       Value *CurPtr = U.get();
1415       User *Usr = U.getUser();
1416       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1417                         << *Usr << "\n");
1418       assert(OffsetInfoMap.count(CurPtr) &&
1419              "The current pointer offset should have been seeded!");
1420 
1421       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1422         if (CE->isCast())
1423           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1424         if (CE->isCompare())
1425           return true;
1426         if (!isa<GEPOperator>(CE)) {
1427           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1428                             << "\n");
1429           return false;
1430         }
1431       }
1432       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1433         // Note the order here, the Usr access might change the map, CurPtr is
1434         // already in it though.
1435         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1436         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1437         UsrOI = PtrOI;
1438 
1439         // TODO: Use range information.
1440         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1441             !GEP->hasAllConstantIndices()) {
1442           UsrOI.Offset = OffsetAndSize::Unknown;
1443           Follow = true;
1444           return true;
1445         }
1446 
1447         SmallVector<Value *, 8> Indices;
1448         for (Use &Idx : GEP->indices()) {
1449           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1450             Indices.push_back(CIdx);
1451             continue;
1452           }
1453 
1454           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1455                             << " : " << *Idx << "\n");
1456           return false;
1457         }
1458         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1459                                           GEP->getSourceElementType(), Indices);
1460         Follow = true;
1461         return true;
1462       }
1463       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1464         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1465 
1466       // For PHIs we need to take care of the recurrence explicitly as the value
1467       // might change while we iterate through a loop. For now, we give up if
1468       // the PHI is not invariant.
1469       if (isa<PHINode>(Usr)) {
1470         // Note the order here, the Usr access might change the map, CurPtr is
1471         // already in it though.
1472         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1473         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1474         // Check if the PHI is invariant (so far).
1475         if (UsrOI == PtrOI)
1476           return true;
1477 
1478         // Check if the PHI operand has already an unknown offset as we can't
1479         // improve on that anymore.
1480         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1481           UsrOI = PtrOI;
1482           Follow = true;
1483           return true;
1484         }
1485 
1486         // Check if the PHI operand is not dependent on the PHI itself.
1487         // TODO: This is not great as we look at the pointer type. However, it
1488         // is unclear where the Offset size comes from with typeless pointers.
1489         APInt Offset(
1490             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1491             0);
1492         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1493                                     DL, Offset, /* AllowNonInbounds */ true)) {
1494           if (Offset != PtrOI.Offset) {
1495             LLVM_DEBUG(dbgs()
1496                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1497                        << *CurPtr << " in " << *Usr << "\n");
1498             return false;
1499           }
1500           return HandlePassthroughUser(Usr, PtrOI, Follow);
1501         }
1502 
1503         // TODO: Approximate in case we know the direction of the recurrence.
1504         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1505                           << *CurPtr << " in " << *Usr << "\n");
1506         UsrOI = PtrOI;
1507         UsrOI.Offset = OffsetAndSize::Unknown;
1508         Follow = true;
1509         return true;
1510       }
1511 
1512       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1513         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1514                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1515                             Changed, LoadI->getType());
1516       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1517         if (StoreI->getValueOperand() == CurPtr) {
1518           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1519                             << *StoreI << "\n");
1520           return false;
1521         }
1522         bool UsedAssumedInformation = false;
1523         Optional<Value *> Content = A.getAssumedSimplified(
1524             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1525         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1526                             OffsetInfoMap[CurPtr].Offset, Changed,
1527                             StoreI->getValueOperand()->getType());
1528       }
1529       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1530         if (CB->isLifetimeStartOrEnd())
1531           return true;
1532         if (TLI && isFreeCall(CB, TLI))
1533           return true;
1534         if (CB->isArgOperand(&U)) {
1535           unsigned ArgNo = CB->getArgOperandNo(&U);
1536           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1537               *this, IRPosition::callsite_argument(*CB, ArgNo),
1538               DepClassTy::REQUIRED);
1539           Changed = translateAndAddState(A, CSArgPI,
1540                                          OffsetInfoMap[CurPtr].Offset, *CB) |
1541                     Changed;
1542           return true;
1543         }
1544         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1545                           << "\n");
1546         // TODO: Allow some call uses
1547         return false;
1548       }
1549 
1550       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1551       return false;
1552     };
1553     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1554       if (OffsetInfoMap.count(NewU))
1555         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1556       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1557       return true;
1558     };
1559     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1560                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1561                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
1562       return indicatePessimisticFixpoint();
1563 
1564     LLVM_DEBUG({
1565       dbgs() << "Accesses by bin after update:\n";
1566       for (auto &It : AccessBins) {
1567         dbgs() << "[" << It.first.getOffset() << "-"
1568                << It.first.getOffset() + It.first.getSize()
1569                << "] : " << It.getSecond()->size() << "\n";
1570         for (auto &Acc : *It.getSecond()) {
1571           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1572                  << "\n";
1573           if (Acc.getLocalInst() != Acc.getRemoteInst())
1574             dbgs() << "     -->                         "
1575                    << *Acc.getRemoteInst() << "\n";
1576           if (!Acc.isWrittenValueYetUndetermined()) {
1577             if (Acc.getWrittenValue())
1578               dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
1579             else
1580               dbgs() << "       - c: <unknown>\n";
1581           }
1582         }
1583       }
1584     });
1585 
1586     return Changed;
1587   }
1588 
1589   /// See AbstractAttribute::trackStatistics()
1590   void trackStatistics() const override {
1591     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1592   }
1593 };
1594 
1595 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1596   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1597       : AAPointerInfoImpl(IRP, A) {}
1598 
1599   /// See AbstractAttribute::updateImpl(...).
1600   ChangeStatus updateImpl(Attributor &A) override {
1601     return indicatePessimisticFixpoint();
1602   }
1603 
1604   /// See AbstractAttribute::trackStatistics()
1605   void trackStatistics() const override {
1606     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1607   }
1608 };
1609 
1610 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1611   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1612       : AAPointerInfoFloating(IRP, A) {}
1613 
1614   /// See AbstractAttribute::initialize(...).
1615   void initialize(Attributor &A) override {
1616     AAPointerInfoFloating::initialize(A);
1617     if (getAnchorScope()->isDeclaration())
1618       indicatePessimisticFixpoint();
1619   }
1620 
1621   /// See AbstractAttribute::trackStatistics()
1622   void trackStatistics() const override {
1623     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1624   }
1625 };
1626 
1627 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1628   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1629       : AAPointerInfoFloating(IRP, A) {}
1630 
1631   /// See AbstractAttribute::updateImpl(...).
1632   ChangeStatus updateImpl(Attributor &A) override {
1633     using namespace AA::PointerInfo;
1634     // We handle memory intrinsics explicitly, at least the first (=
1635     // destination) and second (=source) arguments as we know how they are
1636     // accessed.
1637     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1638       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1639       int64_t LengthVal = OffsetAndSize::Unknown;
1640       if (Length)
1641         LengthVal = Length->getSExtValue();
1642       Value &Ptr = getAssociatedValue();
1643       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1644       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1645       if (ArgNo == 0) {
1646         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1647                      nullptr, LengthVal);
1648       } else if (ArgNo == 1) {
1649         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1650                      nullptr, LengthVal);
1651       } else {
1652         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1653                           << *MI << "\n");
1654         return indicatePessimisticFixpoint();
1655       }
1656       return Changed;
1657     }
1658 
1659     // TODO: Once we have call site specific value information we can provide
1660     //       call site specific liveness information and then it makes
1661     //       sense to specialize attributes for call sites arguments instead of
1662     //       redirecting requests to the callee argument.
1663     Argument *Arg = getAssociatedArgument();
1664     if (!Arg)
1665       return indicatePessimisticFixpoint();
1666     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1667     auto &ArgAA =
1668         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1669     return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1670                                 /* FromCallee */ true);
1671   }
1672 
1673   /// See AbstractAttribute::trackStatistics()
1674   void trackStatistics() const override {
1675     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1676   }
1677 };
1678 
1679 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1680   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1681       : AAPointerInfoFloating(IRP, A) {}
1682 
1683   /// See AbstractAttribute::trackStatistics()
1684   void trackStatistics() const override {
1685     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1686   }
1687 };
1688 } // namespace
1689 
1690 /// -----------------------NoUnwind Function Attribute--------------------------
1691 
1692 namespace {
1693 struct AANoUnwindImpl : AANoUnwind {
1694   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1695 
1696   const std::string getAsStr() const override {
1697     return getAssumed() ? "nounwind" : "may-unwind";
1698   }
1699 
1700   /// See AbstractAttribute::updateImpl(...).
1701   ChangeStatus updateImpl(Attributor &A) override {
1702     auto Opcodes = {
1703         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1704         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1705         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1706 
1707     auto CheckForNoUnwind = [&](Instruction &I) {
1708       if (!I.mayThrow())
1709         return true;
1710 
1711       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1712         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1713             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1714         return NoUnwindAA.isAssumedNoUnwind();
1715       }
1716       return false;
1717     };
1718 
1719     bool UsedAssumedInformation = false;
1720     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1721                                    UsedAssumedInformation))
1722       return indicatePessimisticFixpoint();
1723 
1724     return ChangeStatus::UNCHANGED;
1725   }
1726 };
1727 
1728 struct AANoUnwindFunction final : public AANoUnwindImpl {
1729   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1730       : AANoUnwindImpl(IRP, A) {}
1731 
1732   /// See AbstractAttribute::trackStatistics()
1733   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1734 };
1735 
1736 /// NoUnwind attribute deduction for a call sites.
1737 struct AANoUnwindCallSite final : AANoUnwindImpl {
1738   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1739       : AANoUnwindImpl(IRP, A) {}
1740 
1741   /// See AbstractAttribute::initialize(...).
1742   void initialize(Attributor &A) override {
1743     AANoUnwindImpl::initialize(A);
1744     Function *F = getAssociatedFunction();
1745     if (!F || F->isDeclaration())
1746       indicatePessimisticFixpoint();
1747   }
1748 
1749   /// See AbstractAttribute::updateImpl(...).
1750   ChangeStatus updateImpl(Attributor &A) override {
1751     // TODO: Once we have call site specific value information we can provide
1752     //       call site specific liveness information and then it makes
1753     //       sense to specialize attributes for call sites arguments instead of
1754     //       redirecting requests to the callee argument.
1755     Function *F = getAssociatedFunction();
1756     const IRPosition &FnPos = IRPosition::function(*F);
1757     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1758     return clampStateAndIndicateChange(getState(), FnAA.getState());
1759   }
1760 
1761   /// See AbstractAttribute::trackStatistics()
1762   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1763 };
1764 } // namespace
1765 
1766 /// --------------------- Function Return Values -------------------------------
1767 
1768 namespace {
1769 /// "Attribute" that collects all potential returned values and the return
1770 /// instructions that they arise from.
1771 ///
1772 /// If there is a unique returned value R, the manifest method will:
1773 ///   - mark R with the "returned" attribute, if R is an argument.
1774 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1775 
1776   /// Mapping of values potentially returned by the associated function to the
1777   /// return instructions that might return them.
1778   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1779 
1780   /// State flags
1781   ///
1782   ///{
1783   bool IsFixed = false;
1784   bool IsValidState = true;
1785   ///}
1786 
1787 public:
1788   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1789       : AAReturnedValues(IRP, A) {}
1790 
1791   /// See AbstractAttribute::initialize(...).
1792   void initialize(Attributor &A) override {
1793     // Reset the state.
1794     IsFixed = false;
1795     IsValidState = true;
1796     ReturnedValues.clear();
1797 
1798     Function *F = getAssociatedFunction();
1799     if (!F || F->isDeclaration()) {
1800       indicatePessimisticFixpoint();
1801       return;
1802     }
1803     assert(!F->getReturnType()->isVoidTy() &&
1804            "Did not expect a void return type!");
1805 
1806     // The map from instruction opcodes to those instructions in the function.
1807     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1808 
1809     // Look through all arguments, if one is marked as returned we are done.
1810     for (Argument &Arg : F->args()) {
1811       if (Arg.hasReturnedAttr()) {
1812         auto &ReturnInstSet = ReturnedValues[&Arg];
1813         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1814           for (Instruction *RI : *Insts)
1815             ReturnInstSet.insert(cast<ReturnInst>(RI));
1816 
1817         indicateOptimisticFixpoint();
1818         return;
1819       }
1820     }
1821 
1822     if (!A.isFunctionIPOAmendable(*F))
1823       indicatePessimisticFixpoint();
1824   }
1825 
1826   /// See AbstractAttribute::manifest(...).
1827   ChangeStatus manifest(Attributor &A) override;
1828 
1829   /// See AbstractAttribute::getState(...).
1830   AbstractState &getState() override { return *this; }
1831 
1832   /// See AbstractAttribute::getState(...).
1833   const AbstractState &getState() const override { return *this; }
1834 
1835   /// See AbstractAttribute::updateImpl(Attributor &A).
1836   ChangeStatus updateImpl(Attributor &A) override;
1837 
1838   llvm::iterator_range<iterator> returned_values() override {
1839     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1840   }
1841 
1842   llvm::iterator_range<const_iterator> returned_values() const override {
1843     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1844   }
1845 
1846   /// Return the number of potential return values, -1 if unknown.
1847   size_t getNumReturnValues() const override {
1848     return isValidState() ? ReturnedValues.size() : -1;
1849   }
1850 
1851   /// Return an assumed unique return value if a single candidate is found. If
1852   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1853   /// Optional::NoneType.
1854   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1855 
1856   /// See AbstractState::checkForAllReturnedValues(...).
1857   bool checkForAllReturnedValuesAndReturnInsts(
1858       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1859       const override;
1860 
1861   /// Pretty print the attribute similar to the IR representation.
1862   const std::string getAsStr() const override;
1863 
1864   /// See AbstractState::isAtFixpoint().
1865   bool isAtFixpoint() const override { return IsFixed; }
1866 
1867   /// See AbstractState::isValidState().
1868   bool isValidState() const override { return IsValidState; }
1869 
1870   /// See AbstractState::indicateOptimisticFixpoint(...).
1871   ChangeStatus indicateOptimisticFixpoint() override {
1872     IsFixed = true;
1873     return ChangeStatus::UNCHANGED;
1874   }
1875 
1876   ChangeStatus indicatePessimisticFixpoint() override {
1877     IsFixed = true;
1878     IsValidState = false;
1879     return ChangeStatus::CHANGED;
1880   }
1881 };
1882 
1883 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1884   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1885 
1886   // Bookkeeping.
1887   assert(isValidState());
1888   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1889                   "Number of function with known return values");
1890 
1891   // Check if we have an assumed unique return value that we could manifest.
1892   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1893 
1894   if (!UniqueRV || !UniqueRV.value())
1895     return Changed;
1896 
1897   // Bookkeeping.
1898   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1899                   "Number of function with unique return");
1900   // If the assumed unique return value is an argument, annotate it.
1901   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) {
1902     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1903             getAssociatedFunction()->getReturnType())) {
1904       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1905       Changed = IRAttribute::manifest(A);
1906     }
1907   }
1908   return Changed;
1909 }
1910 
1911 const std::string AAReturnedValuesImpl::getAsStr() const {
1912   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1913          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1914 }
1915 
1916 Optional<Value *>
1917 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1918   // If checkForAllReturnedValues provides a unique value, ignoring potential
1919   // undef values that can also be present, it is assumed to be the actual
1920   // return value and forwarded to the caller of this method. If there are
1921   // multiple, a nullptr is returned indicating there cannot be a unique
1922   // returned value.
1923   Optional<Value *> UniqueRV;
1924   Type *Ty = getAssociatedFunction()->getReturnType();
1925 
1926   auto Pred = [&](Value &RV) -> bool {
1927     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1928     return UniqueRV != Optional<Value *>(nullptr);
1929   };
1930 
1931   if (!A.checkForAllReturnedValues(Pred, *this))
1932     UniqueRV = nullptr;
1933 
1934   return UniqueRV;
1935 }
1936 
1937 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1938     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1939     const {
1940   if (!isValidState())
1941     return false;
1942 
1943   // Check all returned values but ignore call sites as long as we have not
1944   // encountered an overdefined one during an update.
1945   for (auto &It : ReturnedValues) {
1946     Value *RV = It.first;
1947     if (!Pred(*RV, It.second))
1948       return false;
1949   }
1950 
1951   return true;
1952 }
1953 
1954 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1955   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1956 
1957   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1958                            bool) -> bool {
1959     assert(AA::isValidInScope(V, Ret.getFunction()) &&
1960            "Assumed returned value should be valid in function scope!");
1961     if (ReturnedValues[&V].insert(&Ret))
1962       Changed = ChangeStatus::CHANGED;
1963     return true;
1964   };
1965 
1966   bool UsedAssumedInformation = false;
1967   auto ReturnInstCB = [&](Instruction &I) {
1968     ReturnInst &Ret = cast<ReturnInst>(I);
1969     return genericValueTraversal<ReturnInst>(
1970         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1971         &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1972         /* MaxValues */ 16,
1973         /* StripCB */ nullptr, AA::Intraprocedural);
1974   };
1975 
1976   // Discover returned values from all live returned instructions in the
1977   // associated function.
1978   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1979                                  UsedAssumedInformation))
1980     return indicatePessimisticFixpoint();
1981   return Changed;
1982 }
1983 
1984 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1985   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1986       : AAReturnedValuesImpl(IRP, A) {}
1987 
1988   /// See AbstractAttribute::trackStatistics()
1989   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1990 };
1991 
1992 /// Returned values information for a call sites.
1993 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1994   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1995       : AAReturnedValuesImpl(IRP, A) {}
1996 
1997   /// See AbstractAttribute::initialize(...).
1998   void initialize(Attributor &A) override {
1999     // TODO: Once we have call site specific value information we can provide
2000     //       call site specific liveness information and then it makes
2001     //       sense to specialize attributes for call sites instead of
2002     //       redirecting requests to the callee.
2003     llvm_unreachable("Abstract attributes for returned values are not "
2004                      "supported for call sites yet!");
2005   }
2006 
2007   /// See AbstractAttribute::updateImpl(...).
2008   ChangeStatus updateImpl(Attributor &A) override {
2009     return indicatePessimisticFixpoint();
2010   }
2011 
2012   /// See AbstractAttribute::trackStatistics()
2013   void trackStatistics() const override {}
2014 };
2015 } // namespace
2016 
2017 /// ------------------------ NoSync Function Attribute -------------------------
2018 
2019 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
2020   if (!I->isAtomic())
2021     return false;
2022 
2023   if (auto *FI = dyn_cast<FenceInst>(I))
2024     // All legal orderings for fence are stronger than monotonic.
2025     return FI->getSyncScopeID() != SyncScope::SingleThread;
2026   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
2027     // Unordered is not a legal ordering for cmpxchg.
2028     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
2029             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
2030   }
2031 
2032   AtomicOrdering Ordering;
2033   switch (I->getOpcode()) {
2034   case Instruction::AtomicRMW:
2035     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
2036     break;
2037   case Instruction::Store:
2038     Ordering = cast<StoreInst>(I)->getOrdering();
2039     break;
2040   case Instruction::Load:
2041     Ordering = cast<LoadInst>(I)->getOrdering();
2042     break;
2043   default:
2044     llvm_unreachable(
2045         "New atomic operations need to be known in the attributor.");
2046   }
2047 
2048   return (Ordering != AtomicOrdering::Unordered &&
2049           Ordering != AtomicOrdering::Monotonic);
2050 }
2051 
2052 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2053 /// which would be nosync except that they have a volatile flag.  All other
2054 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2055 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2056   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2057     return !MI->isVolatile();
2058   return false;
2059 }
2060 
2061 namespace {
2062 struct AANoSyncImpl : AANoSync {
2063   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2064 
2065   const std::string getAsStr() const override {
2066     return getAssumed() ? "nosync" : "may-sync";
2067   }
2068 
2069   /// See AbstractAttribute::updateImpl(...).
2070   ChangeStatus updateImpl(Attributor &A) override;
2071 };
2072 
2073 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2074 
2075   auto CheckRWInstForNoSync = [&](Instruction &I) {
2076     return AA::isNoSyncInst(A, I, *this);
2077   };
2078 
2079   auto CheckForNoSync = [&](Instruction &I) {
2080     // At this point we handled all read/write effects and they are all
2081     // nosync, so they can be skipped.
2082     if (I.mayReadOrWriteMemory())
2083       return true;
2084 
2085     // non-convergent and readnone imply nosync.
2086     return !cast<CallBase>(I).isConvergent();
2087   };
2088 
2089   bool UsedAssumedInformation = false;
2090   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2091                                           UsedAssumedInformation) ||
2092       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2093                                          UsedAssumedInformation))
2094     return indicatePessimisticFixpoint();
2095 
2096   return ChangeStatus::UNCHANGED;
2097 }
2098 
2099 struct AANoSyncFunction final : public AANoSyncImpl {
2100   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2101       : AANoSyncImpl(IRP, A) {}
2102 
2103   /// See AbstractAttribute::trackStatistics()
2104   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2105 };
2106 
2107 /// NoSync attribute deduction for a call sites.
2108 struct AANoSyncCallSite final : AANoSyncImpl {
2109   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2110       : AANoSyncImpl(IRP, A) {}
2111 
2112   /// See AbstractAttribute::initialize(...).
2113   void initialize(Attributor &A) override {
2114     AANoSyncImpl::initialize(A);
2115     Function *F = getAssociatedFunction();
2116     if (!F || F->isDeclaration())
2117       indicatePessimisticFixpoint();
2118   }
2119 
2120   /// See AbstractAttribute::updateImpl(...).
2121   ChangeStatus updateImpl(Attributor &A) override {
2122     // TODO: Once we have call site specific value information we can provide
2123     //       call site specific liveness information and then it makes
2124     //       sense to specialize attributes for call sites arguments instead of
2125     //       redirecting requests to the callee argument.
2126     Function *F = getAssociatedFunction();
2127     const IRPosition &FnPos = IRPosition::function(*F);
2128     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2129     return clampStateAndIndicateChange(getState(), FnAA.getState());
2130   }
2131 
2132   /// See AbstractAttribute::trackStatistics()
2133   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2134 };
2135 } // namespace
2136 
2137 /// ------------------------ No-Free Attributes ----------------------------
2138 
2139 namespace {
2140 struct AANoFreeImpl : public AANoFree {
2141   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2142 
2143   /// See AbstractAttribute::updateImpl(...).
2144   ChangeStatus updateImpl(Attributor &A) override {
2145     auto CheckForNoFree = [&](Instruction &I) {
2146       const auto &CB = cast<CallBase>(I);
2147       if (CB.hasFnAttr(Attribute::NoFree))
2148         return true;
2149 
2150       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2151           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2152       return NoFreeAA.isAssumedNoFree();
2153     };
2154 
2155     bool UsedAssumedInformation = false;
2156     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2157                                            UsedAssumedInformation))
2158       return indicatePessimisticFixpoint();
2159     return ChangeStatus::UNCHANGED;
2160   }
2161 
2162   /// See AbstractAttribute::getAsStr().
2163   const std::string getAsStr() const override {
2164     return getAssumed() ? "nofree" : "may-free";
2165   }
2166 };
2167 
2168 struct AANoFreeFunction final : public AANoFreeImpl {
2169   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2170       : AANoFreeImpl(IRP, A) {}
2171 
2172   /// See AbstractAttribute::trackStatistics()
2173   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2174 };
2175 
2176 /// NoFree attribute deduction for a call sites.
2177 struct AANoFreeCallSite final : AANoFreeImpl {
2178   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2179       : AANoFreeImpl(IRP, A) {}
2180 
2181   /// See AbstractAttribute::initialize(...).
2182   void initialize(Attributor &A) override {
2183     AANoFreeImpl::initialize(A);
2184     Function *F = getAssociatedFunction();
2185     if (!F || F->isDeclaration())
2186       indicatePessimisticFixpoint();
2187   }
2188 
2189   /// See AbstractAttribute::updateImpl(...).
2190   ChangeStatus updateImpl(Attributor &A) override {
2191     // TODO: Once we have call site specific value information we can provide
2192     //       call site specific liveness information and then it makes
2193     //       sense to specialize attributes for call sites arguments instead of
2194     //       redirecting requests to the callee argument.
2195     Function *F = getAssociatedFunction();
2196     const IRPosition &FnPos = IRPosition::function(*F);
2197     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2198     return clampStateAndIndicateChange(getState(), FnAA.getState());
2199   }
2200 
2201   /// See AbstractAttribute::trackStatistics()
2202   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2203 };
2204 
2205 /// NoFree attribute for floating values.
2206 struct AANoFreeFloating : AANoFreeImpl {
2207   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2208       : AANoFreeImpl(IRP, A) {}
2209 
2210   /// See AbstractAttribute::trackStatistics()
2211   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2212 
2213   /// See Abstract Attribute::updateImpl(...).
2214   ChangeStatus updateImpl(Attributor &A) override {
2215     const IRPosition &IRP = getIRPosition();
2216 
2217     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2218         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2219     if (NoFreeAA.isAssumedNoFree())
2220       return ChangeStatus::UNCHANGED;
2221 
2222     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2223     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2224       Instruction *UserI = cast<Instruction>(U.getUser());
2225       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2226         if (CB->isBundleOperand(&U))
2227           return false;
2228         if (!CB->isArgOperand(&U))
2229           return true;
2230         unsigned ArgNo = CB->getArgOperandNo(&U);
2231 
2232         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2233             *this, IRPosition::callsite_argument(*CB, ArgNo),
2234             DepClassTy::REQUIRED);
2235         return NoFreeArg.isAssumedNoFree();
2236       }
2237 
2238       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2239           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2240         Follow = true;
2241         return true;
2242       }
2243       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2244           isa<ReturnInst>(UserI))
2245         return true;
2246 
2247       // Unknown user.
2248       return false;
2249     };
2250     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2251       return indicatePessimisticFixpoint();
2252 
2253     return ChangeStatus::UNCHANGED;
2254   }
2255 };
2256 
2257 /// NoFree attribute for a call site argument.
2258 struct AANoFreeArgument final : AANoFreeFloating {
2259   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2260       : AANoFreeFloating(IRP, A) {}
2261 
2262   /// See AbstractAttribute::trackStatistics()
2263   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2264 };
2265 
2266 /// NoFree attribute for call site arguments.
2267 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2268   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2269       : AANoFreeFloating(IRP, A) {}
2270 
2271   /// See AbstractAttribute::updateImpl(...).
2272   ChangeStatus updateImpl(Attributor &A) override {
2273     // TODO: Once we have call site specific value information we can provide
2274     //       call site specific liveness information and then it makes
2275     //       sense to specialize attributes for call sites arguments instead of
2276     //       redirecting requests to the callee argument.
2277     Argument *Arg = getAssociatedArgument();
2278     if (!Arg)
2279       return indicatePessimisticFixpoint();
2280     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2281     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2282     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2283   }
2284 
2285   /// See AbstractAttribute::trackStatistics()
2286   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2287 };
2288 
2289 /// NoFree attribute for function return value.
2290 struct AANoFreeReturned final : AANoFreeFloating {
2291   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2292       : AANoFreeFloating(IRP, A) {
2293     llvm_unreachable("NoFree is not applicable to function returns!");
2294   }
2295 
2296   /// See AbstractAttribute::initialize(...).
2297   void initialize(Attributor &A) override {
2298     llvm_unreachable("NoFree is not applicable to function returns!");
2299   }
2300 
2301   /// See AbstractAttribute::updateImpl(...).
2302   ChangeStatus updateImpl(Attributor &A) override {
2303     llvm_unreachable("NoFree is not applicable to function returns!");
2304   }
2305 
2306   /// See AbstractAttribute::trackStatistics()
2307   void trackStatistics() const override {}
2308 };
2309 
2310 /// NoFree attribute deduction for a call site return value.
2311 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2312   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2313       : AANoFreeFloating(IRP, A) {}
2314 
2315   ChangeStatus manifest(Attributor &A) override {
2316     return ChangeStatus::UNCHANGED;
2317   }
2318   /// See AbstractAttribute::trackStatistics()
2319   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2320 };
2321 } // namespace
2322 
2323 /// ------------------------ NonNull Argument Attribute ------------------------
2324 namespace {
2325 static int64_t getKnownNonNullAndDerefBytesForUse(
2326     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2327     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2328   TrackUse = false;
2329 
2330   const Value *UseV = U->get();
2331   if (!UseV->getType()->isPointerTy())
2332     return 0;
2333 
2334   // We need to follow common pointer manipulation uses to the accesses they
2335   // feed into. We can try to be smart to avoid looking through things we do not
2336   // like for now, e.g., non-inbounds GEPs.
2337   if (isa<CastInst>(I)) {
2338     TrackUse = true;
2339     return 0;
2340   }
2341 
2342   if (isa<GetElementPtrInst>(I)) {
2343     TrackUse = true;
2344     return 0;
2345   }
2346 
2347   Type *PtrTy = UseV->getType();
2348   const Function *F = I->getFunction();
2349   bool NullPointerIsDefined =
2350       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2351   const DataLayout &DL = A.getInfoCache().getDL();
2352   if (const auto *CB = dyn_cast<CallBase>(I)) {
2353     if (CB->isBundleOperand(U)) {
2354       if (RetainedKnowledge RK = getKnowledgeFromUse(
2355               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2356         IsNonNull |=
2357             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2358         return RK.ArgValue;
2359       }
2360       return 0;
2361     }
2362 
2363     if (CB->isCallee(U)) {
2364       IsNonNull |= !NullPointerIsDefined;
2365       return 0;
2366     }
2367 
2368     unsigned ArgNo = CB->getArgOperandNo(U);
2369     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2370     // As long as we only use known information there is no need to track
2371     // dependences here.
2372     auto &DerefAA =
2373         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2374     IsNonNull |= DerefAA.isKnownNonNull();
2375     return DerefAA.getKnownDereferenceableBytes();
2376   }
2377 
2378   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2379   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2380     return 0;
2381 
2382   int64_t Offset;
2383   const Value *Base =
2384       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2385   if (Base && Base == &AssociatedValue) {
2386     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2387     IsNonNull |= !NullPointerIsDefined;
2388     return std::max(int64_t(0), DerefBytes);
2389   }
2390 
2391   /// Corner case when an offset is 0.
2392   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2393                                           /*AllowNonInbounds*/ true);
2394   if (Base && Base == &AssociatedValue && Offset == 0) {
2395     int64_t DerefBytes = Loc->Size.getValue();
2396     IsNonNull |= !NullPointerIsDefined;
2397     return std::max(int64_t(0), DerefBytes);
2398   }
2399 
2400   return 0;
2401 }
2402 
2403 struct AANonNullImpl : AANonNull {
2404   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2405       : AANonNull(IRP, A),
2406         NullIsDefined(NullPointerIsDefined(
2407             getAnchorScope(),
2408             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2409 
2410   /// See AbstractAttribute::initialize(...).
2411   void initialize(Attributor &A) override {
2412     Value &V = *getAssociatedValue().stripPointerCasts();
2413     if (!NullIsDefined &&
2414         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2415                 /* IgnoreSubsumingPositions */ false, &A)) {
2416       indicateOptimisticFixpoint();
2417       return;
2418     }
2419 
2420     if (isa<ConstantPointerNull>(V)) {
2421       indicatePessimisticFixpoint();
2422       return;
2423     }
2424 
2425     AANonNull::initialize(A);
2426 
2427     bool CanBeNull, CanBeFreed;
2428     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2429                                          CanBeFreed)) {
2430       if (!CanBeNull) {
2431         indicateOptimisticFixpoint();
2432         return;
2433       }
2434     }
2435 
2436     if (isa<GlobalValue>(V)) {
2437       indicatePessimisticFixpoint();
2438       return;
2439     }
2440 
2441     if (Instruction *CtxI = getCtxI())
2442       followUsesInMBEC(*this, A, getState(), *CtxI);
2443   }
2444 
2445   /// See followUsesInMBEC
2446   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2447                        AANonNull::StateType &State) {
2448     bool IsNonNull = false;
2449     bool TrackUse = false;
2450     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2451                                        IsNonNull, TrackUse);
2452     State.setKnown(IsNonNull);
2453     return TrackUse;
2454   }
2455 
2456   /// See AbstractAttribute::getAsStr().
2457   const std::string getAsStr() const override {
2458     return getAssumed() ? "nonnull" : "may-null";
2459   }
2460 
2461   /// Flag to determine if the underlying value can be null and still allow
2462   /// valid accesses.
2463   const bool NullIsDefined;
2464 };
2465 
2466 /// NonNull attribute for a floating value.
2467 struct AANonNullFloating : public AANonNullImpl {
2468   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2469       : AANonNullImpl(IRP, A) {}
2470 
2471   /// See AbstractAttribute::updateImpl(...).
2472   ChangeStatus updateImpl(Attributor &A) override {
2473     const DataLayout &DL = A.getDataLayout();
2474 
2475     DominatorTree *DT = nullptr;
2476     AssumptionCache *AC = nullptr;
2477     InformationCache &InfoCache = A.getInfoCache();
2478     if (const Function *Fn = getAnchorScope()) {
2479       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2480       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2481     }
2482 
2483     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2484                             AANonNull::StateType &T, bool Stripped) -> bool {
2485       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2486                                              DepClassTy::REQUIRED);
2487       if (!Stripped && this == &AA) {
2488         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2489           T.indicatePessimisticFixpoint();
2490       } else {
2491         // Use abstract attribute information.
2492         const AANonNull::StateType &NS = AA.getState();
2493         T ^= NS;
2494       }
2495       return T.isValidState();
2496     };
2497 
2498     StateType T;
2499     bool UsedAssumedInformation = false;
2500     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2501                                           VisitValueCB, getCtxI(),
2502                                           UsedAssumedInformation))
2503       return indicatePessimisticFixpoint();
2504 
2505     return clampStateAndIndicateChange(getState(), T);
2506   }
2507 
2508   /// See AbstractAttribute::trackStatistics()
2509   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2510 };
2511 
2512 /// NonNull attribute for function return value.
2513 struct AANonNullReturned final
2514     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2515   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2516       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2517 
2518   /// See AbstractAttribute::getAsStr().
2519   const std::string getAsStr() const override {
2520     return getAssumed() ? "nonnull" : "may-null";
2521   }
2522 
2523   /// See AbstractAttribute::trackStatistics()
2524   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2525 };
2526 
2527 /// NonNull attribute for function argument.
2528 struct AANonNullArgument final
2529     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2530   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2531       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2532 
2533   /// See AbstractAttribute::trackStatistics()
2534   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2535 };
2536 
2537 struct AANonNullCallSiteArgument final : AANonNullFloating {
2538   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2539       : AANonNullFloating(IRP, A) {}
2540 
2541   /// See AbstractAttribute::trackStatistics()
2542   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2543 };
2544 
2545 /// NonNull attribute for a call site return position.
2546 struct AANonNullCallSiteReturned final
2547     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2548   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2549       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2550 
2551   /// See AbstractAttribute::trackStatistics()
2552   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2553 };
2554 } // namespace
2555 
2556 /// ------------------------ No-Recurse Attributes ----------------------------
2557 
2558 namespace {
2559 struct AANoRecurseImpl : public AANoRecurse {
2560   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2561 
2562   /// See AbstractAttribute::getAsStr()
2563   const std::string getAsStr() const override {
2564     return getAssumed() ? "norecurse" : "may-recurse";
2565   }
2566 };
2567 
2568 struct AANoRecurseFunction final : AANoRecurseImpl {
2569   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2570       : AANoRecurseImpl(IRP, A) {}
2571 
2572   /// See AbstractAttribute::updateImpl(...).
2573   ChangeStatus updateImpl(Attributor &A) override {
2574 
2575     // If all live call sites are known to be no-recurse, we are as well.
2576     auto CallSitePred = [&](AbstractCallSite ACS) {
2577       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2578           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2579           DepClassTy::NONE);
2580       return NoRecurseAA.isKnownNoRecurse();
2581     };
2582     bool UsedAssumedInformation = false;
2583     if (A.checkForAllCallSites(CallSitePred, *this, true,
2584                                UsedAssumedInformation)) {
2585       // If we know all call sites and all are known no-recurse, we are done.
2586       // If all known call sites, which might not be all that exist, are known
2587       // to be no-recurse, we are not done but we can continue to assume
2588       // no-recurse. If one of the call sites we have not visited will become
2589       // live, another update is triggered.
2590       if (!UsedAssumedInformation)
2591         indicateOptimisticFixpoint();
2592       return ChangeStatus::UNCHANGED;
2593     }
2594 
2595     const AAFunctionReachability &EdgeReachability =
2596         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2597                                            DepClassTy::REQUIRED);
2598     if (EdgeReachability.canReach(A, *getAnchorScope()))
2599       return indicatePessimisticFixpoint();
2600     return ChangeStatus::UNCHANGED;
2601   }
2602 
2603   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2604 };
2605 
2606 /// NoRecurse attribute deduction for a call sites.
2607 struct AANoRecurseCallSite final : AANoRecurseImpl {
2608   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2609       : AANoRecurseImpl(IRP, A) {}
2610 
2611   /// See AbstractAttribute::initialize(...).
2612   void initialize(Attributor &A) override {
2613     AANoRecurseImpl::initialize(A);
2614     Function *F = getAssociatedFunction();
2615     if (!F || F->isDeclaration())
2616       indicatePessimisticFixpoint();
2617   }
2618 
2619   /// See AbstractAttribute::updateImpl(...).
2620   ChangeStatus updateImpl(Attributor &A) override {
2621     // TODO: Once we have call site specific value information we can provide
2622     //       call site specific liveness information and then it makes
2623     //       sense to specialize attributes for call sites arguments instead of
2624     //       redirecting requests to the callee argument.
2625     Function *F = getAssociatedFunction();
2626     const IRPosition &FnPos = IRPosition::function(*F);
2627     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2628     return clampStateAndIndicateChange(getState(), FnAA.getState());
2629   }
2630 
2631   /// See AbstractAttribute::trackStatistics()
2632   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2633 };
2634 } // namespace
2635 
2636 /// -------------------- Undefined-Behavior Attributes ------------------------
2637 
2638 namespace {
2639 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2640   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2641       : AAUndefinedBehavior(IRP, A) {}
2642 
2643   /// See AbstractAttribute::updateImpl(...).
2644   // through a pointer (i.e. also branches etc.)
2645   ChangeStatus updateImpl(Attributor &A) override {
2646     const size_t UBPrevSize = KnownUBInsts.size();
2647     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2648 
2649     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2650       // Lang ref now states volatile store is not UB, let's skip them.
2651       if (I.isVolatile() && I.mayWriteToMemory())
2652         return true;
2653 
2654       // Skip instructions that are already saved.
2655       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2656         return true;
2657 
2658       // If we reach here, we know we have an instruction
2659       // that accesses memory through a pointer operand,
2660       // for which getPointerOperand() should give it to us.
2661       Value *PtrOp =
2662           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2663       assert(PtrOp &&
2664              "Expected pointer operand of memory accessing instruction");
2665 
2666       // Either we stopped and the appropriate action was taken,
2667       // or we got back a simplified value to continue.
2668       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2669       if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
2670         return true;
2671       const Value *PtrOpVal = SimplifiedPtrOp.value();
2672 
2673       // A memory access through a pointer is considered UB
2674       // only if the pointer has constant null value.
2675       // TODO: Expand it to not only check constant values.
2676       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2677         AssumedNoUBInsts.insert(&I);
2678         return true;
2679       }
2680       const Type *PtrTy = PtrOpVal->getType();
2681 
2682       // Because we only consider instructions inside functions,
2683       // assume that a parent function exists.
2684       const Function *F = I.getFunction();
2685 
2686       // A memory access using constant null pointer is only considered UB
2687       // if null pointer is _not_ defined for the target platform.
2688       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2689         AssumedNoUBInsts.insert(&I);
2690       else
2691         KnownUBInsts.insert(&I);
2692       return true;
2693     };
2694 
2695     auto InspectBrInstForUB = [&](Instruction &I) {
2696       // A conditional branch instruction is considered UB if it has `undef`
2697       // condition.
2698 
2699       // Skip instructions that are already saved.
2700       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2701         return true;
2702 
2703       // We know we have a branch instruction.
2704       auto *BrInst = cast<BranchInst>(&I);
2705 
2706       // Unconditional branches are never considered UB.
2707       if (BrInst->isUnconditional())
2708         return true;
2709 
2710       // Either we stopped and the appropriate action was taken,
2711       // or we got back a simplified value to continue.
2712       Optional<Value *> SimplifiedCond =
2713           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2714       if (!SimplifiedCond || !*SimplifiedCond)
2715         return true;
2716       AssumedNoUBInsts.insert(&I);
2717       return true;
2718     };
2719 
2720     auto InspectCallSiteForUB = [&](Instruction &I) {
2721       // Check whether a callsite always cause UB or not
2722 
2723       // Skip instructions that are already saved.
2724       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2725         return true;
2726 
2727       // Check nonnull and noundef argument attribute violation for each
2728       // callsite.
2729       CallBase &CB = cast<CallBase>(I);
2730       Function *Callee = CB.getCalledFunction();
2731       if (!Callee)
2732         return true;
2733       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2734         // If current argument is known to be simplified to null pointer and the
2735         // corresponding argument position is known to have nonnull attribute,
2736         // the argument is poison. Furthermore, if the argument is poison and
2737         // the position is known to have noundef attriubte, this callsite is
2738         // considered UB.
2739         if (idx >= Callee->arg_size())
2740           break;
2741         Value *ArgVal = CB.getArgOperand(idx);
2742         if (!ArgVal)
2743           continue;
2744         // Here, we handle three cases.
2745         //   (1) Not having a value means it is dead. (we can replace the value
2746         //       with undef)
2747         //   (2) Simplified to undef. The argument violate noundef attriubte.
2748         //   (3) Simplified to null pointer where known to be nonnull.
2749         //       The argument is a poison value and violate noundef attribute.
2750         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2751         auto &NoUndefAA =
2752             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2753         if (!NoUndefAA.isKnownNoUndef())
2754           continue;
2755         bool UsedAssumedInformation = false;
2756         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2757             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2758         if (UsedAssumedInformation)
2759           continue;
2760         if (SimplifiedVal && !SimplifiedVal.value())
2761           return true;
2762         if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
2763           KnownUBInsts.insert(&I);
2764           continue;
2765         }
2766         if (!ArgVal->getType()->isPointerTy() ||
2767             !isa<ConstantPointerNull>(*SimplifiedVal.value()))
2768           continue;
2769         auto &NonNullAA =
2770             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2771         if (NonNullAA.isKnownNonNull())
2772           KnownUBInsts.insert(&I);
2773       }
2774       return true;
2775     };
2776 
2777     auto InspectReturnInstForUB = [&](Instruction &I) {
2778       auto &RI = cast<ReturnInst>(I);
2779       // Either we stopped and the appropriate action was taken,
2780       // or we got back a simplified return value to continue.
2781       Optional<Value *> SimplifiedRetValue =
2782           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2783       if (!SimplifiedRetValue || !*SimplifiedRetValue)
2784         return true;
2785 
2786       // Check if a return instruction always cause UB or not
2787       // Note: It is guaranteed that the returned position of the anchor
2788       //       scope has noundef attribute when this is called.
2789       //       We also ensure the return position is not "assumed dead"
2790       //       because the returned value was then potentially simplified to
2791       //       `undef` in AAReturnedValues without removing the `noundef`
2792       //       attribute yet.
2793 
2794       // When the returned position has noundef attriubte, UB occurs in the
2795       // following cases.
2796       //   (1) Returned value is known to be undef.
2797       //   (2) The value is known to be a null pointer and the returned
2798       //       position has nonnull attribute (because the returned value is
2799       //       poison).
2800       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2801         auto &NonNullAA = A.getAAFor<AANonNull>(
2802             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2803         if (NonNullAA.isKnownNonNull())
2804           KnownUBInsts.insert(&I);
2805       }
2806 
2807       return true;
2808     };
2809 
2810     bool UsedAssumedInformation = false;
2811     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2812                               {Instruction::Load, Instruction::Store,
2813                                Instruction::AtomicCmpXchg,
2814                                Instruction::AtomicRMW},
2815                               UsedAssumedInformation,
2816                               /* CheckBBLivenessOnly */ true);
2817     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2818                               UsedAssumedInformation,
2819                               /* CheckBBLivenessOnly */ true);
2820     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2821                                       UsedAssumedInformation);
2822 
2823     // If the returned position of the anchor scope has noundef attriubte, check
2824     // all returned instructions.
2825     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2826       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2827       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2828         auto &RetPosNoUndefAA =
2829             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2830         if (RetPosNoUndefAA.isKnownNoUndef())
2831           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2832                                     {Instruction::Ret}, UsedAssumedInformation,
2833                                     /* CheckBBLivenessOnly */ true);
2834       }
2835     }
2836 
2837     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2838         UBPrevSize != KnownUBInsts.size())
2839       return ChangeStatus::CHANGED;
2840     return ChangeStatus::UNCHANGED;
2841   }
2842 
2843   bool isKnownToCauseUB(Instruction *I) const override {
2844     return KnownUBInsts.count(I);
2845   }
2846 
2847   bool isAssumedToCauseUB(Instruction *I) const override {
2848     // In simple words, if an instruction is not in the assumed to _not_
2849     // cause UB, then it is assumed UB (that includes those
2850     // in the KnownUBInsts set). The rest is boilerplate
2851     // is to ensure that it is one of the instructions we test
2852     // for UB.
2853 
2854     switch (I->getOpcode()) {
2855     case Instruction::Load:
2856     case Instruction::Store:
2857     case Instruction::AtomicCmpXchg:
2858     case Instruction::AtomicRMW:
2859       return !AssumedNoUBInsts.count(I);
2860     case Instruction::Br: {
2861       auto *BrInst = cast<BranchInst>(I);
2862       if (BrInst->isUnconditional())
2863         return false;
2864       return !AssumedNoUBInsts.count(I);
2865     } break;
2866     default:
2867       return false;
2868     }
2869     return false;
2870   }
2871 
2872   ChangeStatus manifest(Attributor &A) override {
2873     if (KnownUBInsts.empty())
2874       return ChangeStatus::UNCHANGED;
2875     for (Instruction *I : KnownUBInsts)
2876       A.changeToUnreachableAfterManifest(I);
2877     return ChangeStatus::CHANGED;
2878   }
2879 
2880   /// See AbstractAttribute::getAsStr()
2881   const std::string getAsStr() const override {
2882     return getAssumed() ? "undefined-behavior" : "no-ub";
2883   }
2884 
2885   /// Note: The correctness of this analysis depends on the fact that the
2886   /// following 2 sets will stop changing after some point.
2887   /// "Change" here means that their size changes.
2888   /// The size of each set is monotonically increasing
2889   /// (we only add items to them) and it is upper bounded by the number of
2890   /// instructions in the processed function (we can never save more
2891   /// elements in either set than this number). Hence, at some point,
2892   /// they will stop increasing.
2893   /// Consequently, at some point, both sets will have stopped
2894   /// changing, effectively making the analysis reach a fixpoint.
2895 
2896   /// Note: These 2 sets are disjoint and an instruction can be considered
2897   /// one of 3 things:
2898   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2899   ///    the KnownUBInsts set.
2900   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2901   ///    has a reason to assume it).
2902   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2903   ///    could not find a reason to assume or prove that it can cause UB,
2904   ///    hence it assumes it doesn't. We have a set for these instructions
2905   ///    so that we don't reprocess them in every update.
2906   ///    Note however that instructions in this set may cause UB.
2907 
2908 protected:
2909   /// A set of all live instructions _known_ to cause UB.
2910   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2911 
2912 private:
2913   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2914   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2915 
2916   // Should be called on updates in which if we're processing an instruction
2917   // \p I that depends on a value \p V, one of the following has to happen:
2918   // - If the value is assumed, then stop.
2919   // - If the value is known but undef, then consider it UB.
2920   // - Otherwise, do specific processing with the simplified value.
2921   // We return None in the first 2 cases to signify that an appropriate
2922   // action was taken and the caller should stop.
2923   // Otherwise, we return the simplified value that the caller should
2924   // use for specific processing.
2925   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2926                                          Instruction *I) {
2927     bool UsedAssumedInformation = false;
2928     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2929         IRPosition::value(*V), *this, UsedAssumedInformation);
2930     if (!UsedAssumedInformation) {
2931       // Don't depend on assumed values.
2932       if (!SimplifiedV) {
2933         // If it is known (which we tested above) but it doesn't have a value,
2934         // then we can assume `undef` and hence the instruction is UB.
2935         KnownUBInsts.insert(I);
2936         return llvm::None;
2937       }
2938       if (!*SimplifiedV)
2939         return nullptr;
2940       V = *SimplifiedV;
2941     }
2942     if (isa<UndefValue>(V)) {
2943       KnownUBInsts.insert(I);
2944       return llvm::None;
2945     }
2946     return V;
2947   }
2948 };
2949 
2950 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2951   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2952       : AAUndefinedBehaviorImpl(IRP, A) {}
2953 
2954   /// See AbstractAttribute::trackStatistics()
2955   void trackStatistics() const override {
2956     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2957                "Number of instructions known to have UB");
2958     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2959         KnownUBInsts.size();
2960   }
2961 };
2962 } // namespace
2963 
2964 /// ------------------------ Will-Return Attributes ----------------------------
2965 
2966 namespace {
2967 // Helper function that checks whether a function has any cycle which we don't
2968 // know if it is bounded or not.
2969 // Loops with maximum trip count are considered bounded, any other cycle not.
2970 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2971   ScalarEvolution *SE =
2972       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2973   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2974   // If either SCEV or LoopInfo is not available for the function then we assume
2975   // any cycle to be unbounded cycle.
2976   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2977   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2978   if (!SE || !LI) {
2979     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2980       if (SCCI.hasCycle())
2981         return true;
2982     return false;
2983   }
2984 
2985   // If there's irreducible control, the function may contain non-loop cycles.
2986   if (mayContainIrreducibleControl(F, LI))
2987     return true;
2988 
2989   // Any loop that does not have a max trip count is considered unbounded cycle.
2990   for (auto *L : LI->getLoopsInPreorder()) {
2991     if (!SE->getSmallConstantMaxTripCount(L))
2992       return true;
2993   }
2994   return false;
2995 }
2996 
2997 struct AAWillReturnImpl : public AAWillReturn {
2998   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2999       : AAWillReturn(IRP, A) {}
3000 
3001   /// See AbstractAttribute::initialize(...).
3002   void initialize(Attributor &A) override {
3003     AAWillReturn::initialize(A);
3004 
3005     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
3006       indicateOptimisticFixpoint();
3007       return;
3008     }
3009   }
3010 
3011   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
3012   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
3013     // Check for `mustprogress` in the scope and the associated function which
3014     // might be different if this is a call site.
3015     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
3016         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
3017       return false;
3018 
3019     bool IsKnown;
3020     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3021       return IsKnown || !KnownOnly;
3022     return false;
3023   }
3024 
3025   /// See AbstractAttribute::updateImpl(...).
3026   ChangeStatus updateImpl(Attributor &A) override {
3027     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3028       return ChangeStatus::UNCHANGED;
3029 
3030     auto CheckForWillReturn = [&](Instruction &I) {
3031       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
3032       const auto &WillReturnAA =
3033           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
3034       if (WillReturnAA.isKnownWillReturn())
3035         return true;
3036       if (!WillReturnAA.isAssumedWillReturn())
3037         return false;
3038       const auto &NoRecurseAA =
3039           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
3040       return NoRecurseAA.isAssumedNoRecurse();
3041     };
3042 
3043     bool UsedAssumedInformation = false;
3044     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3045                                            UsedAssumedInformation))
3046       return indicatePessimisticFixpoint();
3047 
3048     return ChangeStatus::UNCHANGED;
3049   }
3050 
3051   /// See AbstractAttribute::getAsStr()
3052   const std::string getAsStr() const override {
3053     return getAssumed() ? "willreturn" : "may-noreturn";
3054   }
3055 };
3056 
3057 struct AAWillReturnFunction final : AAWillReturnImpl {
3058   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3059       : AAWillReturnImpl(IRP, A) {}
3060 
3061   /// See AbstractAttribute::initialize(...).
3062   void initialize(Attributor &A) override {
3063     AAWillReturnImpl::initialize(A);
3064 
3065     Function *F = getAnchorScope();
3066     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3067       indicatePessimisticFixpoint();
3068   }
3069 
3070   /// See AbstractAttribute::trackStatistics()
3071   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3072 };
3073 
3074 /// WillReturn attribute deduction for a call sites.
3075 struct AAWillReturnCallSite final : AAWillReturnImpl {
3076   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3077       : AAWillReturnImpl(IRP, A) {}
3078 
3079   /// See AbstractAttribute::initialize(...).
3080   void initialize(Attributor &A) override {
3081     AAWillReturnImpl::initialize(A);
3082     Function *F = getAssociatedFunction();
3083     if (!F || !A.isFunctionIPOAmendable(*F))
3084       indicatePessimisticFixpoint();
3085   }
3086 
3087   /// See AbstractAttribute::updateImpl(...).
3088   ChangeStatus updateImpl(Attributor &A) override {
3089     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3090       return ChangeStatus::UNCHANGED;
3091 
3092     // TODO: Once we have call site specific value information we can provide
3093     //       call site specific liveness information and then it makes
3094     //       sense to specialize attributes for call sites arguments instead of
3095     //       redirecting requests to the callee argument.
3096     Function *F = getAssociatedFunction();
3097     const IRPosition &FnPos = IRPosition::function(*F);
3098     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3099     return clampStateAndIndicateChange(getState(), FnAA.getState());
3100   }
3101 
3102   /// See AbstractAttribute::trackStatistics()
3103   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3104 };
3105 } // namespace
3106 
3107 /// -------------------AAReachability Attribute--------------------------
3108 
3109 namespace {
3110 struct AAReachabilityImpl : AAReachability {
3111   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3112       : AAReachability(IRP, A) {}
3113 
3114   const std::string getAsStr() const override {
3115     // TODO: Return the number of reachable queries.
3116     return "reachable";
3117   }
3118 
3119   /// See AbstractAttribute::updateImpl(...).
3120   ChangeStatus updateImpl(Attributor &A) override {
3121     return ChangeStatus::UNCHANGED;
3122   }
3123 };
3124 
3125 struct AAReachabilityFunction final : public AAReachabilityImpl {
3126   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3127       : AAReachabilityImpl(IRP, A) {}
3128 
3129   /// See AbstractAttribute::trackStatistics()
3130   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3131 };
3132 } // namespace
3133 
3134 /// ------------------------ NoAlias Argument Attribute ------------------------
3135 
3136 namespace {
3137 struct AANoAliasImpl : AANoAlias {
3138   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3139     assert(getAssociatedType()->isPointerTy() &&
3140            "Noalias is a pointer attribute");
3141   }
3142 
3143   const std::string getAsStr() const override {
3144     return getAssumed() ? "noalias" : "may-alias";
3145   }
3146 };
3147 
3148 /// NoAlias attribute for a floating value.
3149 struct AANoAliasFloating final : AANoAliasImpl {
3150   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3151       : AANoAliasImpl(IRP, A) {}
3152 
3153   /// See AbstractAttribute::initialize(...).
3154   void initialize(Attributor &A) override {
3155     AANoAliasImpl::initialize(A);
3156     Value *Val = &getAssociatedValue();
3157     do {
3158       CastInst *CI = dyn_cast<CastInst>(Val);
3159       if (!CI)
3160         break;
3161       Value *Base = CI->getOperand(0);
3162       if (!Base->hasOneUse())
3163         break;
3164       Val = Base;
3165     } while (true);
3166 
3167     if (!Val->getType()->isPointerTy()) {
3168       indicatePessimisticFixpoint();
3169       return;
3170     }
3171 
3172     if (isa<AllocaInst>(Val))
3173       indicateOptimisticFixpoint();
3174     else if (isa<ConstantPointerNull>(Val) &&
3175              !NullPointerIsDefined(getAnchorScope(),
3176                                    Val->getType()->getPointerAddressSpace()))
3177       indicateOptimisticFixpoint();
3178     else if (Val != &getAssociatedValue()) {
3179       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3180           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3181       if (ValNoAliasAA.isKnownNoAlias())
3182         indicateOptimisticFixpoint();
3183     }
3184   }
3185 
3186   /// See AbstractAttribute::updateImpl(...).
3187   ChangeStatus updateImpl(Attributor &A) override {
3188     // TODO: Implement this.
3189     return indicatePessimisticFixpoint();
3190   }
3191 
3192   /// See AbstractAttribute::trackStatistics()
3193   void trackStatistics() const override {
3194     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3195   }
3196 };
3197 
3198 /// NoAlias attribute for an argument.
3199 struct AANoAliasArgument final
3200     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3201   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3202   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3203 
3204   /// See AbstractAttribute::initialize(...).
3205   void initialize(Attributor &A) override {
3206     Base::initialize(A);
3207     // See callsite argument attribute and callee argument attribute.
3208     if (hasAttr({Attribute::ByVal}))
3209       indicateOptimisticFixpoint();
3210   }
3211 
3212   /// See AbstractAttribute::update(...).
3213   ChangeStatus updateImpl(Attributor &A) override {
3214     // We have to make sure no-alias on the argument does not break
3215     // synchronization when this is a callback argument, see also [1] below.
3216     // If synchronization cannot be affected, we delegate to the base updateImpl
3217     // function, otherwise we give up for now.
3218 
3219     // If the function is no-sync, no-alias cannot break synchronization.
3220     const auto &NoSyncAA =
3221         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3222                              DepClassTy::OPTIONAL);
3223     if (NoSyncAA.isAssumedNoSync())
3224       return Base::updateImpl(A);
3225 
3226     // If the argument is read-only, no-alias cannot break synchronization.
3227     bool IsKnown;
3228     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3229       return Base::updateImpl(A);
3230 
3231     // If the argument is never passed through callbacks, no-alias cannot break
3232     // synchronization.
3233     bool UsedAssumedInformation = false;
3234     if (A.checkForAllCallSites(
3235             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3236             true, UsedAssumedInformation))
3237       return Base::updateImpl(A);
3238 
3239     // TODO: add no-alias but make sure it doesn't break synchronization by
3240     // introducing fake uses. See:
3241     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3242     //     International Workshop on OpenMP 2018,
3243     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3244 
3245     return indicatePessimisticFixpoint();
3246   }
3247 
3248   /// See AbstractAttribute::trackStatistics()
3249   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3250 };
3251 
3252 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3253   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3254       : AANoAliasImpl(IRP, A) {}
3255 
3256   /// See AbstractAttribute::initialize(...).
3257   void initialize(Attributor &A) override {
3258     // See callsite argument attribute and callee argument attribute.
3259     const auto &CB = cast<CallBase>(getAnchorValue());
3260     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3261       indicateOptimisticFixpoint();
3262     Value &Val = getAssociatedValue();
3263     if (isa<ConstantPointerNull>(Val) &&
3264         !NullPointerIsDefined(getAnchorScope(),
3265                               Val.getType()->getPointerAddressSpace()))
3266       indicateOptimisticFixpoint();
3267   }
3268 
3269   /// Determine if the underlying value may alias with the call site argument
3270   /// \p OtherArgNo of \p ICS (= the underlying call site).
3271   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3272                             const AAMemoryBehavior &MemBehaviorAA,
3273                             const CallBase &CB, unsigned OtherArgNo) {
3274     // We do not need to worry about aliasing with the underlying IRP.
3275     if (this->getCalleeArgNo() == (int)OtherArgNo)
3276       return false;
3277 
3278     // If it is not a pointer or pointer vector we do not alias.
3279     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3280     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3281       return false;
3282 
3283     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3284         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3285 
3286     // If the argument is readnone, there is no read-write aliasing.
3287     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3288       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3289       return false;
3290     }
3291 
3292     // If the argument is readonly and the underlying value is readonly, there
3293     // is no read-write aliasing.
3294     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3295     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3296       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3297       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3298       return false;
3299     }
3300 
3301     // We have to utilize actual alias analysis queries so we need the object.
3302     if (!AAR)
3303       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3304 
3305     // Try to rule it out at the call site.
3306     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3307     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3308                          "callsite arguments: "
3309                       << getAssociatedValue() << " " << *ArgOp << " => "
3310                       << (IsAliasing ? "" : "no-") << "alias \n");
3311 
3312     return IsAliasing;
3313   }
3314 
3315   bool
3316   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3317                                          const AAMemoryBehavior &MemBehaviorAA,
3318                                          const AANoAlias &NoAliasAA) {
3319     // We can deduce "noalias" if the following conditions hold.
3320     // (i)   Associated value is assumed to be noalias in the definition.
3321     // (ii)  Associated value is assumed to be no-capture in all the uses
3322     //       possibly executed before this callsite.
3323     // (iii) There is no other pointer argument which could alias with the
3324     //       value.
3325 
3326     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3327     if (!AssociatedValueIsNoAliasAtDef) {
3328       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3329                         << " is not no-alias at the definition\n");
3330       return false;
3331     }
3332 
3333     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3334       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3335           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3336       return DerefAA.getAssumedDereferenceableBytes();
3337     };
3338 
3339     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3340 
3341     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3342     const Function *ScopeFn = VIRP.getAnchorScope();
3343     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3344     // Check whether the value is captured in the scope using AANoCapture.
3345     // Look at CFG and check only uses possibly executed before this
3346     // callsite.
3347     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3348       Instruction *UserI = cast<Instruction>(U.getUser());
3349 
3350       // If UserI is the curr instruction and there is a single potential use of
3351       // the value in UserI we allow the use.
3352       // TODO: We should inspect the operands and allow those that cannot alias
3353       //       with the value.
3354       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3355         return true;
3356 
3357       if (ScopeFn) {
3358         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3359           if (CB->isArgOperand(&U)) {
3360 
3361             unsigned ArgNo = CB->getArgOperandNo(&U);
3362 
3363             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3364                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3365                 DepClassTy::OPTIONAL);
3366 
3367             if (NoCaptureAA.isAssumedNoCapture())
3368               return true;
3369           }
3370         }
3371 
3372         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3373           return true;
3374       }
3375 
3376       // TODO: We should track the capturing uses in AANoCapture but the problem
3377       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3378       //       a value in the module slice.
3379       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3380       case UseCaptureKind::NO_CAPTURE:
3381         return true;
3382       case UseCaptureKind::MAY_CAPTURE:
3383         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3384                           << "\n");
3385         return false;
3386       case UseCaptureKind::PASSTHROUGH:
3387         Follow = true;
3388         return true;
3389       }
3390       llvm_unreachable("unknown UseCaptureKind");
3391     };
3392 
3393     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3394       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3395         LLVM_DEBUG(
3396             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3397                    << " cannot be noalias as it is potentially captured\n");
3398         return false;
3399       }
3400     }
3401     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3402 
3403     // Check there is no other pointer argument which could alias with the
3404     // value passed at this call site.
3405     // TODO: AbstractCallSite
3406     const auto &CB = cast<CallBase>(getAnchorValue());
3407     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3408       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3409         return false;
3410 
3411     return true;
3412   }
3413 
3414   /// See AbstractAttribute::updateImpl(...).
3415   ChangeStatus updateImpl(Attributor &A) override {
3416     // If the argument is readnone we are done as there are no accesses via the
3417     // argument.
3418     auto &MemBehaviorAA =
3419         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3420     if (MemBehaviorAA.isAssumedReadNone()) {
3421       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3422       return ChangeStatus::UNCHANGED;
3423     }
3424 
3425     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3426     const auto &NoAliasAA =
3427         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3428 
3429     AAResults *AAR = nullptr;
3430     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3431                                                NoAliasAA)) {
3432       LLVM_DEBUG(
3433           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3434       return ChangeStatus::UNCHANGED;
3435     }
3436 
3437     return indicatePessimisticFixpoint();
3438   }
3439 
3440   /// See AbstractAttribute::trackStatistics()
3441   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3442 };
3443 
3444 /// NoAlias attribute for function return value.
3445 struct AANoAliasReturned final : AANoAliasImpl {
3446   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3447       : AANoAliasImpl(IRP, A) {}
3448 
3449   /// See AbstractAttribute::initialize(...).
3450   void initialize(Attributor &A) override {
3451     AANoAliasImpl::initialize(A);
3452     Function *F = getAssociatedFunction();
3453     if (!F || F->isDeclaration())
3454       indicatePessimisticFixpoint();
3455   }
3456 
3457   /// See AbstractAttribute::updateImpl(...).
3458   virtual ChangeStatus updateImpl(Attributor &A) override {
3459 
3460     auto CheckReturnValue = [&](Value &RV) -> bool {
3461       if (Constant *C = dyn_cast<Constant>(&RV))
3462         if (C->isNullValue() || isa<UndefValue>(C))
3463           return true;
3464 
3465       /// For now, we can only deduce noalias if we have call sites.
3466       /// FIXME: add more support.
3467       if (!isa<CallBase>(&RV))
3468         return false;
3469 
3470       const IRPosition &RVPos = IRPosition::value(RV);
3471       const auto &NoAliasAA =
3472           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3473       if (!NoAliasAA.isAssumedNoAlias())
3474         return false;
3475 
3476       const auto &NoCaptureAA =
3477           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3478       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3479     };
3480 
3481     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3482       return indicatePessimisticFixpoint();
3483 
3484     return ChangeStatus::UNCHANGED;
3485   }
3486 
3487   /// See AbstractAttribute::trackStatistics()
3488   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3489 };
3490 
3491 /// NoAlias attribute deduction for a call site return value.
3492 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3493   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3494       : AANoAliasImpl(IRP, A) {}
3495 
3496   /// See AbstractAttribute::initialize(...).
3497   void initialize(Attributor &A) override {
3498     AANoAliasImpl::initialize(A);
3499     Function *F = getAssociatedFunction();
3500     if (!F || F->isDeclaration())
3501       indicatePessimisticFixpoint();
3502   }
3503 
3504   /// See AbstractAttribute::updateImpl(...).
3505   ChangeStatus updateImpl(Attributor &A) override {
3506     // TODO: Once we have call site specific value information we can provide
3507     //       call site specific liveness information and then it makes
3508     //       sense to specialize attributes for call sites arguments instead of
3509     //       redirecting requests to the callee argument.
3510     Function *F = getAssociatedFunction();
3511     const IRPosition &FnPos = IRPosition::returned(*F);
3512     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3513     return clampStateAndIndicateChange(getState(), FnAA.getState());
3514   }
3515 
3516   /// See AbstractAttribute::trackStatistics()
3517   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3518 };
3519 } // namespace
3520 
3521 /// -------------------AAIsDead Function Attribute-----------------------
3522 
3523 namespace {
3524 struct AAIsDeadValueImpl : public AAIsDead {
3525   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3526 
3527   /// See AbstractAttribute::initialize(...).
3528   void initialize(Attributor &A) override {
3529     if (auto *Scope = getAnchorScope())
3530       if (!A.isRunOn(*Scope))
3531         indicatePessimisticFixpoint();
3532   }
3533 
3534   /// See AAIsDead::isAssumedDead().
3535   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3536 
3537   /// See AAIsDead::isKnownDead().
3538   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3539 
3540   /// See AAIsDead::isAssumedDead(BasicBlock *).
3541   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3542 
3543   /// See AAIsDead::isKnownDead(BasicBlock *).
3544   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3545 
3546   /// See AAIsDead::isAssumedDead(Instruction *I).
3547   bool isAssumedDead(const Instruction *I) const override {
3548     return I == getCtxI() && isAssumedDead();
3549   }
3550 
3551   /// See AAIsDead::isKnownDead(Instruction *I).
3552   bool isKnownDead(const Instruction *I) const override {
3553     return isAssumedDead(I) && isKnownDead();
3554   }
3555 
3556   /// See AbstractAttribute::getAsStr().
3557   virtual const std::string getAsStr() const override {
3558     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3559   }
3560 
3561   /// Check if all uses are assumed dead.
3562   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3563     // Callers might not check the type, void has no uses.
3564     if (V.getType()->isVoidTy() || V.use_empty())
3565       return true;
3566 
3567     // If we replace a value with a constant there are no uses left afterwards.
3568     if (!isa<Constant>(V)) {
3569       if (auto *I = dyn_cast<Instruction>(&V))
3570         if (!A.isRunOn(*I->getFunction()))
3571           return false;
3572       bool UsedAssumedInformation = false;
3573       Optional<Constant *> C =
3574           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3575       if (!C || *C)
3576         return true;
3577     }
3578 
3579     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3580     // Explicitly set the dependence class to required because we want a long
3581     // chain of N dependent instructions to be considered live as soon as one is
3582     // without going through N update cycles. This is not required for
3583     // correctness.
3584     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3585                              DepClassTy::REQUIRED,
3586                              /* IgnoreDroppableUses */ false);
3587   }
3588 
3589   /// Determine if \p I is assumed to be side-effect free.
3590   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3591     if (!I || wouldInstructionBeTriviallyDead(I))
3592       return true;
3593 
3594     auto *CB = dyn_cast<CallBase>(I);
3595     if (!CB || isa<IntrinsicInst>(CB))
3596       return false;
3597 
3598     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3599     const auto &NoUnwindAA =
3600         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3601     if (!NoUnwindAA.isAssumedNoUnwind())
3602       return false;
3603     if (!NoUnwindAA.isKnownNoUnwind())
3604       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3605 
3606     bool IsKnown;
3607     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3608   }
3609 };
3610 
3611 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3612   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3613       : AAIsDeadValueImpl(IRP, A) {}
3614 
3615   /// See AbstractAttribute::initialize(...).
3616   void initialize(Attributor &A) override {
3617     AAIsDeadValueImpl::initialize(A);
3618 
3619     if (isa<UndefValue>(getAssociatedValue())) {
3620       indicatePessimisticFixpoint();
3621       return;
3622     }
3623 
3624     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3625     if (!isAssumedSideEffectFree(A, I)) {
3626       if (!isa_and_nonnull<StoreInst>(I))
3627         indicatePessimisticFixpoint();
3628       else
3629         removeAssumedBits(HAS_NO_EFFECT);
3630     }
3631   }
3632 
3633   bool isDeadStore(Attributor &A, StoreInst &SI) {
3634     // Lang ref now states volatile store is not UB/dead, let's skip them.
3635     if (SI.isVolatile())
3636       return false;
3637 
3638     bool UsedAssumedInformation = false;
3639     SmallSetVector<Value *, 4> PotentialCopies;
3640     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3641                                              UsedAssumedInformation))
3642       return false;
3643     return llvm::all_of(PotentialCopies, [&](Value *V) {
3644       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3645                              UsedAssumedInformation);
3646     });
3647   }
3648 
3649   /// See AbstractAttribute::getAsStr().
3650   const std::string getAsStr() const override {
3651     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3652     if (isa_and_nonnull<StoreInst>(I))
3653       if (isValidState())
3654         return "assumed-dead-store";
3655     return AAIsDeadValueImpl::getAsStr();
3656   }
3657 
3658   /// See AbstractAttribute::updateImpl(...).
3659   ChangeStatus updateImpl(Attributor &A) override {
3660     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3661     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3662       if (!isDeadStore(A, *SI))
3663         return indicatePessimisticFixpoint();
3664     } else {
3665       if (!isAssumedSideEffectFree(A, I))
3666         return indicatePessimisticFixpoint();
3667       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3668         return indicatePessimisticFixpoint();
3669     }
3670     return ChangeStatus::UNCHANGED;
3671   }
3672 
3673   bool isRemovableStore() const override {
3674     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3675   }
3676 
3677   /// See AbstractAttribute::manifest(...).
3678   ChangeStatus manifest(Attributor &A) override {
3679     Value &V = getAssociatedValue();
3680     if (auto *I = dyn_cast<Instruction>(&V)) {
3681       // If we get here we basically know the users are all dead. We check if
3682       // isAssumedSideEffectFree returns true here again because it might not be
3683       // the case and only the users are dead but the instruction (=call) is
3684       // still needed.
3685       if (isa<StoreInst>(I) ||
3686           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3687         A.deleteAfterManifest(*I);
3688         return ChangeStatus::CHANGED;
3689       }
3690     }
3691     return ChangeStatus::UNCHANGED;
3692   }
3693 
3694   /// See AbstractAttribute::trackStatistics()
3695   void trackStatistics() const override {
3696     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3697   }
3698 };
3699 
3700 struct AAIsDeadArgument : public AAIsDeadFloating {
3701   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3702       : AAIsDeadFloating(IRP, A) {}
3703 
3704   /// See AbstractAttribute::initialize(...).
3705   void initialize(Attributor &A) override {
3706     AAIsDeadFloating::initialize(A);
3707     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3708       indicatePessimisticFixpoint();
3709   }
3710 
3711   /// See AbstractAttribute::manifest(...).
3712   ChangeStatus manifest(Attributor &A) override {
3713     Argument &Arg = *getAssociatedArgument();
3714     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3715       if (A.registerFunctionSignatureRewrite(
3716               Arg, /* ReplacementTypes */ {},
3717               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3718               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3719         return ChangeStatus::CHANGED;
3720       }
3721     return ChangeStatus::UNCHANGED;
3722   }
3723 
3724   /// See AbstractAttribute::trackStatistics()
3725   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3726 };
3727 
3728 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3729   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3730       : AAIsDeadValueImpl(IRP, A) {}
3731 
3732   /// See AbstractAttribute::initialize(...).
3733   void initialize(Attributor &A) override {
3734     AAIsDeadValueImpl::initialize(A);
3735     if (isa<UndefValue>(getAssociatedValue()))
3736       indicatePessimisticFixpoint();
3737   }
3738 
3739   /// See AbstractAttribute::updateImpl(...).
3740   ChangeStatus updateImpl(Attributor &A) override {
3741     // TODO: Once we have call site specific value information we can provide
3742     //       call site specific liveness information and then it makes
3743     //       sense to specialize attributes for call sites arguments instead of
3744     //       redirecting requests to the callee argument.
3745     Argument *Arg = getAssociatedArgument();
3746     if (!Arg)
3747       return indicatePessimisticFixpoint();
3748     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3749     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3750     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3751   }
3752 
3753   /// See AbstractAttribute::manifest(...).
3754   ChangeStatus manifest(Attributor &A) override {
3755     CallBase &CB = cast<CallBase>(getAnchorValue());
3756     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3757     assert(!isa<UndefValue>(U.get()) &&
3758            "Expected undef values to be filtered out!");
3759     UndefValue &UV = *UndefValue::get(U->getType());
3760     if (A.changeUseAfterManifest(U, UV))
3761       return ChangeStatus::CHANGED;
3762     return ChangeStatus::UNCHANGED;
3763   }
3764 
3765   /// See AbstractAttribute::trackStatistics()
3766   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3767 };
3768 
3769 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3770   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3771       : AAIsDeadFloating(IRP, A) {}
3772 
3773   /// See AAIsDead::isAssumedDead().
3774   bool isAssumedDead() const override {
3775     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3776   }
3777 
3778   /// See AbstractAttribute::initialize(...).
3779   void initialize(Attributor &A) override {
3780     AAIsDeadFloating::initialize(A);
3781     if (isa<UndefValue>(getAssociatedValue())) {
3782       indicatePessimisticFixpoint();
3783       return;
3784     }
3785 
3786     // We track this separately as a secondary state.
3787     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3788   }
3789 
3790   /// See AbstractAttribute::updateImpl(...).
3791   ChangeStatus updateImpl(Attributor &A) override {
3792     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3793     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3794       IsAssumedSideEffectFree = false;
3795       Changed = ChangeStatus::CHANGED;
3796     }
3797     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3798       return indicatePessimisticFixpoint();
3799     return Changed;
3800   }
3801 
3802   /// See AbstractAttribute::trackStatistics()
3803   void trackStatistics() const override {
3804     if (IsAssumedSideEffectFree)
3805       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3806     else
3807       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3808   }
3809 
3810   /// See AbstractAttribute::getAsStr().
3811   const std::string getAsStr() const override {
3812     return isAssumedDead()
3813                ? "assumed-dead"
3814                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3815   }
3816 
3817 private:
3818   bool IsAssumedSideEffectFree = true;
3819 };
3820 
3821 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3822   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3823       : AAIsDeadValueImpl(IRP, A) {}
3824 
3825   /// See AbstractAttribute::updateImpl(...).
3826   ChangeStatus updateImpl(Attributor &A) override {
3827 
3828     bool UsedAssumedInformation = false;
3829     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3830                               {Instruction::Ret}, UsedAssumedInformation);
3831 
3832     auto PredForCallSite = [&](AbstractCallSite ACS) {
3833       if (ACS.isCallbackCall() || !ACS.getInstruction())
3834         return false;
3835       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3836     };
3837 
3838     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3839                                 UsedAssumedInformation))
3840       return indicatePessimisticFixpoint();
3841 
3842     return ChangeStatus::UNCHANGED;
3843   }
3844 
3845   /// See AbstractAttribute::manifest(...).
3846   ChangeStatus manifest(Attributor &A) override {
3847     // TODO: Rewrite the signature to return void?
3848     bool AnyChange = false;
3849     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3850     auto RetInstPred = [&](Instruction &I) {
3851       ReturnInst &RI = cast<ReturnInst>(I);
3852       if (!isa<UndefValue>(RI.getReturnValue()))
3853         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3854       return true;
3855     };
3856     bool UsedAssumedInformation = false;
3857     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3858                               UsedAssumedInformation);
3859     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3860   }
3861 
3862   /// See AbstractAttribute::trackStatistics()
3863   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3864 };
3865 
3866 struct AAIsDeadFunction : public AAIsDead {
3867   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3868 
3869   /// See AbstractAttribute::initialize(...).
3870   void initialize(Attributor &A) override {
3871     Function *F = getAnchorScope();
3872     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3873       indicatePessimisticFixpoint();
3874       return;
3875     }
3876     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3877     assumeLive(A, F->getEntryBlock());
3878   }
3879 
3880   /// See AbstractAttribute::getAsStr().
3881   const std::string getAsStr() const override {
3882     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3883            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3884            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3885            std::to_string(KnownDeadEnds.size()) + "]";
3886   }
3887 
3888   /// See AbstractAttribute::manifest(...).
3889   ChangeStatus manifest(Attributor &A) override {
3890     assert(getState().isValidState() &&
3891            "Attempted to manifest an invalid state!");
3892 
3893     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3894     Function &F = *getAnchorScope();
3895 
3896     if (AssumedLiveBlocks.empty()) {
3897       A.deleteAfterManifest(F);
3898       return ChangeStatus::CHANGED;
3899     }
3900 
3901     // Flag to determine if we can change an invoke to a call assuming the
3902     // callee is nounwind. This is not possible if the personality of the
3903     // function allows to catch asynchronous exceptions.
3904     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3905 
3906     KnownDeadEnds.set_union(ToBeExploredFrom);
3907     for (const Instruction *DeadEndI : KnownDeadEnds) {
3908       auto *CB = dyn_cast<CallBase>(DeadEndI);
3909       if (!CB)
3910         continue;
3911       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3912           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3913       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3914       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3915         continue;
3916 
3917       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3918         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3919       else
3920         A.changeToUnreachableAfterManifest(
3921             const_cast<Instruction *>(DeadEndI->getNextNode()));
3922       HasChanged = ChangeStatus::CHANGED;
3923     }
3924 
3925     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3926     for (BasicBlock &BB : F)
3927       if (!AssumedLiveBlocks.count(&BB)) {
3928         A.deleteAfterManifest(BB);
3929         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3930         HasChanged = ChangeStatus::CHANGED;
3931       }
3932 
3933     return HasChanged;
3934   }
3935 
3936   /// See AbstractAttribute::updateImpl(...).
3937   ChangeStatus updateImpl(Attributor &A) override;
3938 
3939   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3940     assert(From->getParent() == getAnchorScope() &&
3941            To->getParent() == getAnchorScope() &&
3942            "Used AAIsDead of the wrong function");
3943     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3944   }
3945 
3946   /// See AbstractAttribute::trackStatistics()
3947   void trackStatistics() const override {}
3948 
3949   /// Returns true if the function is assumed dead.
3950   bool isAssumedDead() const override { return false; }
3951 
3952   /// See AAIsDead::isKnownDead().
3953   bool isKnownDead() const override { return false; }
3954 
3955   /// See AAIsDead::isAssumedDead(BasicBlock *).
3956   bool isAssumedDead(const BasicBlock *BB) const override {
3957     assert(BB->getParent() == getAnchorScope() &&
3958            "BB must be in the same anchor scope function.");
3959 
3960     if (!getAssumed())
3961       return false;
3962     return !AssumedLiveBlocks.count(BB);
3963   }
3964 
3965   /// See AAIsDead::isKnownDead(BasicBlock *).
3966   bool isKnownDead(const BasicBlock *BB) const override {
3967     return getKnown() && isAssumedDead(BB);
3968   }
3969 
3970   /// See AAIsDead::isAssumed(Instruction *I).
3971   bool isAssumedDead(const Instruction *I) const override {
3972     assert(I->getParent()->getParent() == getAnchorScope() &&
3973            "Instruction must be in the same anchor scope function.");
3974 
3975     if (!getAssumed())
3976       return false;
3977 
3978     // If it is not in AssumedLiveBlocks then it for sure dead.
3979     // Otherwise, it can still be after noreturn call in a live block.
3980     if (!AssumedLiveBlocks.count(I->getParent()))
3981       return true;
3982 
3983     // If it is not after a liveness barrier it is live.
3984     const Instruction *PrevI = I->getPrevNode();
3985     while (PrevI) {
3986       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3987         return true;
3988       PrevI = PrevI->getPrevNode();
3989     }
3990     return false;
3991   }
3992 
3993   /// See AAIsDead::isKnownDead(Instruction *I).
3994   bool isKnownDead(const Instruction *I) const override {
3995     return getKnown() && isAssumedDead(I);
3996   }
3997 
3998   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3999   /// that internal function called from \p BB should now be looked at.
4000   bool assumeLive(Attributor &A, const BasicBlock &BB) {
4001     if (!AssumedLiveBlocks.insert(&BB).second)
4002       return false;
4003 
4004     // We assume that all of BB is (probably) live now and if there are calls to
4005     // internal functions we will assume that those are now live as well. This
4006     // is a performance optimization for blocks with calls to a lot of internal
4007     // functions. It can however cause dead functions to be treated as live.
4008     for (const Instruction &I : BB)
4009       if (const auto *CB = dyn_cast<CallBase>(&I))
4010         if (const Function *F = CB->getCalledFunction())
4011           if (F->hasLocalLinkage())
4012             A.markLiveInternalFunction(*F);
4013     return true;
4014   }
4015 
4016   /// Collection of instructions that need to be explored again, e.g., we
4017   /// did assume they do not transfer control to (one of their) successors.
4018   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
4019 
4020   /// Collection of instructions that are known to not transfer control.
4021   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
4022 
4023   /// Collection of all assumed live edges
4024   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
4025 
4026   /// Collection of all assumed live BasicBlocks.
4027   DenseSet<const BasicBlock *> AssumedLiveBlocks;
4028 };
4029 
4030 static bool
4031 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
4032                         AbstractAttribute &AA,
4033                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4034   const IRPosition &IPos = IRPosition::callsite_function(CB);
4035 
4036   const auto &NoReturnAA =
4037       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
4038   if (NoReturnAA.isAssumedNoReturn())
4039     return !NoReturnAA.isKnownNoReturn();
4040   if (CB.isTerminator())
4041     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4042   else
4043     AliveSuccessors.push_back(CB.getNextNode());
4044   return false;
4045 }
4046 
4047 static bool
4048 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4049                         AbstractAttribute &AA,
4050                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4051   bool UsedAssumedInformation =
4052       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4053 
4054   // First, determine if we can change an invoke to a call assuming the
4055   // callee is nounwind. This is not possible if the personality of the
4056   // function allows to catch asynchronous exceptions.
4057   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4058     AliveSuccessors.push_back(&II.getUnwindDest()->front());
4059   } else {
4060     const IRPosition &IPos = IRPosition::callsite_function(II);
4061     const auto &AANoUnw =
4062         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4063     if (AANoUnw.isAssumedNoUnwind()) {
4064       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4065     } else {
4066       AliveSuccessors.push_back(&II.getUnwindDest()->front());
4067     }
4068   }
4069   return UsedAssumedInformation;
4070 }
4071 
4072 static bool
4073 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4074                         AbstractAttribute &AA,
4075                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4076   bool UsedAssumedInformation = false;
4077   if (BI.getNumSuccessors() == 1) {
4078     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4079   } else {
4080     Optional<Constant *> C =
4081         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4082     if (!C || isa_and_nonnull<UndefValue>(*C)) {
4083       // No value yet, assume both edges are dead.
4084     } else if (isa_and_nonnull<ConstantInt>(*C)) {
4085       const BasicBlock *SuccBB =
4086           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4087       AliveSuccessors.push_back(&SuccBB->front());
4088     } else {
4089       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4090       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4091       UsedAssumedInformation = false;
4092     }
4093   }
4094   return UsedAssumedInformation;
4095 }
4096 
4097 static bool
4098 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4099                         AbstractAttribute &AA,
4100                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4101   bool UsedAssumedInformation = false;
4102   Optional<Constant *> C =
4103       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4104   if (!C || isa_and_nonnull<UndefValue>(C.value())) {
4105     // No value yet, assume all edges are dead.
4106   } else if (isa_and_nonnull<ConstantInt>(C.value())) {
4107     for (auto &CaseIt : SI.cases()) {
4108       if (CaseIt.getCaseValue() == C.value()) {
4109         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4110         return UsedAssumedInformation;
4111       }
4112     }
4113     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4114     return UsedAssumedInformation;
4115   } else {
4116     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4117       AliveSuccessors.push_back(&SuccBB->front());
4118   }
4119   return UsedAssumedInformation;
4120 }
4121 
4122 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4123   ChangeStatus Change = ChangeStatus::UNCHANGED;
4124 
4125   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4126                     << getAnchorScope()->size() << "] BBs and "
4127                     << ToBeExploredFrom.size() << " exploration points and "
4128                     << KnownDeadEnds.size() << " known dead ends\n");
4129 
4130   // Copy and clear the list of instructions we need to explore from. It is
4131   // refilled with instructions the next update has to look at.
4132   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4133                                                ToBeExploredFrom.end());
4134   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4135 
4136   SmallVector<const Instruction *, 8> AliveSuccessors;
4137   while (!Worklist.empty()) {
4138     const Instruction *I = Worklist.pop_back_val();
4139     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4140 
4141     // Fast forward for uninteresting instructions. We could look for UB here
4142     // though.
4143     while (!I->isTerminator() && !isa<CallBase>(I))
4144       I = I->getNextNode();
4145 
4146     AliveSuccessors.clear();
4147 
4148     bool UsedAssumedInformation = false;
4149     switch (I->getOpcode()) {
4150     // TODO: look for (assumed) UB to backwards propagate "deadness".
4151     default:
4152       assert(I->isTerminator() &&
4153              "Expected non-terminators to be handled already!");
4154       for (const BasicBlock *SuccBB : successors(I->getParent()))
4155         AliveSuccessors.push_back(&SuccBB->front());
4156       break;
4157     case Instruction::Call:
4158       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4159                                                        *this, AliveSuccessors);
4160       break;
4161     case Instruction::Invoke:
4162       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4163                                                        *this, AliveSuccessors);
4164       break;
4165     case Instruction::Br:
4166       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4167                                                        *this, AliveSuccessors);
4168       break;
4169     case Instruction::Switch:
4170       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4171                                                        *this, AliveSuccessors);
4172       break;
4173     }
4174 
4175     if (UsedAssumedInformation) {
4176       NewToBeExploredFrom.insert(I);
4177     } else if (AliveSuccessors.empty() ||
4178                (I->isTerminator() &&
4179                 AliveSuccessors.size() < I->getNumSuccessors())) {
4180       if (KnownDeadEnds.insert(I))
4181         Change = ChangeStatus::CHANGED;
4182     }
4183 
4184     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4185                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4186                       << UsedAssumedInformation << "\n");
4187 
4188     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4189       if (!I->isTerminator()) {
4190         assert(AliveSuccessors.size() == 1 &&
4191                "Non-terminator expected to have a single successor!");
4192         Worklist.push_back(AliveSuccessor);
4193       } else {
4194         // record the assumed live edge
4195         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4196         if (AssumedLiveEdges.insert(Edge).second)
4197           Change = ChangeStatus::CHANGED;
4198         if (assumeLive(A, *AliveSuccessor->getParent()))
4199           Worklist.push_back(AliveSuccessor);
4200       }
4201     }
4202   }
4203 
4204   // Check if the content of ToBeExploredFrom changed, ignore the order.
4205   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4206       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4207         return !ToBeExploredFrom.count(I);
4208       })) {
4209     Change = ChangeStatus::CHANGED;
4210     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4211   }
4212 
4213   // If we know everything is live there is no need to query for liveness.
4214   // Instead, indicating a pessimistic fixpoint will cause the state to be
4215   // "invalid" and all queries to be answered conservatively without lookups.
4216   // To be in this state we have to (1) finished the exploration and (3) not
4217   // discovered any non-trivial dead end and (2) not ruled unreachable code
4218   // dead.
4219   if (ToBeExploredFrom.empty() &&
4220       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4221       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4222         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4223       }))
4224     return indicatePessimisticFixpoint();
4225   return Change;
4226 }
4227 
4228 /// Liveness information for a call sites.
4229 struct AAIsDeadCallSite final : AAIsDeadFunction {
4230   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4231       : AAIsDeadFunction(IRP, A) {}
4232 
4233   /// See AbstractAttribute::initialize(...).
4234   void initialize(Attributor &A) override {
4235     // TODO: Once we have call site specific value information we can provide
4236     //       call site specific liveness information and then it makes
4237     //       sense to specialize attributes for call sites instead of
4238     //       redirecting requests to the callee.
4239     llvm_unreachable("Abstract attributes for liveness are not "
4240                      "supported for call sites yet!");
4241   }
4242 
4243   /// See AbstractAttribute::updateImpl(...).
4244   ChangeStatus updateImpl(Attributor &A) override {
4245     return indicatePessimisticFixpoint();
4246   }
4247 
4248   /// See AbstractAttribute::trackStatistics()
4249   void trackStatistics() const override {}
4250 };
4251 } // namespace
4252 
4253 /// -------------------- Dereferenceable Argument Attribute --------------------
4254 
4255 namespace {
4256 struct AADereferenceableImpl : AADereferenceable {
4257   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4258       : AADereferenceable(IRP, A) {}
4259   using StateType = DerefState;
4260 
4261   /// See AbstractAttribute::initialize(...).
4262   void initialize(Attributor &A) override {
4263     Value &V = *getAssociatedValue().stripPointerCasts();
4264     SmallVector<Attribute, 4> Attrs;
4265     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4266              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4267     for (const Attribute &Attr : Attrs)
4268       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4269 
4270     const IRPosition &IRP = this->getIRPosition();
4271     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4272 
4273     bool CanBeNull, CanBeFreed;
4274     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4275         A.getDataLayout(), CanBeNull, CanBeFreed));
4276 
4277     bool IsFnInterface = IRP.isFnInterfaceKind();
4278     Function *FnScope = IRP.getAnchorScope();
4279     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4280       indicatePessimisticFixpoint();
4281       return;
4282     }
4283 
4284     if (Instruction *CtxI = getCtxI())
4285       followUsesInMBEC(*this, A, getState(), *CtxI);
4286   }
4287 
4288   /// See AbstractAttribute::getState()
4289   /// {
4290   StateType &getState() override { return *this; }
4291   const StateType &getState() const override { return *this; }
4292   /// }
4293 
4294   /// Helper function for collecting accessed bytes in must-be-executed-context
4295   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4296                               DerefState &State) {
4297     const Value *UseV = U->get();
4298     if (!UseV->getType()->isPointerTy())
4299       return;
4300 
4301     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4302     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4303       return;
4304 
4305     int64_t Offset;
4306     const Value *Base = GetPointerBaseWithConstantOffset(
4307         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4308     if (Base && Base == &getAssociatedValue())
4309       State.addAccessedBytes(Offset, Loc->Size.getValue());
4310   }
4311 
4312   /// See followUsesInMBEC
4313   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4314                        AADereferenceable::StateType &State) {
4315     bool IsNonNull = false;
4316     bool TrackUse = false;
4317     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4318         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4319     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4320                       << " for instruction " << *I << "\n");
4321 
4322     addAccessedBytesForUse(A, U, I, State);
4323     State.takeKnownDerefBytesMaximum(DerefBytes);
4324     return TrackUse;
4325   }
4326 
4327   /// See AbstractAttribute::manifest(...).
4328   ChangeStatus manifest(Attributor &A) override {
4329     ChangeStatus Change = AADereferenceable::manifest(A);
4330     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4331       removeAttrs({Attribute::DereferenceableOrNull});
4332       return ChangeStatus::CHANGED;
4333     }
4334     return Change;
4335   }
4336 
4337   void getDeducedAttributes(LLVMContext &Ctx,
4338                             SmallVectorImpl<Attribute> &Attrs) const override {
4339     // TODO: Add *_globally support
4340     if (isAssumedNonNull())
4341       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4342           Ctx, getAssumedDereferenceableBytes()));
4343     else
4344       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4345           Ctx, getAssumedDereferenceableBytes()));
4346   }
4347 
4348   /// See AbstractAttribute::getAsStr().
4349   const std::string getAsStr() const override {
4350     if (!getAssumedDereferenceableBytes())
4351       return "unknown-dereferenceable";
4352     return std::string("dereferenceable") +
4353            (isAssumedNonNull() ? "" : "_or_null") +
4354            (isAssumedGlobal() ? "_globally" : "") + "<" +
4355            std::to_string(getKnownDereferenceableBytes()) + "-" +
4356            std::to_string(getAssumedDereferenceableBytes()) + ">";
4357   }
4358 };
4359 
4360 /// Dereferenceable attribute for a floating value.
4361 struct AADereferenceableFloating : AADereferenceableImpl {
4362   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4363       : AADereferenceableImpl(IRP, A) {}
4364 
4365   /// See AbstractAttribute::updateImpl(...).
4366   ChangeStatus updateImpl(Attributor &A) override {
4367     const DataLayout &DL = A.getDataLayout();
4368 
4369     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4370                             bool Stripped) -> bool {
4371       unsigned IdxWidth =
4372           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4373       APInt Offset(IdxWidth, 0);
4374       const Value *Base = stripAndAccumulateOffsets(
4375           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4376           /* AllowNonInbounds */ true);
4377 
4378       const auto &AA = A.getAAFor<AADereferenceable>(
4379           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4380       int64_t DerefBytes = 0;
4381       if (!Stripped && this == &AA) {
4382         // Use IR information if we did not strip anything.
4383         // TODO: track globally.
4384         bool CanBeNull, CanBeFreed;
4385         DerefBytes =
4386             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4387         T.GlobalState.indicatePessimisticFixpoint();
4388       } else {
4389         const DerefState &DS = AA.getState();
4390         DerefBytes = DS.DerefBytesState.getAssumed();
4391         T.GlobalState &= DS.GlobalState;
4392       }
4393 
4394       // For now we do not try to "increase" dereferenceability due to negative
4395       // indices as we first have to come up with code to deal with loops and
4396       // for overflows of the dereferenceable bytes.
4397       int64_t OffsetSExt = Offset.getSExtValue();
4398       if (OffsetSExt < 0)
4399         OffsetSExt = 0;
4400 
4401       T.takeAssumedDerefBytesMinimum(
4402           std::max(int64_t(0), DerefBytes - OffsetSExt));
4403 
4404       if (this == &AA) {
4405         if (!Stripped) {
4406           // If nothing was stripped IR information is all we got.
4407           T.takeKnownDerefBytesMaximum(
4408               std::max(int64_t(0), DerefBytes - OffsetSExt));
4409           T.indicatePessimisticFixpoint();
4410         } else if (OffsetSExt > 0) {
4411           // If something was stripped but there is circular reasoning we look
4412           // for the offset. If it is positive we basically decrease the
4413           // dereferenceable bytes in a circluar loop now, which will simply
4414           // drive them down to the known value in a very slow way which we
4415           // can accelerate.
4416           T.indicatePessimisticFixpoint();
4417         }
4418       }
4419 
4420       return T.isValidState();
4421     };
4422 
4423     DerefState T;
4424     bool UsedAssumedInformation = false;
4425     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4426                                            VisitValueCB, getCtxI(),
4427                                            UsedAssumedInformation))
4428       return indicatePessimisticFixpoint();
4429 
4430     return clampStateAndIndicateChange(getState(), T);
4431   }
4432 
4433   /// See AbstractAttribute::trackStatistics()
4434   void trackStatistics() const override {
4435     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4436   }
4437 };
4438 
4439 /// Dereferenceable attribute for a return value.
4440 struct AADereferenceableReturned final
4441     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4442   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4443       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4444             IRP, A) {}
4445 
4446   /// See AbstractAttribute::trackStatistics()
4447   void trackStatistics() const override {
4448     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4449   }
4450 };
4451 
4452 /// Dereferenceable attribute for an argument
4453 struct AADereferenceableArgument final
4454     : AAArgumentFromCallSiteArguments<AADereferenceable,
4455                                       AADereferenceableImpl> {
4456   using Base =
4457       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4458   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4459       : Base(IRP, A) {}
4460 
4461   /// See AbstractAttribute::trackStatistics()
4462   void trackStatistics() const override {
4463     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4464   }
4465 };
4466 
4467 /// Dereferenceable attribute for a call site argument.
4468 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4469   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4470       : AADereferenceableFloating(IRP, A) {}
4471 
4472   /// See AbstractAttribute::trackStatistics()
4473   void trackStatistics() const override {
4474     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4475   }
4476 };
4477 
4478 /// Dereferenceable attribute deduction for a call site return value.
4479 struct AADereferenceableCallSiteReturned final
4480     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4481   using Base =
4482       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4483   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4484       : Base(IRP, A) {}
4485 
4486   /// See AbstractAttribute::trackStatistics()
4487   void trackStatistics() const override {
4488     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4489   }
4490 };
4491 } // namespace
4492 
4493 // ------------------------ Align Argument Attribute ------------------------
4494 
4495 namespace {
4496 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4497                                     Value &AssociatedValue, const Use *U,
4498                                     const Instruction *I, bool &TrackUse) {
4499   // We need to follow common pointer manipulation uses to the accesses they
4500   // feed into.
4501   if (isa<CastInst>(I)) {
4502     // Follow all but ptr2int casts.
4503     TrackUse = !isa<PtrToIntInst>(I);
4504     return 0;
4505   }
4506   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4507     if (GEP->hasAllConstantIndices())
4508       TrackUse = true;
4509     return 0;
4510   }
4511 
4512   MaybeAlign MA;
4513   if (const auto *CB = dyn_cast<CallBase>(I)) {
4514     if (CB->isBundleOperand(U) || CB->isCallee(U))
4515       return 0;
4516 
4517     unsigned ArgNo = CB->getArgOperandNo(U);
4518     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4519     // As long as we only use known information there is no need to track
4520     // dependences here.
4521     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4522     MA = MaybeAlign(AlignAA.getKnownAlign());
4523   }
4524 
4525   const DataLayout &DL = A.getDataLayout();
4526   const Value *UseV = U->get();
4527   if (auto *SI = dyn_cast<StoreInst>(I)) {
4528     if (SI->getPointerOperand() == UseV)
4529       MA = SI->getAlign();
4530   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4531     if (LI->getPointerOperand() == UseV)
4532       MA = LI->getAlign();
4533   }
4534 
4535   if (!MA || *MA <= QueryingAA.getKnownAlign())
4536     return 0;
4537 
4538   unsigned Alignment = MA->value();
4539   int64_t Offset;
4540 
4541   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4542     if (Base == &AssociatedValue) {
4543       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4544       // So we can say that the maximum power of two which is a divisor of
4545       // gcd(Offset, Alignment) is an alignment.
4546 
4547       uint32_t gcd =
4548           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4549       Alignment = llvm::PowerOf2Floor(gcd);
4550     }
4551   }
4552 
4553   return Alignment;
4554 }
4555 
4556 struct AAAlignImpl : AAAlign {
4557   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4558 
4559   /// See AbstractAttribute::initialize(...).
4560   void initialize(Attributor &A) override {
4561     SmallVector<Attribute, 4> Attrs;
4562     getAttrs({Attribute::Alignment}, Attrs);
4563     for (const Attribute &Attr : Attrs)
4564       takeKnownMaximum(Attr.getValueAsInt());
4565 
4566     Value &V = *getAssociatedValue().stripPointerCasts();
4567     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4568 
4569     if (getIRPosition().isFnInterfaceKind() &&
4570         (!getAnchorScope() ||
4571          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4572       indicatePessimisticFixpoint();
4573       return;
4574     }
4575 
4576     if (Instruction *CtxI = getCtxI())
4577       followUsesInMBEC(*this, A, getState(), *CtxI);
4578   }
4579 
4580   /// See AbstractAttribute::manifest(...).
4581   ChangeStatus manifest(Attributor &A) override {
4582     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4583 
4584     // Check for users that allow alignment annotations.
4585     Value &AssociatedValue = getAssociatedValue();
4586     for (const Use &U : AssociatedValue.uses()) {
4587       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4588         if (SI->getPointerOperand() == &AssociatedValue)
4589           if (SI->getAlign() < getAssumedAlign()) {
4590             STATS_DECLTRACK(AAAlign, Store,
4591                             "Number of times alignment added to a store");
4592             SI->setAlignment(getAssumedAlign());
4593             LoadStoreChanged = ChangeStatus::CHANGED;
4594           }
4595       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4596         if (LI->getPointerOperand() == &AssociatedValue)
4597           if (LI->getAlign() < getAssumedAlign()) {
4598             LI->setAlignment(getAssumedAlign());
4599             STATS_DECLTRACK(AAAlign, Load,
4600                             "Number of times alignment added to a load");
4601             LoadStoreChanged = ChangeStatus::CHANGED;
4602           }
4603       }
4604     }
4605 
4606     ChangeStatus Changed = AAAlign::manifest(A);
4607 
4608     Align InheritAlign =
4609         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4610     if (InheritAlign >= getAssumedAlign())
4611       return LoadStoreChanged;
4612     return Changed | LoadStoreChanged;
4613   }
4614 
4615   // TODO: Provide a helper to determine the implied ABI alignment and check in
4616   //       the existing manifest method and a new one for AAAlignImpl that value
4617   //       to avoid making the alignment explicit if it did not improve.
4618 
4619   /// See AbstractAttribute::getDeducedAttributes
4620   virtual void
4621   getDeducedAttributes(LLVMContext &Ctx,
4622                        SmallVectorImpl<Attribute> &Attrs) const override {
4623     if (getAssumedAlign() > 1)
4624       Attrs.emplace_back(
4625           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4626   }
4627 
4628   /// See followUsesInMBEC
4629   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4630                        AAAlign::StateType &State) {
4631     bool TrackUse = false;
4632 
4633     unsigned int KnownAlign =
4634         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4635     State.takeKnownMaximum(KnownAlign);
4636 
4637     return TrackUse;
4638   }
4639 
4640   /// See AbstractAttribute::getAsStr().
4641   const std::string getAsStr() const override {
4642     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4643            std::to_string(getAssumedAlign().value()) + ">";
4644   }
4645 };
4646 
4647 /// Align attribute for a floating value.
4648 struct AAAlignFloating : AAAlignImpl {
4649   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4650 
4651   /// See AbstractAttribute::updateImpl(...).
4652   ChangeStatus updateImpl(Attributor &A) override {
4653     const DataLayout &DL = A.getDataLayout();
4654 
4655     auto VisitValueCB = [&](Value &V, const Instruction *,
4656                             AAAlign::StateType &T, bool Stripped) -> bool {
4657       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4658         return true;
4659       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4660                                            DepClassTy::REQUIRED);
4661       if (!Stripped && this == &AA) {
4662         int64_t Offset;
4663         unsigned Alignment = 1;
4664         if (const Value *Base =
4665                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4666           // TODO: Use AAAlign for the base too.
4667           Align PA = Base->getPointerAlignment(DL);
4668           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4669           // So we can say that the maximum power of two which is a divisor of
4670           // gcd(Offset, Alignment) is an alignment.
4671 
4672           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4673                                                uint32_t(PA.value()));
4674           Alignment = llvm::PowerOf2Floor(gcd);
4675         } else {
4676           Alignment = V.getPointerAlignment(DL).value();
4677         }
4678         // Use only IR information if we did not strip anything.
4679         T.takeKnownMaximum(Alignment);
4680         T.indicatePessimisticFixpoint();
4681       } else {
4682         // Use abstract attribute information.
4683         const AAAlign::StateType &DS = AA.getState();
4684         T ^= DS;
4685       }
4686       return T.isValidState();
4687     };
4688 
4689     StateType T;
4690     bool UsedAssumedInformation = false;
4691     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4692                                           VisitValueCB, getCtxI(),
4693                                           UsedAssumedInformation))
4694       return indicatePessimisticFixpoint();
4695 
4696     // TODO: If we know we visited all incoming values, thus no are assumed
4697     // dead, we can take the known information from the state T.
4698     return clampStateAndIndicateChange(getState(), T);
4699   }
4700 
4701   /// See AbstractAttribute::trackStatistics()
4702   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4703 };
4704 
4705 /// Align attribute for function return value.
4706 struct AAAlignReturned final
4707     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4708   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4709   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4710 
4711   /// See AbstractAttribute::initialize(...).
4712   void initialize(Attributor &A) override {
4713     Base::initialize(A);
4714     Function *F = getAssociatedFunction();
4715     if (!F || F->isDeclaration())
4716       indicatePessimisticFixpoint();
4717   }
4718 
4719   /// See AbstractAttribute::trackStatistics()
4720   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4721 };
4722 
4723 /// Align attribute for function argument.
4724 struct AAAlignArgument final
4725     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4726   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4727   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4728 
4729   /// See AbstractAttribute::manifest(...).
4730   ChangeStatus manifest(Attributor &A) override {
4731     // If the associated argument is involved in a must-tail call we give up
4732     // because we would need to keep the argument alignments of caller and
4733     // callee in-sync. Just does not seem worth the trouble right now.
4734     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4735       return ChangeStatus::UNCHANGED;
4736     return Base::manifest(A);
4737   }
4738 
4739   /// See AbstractAttribute::trackStatistics()
4740   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4741 };
4742 
4743 struct AAAlignCallSiteArgument final : AAAlignFloating {
4744   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4745       : AAAlignFloating(IRP, A) {}
4746 
4747   /// See AbstractAttribute::manifest(...).
4748   ChangeStatus manifest(Attributor &A) override {
4749     // If the associated argument is involved in a must-tail call we give up
4750     // because we would need to keep the argument alignments of caller and
4751     // callee in-sync. Just does not seem worth the trouble right now.
4752     if (Argument *Arg = getAssociatedArgument())
4753       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4754         return ChangeStatus::UNCHANGED;
4755     ChangeStatus Changed = AAAlignImpl::manifest(A);
4756     Align InheritAlign =
4757         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4758     if (InheritAlign >= getAssumedAlign())
4759       Changed = ChangeStatus::UNCHANGED;
4760     return Changed;
4761   }
4762 
4763   /// See AbstractAttribute::updateImpl(Attributor &A).
4764   ChangeStatus updateImpl(Attributor &A) override {
4765     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4766     if (Argument *Arg = getAssociatedArgument()) {
4767       // We only take known information from the argument
4768       // so we do not need to track a dependence.
4769       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4770           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4771       takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4772     }
4773     return Changed;
4774   }
4775 
4776   /// See AbstractAttribute::trackStatistics()
4777   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4778 };
4779 
4780 /// Align attribute deduction for a call site return value.
4781 struct AAAlignCallSiteReturned final
4782     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4783   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4784   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4785       : Base(IRP, A) {}
4786 
4787   /// See AbstractAttribute::initialize(...).
4788   void initialize(Attributor &A) override {
4789     Base::initialize(A);
4790     Function *F = getAssociatedFunction();
4791     if (!F || F->isDeclaration())
4792       indicatePessimisticFixpoint();
4793   }
4794 
4795   /// See AbstractAttribute::trackStatistics()
4796   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4797 };
4798 } // namespace
4799 
4800 /// ------------------ Function No-Return Attribute ----------------------------
4801 namespace {
4802 struct AANoReturnImpl : public AANoReturn {
4803   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4804 
4805   /// See AbstractAttribute::initialize(...).
4806   void initialize(Attributor &A) override {
4807     AANoReturn::initialize(A);
4808     Function *F = getAssociatedFunction();
4809     if (!F || F->isDeclaration())
4810       indicatePessimisticFixpoint();
4811   }
4812 
4813   /// See AbstractAttribute::getAsStr().
4814   const std::string getAsStr() const override {
4815     return getAssumed() ? "noreturn" : "may-return";
4816   }
4817 
4818   /// See AbstractAttribute::updateImpl(Attributor &A).
4819   virtual ChangeStatus updateImpl(Attributor &A) override {
4820     auto CheckForNoReturn = [](Instruction &) { return false; };
4821     bool UsedAssumedInformation = false;
4822     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4823                                    {(unsigned)Instruction::Ret},
4824                                    UsedAssumedInformation))
4825       return indicatePessimisticFixpoint();
4826     return ChangeStatus::UNCHANGED;
4827   }
4828 };
4829 
4830 struct AANoReturnFunction final : AANoReturnImpl {
4831   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4832       : AANoReturnImpl(IRP, A) {}
4833 
4834   /// See AbstractAttribute::trackStatistics()
4835   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4836 };
4837 
4838 /// NoReturn attribute deduction for a call sites.
4839 struct AANoReturnCallSite final : AANoReturnImpl {
4840   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4841       : AANoReturnImpl(IRP, A) {}
4842 
4843   /// See AbstractAttribute::initialize(...).
4844   void initialize(Attributor &A) override {
4845     AANoReturnImpl::initialize(A);
4846     if (Function *F = getAssociatedFunction()) {
4847       const IRPosition &FnPos = IRPosition::function(*F);
4848       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4849       if (!FnAA.isAssumedNoReturn())
4850         indicatePessimisticFixpoint();
4851     }
4852   }
4853 
4854   /// See AbstractAttribute::updateImpl(...).
4855   ChangeStatus updateImpl(Attributor &A) override {
4856     // TODO: Once we have call site specific value information we can provide
4857     //       call site specific liveness information and then it makes
4858     //       sense to specialize attributes for call sites arguments instead of
4859     //       redirecting requests to the callee argument.
4860     Function *F = getAssociatedFunction();
4861     const IRPosition &FnPos = IRPosition::function(*F);
4862     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4863     return clampStateAndIndicateChange(getState(), FnAA.getState());
4864   }
4865 
4866   /// See AbstractAttribute::trackStatistics()
4867   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4868 };
4869 } // namespace
4870 
4871 /// ----------------------- Instance Info ---------------------------------
4872 
4873 namespace {
4874 /// A class to hold the state of for no-capture attributes.
4875 struct AAInstanceInfoImpl : public AAInstanceInfo {
4876   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4877       : AAInstanceInfo(IRP, A) {}
4878 
4879   /// See AbstractAttribute::initialize(...).
4880   void initialize(Attributor &A) override {
4881     Value &V = getAssociatedValue();
4882     if (auto *C = dyn_cast<Constant>(&V)) {
4883       if (C->isThreadDependent())
4884         indicatePessimisticFixpoint();
4885       else
4886         indicateOptimisticFixpoint();
4887       return;
4888     }
4889     if (auto *CB = dyn_cast<CallBase>(&V))
4890       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4891           !CB->mayReadFromMemory()) {
4892         indicateOptimisticFixpoint();
4893         return;
4894       }
4895   }
4896 
4897   /// See AbstractAttribute::updateImpl(...).
4898   ChangeStatus updateImpl(Attributor &A) override {
4899     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4900 
4901     Value &V = getAssociatedValue();
4902     const Function *Scope = nullptr;
4903     if (auto *I = dyn_cast<Instruction>(&V))
4904       Scope = I->getFunction();
4905     if (auto *A = dyn_cast<Argument>(&V)) {
4906       Scope = A->getParent();
4907       if (!Scope->hasLocalLinkage())
4908         return Changed;
4909     }
4910     if (!Scope)
4911       return indicateOptimisticFixpoint();
4912 
4913     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4914         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4915     if (NoRecurseAA.isAssumedNoRecurse())
4916       return Changed;
4917 
4918     auto UsePred = [&](const Use &U, bool &Follow) {
4919       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4920       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4921           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4922         Follow = true;
4923         return true;
4924       }
4925       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4926           (isa<StoreInst>(UserI) &&
4927            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4928         return true;
4929       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4930         // This check is not guaranteeing uniqueness but for now that we cannot
4931         // end up with two versions of \p U thinking it was one.
4932         if (!CB->getCalledFunction() ||
4933             !CB->getCalledFunction()->hasLocalLinkage())
4934           return true;
4935         if (!CB->isArgOperand(&U))
4936           return false;
4937         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4938             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4939             DepClassTy::OPTIONAL);
4940         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4941           return false;
4942         // If this call base might reach the scope again we might forward the
4943         // argument back here. This is very conservative.
4944         if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4945           return false;
4946         return true;
4947       }
4948       return false;
4949     };
4950 
4951     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4952       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4953         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4954         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4955           return true;
4956         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4957             *SI->getFunction());
4958         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4959           return true;
4960       }
4961       return false;
4962     };
4963 
4964     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4965                            DepClassTy::OPTIONAL,
4966                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4967       return indicatePessimisticFixpoint();
4968 
4969     return Changed;
4970   }
4971 
4972   /// See AbstractState::getAsStr().
4973   const std::string getAsStr() const override {
4974     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4975   }
4976 
4977   /// See AbstractAttribute::trackStatistics()
4978   void trackStatistics() const override {}
4979 };
4980 
4981 /// InstanceInfo attribute for floating values.
4982 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4983   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4984       : AAInstanceInfoImpl(IRP, A) {}
4985 };
4986 
4987 /// NoCapture attribute for function arguments.
4988 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4989   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4990       : AAInstanceInfoFloating(IRP, A) {}
4991 };
4992 
4993 /// InstanceInfo attribute for call site arguments.
4994 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4995   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4996       : AAInstanceInfoImpl(IRP, A) {}
4997 
4998   /// See AbstractAttribute::updateImpl(...).
4999   ChangeStatus updateImpl(Attributor &A) override {
5000     // TODO: Once we have call site specific value information we can provide
5001     //       call site specific liveness information and then it makes
5002     //       sense to specialize attributes for call sites arguments instead of
5003     //       redirecting requests to the callee argument.
5004     Argument *Arg = getAssociatedArgument();
5005     if (!Arg)
5006       return indicatePessimisticFixpoint();
5007     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5008     auto &ArgAA =
5009         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
5010     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5011   }
5012 };
5013 
5014 /// InstanceInfo attribute for function return value.
5015 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
5016   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
5017       : AAInstanceInfoImpl(IRP, A) {
5018     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5019   }
5020 
5021   /// See AbstractAttribute::initialize(...).
5022   void initialize(Attributor &A) override {
5023     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5024   }
5025 
5026   /// See AbstractAttribute::updateImpl(...).
5027   ChangeStatus updateImpl(Attributor &A) override {
5028     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5029   }
5030 };
5031 
5032 /// InstanceInfo attribute deduction for a call site return value.
5033 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
5034   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
5035       : AAInstanceInfoFloating(IRP, A) {}
5036 };
5037 } // namespace
5038 
5039 /// ----------------------- Variable Capturing ---------------------------------
5040 
5041 namespace {
5042 /// A class to hold the state of for no-capture attributes.
5043 struct AANoCaptureImpl : public AANoCapture {
5044   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5045 
5046   /// See AbstractAttribute::initialize(...).
5047   void initialize(Attributor &A) override {
5048     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
5049       indicateOptimisticFixpoint();
5050       return;
5051     }
5052     Function *AnchorScope = getAnchorScope();
5053     if (isFnInterfaceKind() &&
5054         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
5055       indicatePessimisticFixpoint();
5056       return;
5057     }
5058 
5059     // You cannot "capture" null in the default address space.
5060     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5061         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5062       indicateOptimisticFixpoint();
5063       return;
5064     }
5065 
5066     const Function *F =
5067         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5068 
5069     // Check what state the associated function can actually capture.
5070     if (F)
5071       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5072     else
5073       indicatePessimisticFixpoint();
5074   }
5075 
5076   /// See AbstractAttribute::updateImpl(...).
5077   ChangeStatus updateImpl(Attributor &A) override;
5078 
5079   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5080   virtual void
5081   getDeducedAttributes(LLVMContext &Ctx,
5082                        SmallVectorImpl<Attribute> &Attrs) const override {
5083     if (!isAssumedNoCaptureMaybeReturned())
5084       return;
5085 
5086     if (isArgumentPosition()) {
5087       if (isAssumedNoCapture())
5088         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5089       else if (ManifestInternal)
5090         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5091     }
5092   }
5093 
5094   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5095   /// depending on the ability of the function associated with \p IRP to capture
5096   /// state in memory and through "returning/throwing", respectively.
5097   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5098                                                    const Function &F,
5099                                                    BitIntegerState &State) {
5100     // TODO: Once we have memory behavior attributes we should use them here.
5101 
5102     // If we know we cannot communicate or write to memory, we do not care about
5103     // ptr2int anymore.
5104     if (F.onlyReadsMemory() && F.doesNotThrow() &&
5105         F.getReturnType()->isVoidTy()) {
5106       State.addKnownBits(NO_CAPTURE);
5107       return;
5108     }
5109 
5110     // A function cannot capture state in memory if it only reads memory, it can
5111     // however return/throw state and the state might be influenced by the
5112     // pointer value, e.g., loading from a returned pointer might reveal a bit.
5113     if (F.onlyReadsMemory())
5114       State.addKnownBits(NOT_CAPTURED_IN_MEM);
5115 
5116     // A function cannot communicate state back if it does not through
5117     // exceptions and doesn not return values.
5118     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5119       State.addKnownBits(NOT_CAPTURED_IN_RET);
5120 
5121     // Check existing "returned" attributes.
5122     int ArgNo = IRP.getCalleeArgNo();
5123     if (F.doesNotThrow() && ArgNo >= 0) {
5124       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5125         if (F.hasParamAttribute(u, Attribute::Returned)) {
5126           if (u == unsigned(ArgNo))
5127             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5128           else if (F.onlyReadsMemory())
5129             State.addKnownBits(NO_CAPTURE);
5130           else
5131             State.addKnownBits(NOT_CAPTURED_IN_RET);
5132           break;
5133         }
5134     }
5135   }
5136 
5137   /// See AbstractState::getAsStr().
5138   const std::string getAsStr() const override {
5139     if (isKnownNoCapture())
5140       return "known not-captured";
5141     if (isAssumedNoCapture())
5142       return "assumed not-captured";
5143     if (isKnownNoCaptureMaybeReturned())
5144       return "known not-captured-maybe-returned";
5145     if (isAssumedNoCaptureMaybeReturned())
5146       return "assumed not-captured-maybe-returned";
5147     return "assumed-captured";
5148   }
5149 
5150   /// Check the use \p U and update \p State accordingly. Return true if we
5151   /// should continue to update the state.
5152   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5153                 bool &Follow) {
5154     Instruction *UInst = cast<Instruction>(U.getUser());
5155     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5156                       << *UInst << "\n");
5157 
5158     // Deal with ptr2int by following uses.
5159     if (isa<PtrToIntInst>(UInst)) {
5160       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5161       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5162                           /* Return */ true);
5163     }
5164 
5165     // For stores we already checked if we can follow them, if they make it
5166     // here we give up.
5167     if (isa<StoreInst>(UInst))
5168       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5169                           /* Return */ false);
5170 
5171     // Explicitly catch return instructions.
5172     if (isa<ReturnInst>(UInst)) {
5173       if (UInst->getFunction() == getAnchorScope())
5174         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5175                             /* Return */ true);
5176       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5177                           /* Return */ true);
5178     }
5179 
5180     // For now we only use special logic for call sites. However, the tracker
5181     // itself knows about a lot of other non-capturing cases already.
5182     auto *CB = dyn_cast<CallBase>(UInst);
5183     if (!CB || !CB->isArgOperand(&U))
5184       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5185                           /* Return */ true);
5186 
5187     unsigned ArgNo = CB->getArgOperandNo(&U);
5188     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5189     // If we have a abstract no-capture attribute for the argument we can use
5190     // it to justify a non-capture attribute here. This allows recursion!
5191     auto &ArgNoCaptureAA =
5192         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5193     if (ArgNoCaptureAA.isAssumedNoCapture())
5194       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5195                           /* Return */ false);
5196     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5197       Follow = true;
5198       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5199                           /* Return */ false);
5200     }
5201 
5202     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5203     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5204                         /* Return */ true);
5205   }
5206 
5207   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5208   /// \p CapturedInRet, then return true if we should continue updating the
5209   /// state.
5210   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5211                            bool CapturedInInt, bool CapturedInRet) {
5212     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5213                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5214     if (CapturedInMem)
5215       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5216     if (CapturedInInt)
5217       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5218     if (CapturedInRet)
5219       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5220     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5221   }
5222 };
5223 
5224 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5225   const IRPosition &IRP = getIRPosition();
5226   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5227                                   : &IRP.getAssociatedValue();
5228   if (!V)
5229     return indicatePessimisticFixpoint();
5230 
5231   const Function *F =
5232       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5233   assert(F && "Expected a function!");
5234   const IRPosition &FnPos = IRPosition::function(*F);
5235 
5236   AANoCapture::StateType T;
5237 
5238   // Readonly means we cannot capture through memory.
5239   bool IsKnown;
5240   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5241     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5242     if (IsKnown)
5243       addKnownBits(NOT_CAPTURED_IN_MEM);
5244   }
5245 
5246   // Make sure all returned values are different than the underlying value.
5247   // TODO: we could do this in a more sophisticated way inside
5248   //       AAReturnedValues, e.g., track all values that escape through returns
5249   //       directly somehow.
5250   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5251     if (!RVAA.getState().isValidState())
5252       return false;
5253     bool SeenConstant = false;
5254     for (auto &It : RVAA.returned_values()) {
5255       if (isa<Constant>(It.first)) {
5256         if (SeenConstant)
5257           return false;
5258         SeenConstant = true;
5259       } else if (!isa<Argument>(It.first) ||
5260                  It.first == getAssociatedArgument())
5261         return false;
5262     }
5263     return true;
5264   };
5265 
5266   const auto &NoUnwindAA =
5267       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5268   if (NoUnwindAA.isAssumedNoUnwind()) {
5269     bool IsVoidTy = F->getReturnType()->isVoidTy();
5270     const AAReturnedValues *RVAA =
5271         IsVoidTy ? nullptr
5272                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5273 
5274                                                  DepClassTy::OPTIONAL);
5275     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5276       T.addKnownBits(NOT_CAPTURED_IN_RET);
5277       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5278         return ChangeStatus::UNCHANGED;
5279       if (NoUnwindAA.isKnownNoUnwind() &&
5280           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5281         addKnownBits(NOT_CAPTURED_IN_RET);
5282         if (isKnown(NOT_CAPTURED_IN_MEM))
5283           return indicateOptimisticFixpoint();
5284       }
5285     }
5286   }
5287 
5288   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5289     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5290         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5291     return DerefAA.getAssumedDereferenceableBytes();
5292   };
5293 
5294   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5295     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5296     case UseCaptureKind::NO_CAPTURE:
5297       return true;
5298     case UseCaptureKind::MAY_CAPTURE:
5299       return checkUse(A, T, U, Follow);
5300     case UseCaptureKind::PASSTHROUGH:
5301       Follow = true;
5302       return true;
5303     }
5304     llvm_unreachable("Unexpected use capture kind!");
5305   };
5306 
5307   if (!A.checkForAllUses(UseCheck, *this, *V))
5308     return indicatePessimisticFixpoint();
5309 
5310   AANoCapture::StateType &S = getState();
5311   auto Assumed = S.getAssumed();
5312   S.intersectAssumedBits(T.getAssumed());
5313   if (!isAssumedNoCaptureMaybeReturned())
5314     return indicatePessimisticFixpoint();
5315   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5316                                    : ChangeStatus::CHANGED;
5317 }
5318 
5319 /// NoCapture attribute for function arguments.
5320 struct AANoCaptureArgument final : AANoCaptureImpl {
5321   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5322       : AANoCaptureImpl(IRP, A) {}
5323 
5324   /// See AbstractAttribute::trackStatistics()
5325   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5326 };
5327 
5328 /// NoCapture attribute for call site arguments.
5329 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5330   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5331       : AANoCaptureImpl(IRP, A) {}
5332 
5333   /// See AbstractAttribute::initialize(...).
5334   void initialize(Attributor &A) override {
5335     if (Argument *Arg = getAssociatedArgument())
5336       if (Arg->hasByValAttr())
5337         indicateOptimisticFixpoint();
5338     AANoCaptureImpl::initialize(A);
5339   }
5340 
5341   /// See AbstractAttribute::updateImpl(...).
5342   ChangeStatus updateImpl(Attributor &A) override {
5343     // TODO: Once we have call site specific value information we can provide
5344     //       call site specific liveness information and then it makes
5345     //       sense to specialize attributes for call sites arguments instead of
5346     //       redirecting requests to the callee argument.
5347     Argument *Arg = getAssociatedArgument();
5348     if (!Arg)
5349       return indicatePessimisticFixpoint();
5350     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5351     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5352     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5353   }
5354 
5355   /// See AbstractAttribute::trackStatistics()
5356   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5357 };
5358 
5359 /// NoCapture attribute for floating values.
5360 struct AANoCaptureFloating final : AANoCaptureImpl {
5361   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5362       : AANoCaptureImpl(IRP, A) {}
5363 
5364   /// See AbstractAttribute::trackStatistics()
5365   void trackStatistics() const override {
5366     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5367   }
5368 };
5369 
5370 /// NoCapture attribute for function return value.
5371 struct AANoCaptureReturned final : AANoCaptureImpl {
5372   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5373       : AANoCaptureImpl(IRP, A) {
5374     llvm_unreachable("NoCapture is not applicable to function returns!");
5375   }
5376 
5377   /// See AbstractAttribute::initialize(...).
5378   void initialize(Attributor &A) override {
5379     llvm_unreachable("NoCapture is not applicable to function returns!");
5380   }
5381 
5382   /// See AbstractAttribute::updateImpl(...).
5383   ChangeStatus updateImpl(Attributor &A) override {
5384     llvm_unreachable("NoCapture is not applicable to function returns!");
5385   }
5386 
5387   /// See AbstractAttribute::trackStatistics()
5388   void trackStatistics() const override {}
5389 };
5390 
5391 /// NoCapture attribute deduction for a call site return value.
5392 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5393   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5394       : AANoCaptureImpl(IRP, A) {}
5395 
5396   /// See AbstractAttribute::initialize(...).
5397   void initialize(Attributor &A) override {
5398     const Function *F = getAnchorScope();
5399     // Check what state the associated function can actually capture.
5400     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5401   }
5402 
5403   /// See AbstractAttribute::trackStatistics()
5404   void trackStatistics() const override {
5405     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5406   }
5407 };
5408 } // namespace
5409 
5410 /// ------------------ Value Simplify Attribute ----------------------------
5411 
5412 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5413   // FIXME: Add a typecast support.
5414   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5415       SimplifiedAssociatedValue, Other, Ty);
5416   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5417     return false;
5418 
5419   LLVM_DEBUG({
5420     if (SimplifiedAssociatedValue)
5421       dbgs() << "[ValueSimplify] is assumed to be "
5422              << **SimplifiedAssociatedValue << "\n";
5423     else
5424       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5425   });
5426   return true;
5427 }
5428 
5429 namespace {
5430 struct AAValueSimplifyImpl : AAValueSimplify {
5431   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5432       : AAValueSimplify(IRP, A) {}
5433 
5434   /// See AbstractAttribute::initialize(...).
5435   void initialize(Attributor &A) override {
5436     if (getAssociatedValue().getType()->isVoidTy())
5437       indicatePessimisticFixpoint();
5438     if (A.hasSimplificationCallback(getIRPosition()))
5439       indicatePessimisticFixpoint();
5440   }
5441 
5442   /// See AbstractAttribute::getAsStr().
5443   const std::string getAsStr() const override {
5444     LLVM_DEBUG({
5445       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5446       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5447         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5448     });
5449     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5450                           : "not-simple";
5451   }
5452 
5453   /// See AbstractAttribute::trackStatistics()
5454   void trackStatistics() const override {}
5455 
5456   /// See AAValueSimplify::getAssumedSimplifiedValue()
5457   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5458     return SimplifiedAssociatedValue;
5459   }
5460 
5461   /// Ensure the return value is \p V with type \p Ty, if not possible return
5462   /// nullptr. If \p Check is true we will only verify such an operation would
5463   /// suceed and return a non-nullptr value if that is the case. No IR is
5464   /// generated or modified.
5465   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5466                            bool Check) {
5467     if (auto *TypedV = AA::getWithType(V, Ty))
5468       return TypedV;
5469     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5470       return Check ? &V
5471                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5472                                                                       "", CtxI);
5473     return nullptr;
5474   }
5475 
5476   /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble.
5477   /// If \p Check is true we will only verify such an operation would suceed and
5478   /// return a non-nullptr value if that is the case. No IR is generated or
5479   /// modified.
5480   static Value *reproduceInst(Attributor &A,
5481                               const AbstractAttribute &QueryingAA,
5482                               Instruction &I, Type &Ty, Instruction *CtxI,
5483                               bool Check, ValueToValueMapTy &VMap) {
5484     assert(CtxI && "Cannot reproduce an instruction without context!");
5485     if (Check && (I.mayReadFromMemory() ||
5486                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5487                                                 /* TLI */ nullptr)))
5488       return nullptr;
5489     for (Value *Op : I.operands()) {
5490       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5491       if (!NewOp) {
5492         assert(Check && "Manifest of new value unexpectedly failed!");
5493         return nullptr;
5494       }
5495       if (!Check)
5496         VMap[Op] = NewOp;
5497     }
5498     if (Check)
5499       return &I;
5500 
5501     Instruction *CloneI = I.clone();
5502     // TODO: Try to salvage debug information here.
5503     CloneI->setDebugLoc(DebugLoc());
5504     VMap[&I] = CloneI;
5505     CloneI->insertBefore(CtxI);
5506     RemapInstruction(CloneI, VMap);
5507     return CloneI;
5508   }
5509 
5510   /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble.
5511   /// If \p Check is true we will only verify such an operation would suceed and
5512   /// return a non-nullptr value if that is the case. No IR is generated or
5513   /// modified.
5514   static Value *reproduceValue(Attributor &A,
5515                                const AbstractAttribute &QueryingAA, Value &V,
5516                                Type &Ty, Instruction *CtxI, bool Check,
5517                                ValueToValueMapTy &VMap) {
5518     if (const auto &NewV = VMap.lookup(&V))
5519       return NewV;
5520     bool UsedAssumedInformation = false;
5521     Optional<Value *> SimpleV =
5522         A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
5523     if (!SimpleV)
5524       return PoisonValue::get(&Ty);
5525     Value *EffectiveV = &V;
5526     if (SimpleV.value())
5527       EffectiveV = SimpleV.value();
5528     if (auto *C = dyn_cast<Constant>(EffectiveV))
5529       return C;
5530     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5531                                       A.getInfoCache()))
5532       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5533     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5534       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5535         return ensureType(A, *NewV, Ty, CtxI, Check);
5536     return nullptr;
5537   }
5538 
5539   /// Return a value we can use as replacement for the associated one, or
5540   /// nullptr if we don't have one that makes sense.
5541   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5542     Value *NewV = SimplifiedAssociatedValue
5543                       ? SimplifiedAssociatedValue.value()
5544                       : UndefValue::get(getAssociatedType());
5545     if (NewV && NewV != &getAssociatedValue()) {
5546       ValueToValueMapTy VMap;
5547       // First verify we can reprduce the value with the required type at the
5548       // context location before we actually start modifying the IR.
5549       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5550                          /* CheckOnly */ true, VMap))
5551         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5552                               /* CheckOnly */ false, VMap);
5553     }
5554     return nullptr;
5555   }
5556 
5557   /// Helper function for querying AAValueSimplify and updating candicate.
5558   /// \param IRP The value position we are trying to unify with SimplifiedValue
5559   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5560                       const IRPosition &IRP, bool Simplify = true) {
5561     bool UsedAssumedInformation = false;
5562     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5563     if (Simplify)
5564       QueryingValueSimplified =
5565           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5566     return unionAssumed(QueryingValueSimplified);
5567   }
5568 
5569   /// Returns a candidate is found or not
5570   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5571     if (!getAssociatedValue().getType()->isIntegerTy())
5572       return false;
5573 
5574     // This will also pass the call base context.
5575     const auto &AA =
5576         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5577 
5578     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5579 
5580     if (!COpt) {
5581       SimplifiedAssociatedValue = llvm::None;
5582       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5583       return true;
5584     }
5585     if (auto *C = *COpt) {
5586       SimplifiedAssociatedValue = C;
5587       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5588       return true;
5589     }
5590     return false;
5591   }
5592 
5593   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5594     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5595       return true;
5596     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5597       return true;
5598     return false;
5599   }
5600 
5601   /// See AbstractAttribute::manifest(...).
5602   ChangeStatus manifest(Attributor &A) override {
5603     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5604     for (auto &U : getAssociatedValue().uses()) {
5605       // Check if we need to adjust the insertion point to make sure the IR is
5606       // valid.
5607       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5608       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5609         IP = PHI->getIncomingBlock(U)->getTerminator();
5610       if (auto *NewV = manifestReplacementValue(A, IP)) {
5611         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5612                           << " -> " << *NewV << " :: " << *this << "\n");
5613         if (A.changeUseAfterManifest(U, *NewV))
5614           Changed = ChangeStatus::CHANGED;
5615       }
5616     }
5617 
5618     return Changed | AAValueSimplify::manifest(A);
5619   }
5620 
5621   /// See AbstractState::indicatePessimisticFixpoint(...).
5622   ChangeStatus indicatePessimisticFixpoint() override {
5623     SimplifiedAssociatedValue = &getAssociatedValue();
5624     return AAValueSimplify::indicatePessimisticFixpoint();
5625   }
5626 };
5627 
5628 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5629   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5630       : AAValueSimplifyImpl(IRP, A) {}
5631 
5632   void initialize(Attributor &A) override {
5633     AAValueSimplifyImpl::initialize(A);
5634     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5635       indicatePessimisticFixpoint();
5636     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5637                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5638                 /* IgnoreSubsumingPositions */ true))
5639       indicatePessimisticFixpoint();
5640   }
5641 
5642   /// See AbstractAttribute::updateImpl(...).
5643   ChangeStatus updateImpl(Attributor &A) override {
5644     // Byval is only replacable if it is readonly otherwise we would write into
5645     // the replaced value and not the copy that byval creates implicitly.
5646     Argument *Arg = getAssociatedArgument();
5647     if (Arg->hasByValAttr()) {
5648       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5649       //       there is no race by not copying a constant byval.
5650       bool IsKnown;
5651       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5652         return indicatePessimisticFixpoint();
5653     }
5654 
5655     auto Before = SimplifiedAssociatedValue;
5656 
5657     auto PredForCallSite = [&](AbstractCallSite ACS) {
5658       const IRPosition &ACSArgPos =
5659           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5660       // Check if a coresponding argument was found or if it is on not
5661       // associated (which can happen for callback calls).
5662       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5663         return false;
5664 
5665       // Simplify the argument operand explicitly and check if the result is
5666       // valid in the current scope. This avoids refering to simplified values
5667       // in other functions, e.g., we don't want to say a an argument in a
5668       // static function is actually an argument in a different function.
5669       bool UsedAssumedInformation = false;
5670       Optional<Constant *> SimpleArgOp =
5671           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5672       if (!SimpleArgOp)
5673         return true;
5674       if (!SimpleArgOp.value())
5675         return false;
5676       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5677         return false;
5678       return unionAssumed(*SimpleArgOp);
5679     };
5680 
5681     // Generate a answer specific to a call site context.
5682     bool Success;
5683     bool UsedAssumedInformation = false;
5684     if (hasCallBaseContext() &&
5685         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5686       Success = PredForCallSite(
5687           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5688     else
5689       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5690                                        UsedAssumedInformation);
5691 
5692     if (!Success)
5693       if (!askSimplifiedValueForOtherAAs(A))
5694         return indicatePessimisticFixpoint();
5695 
5696     // If a candicate was found in this update, return CHANGED.
5697     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5698                                                : ChangeStatus ::CHANGED;
5699   }
5700 
5701   /// See AbstractAttribute::trackStatistics()
5702   void trackStatistics() const override {
5703     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5704   }
5705 };
5706 
5707 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5708   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5709       : AAValueSimplifyImpl(IRP, A) {}
5710 
5711   /// See AAValueSimplify::getAssumedSimplifiedValue()
5712   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5713     if (!isValidState())
5714       return nullptr;
5715     return SimplifiedAssociatedValue;
5716   }
5717 
5718   /// See AbstractAttribute::updateImpl(...).
5719   ChangeStatus updateImpl(Attributor &A) override {
5720     auto Before = SimplifiedAssociatedValue;
5721 
5722     auto ReturnInstCB = [&](Instruction &I) {
5723       auto &RI = cast<ReturnInst>(I);
5724       return checkAndUpdate(
5725           A, *this,
5726           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5727     };
5728 
5729     bool UsedAssumedInformation = false;
5730     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5731                                    UsedAssumedInformation))
5732       if (!askSimplifiedValueForOtherAAs(A))
5733         return indicatePessimisticFixpoint();
5734 
5735     // If a candicate was found in this update, return CHANGED.
5736     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5737                                                : ChangeStatus ::CHANGED;
5738   }
5739 
5740   ChangeStatus manifest(Attributor &A) override {
5741     // We queried AAValueSimplify for the returned values so they will be
5742     // replaced if a simplified form was found. Nothing to do here.
5743     return ChangeStatus::UNCHANGED;
5744   }
5745 
5746   /// See AbstractAttribute::trackStatistics()
5747   void trackStatistics() const override {
5748     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5749   }
5750 };
5751 
5752 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5753   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5754       : AAValueSimplifyImpl(IRP, A) {}
5755 
5756   /// See AbstractAttribute::initialize(...).
5757   void initialize(Attributor &A) override {
5758     AAValueSimplifyImpl::initialize(A);
5759     Value &V = getAnchorValue();
5760 
5761     // TODO: add other stuffs
5762     if (isa<Constant>(V))
5763       indicatePessimisticFixpoint();
5764   }
5765 
5766   /// Check if \p Cmp is a comparison we can simplify.
5767   ///
5768   /// We handle multiple cases, one in which at least one operand is an
5769   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5770   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5771   /// will be updated.
5772   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5773     auto Union = [&](Value &V) {
5774       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5775           SimplifiedAssociatedValue, &V, V.getType());
5776       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5777     };
5778 
5779     Value *LHS = Cmp.getOperand(0);
5780     Value *RHS = Cmp.getOperand(1);
5781 
5782     // Simplify the operands first.
5783     bool UsedAssumedInformation = false;
5784     const auto &SimplifiedLHS =
5785         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5786                                *this, UsedAssumedInformation);
5787     if (!SimplifiedLHS)
5788       return true;
5789     if (!SimplifiedLHS.value())
5790       return false;
5791     LHS = *SimplifiedLHS;
5792 
5793     const auto &SimplifiedRHS =
5794         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5795                                *this, UsedAssumedInformation);
5796     if (!SimplifiedRHS)
5797       return true;
5798     if (!SimplifiedRHS.value())
5799       return false;
5800     RHS = *SimplifiedRHS;
5801 
5802     LLVMContext &Ctx = Cmp.getContext();
5803     // Handle the trivial case first in which we don't even need to think about
5804     // null or non-null.
5805     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5806       Constant *NewVal =
5807           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5808       if (!Union(*NewVal))
5809         return false;
5810       if (!UsedAssumedInformation)
5811         indicateOptimisticFixpoint();
5812       return true;
5813     }
5814 
5815     // From now on we only handle equalities (==, !=).
5816     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5817     if (!ICmp || !ICmp->isEquality())
5818       return false;
5819 
5820     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5821     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5822     if (!LHSIsNull && !RHSIsNull)
5823       return false;
5824 
5825     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5826     // non-nullptr operand and if we assume it's non-null we can conclude the
5827     // result of the comparison.
5828     assert((LHSIsNull || RHSIsNull) &&
5829            "Expected nullptr versus non-nullptr comparison at this point");
5830 
5831     // The index is the operand that we assume is not null.
5832     unsigned PtrIdx = LHSIsNull;
5833     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5834         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5835         DepClassTy::REQUIRED);
5836     if (!PtrNonNullAA.isAssumedNonNull())
5837       return false;
5838     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5839 
5840     // The new value depends on the predicate, true for != and false for ==.
5841     Constant *NewVal = ConstantInt::get(
5842         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5843     if (!Union(*NewVal))
5844       return false;
5845 
5846     if (!UsedAssumedInformation)
5847       indicateOptimisticFixpoint();
5848 
5849     return true;
5850   }
5851 
5852   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5853   /// simplify any operand of the instruction \p I. Return true if successful,
5854   /// in that case SimplifiedAssociatedValue will be updated.
5855   bool handleGenericInst(Attributor &A, Instruction &I) {
5856     bool SomeSimplified = false;
5857     bool UsedAssumedInformation = false;
5858 
5859     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5860     int Idx = 0;
5861     for (Value *Op : I.operands()) {
5862       const auto &SimplifiedOp =
5863           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5864                                  *this, UsedAssumedInformation);
5865       // If we are not sure about any operand we are not sure about the entire
5866       // instruction, we'll wait.
5867       if (!SimplifiedOp)
5868         return true;
5869 
5870       if (SimplifiedOp.value())
5871         NewOps[Idx] = SimplifiedOp.value();
5872       else
5873         NewOps[Idx] = Op;
5874 
5875       SomeSimplified |= (NewOps[Idx] != Op);
5876       ++Idx;
5877     }
5878 
5879     // We won't bother with the InstSimplify interface if we didn't simplify any
5880     // operand ourselves.
5881     if (!SomeSimplified)
5882       return false;
5883 
5884     InformationCache &InfoCache = A.getInfoCache();
5885     Function *F = I.getFunction();
5886     const auto *DT =
5887         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5888     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5889     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5890     OptimizationRemarkEmitter *ORE = nullptr;
5891 
5892     const DataLayout &DL = I.getModule()->getDataLayout();
5893     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5894     if (Value *SimplifiedI =
5895             simplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5896       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5897           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5898       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5899     }
5900     return false;
5901   }
5902 
5903   /// See AbstractAttribute::updateImpl(...).
5904   ChangeStatus updateImpl(Attributor &A) override {
5905     auto Before = SimplifiedAssociatedValue;
5906 
5907     // Do not simplify loads that are only used in llvm.assume if we cannot also
5908     // remove all stores that may feed into the load. The reason is that the
5909     // assume is probably worth something as long as the stores are around.
5910     if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
5911       InformationCache &InfoCache = A.getInfoCache();
5912       if (InfoCache.isOnlyUsedByAssume(*LI)) {
5913         SmallSetVector<Value *, 4> PotentialCopies;
5914         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
5915         bool UsedAssumedInformation = false;
5916         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
5917                                            PotentialValueOrigins, *this,
5918                                            UsedAssumedInformation,
5919                                            /* OnlyExact */ true)) {
5920           if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
5921                 if (!I)
5922                   return true;
5923                 if (auto *SI = dyn_cast<StoreInst>(I))
5924                   return A.isAssumedDead(SI->getOperandUse(0), this,
5925                                          /* LivenessAA */ nullptr,
5926                                          UsedAssumedInformation,
5927                                          /* CheckBBLivenessOnly */ false);
5928                 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
5929                                        UsedAssumedInformation,
5930                                        /* CheckBBLivenessOnly */ false);
5931               }))
5932             return indicatePessimisticFixpoint();
5933         }
5934       }
5935     }
5936 
5937     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5938                             bool Stripped) -> bool {
5939       auto &AA = A.getAAFor<AAValueSimplify>(
5940           *this, IRPosition::value(V, getCallBaseContext()),
5941           DepClassTy::REQUIRED);
5942       if (!Stripped && this == &AA) {
5943 
5944         if (auto *I = dyn_cast<Instruction>(&V)) {
5945           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5946             if (handleCmp(A, *Cmp))
5947               return true;
5948           if (handleGenericInst(A, *I))
5949             return true;
5950         }
5951         // TODO: Look the instruction and check recursively.
5952 
5953         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5954                           << "\n");
5955         return false;
5956       }
5957       return checkAndUpdate(A, *this,
5958                             IRPosition::value(V, getCallBaseContext()));
5959     };
5960 
5961     bool Dummy = false;
5962     bool UsedAssumedInformation = false;
5963     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5964                                      VisitValueCB, getCtxI(),
5965                                      UsedAssumedInformation,
5966                                      /* UseValueSimplify */ false))
5967       if (!askSimplifiedValueForOtherAAs(A))
5968         return indicatePessimisticFixpoint();
5969 
5970     // If a candicate was found in this update, return CHANGED.
5971     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5972                                                : ChangeStatus ::CHANGED;
5973   }
5974 
5975   /// See AbstractAttribute::trackStatistics()
5976   void trackStatistics() const override {
5977     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5978   }
5979 };
5980 
5981 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5982   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5983       : AAValueSimplifyImpl(IRP, A) {}
5984 
5985   /// See AbstractAttribute::initialize(...).
5986   void initialize(Attributor &A) override {
5987     SimplifiedAssociatedValue = nullptr;
5988     indicateOptimisticFixpoint();
5989   }
5990   /// See AbstractAttribute::initialize(...).
5991   ChangeStatus updateImpl(Attributor &A) override {
5992     llvm_unreachable(
5993         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5994   }
5995   /// See AbstractAttribute::trackStatistics()
5996   void trackStatistics() const override {
5997     STATS_DECLTRACK_FN_ATTR(value_simplify)
5998   }
5999 };
6000 
6001 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
6002   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
6003       : AAValueSimplifyFunction(IRP, A) {}
6004   /// See AbstractAttribute::trackStatistics()
6005   void trackStatistics() const override {
6006     STATS_DECLTRACK_CS_ATTR(value_simplify)
6007   }
6008 };
6009 
6010 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
6011   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
6012       : AAValueSimplifyImpl(IRP, A) {}
6013 
6014   void initialize(Attributor &A) override {
6015     AAValueSimplifyImpl::initialize(A);
6016     Function *Fn = getAssociatedFunction();
6017     if (!Fn) {
6018       indicatePessimisticFixpoint();
6019       return;
6020     }
6021     for (Argument &Arg : Fn->args()) {
6022       if (Arg.hasReturnedAttr()) {
6023         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
6024                                                  Arg.getArgNo());
6025         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
6026             checkAndUpdate(A, *this, IRP))
6027           indicateOptimisticFixpoint();
6028         else
6029           indicatePessimisticFixpoint();
6030         return;
6031       }
6032     }
6033   }
6034 
6035   /// See AbstractAttribute::updateImpl(...).
6036   ChangeStatus updateImpl(Attributor &A) override {
6037     auto Before = SimplifiedAssociatedValue;
6038     auto &RetAA = A.getAAFor<AAReturnedValues>(
6039         *this, IRPosition::function(*getAssociatedFunction()),
6040         DepClassTy::REQUIRED);
6041     auto PredForReturned =
6042         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
6043           bool UsedAssumedInformation = false;
6044           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
6045               &RetVal, *cast<CallBase>(getCtxI()), *this,
6046               UsedAssumedInformation);
6047           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6048               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
6049           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
6050         };
6051     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
6052       if (!askSimplifiedValueForOtherAAs(A))
6053         return indicatePessimisticFixpoint();
6054     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6055                                                : ChangeStatus ::CHANGED;
6056   }
6057 
6058   void trackStatistics() const override {
6059     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6060   }
6061 };
6062 
6063 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6064   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6065       : AAValueSimplifyFloating(IRP, A) {}
6066 
6067   /// See AbstractAttribute::manifest(...).
6068   ChangeStatus manifest(Attributor &A) override {
6069     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6070     // TODO: We should avoid simplification duplication to begin with.
6071     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6072         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6073     if (FloatAA && FloatAA->getState().isValidState())
6074       return Changed;
6075 
6076     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6077       Use &U = cast<CallBase>(&getAnchorValue())
6078                    ->getArgOperandUse(getCallSiteArgNo());
6079       if (A.changeUseAfterManifest(U, *NewV))
6080         Changed = ChangeStatus::CHANGED;
6081     }
6082 
6083     return Changed | AAValueSimplify::manifest(A);
6084   }
6085 
6086   void trackStatistics() const override {
6087     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6088   }
6089 };
6090 } // namespace
6091 
6092 /// ----------------------- Heap-To-Stack Conversion ---------------------------
6093 namespace {
6094 struct AAHeapToStackFunction final : public AAHeapToStack {
6095 
6096   struct AllocationInfo {
6097     /// The call that allocates the memory.
6098     CallBase *const CB;
6099 
6100     /// The library function id for the allocation.
6101     LibFunc LibraryFunctionId = NotLibFunc;
6102 
6103     /// The status wrt. a rewrite.
6104     enum {
6105       STACK_DUE_TO_USE,
6106       STACK_DUE_TO_FREE,
6107       INVALID,
6108     } Status = STACK_DUE_TO_USE;
6109 
6110     /// Flag to indicate if we encountered a use that might free this allocation
6111     /// but which is not in the deallocation infos.
6112     bool HasPotentiallyFreeingUnknownUses = false;
6113 
6114     /// Flag to indicate that we should place the new alloca in the function
6115     /// entry block rather than where the call site (CB) is.
6116     bool MoveAllocaIntoEntry = true;
6117 
6118     /// The set of free calls that use this allocation.
6119     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6120   };
6121 
6122   struct DeallocationInfo {
6123     /// The call that deallocates the memory.
6124     CallBase *const CB;
6125 
6126     /// Flag to indicate if we don't know all objects this deallocation might
6127     /// free.
6128     bool MightFreeUnknownObjects = false;
6129 
6130     /// The set of allocation calls that are potentially freed.
6131     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6132   };
6133 
6134   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6135       : AAHeapToStack(IRP, A) {}
6136 
6137   ~AAHeapToStackFunction() {
6138     // Ensure we call the destructor so we release any memory allocated in the
6139     // sets.
6140     for (auto &It : AllocationInfos)
6141       It.second->~AllocationInfo();
6142     for (auto &It : DeallocationInfos)
6143       It.second->~DeallocationInfo();
6144   }
6145 
6146   void initialize(Attributor &A) override {
6147     AAHeapToStack::initialize(A);
6148 
6149     const Function *F = getAnchorScope();
6150     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6151 
6152     auto AllocationIdentifierCB = [&](Instruction &I) {
6153       CallBase *CB = dyn_cast<CallBase>(&I);
6154       if (!CB)
6155         return true;
6156       if (isFreeCall(CB, TLI)) {
6157         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
6158         return true;
6159       }
6160       // To do heap to stack, we need to know that the allocation itself is
6161       // removable once uses are rewritten, and that we can initialize the
6162       // alloca to the same pattern as the original allocation result.
6163       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
6164         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6165         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6166           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6167           AllocationInfos[CB] = AI;
6168           if (TLI)
6169             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6170         }
6171       }
6172       return true;
6173     };
6174 
6175     bool UsedAssumedInformation = false;
6176     bool Success = A.checkForAllCallLikeInstructions(
6177         AllocationIdentifierCB, *this, UsedAssumedInformation,
6178         /* CheckBBLivenessOnly */ false,
6179         /* CheckPotentiallyDead */ true);
6180     (void)Success;
6181     assert(Success && "Did not expect the call base visit callback to fail!");
6182 
6183     Attributor::SimplifictionCallbackTy SCB =
6184         [](const IRPosition &, const AbstractAttribute *,
6185            bool &) -> Optional<Value *> { return nullptr; };
6186     for (const auto &It : AllocationInfos)
6187       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6188                                        SCB);
6189     for (const auto &It : DeallocationInfos)
6190       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6191                                        SCB);
6192   }
6193 
6194   const std::string getAsStr() const override {
6195     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6196     for (const auto &It : AllocationInfos) {
6197       if (It.second->Status == AllocationInfo::INVALID)
6198         ++NumInvalidMallocs;
6199       else
6200         ++NumH2SMallocs;
6201     }
6202     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6203            std::to_string(NumInvalidMallocs);
6204   }
6205 
6206   /// See AbstractAttribute::trackStatistics().
6207   void trackStatistics() const override {
6208     STATS_DECL(
6209         MallocCalls, Function,
6210         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6211     for (auto &It : AllocationInfos)
6212       if (It.second->Status != AllocationInfo::INVALID)
6213         ++BUILD_STAT_NAME(MallocCalls, Function);
6214   }
6215 
6216   bool isAssumedHeapToStack(const CallBase &CB) const override {
6217     if (isValidState())
6218       if (AllocationInfo *AI =
6219               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6220         return AI->Status != AllocationInfo::INVALID;
6221     return false;
6222   }
6223 
6224   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6225     if (!isValidState())
6226       return false;
6227 
6228     for (auto &It : AllocationInfos) {
6229       AllocationInfo &AI = *It.second;
6230       if (AI.Status == AllocationInfo::INVALID)
6231         continue;
6232 
6233       if (AI.PotentialFreeCalls.count(&CB))
6234         return true;
6235     }
6236 
6237     return false;
6238   }
6239 
6240   ChangeStatus manifest(Attributor &A) override {
6241     assert(getState().isValidState() &&
6242            "Attempted to manifest an invalid state!");
6243 
6244     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6245     Function *F = getAnchorScope();
6246     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6247 
6248     for (auto &It : AllocationInfos) {
6249       AllocationInfo &AI = *It.second;
6250       if (AI.Status == AllocationInfo::INVALID)
6251         continue;
6252 
6253       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6254         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6255         A.deleteAfterManifest(*FreeCall);
6256         HasChanged = ChangeStatus::CHANGED;
6257       }
6258 
6259       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6260                         << "\n");
6261 
6262       auto Remark = [&](OptimizationRemark OR) {
6263         LibFunc IsAllocShared;
6264         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6265           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6266             return OR << "Moving globalized variable to the stack.";
6267         return OR << "Moving memory allocation from the heap to the stack.";
6268       };
6269       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6270         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6271       else
6272         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6273 
6274       const DataLayout &DL = A.getInfoCache().getDL();
6275       Value *Size;
6276       Optional<APInt> SizeAPI = getSize(A, *this, AI);
6277       if (SizeAPI) {
6278         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6279       } else {
6280         LLVMContext &Ctx = AI.CB->getContext();
6281         ObjectSizeOpts Opts;
6282         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6283         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6284         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6285                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6286         Size = SizeOffsetPair.first;
6287       }
6288 
6289       Instruction *IP =
6290           AI.MoveAllocaIntoEntry ? &F->getEntryBlock().front() : AI.CB;
6291 
6292       Align Alignment(1);
6293       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6294         Alignment = std::max(Alignment, *RetAlign);
6295       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6296         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6297         assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 &&
6298                "Expected an alignment during manifest!");
6299         Alignment = std::max(
6300             Alignment, assumeAligned(AlignmentAPI.value().getZExtValue()));
6301       }
6302 
6303       // TODO: Hoist the alloca towards the function entry.
6304       unsigned AS = DL.getAllocaAddrSpace();
6305       Instruction *Alloca =
6306           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
6307                          AI.CB->getName() + ".h2s", IP);
6308 
6309       if (Alloca->getType() != AI.CB->getType())
6310         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6311             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6312 
6313       auto *I8Ty = Type::getInt8Ty(F->getContext());
6314       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6315       assert(InitVal &&
6316              "Must be able to materialize initial memory state of allocation");
6317 
6318       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6319 
6320       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6321         auto *NBB = II->getNormalDest();
6322         BranchInst::Create(NBB, AI.CB->getParent());
6323         A.deleteAfterManifest(*AI.CB);
6324       } else {
6325         A.deleteAfterManifest(*AI.CB);
6326       }
6327 
6328       // Initialize the alloca with the same value as used by the allocation
6329       // function.  We can skip undef as the initial value of an alloc is
6330       // undef, and the memset would simply end up being DSEd.
6331       if (!isa<UndefValue>(InitVal)) {
6332         IRBuilder<> Builder(Alloca->getNextNode());
6333         // TODO: Use alignment above if align!=1
6334         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6335       }
6336       HasChanged = ChangeStatus::CHANGED;
6337     }
6338 
6339     return HasChanged;
6340   }
6341 
6342   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6343                            Value &V) {
6344     bool UsedAssumedInformation = false;
6345     Optional<Constant *> SimpleV =
6346         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6347     if (!SimpleV)
6348       return APInt(64, 0);
6349     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value()))
6350       return CI->getValue();
6351     return llvm::None;
6352   }
6353 
6354   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6355                           AllocationInfo &AI) {
6356     auto Mapper = [&](const Value *V) -> const Value * {
6357       bool UsedAssumedInformation = false;
6358       if (Optional<Constant *> SimpleV =
6359               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6360         if (*SimpleV)
6361           return *SimpleV;
6362       return V;
6363     };
6364 
6365     const Function *F = getAnchorScope();
6366     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6367     return getAllocSize(AI.CB, TLI, Mapper);
6368   }
6369 
6370   /// Collection of all malloc-like calls in a function with associated
6371   /// information.
6372   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6373 
6374   /// Collection of all free-like calls in a function with associated
6375   /// information.
6376   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6377 
6378   ChangeStatus updateImpl(Attributor &A) override;
6379 };
6380 
6381 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6382   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6383   const Function *F = getAnchorScope();
6384   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6385 
6386   const auto &LivenessAA =
6387       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6388 
6389   MustBeExecutedContextExplorer &Explorer =
6390       A.getInfoCache().getMustBeExecutedContextExplorer();
6391 
6392   bool StackIsAccessibleByOtherThreads =
6393       A.getInfoCache().stackIsAccessibleByOtherThreads();
6394 
6395   LoopInfo *LI =
6396       A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F);
6397   Optional<bool> MayContainIrreducibleControl;
6398   auto IsInLoop = [&](BasicBlock &BB) {
6399     if (&F->getEntryBlock() == &BB)
6400       return false;
6401     if (!MayContainIrreducibleControl.has_value())
6402       MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
6403     if (MayContainIrreducibleControl.value())
6404       return true;
6405     if (!LI)
6406       return true;
6407     return LI->getLoopFor(&BB) != nullptr;
6408   };
6409 
6410   // Flag to ensure we update our deallocation information at most once per
6411   // updateImpl call and only if we use the free check reasoning.
6412   bool HasUpdatedFrees = false;
6413 
6414   auto UpdateFrees = [&]() {
6415     HasUpdatedFrees = true;
6416 
6417     for (auto &It : DeallocationInfos) {
6418       DeallocationInfo &DI = *It.second;
6419       // For now we cannot use deallocations that have unknown inputs, skip
6420       // them.
6421       if (DI.MightFreeUnknownObjects)
6422         continue;
6423 
6424       // No need to analyze dead calls, ignore them instead.
6425       bool UsedAssumedInformation = false;
6426       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6427                           /* CheckBBLivenessOnly */ true))
6428         continue;
6429 
6430       // Use the optimistic version to get the freed objects, ignoring dead
6431       // branches etc.
6432       SmallVector<Value *, 8> Objects;
6433       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6434                                            *this, DI.CB,
6435                                            UsedAssumedInformation)) {
6436         LLVM_DEBUG(
6437             dbgs()
6438             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6439         DI.MightFreeUnknownObjects = true;
6440         continue;
6441       }
6442 
6443       // Check each object explicitly.
6444       for (auto *Obj : Objects) {
6445         // Free of null and undef can be ignored as no-ops (or UB in the latter
6446         // case).
6447         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6448           continue;
6449 
6450         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6451         if (!ObjCB) {
6452           LLVM_DEBUG(dbgs()
6453                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6454           DI.MightFreeUnknownObjects = true;
6455           continue;
6456         }
6457 
6458         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6459         if (!AI) {
6460           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6461                             << "\n");
6462           DI.MightFreeUnknownObjects = true;
6463           continue;
6464         }
6465 
6466         DI.PotentialAllocationCalls.insert(ObjCB);
6467       }
6468     }
6469   };
6470 
6471   auto FreeCheck = [&](AllocationInfo &AI) {
6472     // If the stack is not accessible by other threads, the "must-free" logic
6473     // doesn't apply as the pointer could be shared and needs to be places in
6474     // "shareable" memory.
6475     if (!StackIsAccessibleByOtherThreads) {
6476       auto &NoSyncAA =
6477           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6478       if (!NoSyncAA.isAssumedNoSync()) {
6479         LLVM_DEBUG(
6480             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6481                       "other threads and function is not nosync:\n");
6482         return false;
6483       }
6484     }
6485     if (!HasUpdatedFrees)
6486       UpdateFrees();
6487 
6488     // TODO: Allow multi exit functions that have different free calls.
6489     if (AI.PotentialFreeCalls.size() != 1) {
6490       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6491                         << AI.PotentialFreeCalls.size() << "\n");
6492       return false;
6493     }
6494     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6495     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6496     if (!DI) {
6497       LLVM_DEBUG(
6498           dbgs() << "[H2S] unique free call was not known as deallocation call "
6499                  << *UniqueFree << "\n");
6500       return false;
6501     }
6502     if (DI->MightFreeUnknownObjects) {
6503       LLVM_DEBUG(
6504           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6505       return false;
6506     }
6507     if (DI->PotentialAllocationCalls.empty())
6508       return true;
6509     if (DI->PotentialAllocationCalls.size() > 1) {
6510       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6511                         << DI->PotentialAllocationCalls.size()
6512                         << " different allocations\n");
6513       return false;
6514     }
6515     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6516       LLVM_DEBUG(
6517           dbgs()
6518           << "[H2S] unique free call not known to free this allocation but "
6519           << **DI->PotentialAllocationCalls.begin() << "\n");
6520       return false;
6521     }
6522     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6523     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6524       LLVM_DEBUG(
6525           dbgs()
6526           << "[H2S] unique free call might not be executed with the allocation "
6527           << *UniqueFree << "\n");
6528       return false;
6529     }
6530     return true;
6531   };
6532 
6533   auto UsesCheck = [&](AllocationInfo &AI) {
6534     bool ValidUsesOnly = true;
6535 
6536     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6537       Instruction *UserI = cast<Instruction>(U.getUser());
6538       if (isa<LoadInst>(UserI))
6539         return true;
6540       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6541         if (SI->getValueOperand() == U.get()) {
6542           LLVM_DEBUG(dbgs()
6543                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6544           ValidUsesOnly = false;
6545         } else {
6546           // A store into the malloc'ed memory is fine.
6547         }
6548         return true;
6549       }
6550       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6551         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6552           return true;
6553         if (DeallocationInfos.count(CB)) {
6554           AI.PotentialFreeCalls.insert(CB);
6555           return true;
6556         }
6557 
6558         unsigned ArgNo = CB->getArgOperandNo(&U);
6559 
6560         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6561             *this, IRPosition::callsite_argument(*CB, ArgNo),
6562             DepClassTy::OPTIONAL);
6563 
6564         // If a call site argument use is nofree, we are fine.
6565         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6566             *this, IRPosition::callsite_argument(*CB, ArgNo),
6567             DepClassTy::OPTIONAL);
6568 
6569         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6570         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6571         if (MaybeCaptured ||
6572             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6573              MaybeFreed)) {
6574           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6575 
6576           // Emit a missed remark if this is missed OpenMP globalization.
6577           auto Remark = [&](OptimizationRemarkMissed ORM) {
6578             return ORM
6579                    << "Could not move globalized variable to the stack. "
6580                       "Variable is potentially captured in call. Mark "
6581                       "parameter as `__attribute__((noescape))` to override.";
6582           };
6583 
6584           if (ValidUsesOnly &&
6585               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6586             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6587 
6588           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6589           ValidUsesOnly = false;
6590         }
6591         return true;
6592       }
6593 
6594       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6595           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6596         Follow = true;
6597         return true;
6598       }
6599       // Unknown user for which we can not track uses further (in a way that
6600       // makes sense).
6601       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6602       ValidUsesOnly = false;
6603       return true;
6604     };
6605     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6606       return false;
6607     return ValidUsesOnly;
6608   };
6609 
6610   // The actual update starts here. We look at all allocations and depending on
6611   // their status perform the appropriate check(s).
6612   for (auto &It : AllocationInfos) {
6613     AllocationInfo &AI = *It.second;
6614     if (AI.Status == AllocationInfo::INVALID)
6615       continue;
6616 
6617     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6618       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6619       if (!APAlign) {
6620         // Can't generate an alloca which respects the required alignment
6621         // on the allocation.
6622         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6623                           << "\n");
6624         AI.Status = AllocationInfo::INVALID;
6625         Changed = ChangeStatus::CHANGED;
6626         continue;
6627       }
6628       if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6629           !APAlign->isPowerOf2()) {
6630         LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6631                           << "\n");
6632         AI.Status = AllocationInfo::INVALID;
6633         Changed = ChangeStatus::CHANGED;
6634         continue;
6635       }
6636     }
6637 
6638     Optional<APInt> Size = getSize(A, *this, AI);
6639     if (MaxHeapToStackSize != -1) {
6640       if (!Size || Size.value().ugt(MaxHeapToStackSize)) {
6641         LLVM_DEBUG({
6642           if (!Size)
6643             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6644           else
6645             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6646                    << MaxHeapToStackSize << "\n";
6647         });
6648 
6649         AI.Status = AllocationInfo::INVALID;
6650         Changed = ChangeStatus::CHANGED;
6651         continue;
6652       }
6653     }
6654 
6655     switch (AI.Status) {
6656     case AllocationInfo::STACK_DUE_TO_USE:
6657       if (UsesCheck(AI))
6658         break;
6659       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6660       LLVM_FALLTHROUGH;
6661     case AllocationInfo::STACK_DUE_TO_FREE:
6662       if (FreeCheck(AI))
6663         break;
6664       AI.Status = AllocationInfo::INVALID;
6665       Changed = ChangeStatus::CHANGED;
6666       break;
6667     case AllocationInfo::INVALID:
6668       llvm_unreachable("Invalid allocations should never reach this point!");
6669     };
6670 
6671     // Check if we still think we can move it into the entry block.
6672     if (AI.MoveAllocaIntoEntry &&
6673         (!Size.has_value() || IsInLoop(*AI.CB->getParent())))
6674       AI.MoveAllocaIntoEntry = false;
6675   }
6676 
6677   return Changed;
6678 }
6679 } // namespace
6680 
6681 /// ----------------------- Privatizable Pointers ------------------------------
6682 namespace {
6683 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6684   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6685       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6686 
6687   ChangeStatus indicatePessimisticFixpoint() override {
6688     AAPrivatizablePtr::indicatePessimisticFixpoint();
6689     PrivatizableType = nullptr;
6690     return ChangeStatus::CHANGED;
6691   }
6692 
6693   /// Identify the type we can chose for a private copy of the underlying
6694   /// argument. None means it is not clear yet, nullptr means there is none.
6695   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6696 
6697   /// Return a privatizable type that encloses both T0 and T1.
6698   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6699   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6700     if (!T0)
6701       return T1;
6702     if (!T1)
6703       return T0;
6704     if (T0 == T1)
6705       return T0;
6706     return nullptr;
6707   }
6708 
6709   Optional<Type *> getPrivatizableType() const override {
6710     return PrivatizableType;
6711   }
6712 
6713   const std::string getAsStr() const override {
6714     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6715   }
6716 
6717 protected:
6718   Optional<Type *> PrivatizableType;
6719 };
6720 
6721 // TODO: Do this for call site arguments (probably also other values) as well.
6722 
6723 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6724   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6725       : AAPrivatizablePtrImpl(IRP, A) {}
6726 
6727   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6728   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6729     // If this is a byval argument and we know all the call sites (so we can
6730     // rewrite them), there is no need to check them explicitly.
6731     bool UsedAssumedInformation = false;
6732     SmallVector<Attribute, 1> Attrs;
6733     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6734     if (!Attrs.empty() &&
6735         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6736                                true, UsedAssumedInformation))
6737       return Attrs[0].getValueAsType();
6738 
6739     Optional<Type *> Ty;
6740     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6741 
6742     // Make sure the associated call site argument has the same type at all call
6743     // sites and it is an allocation we know is safe to privatize, for now that
6744     // means we only allow alloca instructions.
6745     // TODO: We can additionally analyze the accesses in the callee to  create
6746     //       the type from that information instead. That is a little more
6747     //       involved and will be done in a follow up patch.
6748     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6749       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6750       // Check if a coresponding argument was found or if it is one not
6751       // associated (which can happen for callback calls).
6752       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6753         return false;
6754 
6755       // Check that all call sites agree on a type.
6756       auto &PrivCSArgAA =
6757           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6758       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6759 
6760       LLVM_DEBUG({
6761         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6762         if (CSTy && CSTy.value())
6763           CSTy.value()->print(dbgs());
6764         else if (CSTy)
6765           dbgs() << "<nullptr>";
6766         else
6767           dbgs() << "<none>";
6768       });
6769 
6770       Ty = combineTypes(Ty, CSTy);
6771 
6772       LLVM_DEBUG({
6773         dbgs() << " : New Type: ";
6774         if (Ty && Ty.value())
6775           Ty.value()->print(dbgs());
6776         else if (Ty)
6777           dbgs() << "<nullptr>";
6778         else
6779           dbgs() << "<none>";
6780         dbgs() << "\n";
6781       });
6782 
6783       return !Ty || Ty.value();
6784     };
6785 
6786     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6787                                 UsedAssumedInformation))
6788       return nullptr;
6789     return Ty;
6790   }
6791 
6792   /// See AbstractAttribute::updateImpl(...).
6793   ChangeStatus updateImpl(Attributor &A) override {
6794     PrivatizableType = identifyPrivatizableType(A);
6795     if (!PrivatizableType)
6796       return ChangeStatus::UNCHANGED;
6797     if (!PrivatizableType.value())
6798       return indicatePessimisticFixpoint();
6799 
6800     // The dependence is optional so we don't give up once we give up on the
6801     // alignment.
6802     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6803                         DepClassTy::OPTIONAL);
6804 
6805     // Avoid arguments with padding for now.
6806     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6807         !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
6808       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6809       return indicatePessimisticFixpoint();
6810     }
6811 
6812     // Collect the types that will replace the privatizable type in the function
6813     // signature.
6814     SmallVector<Type *, 16> ReplacementTypes;
6815     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6816 
6817     // Verify callee and caller agree on how the promoted argument would be
6818     // passed.
6819     Function &Fn = *getIRPosition().getAnchorScope();
6820     const auto *TTI =
6821         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6822     if (!TTI) {
6823       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6824                         << Fn.getName() << "\n");
6825       return indicatePessimisticFixpoint();
6826     }
6827 
6828     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6829       CallBase *CB = ACS.getInstruction();
6830       return TTI->areTypesABICompatible(
6831           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6832     };
6833     bool UsedAssumedInformation = false;
6834     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6835                                 UsedAssumedInformation)) {
6836       LLVM_DEBUG(
6837           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6838                  << Fn.getName() << "\n");
6839       return indicatePessimisticFixpoint();
6840     }
6841 
6842     // Register a rewrite of the argument.
6843     Argument *Arg = getAssociatedArgument();
6844     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6845       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6846       return indicatePessimisticFixpoint();
6847     }
6848 
6849     unsigned ArgNo = Arg->getArgNo();
6850 
6851     // Helper to check if for the given call site the associated argument is
6852     // passed to a callback where the privatization would be different.
6853     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6854       SmallVector<const Use *, 4> CallbackUses;
6855       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6856       for (const Use *U : CallbackUses) {
6857         AbstractCallSite CBACS(U);
6858         assert(CBACS && CBACS.isCallbackCall());
6859         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6860           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6861 
6862           LLVM_DEBUG({
6863             dbgs()
6864                 << "[AAPrivatizablePtr] Argument " << *Arg
6865                 << "check if can be privatized in the context of its parent ("
6866                 << Arg->getParent()->getName()
6867                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6868                    "callback ("
6869                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6870                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6871                 << CBACS.getCallArgOperand(CBArg) << " vs "
6872                 << CB.getArgOperand(ArgNo) << "\n"
6873                 << "[AAPrivatizablePtr] " << CBArg << " : "
6874                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6875           });
6876 
6877           if (CBArgNo != int(ArgNo))
6878             continue;
6879           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6880               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6881           if (CBArgPrivAA.isValidState()) {
6882             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6883             if (!CBArgPrivTy)
6884               continue;
6885             if (CBArgPrivTy.value() == PrivatizableType)
6886               continue;
6887           }
6888 
6889           LLVM_DEBUG({
6890             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6891                    << " cannot be privatized in the context of its parent ("
6892                    << Arg->getParent()->getName()
6893                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6894                       "callback ("
6895                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6896                    << ").\n[AAPrivatizablePtr] for which the argument "
6897                       "privatization is not compatible.\n";
6898           });
6899           return false;
6900         }
6901       }
6902       return true;
6903     };
6904 
6905     // Helper to check if for the given call site the associated argument is
6906     // passed to a direct call where the privatization would be different.
6907     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6908       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6909       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6910       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6911              "Expected a direct call operand for callback call operand");
6912 
6913       LLVM_DEBUG({
6914         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6915                << " check if be privatized in the context of its parent ("
6916                << Arg->getParent()->getName()
6917                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6918                   "direct call of ("
6919                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6920                << ").\n";
6921       });
6922 
6923       Function *DCCallee = DC->getCalledFunction();
6924       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6925         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6926             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6927             DepClassTy::REQUIRED);
6928         if (DCArgPrivAA.isValidState()) {
6929           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6930           if (!DCArgPrivTy)
6931             return true;
6932           if (DCArgPrivTy.value() == PrivatizableType)
6933             return true;
6934         }
6935       }
6936 
6937       LLVM_DEBUG({
6938         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6939                << " cannot be privatized in the context of its parent ("
6940                << Arg->getParent()->getName()
6941                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6942                   "direct call of ("
6943                << ACS.getInstruction()->getCalledFunction()->getName()
6944                << ").\n[AAPrivatizablePtr] for which the argument "
6945                   "privatization is not compatible.\n";
6946       });
6947       return false;
6948     };
6949 
6950     // Helper to check if the associated argument is used at the given abstract
6951     // call site in a way that is incompatible with the privatization assumed
6952     // here.
6953     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6954       if (ACS.isDirectCall())
6955         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6956       if (ACS.isCallbackCall())
6957         return IsCompatiblePrivArgOfDirectCS(ACS);
6958       return false;
6959     };
6960 
6961     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6962                                 UsedAssumedInformation))
6963       return indicatePessimisticFixpoint();
6964 
6965     return ChangeStatus::UNCHANGED;
6966   }
6967 
6968   /// Given a type to private \p PrivType, collect the constituates (which are
6969   /// used) in \p ReplacementTypes.
6970   static void
6971   identifyReplacementTypes(Type *PrivType,
6972                            SmallVectorImpl<Type *> &ReplacementTypes) {
6973     // TODO: For now we expand the privatization type to the fullest which can
6974     //       lead to dead arguments that need to be removed later.
6975     assert(PrivType && "Expected privatizable type!");
6976 
6977     // Traverse the type, extract constituate types on the outermost level.
6978     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6979       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6980         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6981     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6982       ReplacementTypes.append(PrivArrayType->getNumElements(),
6983                               PrivArrayType->getElementType());
6984     } else {
6985       ReplacementTypes.push_back(PrivType);
6986     }
6987   }
6988 
6989   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6990   /// The values needed are taken from the arguments of \p F starting at
6991   /// position \p ArgNo.
6992   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6993                                    unsigned ArgNo, Instruction &IP) {
6994     assert(PrivType && "Expected privatizable type!");
6995 
6996     IRBuilder<NoFolder> IRB(&IP);
6997     const DataLayout &DL = F.getParent()->getDataLayout();
6998 
6999     // Traverse the type, build GEPs and stores.
7000     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7001       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
7002       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
7003         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
7004         Value *Ptr =
7005             constructPointer(PointeeTy, PrivType, &Base,
7006                              PrivStructLayout->getElementOffset(u), IRB, DL);
7007         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
7008       }
7009     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7010       Type *PointeeTy = PrivArrayType->getElementType();
7011       Type *PointeePtrTy = PointeeTy->getPointerTo();
7012       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
7013       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
7014         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
7015                                       u * PointeeTySize, IRB, DL);
7016         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
7017       }
7018     } else {
7019       new StoreInst(F.getArg(ArgNo), &Base, &IP);
7020     }
7021   }
7022 
7023   /// Extract values from \p Base according to the type \p PrivType at the
7024   /// call position \p ACS. The values are appended to \p ReplacementValues.
7025   void createReplacementValues(Align Alignment, Type *PrivType,
7026                                AbstractCallSite ACS, Value *Base,
7027                                SmallVectorImpl<Value *> &ReplacementValues) {
7028     assert(Base && "Expected base value!");
7029     assert(PrivType && "Expected privatizable type!");
7030     Instruction *IP = ACS.getInstruction();
7031 
7032     IRBuilder<NoFolder> IRB(IP);
7033     const DataLayout &DL = IP->getModule()->getDataLayout();
7034 
7035     Type *PrivPtrType = PrivType->getPointerTo();
7036     if (Base->getType() != PrivPtrType)
7037       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7038           Base, PrivPtrType, "", ACS.getInstruction());
7039 
7040     // Traverse the type, build GEPs and loads.
7041     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7042       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
7043       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
7044         Type *PointeeTy = PrivStructType->getElementType(u);
7045         Value *Ptr =
7046             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
7047                              PrivStructLayout->getElementOffset(u), IRB, DL);
7048         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
7049         L->setAlignment(Alignment);
7050         ReplacementValues.push_back(L);
7051       }
7052     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7053       Type *PointeeTy = PrivArrayType->getElementType();
7054       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
7055       Type *PointeePtrTy = PointeeTy->getPointerTo();
7056       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
7057         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
7058                                       u * PointeeTySize, IRB, DL);
7059         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
7060         L->setAlignment(Alignment);
7061         ReplacementValues.push_back(L);
7062       }
7063     } else {
7064       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
7065       L->setAlignment(Alignment);
7066       ReplacementValues.push_back(L);
7067     }
7068   }
7069 
7070   /// See AbstractAttribute::manifest(...)
7071   ChangeStatus manifest(Attributor &A) override {
7072     if (!PrivatizableType)
7073       return ChangeStatus::UNCHANGED;
7074     assert(PrivatizableType.value() && "Expected privatizable type!");
7075 
7076     // Collect all tail calls in the function as we cannot allow new allocas to
7077     // escape into tail recursion.
7078     // TODO: Be smarter about new allocas escaping into tail calls.
7079     SmallVector<CallInst *, 16> TailCalls;
7080     bool UsedAssumedInformation = false;
7081     if (!A.checkForAllInstructions(
7082             [&](Instruction &I) {
7083               CallInst &CI = cast<CallInst>(I);
7084               if (CI.isTailCall())
7085                 TailCalls.push_back(&CI);
7086               return true;
7087             },
7088             *this, {Instruction::Call}, UsedAssumedInformation))
7089       return ChangeStatus::UNCHANGED;
7090 
7091     Argument *Arg = getAssociatedArgument();
7092     // Query AAAlign attribute for alignment of associated argument to
7093     // determine the best alignment of loads.
7094     const auto &AlignAA =
7095         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7096 
7097     // Callback to repair the associated function. A new alloca is placed at the
7098     // beginning and initialized with the values passed through arguments. The
7099     // new alloca replaces the use of the old pointer argument.
7100     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7101         [=](const Attributor::ArgumentReplacementInfo &ARI,
7102             Function &ReplacementFn, Function::arg_iterator ArgIt) {
7103           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7104           Instruction *IP = &*EntryBB.getFirstInsertionPt();
7105           const DataLayout &DL = IP->getModule()->getDataLayout();
7106           unsigned AS = DL.getAllocaAddrSpace();
7107           Instruction *AI = new AllocaInst(PrivatizableType.value(), AS,
7108                                            Arg->getName() + ".priv", IP);
7109           createInitialization(PrivatizableType.value(), *AI, ReplacementFn,
7110                                ArgIt->getArgNo(), *IP);
7111 
7112           if (AI->getType() != Arg->getType())
7113             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7114                 AI, Arg->getType(), "", IP);
7115           Arg->replaceAllUsesWith(AI);
7116 
7117           for (CallInst *CI : TailCalls)
7118             CI->setTailCall(false);
7119         };
7120 
7121     // Callback to repair a call site of the associated function. The elements
7122     // of the privatizable type are loaded prior to the call and passed to the
7123     // new function version.
7124     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7125         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
7126                       AbstractCallSite ACS,
7127                       SmallVectorImpl<Value *> &NewArgOperands) {
7128           // When no alignment is specified for the load instruction,
7129           // natural alignment is assumed.
7130           createReplacementValues(
7131               AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
7132               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7133               NewArgOperands);
7134         };
7135 
7136     // Collect the types that will replace the privatizable type in the function
7137     // signature.
7138     SmallVector<Type *, 16> ReplacementTypes;
7139     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7140 
7141     // Register a rewrite of the argument.
7142     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7143                                            std::move(FnRepairCB),
7144                                            std::move(ACSRepairCB)))
7145       return ChangeStatus::CHANGED;
7146     return ChangeStatus::UNCHANGED;
7147   }
7148 
7149   /// See AbstractAttribute::trackStatistics()
7150   void trackStatistics() const override {
7151     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7152   }
7153 };
7154 
7155 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7156   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7157       : AAPrivatizablePtrImpl(IRP, A) {}
7158 
7159   /// See AbstractAttribute::initialize(...).
7160   virtual void initialize(Attributor &A) override {
7161     // TODO: We can privatize more than arguments.
7162     indicatePessimisticFixpoint();
7163   }
7164 
7165   ChangeStatus updateImpl(Attributor &A) override {
7166     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7167                      "updateImpl will not be called");
7168   }
7169 
7170   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7171   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
7172     Value *Obj = getUnderlyingObject(&getAssociatedValue());
7173     if (!Obj) {
7174       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7175       return nullptr;
7176     }
7177 
7178     if (auto *AI = dyn_cast<AllocaInst>(Obj))
7179       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7180         if (CI->isOne())
7181           return AI->getAllocatedType();
7182     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7183       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7184           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7185       if (PrivArgAA.isAssumedPrivatizablePtr())
7186         return PrivArgAA.getPrivatizableType();
7187     }
7188 
7189     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7190                          "alloca nor privatizable argument: "
7191                       << *Obj << "!\n");
7192     return nullptr;
7193   }
7194 
7195   /// See AbstractAttribute::trackStatistics()
7196   void trackStatistics() const override {
7197     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7198   }
7199 };
7200 
7201 struct AAPrivatizablePtrCallSiteArgument final
7202     : public AAPrivatizablePtrFloating {
7203   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7204       : AAPrivatizablePtrFloating(IRP, A) {}
7205 
7206   /// See AbstractAttribute::initialize(...).
7207   void initialize(Attributor &A) override {
7208     if (getIRPosition().hasAttr(Attribute::ByVal))
7209       indicateOptimisticFixpoint();
7210   }
7211 
7212   /// See AbstractAttribute::updateImpl(...).
7213   ChangeStatus updateImpl(Attributor &A) override {
7214     PrivatizableType = identifyPrivatizableType(A);
7215     if (!PrivatizableType)
7216       return ChangeStatus::UNCHANGED;
7217     if (!PrivatizableType.value())
7218       return indicatePessimisticFixpoint();
7219 
7220     const IRPosition &IRP = getIRPosition();
7221     auto &NoCaptureAA =
7222         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
7223     if (!NoCaptureAA.isAssumedNoCapture()) {
7224       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7225       return indicatePessimisticFixpoint();
7226     }
7227 
7228     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
7229     if (!NoAliasAA.isAssumedNoAlias()) {
7230       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7231       return indicatePessimisticFixpoint();
7232     }
7233 
7234     bool IsKnown;
7235     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7236       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7237       return indicatePessimisticFixpoint();
7238     }
7239 
7240     return ChangeStatus::UNCHANGED;
7241   }
7242 
7243   /// See AbstractAttribute::trackStatistics()
7244   void trackStatistics() const override {
7245     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7246   }
7247 };
7248 
7249 struct AAPrivatizablePtrCallSiteReturned final
7250     : public AAPrivatizablePtrFloating {
7251   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7252       : AAPrivatizablePtrFloating(IRP, A) {}
7253 
7254   /// See AbstractAttribute::initialize(...).
7255   void initialize(Attributor &A) override {
7256     // TODO: We can privatize more than arguments.
7257     indicatePessimisticFixpoint();
7258   }
7259 
7260   /// See AbstractAttribute::trackStatistics()
7261   void trackStatistics() const override {
7262     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7263   }
7264 };
7265 
7266 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7267   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7268       : AAPrivatizablePtrFloating(IRP, A) {}
7269 
7270   /// See AbstractAttribute::initialize(...).
7271   void initialize(Attributor &A) override {
7272     // TODO: We can privatize more than arguments.
7273     indicatePessimisticFixpoint();
7274   }
7275 
7276   /// See AbstractAttribute::trackStatistics()
7277   void trackStatistics() const override {
7278     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7279   }
7280 };
7281 } // namespace
7282 
7283 /// -------------------- Memory Behavior Attributes ----------------------------
7284 /// Includes read-none, read-only, and write-only.
7285 /// ----------------------------------------------------------------------------
7286 namespace {
7287 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7288   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7289       : AAMemoryBehavior(IRP, A) {}
7290 
7291   /// See AbstractAttribute::initialize(...).
7292   void initialize(Attributor &A) override {
7293     intersectAssumedBits(BEST_STATE);
7294     getKnownStateFromValue(getIRPosition(), getState());
7295     AAMemoryBehavior::initialize(A);
7296   }
7297 
7298   /// Return the memory behavior information encoded in the IR for \p IRP.
7299   static void getKnownStateFromValue(const IRPosition &IRP,
7300                                      BitIntegerState &State,
7301                                      bool IgnoreSubsumingPositions = false) {
7302     SmallVector<Attribute, 2> Attrs;
7303     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7304     for (const Attribute &Attr : Attrs) {
7305       switch (Attr.getKindAsEnum()) {
7306       case Attribute::ReadNone:
7307         State.addKnownBits(NO_ACCESSES);
7308         break;
7309       case Attribute::ReadOnly:
7310         State.addKnownBits(NO_WRITES);
7311         break;
7312       case Attribute::WriteOnly:
7313         State.addKnownBits(NO_READS);
7314         break;
7315       default:
7316         llvm_unreachable("Unexpected attribute!");
7317       }
7318     }
7319 
7320     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7321       if (!I->mayReadFromMemory())
7322         State.addKnownBits(NO_READS);
7323       if (!I->mayWriteToMemory())
7324         State.addKnownBits(NO_WRITES);
7325     }
7326   }
7327 
7328   /// See AbstractAttribute::getDeducedAttributes(...).
7329   void getDeducedAttributes(LLVMContext &Ctx,
7330                             SmallVectorImpl<Attribute> &Attrs) const override {
7331     assert(Attrs.size() == 0);
7332     if (isAssumedReadNone())
7333       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7334     else if (isAssumedReadOnly())
7335       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7336     else if (isAssumedWriteOnly())
7337       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7338     assert(Attrs.size() <= 1);
7339   }
7340 
7341   /// See AbstractAttribute::manifest(...).
7342   ChangeStatus manifest(Attributor &A) override {
7343     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7344       return ChangeStatus::UNCHANGED;
7345 
7346     const IRPosition &IRP = getIRPosition();
7347 
7348     // Check if we would improve the existing attributes first.
7349     SmallVector<Attribute, 4> DeducedAttrs;
7350     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7351     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7352           return IRP.hasAttr(Attr.getKindAsEnum(),
7353                              /* IgnoreSubsumingPositions */ true);
7354         }))
7355       return ChangeStatus::UNCHANGED;
7356 
7357     // Clear existing attributes.
7358     IRP.removeAttrs(AttrKinds);
7359 
7360     // Use the generic manifest method.
7361     return IRAttribute::manifest(A);
7362   }
7363 
7364   /// See AbstractState::getAsStr().
7365   const std::string getAsStr() const override {
7366     if (isAssumedReadNone())
7367       return "readnone";
7368     if (isAssumedReadOnly())
7369       return "readonly";
7370     if (isAssumedWriteOnly())
7371       return "writeonly";
7372     return "may-read/write";
7373   }
7374 
7375   /// The set of IR attributes AAMemoryBehavior deals with.
7376   static const Attribute::AttrKind AttrKinds[3];
7377 };
7378 
7379 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7380     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7381 
7382 /// Memory behavior attribute for a floating value.
7383 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7384   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7385       : AAMemoryBehaviorImpl(IRP, A) {}
7386 
7387   /// See AbstractAttribute::updateImpl(...).
7388   ChangeStatus updateImpl(Attributor &A) override;
7389 
7390   /// See AbstractAttribute::trackStatistics()
7391   void trackStatistics() const override {
7392     if (isAssumedReadNone())
7393       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7394     else if (isAssumedReadOnly())
7395       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7396     else if (isAssumedWriteOnly())
7397       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7398   }
7399 
7400 private:
7401   /// Return true if users of \p UserI might access the underlying
7402   /// variable/location described by \p U and should therefore be analyzed.
7403   bool followUsersOfUseIn(Attributor &A, const Use &U,
7404                           const Instruction *UserI);
7405 
7406   /// Update the state according to the effect of use \p U in \p UserI.
7407   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7408 };
7409 
7410 /// Memory behavior attribute for function argument.
7411 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7412   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7413       : AAMemoryBehaviorFloating(IRP, A) {}
7414 
7415   /// See AbstractAttribute::initialize(...).
7416   void initialize(Attributor &A) override {
7417     intersectAssumedBits(BEST_STATE);
7418     const IRPosition &IRP = getIRPosition();
7419     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7420     // can query it when we use has/getAttr. That would allow us to reuse the
7421     // initialize of the base class here.
7422     bool HasByVal =
7423         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7424     getKnownStateFromValue(IRP, getState(),
7425                            /* IgnoreSubsumingPositions */ HasByVal);
7426 
7427     // Initialize the use vector with all direct uses of the associated value.
7428     Argument *Arg = getAssociatedArgument();
7429     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7430       indicatePessimisticFixpoint();
7431   }
7432 
7433   ChangeStatus manifest(Attributor &A) override {
7434     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7435     if (!getAssociatedValue().getType()->isPointerTy())
7436       return ChangeStatus::UNCHANGED;
7437 
7438     // TODO: From readattrs.ll: "inalloca parameters are always
7439     //                           considered written"
7440     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7441       removeKnownBits(NO_WRITES);
7442       removeAssumedBits(NO_WRITES);
7443     }
7444     return AAMemoryBehaviorFloating::manifest(A);
7445   }
7446 
7447   /// See AbstractAttribute::trackStatistics()
7448   void trackStatistics() const override {
7449     if (isAssumedReadNone())
7450       STATS_DECLTRACK_ARG_ATTR(readnone)
7451     else if (isAssumedReadOnly())
7452       STATS_DECLTRACK_ARG_ATTR(readonly)
7453     else if (isAssumedWriteOnly())
7454       STATS_DECLTRACK_ARG_ATTR(writeonly)
7455   }
7456 };
7457 
7458 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7459   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7460       : AAMemoryBehaviorArgument(IRP, A) {}
7461 
7462   /// See AbstractAttribute::initialize(...).
7463   void initialize(Attributor &A) override {
7464     // If we don't have an associated attribute this is either a variadic call
7465     // or an indirect call, either way, nothing to do here.
7466     Argument *Arg = getAssociatedArgument();
7467     if (!Arg) {
7468       indicatePessimisticFixpoint();
7469       return;
7470     }
7471     if (Arg->hasByValAttr()) {
7472       addKnownBits(NO_WRITES);
7473       removeKnownBits(NO_READS);
7474       removeAssumedBits(NO_READS);
7475     }
7476     AAMemoryBehaviorArgument::initialize(A);
7477     if (getAssociatedFunction()->isDeclaration())
7478       indicatePessimisticFixpoint();
7479   }
7480 
7481   /// See AbstractAttribute::updateImpl(...).
7482   ChangeStatus updateImpl(Attributor &A) override {
7483     // TODO: Once we have call site specific value information we can provide
7484     //       call site specific liveness liveness information and then it makes
7485     //       sense to specialize attributes for call sites arguments instead of
7486     //       redirecting requests to the callee argument.
7487     Argument *Arg = getAssociatedArgument();
7488     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7489     auto &ArgAA =
7490         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7491     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7492   }
7493 
7494   /// See AbstractAttribute::trackStatistics()
7495   void trackStatistics() const override {
7496     if (isAssumedReadNone())
7497       STATS_DECLTRACK_CSARG_ATTR(readnone)
7498     else if (isAssumedReadOnly())
7499       STATS_DECLTRACK_CSARG_ATTR(readonly)
7500     else if (isAssumedWriteOnly())
7501       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7502   }
7503 };
7504 
7505 /// Memory behavior attribute for a call site return position.
7506 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7507   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7508       : AAMemoryBehaviorFloating(IRP, A) {}
7509 
7510   /// See AbstractAttribute::initialize(...).
7511   void initialize(Attributor &A) override {
7512     AAMemoryBehaviorImpl::initialize(A);
7513     Function *F = getAssociatedFunction();
7514     if (!F || F->isDeclaration())
7515       indicatePessimisticFixpoint();
7516   }
7517 
7518   /// See AbstractAttribute::manifest(...).
7519   ChangeStatus manifest(Attributor &A) override {
7520     // We do not annotate returned values.
7521     return ChangeStatus::UNCHANGED;
7522   }
7523 
7524   /// See AbstractAttribute::trackStatistics()
7525   void trackStatistics() const override {}
7526 };
7527 
7528 /// An AA to represent the memory behavior function attributes.
7529 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7530   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7531       : AAMemoryBehaviorImpl(IRP, A) {}
7532 
7533   /// See AbstractAttribute::updateImpl(Attributor &A).
7534   virtual ChangeStatus updateImpl(Attributor &A) override;
7535 
7536   /// See AbstractAttribute::manifest(...).
7537   ChangeStatus manifest(Attributor &A) override {
7538     Function &F = cast<Function>(getAnchorValue());
7539     if (isAssumedReadNone()) {
7540       F.removeFnAttr(Attribute::ArgMemOnly);
7541       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7542       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7543     }
7544     return AAMemoryBehaviorImpl::manifest(A);
7545   }
7546 
7547   /// See AbstractAttribute::trackStatistics()
7548   void trackStatistics() const override {
7549     if (isAssumedReadNone())
7550       STATS_DECLTRACK_FN_ATTR(readnone)
7551     else if (isAssumedReadOnly())
7552       STATS_DECLTRACK_FN_ATTR(readonly)
7553     else if (isAssumedWriteOnly())
7554       STATS_DECLTRACK_FN_ATTR(writeonly)
7555   }
7556 };
7557 
7558 /// AAMemoryBehavior attribute for call sites.
7559 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7560   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7561       : AAMemoryBehaviorImpl(IRP, A) {}
7562 
7563   /// See AbstractAttribute::initialize(...).
7564   void initialize(Attributor &A) override {
7565     AAMemoryBehaviorImpl::initialize(A);
7566     Function *F = getAssociatedFunction();
7567     if (!F || F->isDeclaration())
7568       indicatePessimisticFixpoint();
7569   }
7570 
7571   /// See AbstractAttribute::updateImpl(...).
7572   ChangeStatus updateImpl(Attributor &A) override {
7573     // TODO: Once we have call site specific value information we can provide
7574     //       call site specific liveness liveness information and then it makes
7575     //       sense to specialize attributes for call sites arguments instead of
7576     //       redirecting requests to the callee argument.
7577     Function *F = getAssociatedFunction();
7578     const IRPosition &FnPos = IRPosition::function(*F);
7579     auto &FnAA =
7580         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7581     return clampStateAndIndicateChange(getState(), FnAA.getState());
7582   }
7583 
7584   /// See AbstractAttribute::trackStatistics()
7585   void trackStatistics() const override {
7586     if (isAssumedReadNone())
7587       STATS_DECLTRACK_CS_ATTR(readnone)
7588     else if (isAssumedReadOnly())
7589       STATS_DECLTRACK_CS_ATTR(readonly)
7590     else if (isAssumedWriteOnly())
7591       STATS_DECLTRACK_CS_ATTR(writeonly)
7592   }
7593 };
7594 
7595 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7596 
7597   // The current assumed state used to determine a change.
7598   auto AssumedState = getAssumed();
7599 
7600   auto CheckRWInst = [&](Instruction &I) {
7601     // If the instruction has an own memory behavior state, use it to restrict
7602     // the local state. No further analysis is required as the other memory
7603     // state is as optimistic as it gets.
7604     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7605       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7606           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7607       intersectAssumedBits(MemBehaviorAA.getAssumed());
7608       return !isAtFixpoint();
7609     }
7610 
7611     // Remove access kind modifiers if necessary.
7612     if (I.mayReadFromMemory())
7613       removeAssumedBits(NO_READS);
7614     if (I.mayWriteToMemory())
7615       removeAssumedBits(NO_WRITES);
7616     return !isAtFixpoint();
7617   };
7618 
7619   bool UsedAssumedInformation = false;
7620   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7621                                           UsedAssumedInformation))
7622     return indicatePessimisticFixpoint();
7623 
7624   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7625                                         : ChangeStatus::UNCHANGED;
7626 }
7627 
7628 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7629 
7630   const IRPosition &IRP = getIRPosition();
7631   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7632   AAMemoryBehavior::StateType &S = getState();
7633 
7634   // First, check the function scope. We take the known information and we avoid
7635   // work if the assumed information implies the current assumed information for
7636   // this attribute. This is a valid for all but byval arguments.
7637   Argument *Arg = IRP.getAssociatedArgument();
7638   AAMemoryBehavior::base_t FnMemAssumedState =
7639       AAMemoryBehavior::StateType::getWorstState();
7640   if (!Arg || !Arg->hasByValAttr()) {
7641     const auto &FnMemAA =
7642         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7643     FnMemAssumedState = FnMemAA.getAssumed();
7644     S.addKnownBits(FnMemAA.getKnown());
7645     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7646       return ChangeStatus::UNCHANGED;
7647   }
7648 
7649   // The current assumed state used to determine a change.
7650   auto AssumedState = S.getAssumed();
7651 
7652   // Make sure the value is not captured (except through "return"), if
7653   // it is, any information derived would be irrelevant anyway as we cannot
7654   // check the potential aliases introduced by the capture. However, no need
7655   // to fall back to anythign less optimistic than the function state.
7656   const auto &ArgNoCaptureAA =
7657       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7658   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7659     S.intersectAssumedBits(FnMemAssumedState);
7660     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7661                                           : ChangeStatus::UNCHANGED;
7662   }
7663 
7664   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7665   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7666     Instruction *UserI = cast<Instruction>(U.getUser());
7667     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7668                       << " \n");
7669 
7670     // Droppable users, e.g., llvm::assume does not actually perform any action.
7671     if (UserI->isDroppable())
7672       return true;
7673 
7674     // Check if the users of UserI should also be visited.
7675     Follow = followUsersOfUseIn(A, U, UserI);
7676 
7677     // If UserI might touch memory we analyze the use in detail.
7678     if (UserI->mayReadOrWriteMemory())
7679       analyzeUseIn(A, U, UserI);
7680 
7681     return !isAtFixpoint();
7682   };
7683 
7684   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7685     return indicatePessimisticFixpoint();
7686 
7687   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7688                                         : ChangeStatus::UNCHANGED;
7689 }
7690 
7691 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7692                                                   const Instruction *UserI) {
7693   // The loaded value is unrelated to the pointer argument, no need to
7694   // follow the users of the load.
7695   if (isa<LoadInst>(UserI))
7696     return false;
7697 
7698   // By default we follow all uses assuming UserI might leak information on U,
7699   // we have special handling for call sites operands though.
7700   const auto *CB = dyn_cast<CallBase>(UserI);
7701   if (!CB || !CB->isArgOperand(&U))
7702     return true;
7703 
7704   // If the use is a call argument known not to be captured, the users of
7705   // the call do not need to be visited because they have to be unrelated to
7706   // the input. Note that this check is not trivial even though we disallow
7707   // general capturing of the underlying argument. The reason is that the
7708   // call might the argument "through return", which we allow and for which we
7709   // need to check call users.
7710   if (U.get()->getType()->isPointerTy()) {
7711     unsigned ArgNo = CB->getArgOperandNo(&U);
7712     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7713         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7714     return !ArgNoCaptureAA.isAssumedNoCapture();
7715   }
7716 
7717   return true;
7718 }
7719 
7720 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7721                                             const Instruction *UserI) {
7722   assert(UserI->mayReadOrWriteMemory());
7723 
7724   switch (UserI->getOpcode()) {
7725   default:
7726     // TODO: Handle all atomics and other side-effect operations we know of.
7727     break;
7728   case Instruction::Load:
7729     // Loads cause the NO_READS property to disappear.
7730     removeAssumedBits(NO_READS);
7731     return;
7732 
7733   case Instruction::Store:
7734     // Stores cause the NO_WRITES property to disappear if the use is the
7735     // pointer operand. Note that while capturing was taken care of somewhere
7736     // else we need to deal with stores of the value that is not looked through.
7737     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7738       removeAssumedBits(NO_WRITES);
7739     else
7740       indicatePessimisticFixpoint();
7741     return;
7742 
7743   case Instruction::Call:
7744   case Instruction::CallBr:
7745   case Instruction::Invoke: {
7746     // For call sites we look at the argument memory behavior attribute (this
7747     // could be recursive!) in order to restrict our own state.
7748     const auto *CB = cast<CallBase>(UserI);
7749 
7750     // Give up on operand bundles.
7751     if (CB->isBundleOperand(&U)) {
7752       indicatePessimisticFixpoint();
7753       return;
7754     }
7755 
7756     // Calling a function does read the function pointer, maybe write it if the
7757     // function is self-modifying.
7758     if (CB->isCallee(&U)) {
7759       removeAssumedBits(NO_READS);
7760       break;
7761     }
7762 
7763     // Adjust the possible access behavior based on the information on the
7764     // argument.
7765     IRPosition Pos;
7766     if (U.get()->getType()->isPointerTy())
7767       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7768     else
7769       Pos = IRPosition::callsite_function(*CB);
7770     const auto &MemBehaviorAA =
7771         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7772     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7773     // and at least "known".
7774     intersectAssumedBits(MemBehaviorAA.getAssumed());
7775     return;
7776   }
7777   };
7778 
7779   // Generally, look at the "may-properties" and adjust the assumed state if we
7780   // did not trigger special handling before.
7781   if (UserI->mayReadFromMemory())
7782     removeAssumedBits(NO_READS);
7783   if (UserI->mayWriteToMemory())
7784     removeAssumedBits(NO_WRITES);
7785 }
7786 } // namespace
7787 
7788 /// -------------------- Memory Locations Attributes ---------------------------
7789 /// Includes read-none, argmemonly, inaccessiblememonly,
7790 /// inaccessiblememorargmemonly
7791 /// ----------------------------------------------------------------------------
7792 
7793 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7794     AAMemoryLocation::MemoryLocationsKind MLK) {
7795   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7796     return "all memory";
7797   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7798     return "no memory";
7799   std::string S = "memory:";
7800   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7801     S += "stack,";
7802   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7803     S += "constant,";
7804   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7805     S += "internal global,";
7806   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7807     S += "external global,";
7808   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7809     S += "argument,";
7810   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7811     S += "inaccessible,";
7812   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7813     S += "malloced,";
7814   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7815     S += "unknown,";
7816   S.pop_back();
7817   return S;
7818 }
7819 
7820 namespace {
7821 struct AAMemoryLocationImpl : public AAMemoryLocation {
7822 
7823   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7824       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7825     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7826       AccessKind2Accesses[u] = nullptr;
7827   }
7828 
7829   ~AAMemoryLocationImpl() {
7830     // The AccessSets are allocated via a BumpPtrAllocator, we call
7831     // the destructor manually.
7832     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7833       if (AccessKind2Accesses[u])
7834         AccessKind2Accesses[u]->~AccessSet();
7835   }
7836 
7837   /// See AbstractAttribute::initialize(...).
7838   void initialize(Attributor &A) override {
7839     intersectAssumedBits(BEST_STATE);
7840     getKnownStateFromValue(A, getIRPosition(), getState());
7841     AAMemoryLocation::initialize(A);
7842   }
7843 
7844   /// Return the memory behavior information encoded in the IR for \p IRP.
7845   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7846                                      BitIntegerState &State,
7847                                      bool IgnoreSubsumingPositions = false) {
7848     // For internal functions we ignore `argmemonly` and
7849     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7850     // constant propagation. It is unclear if this is the best way but it is
7851     // unlikely this will cause real performance problems. If we are deriving
7852     // attributes for the anchor function we even remove the attribute in
7853     // addition to ignoring it.
7854     bool UseArgMemOnly = true;
7855     Function *AnchorFn = IRP.getAnchorScope();
7856     if (AnchorFn && A.isRunOn(*AnchorFn))
7857       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7858 
7859     SmallVector<Attribute, 2> Attrs;
7860     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7861     for (const Attribute &Attr : Attrs) {
7862       switch (Attr.getKindAsEnum()) {
7863       case Attribute::ReadNone:
7864         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7865         break;
7866       case Attribute::InaccessibleMemOnly:
7867         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7868         break;
7869       case Attribute::ArgMemOnly:
7870         if (UseArgMemOnly)
7871           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7872         else
7873           IRP.removeAttrs({Attribute::ArgMemOnly});
7874         break;
7875       case Attribute::InaccessibleMemOrArgMemOnly:
7876         if (UseArgMemOnly)
7877           State.addKnownBits(inverseLocation(
7878               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7879         else
7880           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7881         break;
7882       default:
7883         llvm_unreachable("Unexpected attribute!");
7884       }
7885     }
7886   }
7887 
7888   /// See AbstractAttribute::getDeducedAttributes(...).
7889   void getDeducedAttributes(LLVMContext &Ctx,
7890                             SmallVectorImpl<Attribute> &Attrs) const override {
7891     assert(Attrs.size() == 0);
7892     if (isAssumedReadNone()) {
7893       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7894     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7895       if (isAssumedInaccessibleMemOnly())
7896         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7897       else if (isAssumedArgMemOnly())
7898         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7899       else if (isAssumedInaccessibleOrArgMemOnly())
7900         Attrs.push_back(
7901             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7902     }
7903     assert(Attrs.size() <= 1);
7904   }
7905 
7906   /// See AbstractAttribute::manifest(...).
7907   ChangeStatus manifest(Attributor &A) override {
7908     const IRPosition &IRP = getIRPosition();
7909 
7910     // Check if we would improve the existing attributes first.
7911     SmallVector<Attribute, 4> DeducedAttrs;
7912     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7913     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7914           return IRP.hasAttr(Attr.getKindAsEnum(),
7915                              /* IgnoreSubsumingPositions */ true);
7916         }))
7917       return ChangeStatus::UNCHANGED;
7918 
7919     // Clear existing attributes.
7920     IRP.removeAttrs(AttrKinds);
7921     if (isAssumedReadNone())
7922       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7923 
7924     // Use the generic manifest method.
7925     return IRAttribute::manifest(A);
7926   }
7927 
7928   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7929   bool checkForAllAccessesToMemoryKind(
7930       function_ref<bool(const Instruction *, const Value *, AccessKind,
7931                         MemoryLocationsKind)>
7932           Pred,
7933       MemoryLocationsKind RequestedMLK) const override {
7934     if (!isValidState())
7935       return false;
7936 
7937     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7938     if (AssumedMLK == NO_LOCATIONS)
7939       return true;
7940 
7941     unsigned Idx = 0;
7942     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7943          CurMLK *= 2, ++Idx) {
7944       if (CurMLK & RequestedMLK)
7945         continue;
7946 
7947       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7948         for (const AccessInfo &AI : *Accesses)
7949           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7950             return false;
7951     }
7952 
7953     return true;
7954   }
7955 
7956   ChangeStatus indicatePessimisticFixpoint() override {
7957     // If we give up and indicate a pessimistic fixpoint this instruction will
7958     // become an access for all potential access kinds:
7959     // TODO: Add pointers for argmemonly and globals to improve the results of
7960     //       checkForAllAccessesToMemoryKind.
7961     bool Changed = false;
7962     MemoryLocationsKind KnownMLK = getKnown();
7963     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7964     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7965       if (!(CurMLK & KnownMLK))
7966         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7967                                   getAccessKindFromInst(I));
7968     return AAMemoryLocation::indicatePessimisticFixpoint();
7969   }
7970 
7971 protected:
7972   /// Helper struct to tie together an instruction that has a read or write
7973   /// effect with the pointer it accesses (if any).
7974   struct AccessInfo {
7975 
7976     /// The instruction that caused the access.
7977     const Instruction *I;
7978 
7979     /// The base pointer that is accessed, or null if unknown.
7980     const Value *Ptr;
7981 
7982     /// The kind of access (read/write/read+write).
7983     AccessKind Kind;
7984 
7985     bool operator==(const AccessInfo &RHS) const {
7986       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7987     }
7988     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7989       if (LHS.I != RHS.I)
7990         return LHS.I < RHS.I;
7991       if (LHS.Ptr != RHS.Ptr)
7992         return LHS.Ptr < RHS.Ptr;
7993       if (LHS.Kind != RHS.Kind)
7994         return LHS.Kind < RHS.Kind;
7995       return false;
7996     }
7997   };
7998 
7999   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
8000   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
8001   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
8002   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
8003 
8004   /// Categorize the pointer arguments of CB that might access memory in
8005   /// AccessedLoc and update the state and access map accordingly.
8006   void
8007   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
8008                                      AAMemoryLocation::StateType &AccessedLocs,
8009                                      bool &Changed);
8010 
8011   /// Return the kind(s) of location that may be accessed by \p V.
8012   AAMemoryLocation::MemoryLocationsKind
8013   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
8014 
8015   /// Return the access kind as determined by \p I.
8016   AccessKind getAccessKindFromInst(const Instruction *I) {
8017     AccessKind AK = READ_WRITE;
8018     if (I) {
8019       AK = I->mayReadFromMemory() ? READ : NONE;
8020       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
8021     }
8022     return AK;
8023   }
8024 
8025   /// Update the state \p State and the AccessKind2Accesses given that \p I is
8026   /// an access of kind \p AK to a \p MLK memory location with the access
8027   /// pointer \p Ptr.
8028   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
8029                                  MemoryLocationsKind MLK, const Instruction *I,
8030                                  const Value *Ptr, bool &Changed,
8031                                  AccessKind AK = READ_WRITE) {
8032 
8033     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
8034     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
8035     if (!Accesses)
8036       Accesses = new (Allocator) AccessSet();
8037     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
8038     State.removeAssumedBits(MLK);
8039   }
8040 
8041   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
8042   /// arguments, and update the state and access map accordingly.
8043   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
8044                           AAMemoryLocation::StateType &State, bool &Changed);
8045 
8046   /// Used to allocate access sets.
8047   BumpPtrAllocator &Allocator;
8048 
8049   /// The set of IR attributes AAMemoryLocation deals with.
8050   static const Attribute::AttrKind AttrKinds[4];
8051 };
8052 
8053 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
8054     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
8055     Attribute::InaccessibleMemOrArgMemOnly};
8056 
8057 void AAMemoryLocationImpl::categorizePtrValue(
8058     Attributor &A, const Instruction &I, const Value &Ptr,
8059     AAMemoryLocation::StateType &State, bool &Changed) {
8060   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
8061                     << Ptr << " ["
8062                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
8063 
8064   SmallVector<Value *, 8> Objects;
8065   bool UsedAssumedInformation = false;
8066   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
8067                                        UsedAssumedInformation,
8068                                        AA::Intraprocedural)) {
8069     LLVM_DEBUG(
8070         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8071     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8072                               getAccessKindFromInst(&I));
8073     return;
8074   }
8075 
8076   for (Value *Obj : Objects) {
8077     // TODO: recognize the TBAA used for constant accesses.
8078     MemoryLocationsKind MLK = NO_LOCATIONS;
8079     if (isa<UndefValue>(Obj))
8080       continue;
8081     if (isa<Argument>(Obj)) {
8082       // TODO: For now we do not treat byval arguments as local copies performed
8083       // on the call edge, though, we should. To make that happen we need to
8084       // teach various passes, e.g., DSE, about the copy effect of a byval. That
8085       // would also allow us to mark functions only accessing byval arguments as
8086       // readnone again, atguably their acceses have no effect outside of the
8087       // function, like accesses to allocas.
8088       MLK = NO_ARGUMENT_MEM;
8089     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
8090       // Reading constant memory is not treated as a read "effect" by the
8091       // function attr pass so we won't neither. Constants defined by TBAA are
8092       // similar. (We know we do not write it because it is constant.)
8093       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8094         if (GVar->isConstant())
8095           continue;
8096 
8097       if (GV->hasLocalLinkage())
8098         MLK = NO_GLOBAL_INTERNAL_MEM;
8099       else
8100         MLK = NO_GLOBAL_EXTERNAL_MEM;
8101     } else if (isa<ConstantPointerNull>(Obj) &&
8102                !NullPointerIsDefined(getAssociatedFunction(),
8103                                      Ptr.getType()->getPointerAddressSpace())) {
8104       continue;
8105     } else if (isa<AllocaInst>(Obj)) {
8106       MLK = NO_LOCAL_MEM;
8107     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
8108       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
8109           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
8110       if (NoAliasAA.isAssumedNoAlias())
8111         MLK = NO_MALLOCED_MEM;
8112       else
8113         MLK = NO_UNKOWN_MEM;
8114     } else {
8115       MLK = NO_UNKOWN_MEM;
8116     }
8117 
8118     assert(MLK != NO_LOCATIONS && "No location specified!");
8119     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8120                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
8121                       << "\n");
8122     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
8123                               getAccessKindFromInst(&I));
8124   }
8125 
8126   LLVM_DEBUG(
8127       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8128              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8129 }
8130 
8131 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8132     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8133     bool &Changed) {
8134   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8135 
8136     // Skip non-pointer arguments.
8137     const Value *ArgOp = CB.getArgOperand(ArgNo);
8138     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8139       continue;
8140 
8141     // Skip readnone arguments.
8142     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8143     const auto &ArgOpMemLocationAA =
8144         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8145 
8146     if (ArgOpMemLocationAA.isAssumedReadNone())
8147       continue;
8148 
8149     // Categorize potentially accessed pointer arguments as if there was an
8150     // access instruction with them as pointer.
8151     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8152   }
8153 }
8154 
8155 AAMemoryLocation::MemoryLocationsKind
8156 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8157                                                   bool &Changed) {
8158   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8159                     << I << "\n");
8160 
8161   AAMemoryLocation::StateType AccessedLocs;
8162   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8163 
8164   if (auto *CB = dyn_cast<CallBase>(&I)) {
8165 
8166     // First check if we assume any memory is access is visible.
8167     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8168         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8169     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8170                       << " [" << CBMemLocationAA << "]\n");
8171 
8172     if (CBMemLocationAA.isAssumedReadNone())
8173       return NO_LOCATIONS;
8174 
8175     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
8176       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8177                                 Changed, getAccessKindFromInst(&I));
8178       return AccessedLocs.getAssumed();
8179     }
8180 
8181     uint32_t CBAssumedNotAccessedLocs =
8182         CBMemLocationAA.getAssumedNotAccessedLocation();
8183 
8184     // Set the argmemonly and global bit as we handle them separately below.
8185     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8186         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8187 
8188     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8189       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8190         continue;
8191       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8192                                 getAccessKindFromInst(&I));
8193     }
8194 
8195     // Now handle global memory if it might be accessed. This is slightly tricky
8196     // as NO_GLOBAL_MEM has multiple bits set.
8197     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8198     if (HasGlobalAccesses) {
8199       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8200                             AccessKind Kind, MemoryLocationsKind MLK) {
8201         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8202                                   getAccessKindFromInst(&I));
8203         return true;
8204       };
8205       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8206               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8207         return AccessedLocs.getWorstState();
8208     }
8209 
8210     LLVM_DEBUG(
8211         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8212                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8213 
8214     // Now handle argument memory if it might be accessed.
8215     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8216     if (HasArgAccesses)
8217       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8218 
8219     LLVM_DEBUG(
8220         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8221                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8222 
8223     return AccessedLocs.getAssumed();
8224   }
8225 
8226   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8227     LLVM_DEBUG(
8228         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8229                << I << " [" << *Ptr << "]\n");
8230     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8231     return AccessedLocs.getAssumed();
8232   }
8233 
8234   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8235                     << I << "\n");
8236   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8237                             getAccessKindFromInst(&I));
8238   return AccessedLocs.getAssumed();
8239 }
8240 
8241 /// An AA to represent the memory behavior function attributes.
8242 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8243   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8244       : AAMemoryLocationImpl(IRP, A) {}
8245 
8246   /// See AbstractAttribute::updateImpl(Attributor &A).
8247   virtual ChangeStatus updateImpl(Attributor &A) override {
8248 
8249     const auto &MemBehaviorAA =
8250         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8251     if (MemBehaviorAA.isAssumedReadNone()) {
8252       if (MemBehaviorAA.isKnownReadNone())
8253         return indicateOptimisticFixpoint();
8254       assert(isAssumedReadNone() &&
8255              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8256       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8257       return ChangeStatus::UNCHANGED;
8258     }
8259 
8260     // The current assumed state used to determine a change.
8261     auto AssumedState = getAssumed();
8262     bool Changed = false;
8263 
8264     auto CheckRWInst = [&](Instruction &I) {
8265       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8266       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8267                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8268       removeAssumedBits(inverseLocation(MLK, false, false));
8269       // Stop once only the valid bit set in the *not assumed location*, thus
8270       // once we don't actually exclude any memory locations in the state.
8271       return getAssumedNotAccessedLocation() != VALID_STATE;
8272     };
8273 
8274     bool UsedAssumedInformation = false;
8275     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8276                                             UsedAssumedInformation))
8277       return indicatePessimisticFixpoint();
8278 
8279     Changed |= AssumedState != getAssumed();
8280     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8281   }
8282 
8283   /// See AbstractAttribute::trackStatistics()
8284   void trackStatistics() const override {
8285     if (isAssumedReadNone())
8286       STATS_DECLTRACK_FN_ATTR(readnone)
8287     else if (isAssumedArgMemOnly())
8288       STATS_DECLTRACK_FN_ATTR(argmemonly)
8289     else if (isAssumedInaccessibleMemOnly())
8290       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8291     else if (isAssumedInaccessibleOrArgMemOnly())
8292       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8293   }
8294 };
8295 
8296 /// AAMemoryLocation attribute for call sites.
8297 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8298   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8299       : AAMemoryLocationImpl(IRP, A) {}
8300 
8301   /// See AbstractAttribute::initialize(...).
8302   void initialize(Attributor &A) override {
8303     AAMemoryLocationImpl::initialize(A);
8304     Function *F = getAssociatedFunction();
8305     if (!F || F->isDeclaration())
8306       indicatePessimisticFixpoint();
8307   }
8308 
8309   /// See AbstractAttribute::updateImpl(...).
8310   ChangeStatus updateImpl(Attributor &A) override {
8311     // TODO: Once we have call site specific value information we can provide
8312     //       call site specific liveness liveness information and then it makes
8313     //       sense to specialize attributes for call sites arguments instead of
8314     //       redirecting requests to the callee argument.
8315     Function *F = getAssociatedFunction();
8316     const IRPosition &FnPos = IRPosition::function(*F);
8317     auto &FnAA =
8318         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8319     bool Changed = false;
8320     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8321                           AccessKind Kind, MemoryLocationsKind MLK) {
8322       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8323                                 getAccessKindFromInst(I));
8324       return true;
8325     };
8326     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8327       return indicatePessimisticFixpoint();
8328     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8329   }
8330 
8331   /// See AbstractAttribute::trackStatistics()
8332   void trackStatistics() const override {
8333     if (isAssumedReadNone())
8334       STATS_DECLTRACK_CS_ATTR(readnone)
8335   }
8336 };
8337 } // namespace
8338 
8339 /// ------------------ Value Constant Range Attribute -------------------------
8340 
8341 namespace {
8342 struct AAValueConstantRangeImpl : AAValueConstantRange {
8343   using StateType = IntegerRangeState;
8344   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8345       : AAValueConstantRange(IRP, A) {}
8346 
8347   /// See AbstractAttribute::initialize(..).
8348   void initialize(Attributor &A) override {
8349     if (A.hasSimplificationCallback(getIRPosition())) {
8350       indicatePessimisticFixpoint();
8351       return;
8352     }
8353 
8354     // Intersect a range given by SCEV.
8355     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8356 
8357     // Intersect a range given by LVI.
8358     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8359   }
8360 
8361   /// See AbstractAttribute::getAsStr().
8362   const std::string getAsStr() const override {
8363     std::string Str;
8364     llvm::raw_string_ostream OS(Str);
8365     OS << "range(" << getBitWidth() << ")<";
8366     getKnown().print(OS);
8367     OS << " / ";
8368     getAssumed().print(OS);
8369     OS << ">";
8370     return OS.str();
8371   }
8372 
8373   /// Helper function to get a SCEV expr for the associated value at program
8374   /// point \p I.
8375   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8376     if (!getAnchorScope())
8377       return nullptr;
8378 
8379     ScalarEvolution *SE =
8380         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8381             *getAnchorScope());
8382 
8383     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8384         *getAnchorScope());
8385 
8386     if (!SE || !LI)
8387       return nullptr;
8388 
8389     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8390     if (!I)
8391       return S;
8392 
8393     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8394   }
8395 
8396   /// Helper function to get a range from SCEV for the associated value at
8397   /// program point \p I.
8398   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8399                                          const Instruction *I = nullptr) const {
8400     if (!getAnchorScope())
8401       return getWorstState(getBitWidth());
8402 
8403     ScalarEvolution *SE =
8404         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8405             *getAnchorScope());
8406 
8407     const SCEV *S = getSCEV(A, I);
8408     if (!SE || !S)
8409       return getWorstState(getBitWidth());
8410 
8411     return SE->getUnsignedRange(S);
8412   }
8413 
8414   /// Helper function to get a range from LVI for the associated value at
8415   /// program point \p I.
8416   ConstantRange
8417   getConstantRangeFromLVI(Attributor &A,
8418                           const Instruction *CtxI = nullptr) const {
8419     if (!getAnchorScope())
8420       return getWorstState(getBitWidth());
8421 
8422     LazyValueInfo *LVI =
8423         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8424             *getAnchorScope());
8425 
8426     if (!LVI || !CtxI)
8427       return getWorstState(getBitWidth());
8428     return LVI->getConstantRange(&getAssociatedValue(),
8429                                  const_cast<Instruction *>(CtxI));
8430   }
8431 
8432   /// Return true if \p CtxI is valid for querying outside analyses.
8433   /// This basically makes sure we do not ask intra-procedural analysis
8434   /// about a context in the wrong function or a context that violates
8435   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8436   /// if the original context of this AA is OK or should be considered invalid.
8437   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8438                                                const Instruction *CtxI,
8439                                                bool AllowAACtxI) const {
8440     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8441       return false;
8442 
8443     // Our context might be in a different function, neither intra-procedural
8444     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8445     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8446       return false;
8447 
8448     // If the context is not dominated by the value there are paths to the
8449     // context that do not define the value. This cannot be handled by
8450     // LazyValueInfo so we need to bail.
8451     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8452       InformationCache &InfoCache = A.getInfoCache();
8453       const DominatorTree *DT =
8454           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8455               *I->getFunction());
8456       return DT && DT->dominates(I, CtxI);
8457     }
8458 
8459     return true;
8460   }
8461 
8462   /// See AAValueConstantRange::getKnownConstantRange(..).
8463   ConstantRange
8464   getKnownConstantRange(Attributor &A,
8465                         const Instruction *CtxI = nullptr) const override {
8466     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8467                                                  /* AllowAACtxI */ false))
8468       return getKnown();
8469 
8470     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8471     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8472     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8473   }
8474 
8475   /// See AAValueConstantRange::getAssumedConstantRange(..).
8476   ConstantRange
8477   getAssumedConstantRange(Attributor &A,
8478                           const Instruction *CtxI = nullptr) const override {
8479     // TODO: Make SCEV use Attributor assumption.
8480     //       We may be able to bound a variable range via assumptions in
8481     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8482     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8483     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8484                                                  /* AllowAACtxI */ false))
8485       return getAssumed();
8486 
8487     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8488     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8489     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8490   }
8491 
8492   /// Helper function to create MDNode for range metadata.
8493   static MDNode *
8494   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8495                             const ConstantRange &AssumedConstantRange) {
8496     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8497                                   Ty, AssumedConstantRange.getLower())),
8498                               ConstantAsMetadata::get(ConstantInt::get(
8499                                   Ty, AssumedConstantRange.getUpper()))};
8500     return MDNode::get(Ctx, LowAndHigh);
8501   }
8502 
8503   /// Return true if \p Assumed is included in \p KnownRanges.
8504   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8505 
8506     if (Assumed.isFullSet())
8507       return false;
8508 
8509     if (!KnownRanges)
8510       return true;
8511 
8512     // If multiple ranges are annotated in IR, we give up to annotate assumed
8513     // range for now.
8514 
8515     // TODO:  If there exists a known range which containts assumed range, we
8516     // can say assumed range is better.
8517     if (KnownRanges->getNumOperands() > 2)
8518       return false;
8519 
8520     ConstantInt *Lower =
8521         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8522     ConstantInt *Upper =
8523         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8524 
8525     ConstantRange Known(Lower->getValue(), Upper->getValue());
8526     return Known.contains(Assumed) && Known != Assumed;
8527   }
8528 
8529   /// Helper function to set range metadata.
8530   static bool
8531   setRangeMetadataIfisBetterRange(Instruction *I,
8532                                   const ConstantRange &AssumedConstantRange) {
8533     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8534     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8535       if (!AssumedConstantRange.isEmptySet()) {
8536         I->setMetadata(LLVMContext::MD_range,
8537                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8538                                                  AssumedConstantRange));
8539         return true;
8540       }
8541     }
8542     return false;
8543   }
8544 
8545   /// See AbstractAttribute::manifest()
8546   ChangeStatus manifest(Attributor &A) override {
8547     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8548     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8549     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8550 
8551     auto &V = getAssociatedValue();
8552     if (!AssumedConstantRange.isEmptySet() &&
8553         !AssumedConstantRange.isSingleElement()) {
8554       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8555         assert(I == getCtxI() && "Should not annotate an instruction which is "
8556                                  "not the context instruction");
8557         if (isa<CallInst>(I) || isa<LoadInst>(I))
8558           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8559             Changed = ChangeStatus::CHANGED;
8560       }
8561     }
8562 
8563     return Changed;
8564   }
8565 };
8566 
8567 struct AAValueConstantRangeArgument final
8568     : AAArgumentFromCallSiteArguments<
8569           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8570           true /* BridgeCallBaseContext */> {
8571   using Base = AAArgumentFromCallSiteArguments<
8572       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8573       true /* BridgeCallBaseContext */>;
8574   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8575       : Base(IRP, A) {}
8576 
8577   /// See AbstractAttribute::initialize(..).
8578   void initialize(Attributor &A) override {
8579     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8580       indicatePessimisticFixpoint();
8581     } else {
8582       Base::initialize(A);
8583     }
8584   }
8585 
8586   /// See AbstractAttribute::trackStatistics()
8587   void trackStatistics() const override {
8588     STATS_DECLTRACK_ARG_ATTR(value_range)
8589   }
8590 };
8591 
8592 struct AAValueConstantRangeReturned
8593     : AAReturnedFromReturnedValues<AAValueConstantRange,
8594                                    AAValueConstantRangeImpl,
8595                                    AAValueConstantRangeImpl::StateType,
8596                                    /* PropogateCallBaseContext */ true> {
8597   using Base =
8598       AAReturnedFromReturnedValues<AAValueConstantRange,
8599                                    AAValueConstantRangeImpl,
8600                                    AAValueConstantRangeImpl::StateType,
8601                                    /* PropogateCallBaseContext */ true>;
8602   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8603       : Base(IRP, A) {}
8604 
8605   /// See AbstractAttribute::initialize(...).
8606   void initialize(Attributor &A) override {}
8607 
8608   /// See AbstractAttribute::trackStatistics()
8609   void trackStatistics() const override {
8610     STATS_DECLTRACK_FNRET_ATTR(value_range)
8611   }
8612 };
8613 
8614 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8615   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8616       : AAValueConstantRangeImpl(IRP, A) {}
8617 
8618   /// See AbstractAttribute::initialize(...).
8619   void initialize(Attributor &A) override {
8620     AAValueConstantRangeImpl::initialize(A);
8621     if (isAtFixpoint())
8622       return;
8623 
8624     Value &V = getAssociatedValue();
8625 
8626     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8627       unionAssumed(ConstantRange(C->getValue()));
8628       indicateOptimisticFixpoint();
8629       return;
8630     }
8631 
8632     if (isa<UndefValue>(&V)) {
8633       // Collapse the undef state to 0.
8634       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8635       indicateOptimisticFixpoint();
8636       return;
8637     }
8638 
8639     if (isa<CallBase>(&V))
8640       return;
8641 
8642     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8643       return;
8644 
8645     // If it is a load instruction with range metadata, use it.
8646     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8647       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8648         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8649         return;
8650       }
8651 
8652     // We can work with PHI and select instruction as we traverse their operands
8653     // during update.
8654     if (isa<SelectInst>(V) || isa<PHINode>(V))
8655       return;
8656 
8657     // Otherwise we give up.
8658     indicatePessimisticFixpoint();
8659 
8660     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8661                       << getAssociatedValue() << "\n");
8662   }
8663 
8664   bool calculateBinaryOperator(
8665       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8666       const Instruction *CtxI,
8667       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8668     Value *LHS = BinOp->getOperand(0);
8669     Value *RHS = BinOp->getOperand(1);
8670 
8671     // Simplify the operands first.
8672     bool UsedAssumedInformation = false;
8673     const auto &SimplifiedLHS =
8674         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8675                                *this, UsedAssumedInformation);
8676     if (!SimplifiedLHS)
8677       return true;
8678     if (!SimplifiedLHS.value())
8679       return false;
8680     LHS = *SimplifiedLHS;
8681 
8682     const auto &SimplifiedRHS =
8683         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8684                                *this, UsedAssumedInformation);
8685     if (!SimplifiedRHS)
8686       return true;
8687     if (!SimplifiedRHS.value())
8688       return false;
8689     RHS = *SimplifiedRHS;
8690 
8691     // TODO: Allow non integers as well.
8692     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8693       return false;
8694 
8695     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8696         *this, IRPosition::value(*LHS, getCallBaseContext()),
8697         DepClassTy::REQUIRED);
8698     QuerriedAAs.push_back(&LHSAA);
8699     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8700 
8701     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8702         *this, IRPosition::value(*RHS, getCallBaseContext()),
8703         DepClassTy::REQUIRED);
8704     QuerriedAAs.push_back(&RHSAA);
8705     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8706 
8707     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8708 
8709     T.unionAssumed(AssumedRange);
8710 
8711     // TODO: Track a known state too.
8712 
8713     return T.isValidState();
8714   }
8715 
8716   bool calculateCastInst(
8717       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8718       const Instruction *CtxI,
8719       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8720     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8721     // TODO: Allow non integers as well.
8722     Value *OpV = CastI->getOperand(0);
8723 
8724     // Simplify the operand first.
8725     bool UsedAssumedInformation = false;
8726     const auto &SimplifiedOpV =
8727         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8728                                *this, UsedAssumedInformation);
8729     if (!SimplifiedOpV)
8730       return true;
8731     if (!SimplifiedOpV.value())
8732       return false;
8733     OpV = *SimplifiedOpV;
8734 
8735     if (!OpV->getType()->isIntegerTy())
8736       return false;
8737 
8738     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8739         *this, IRPosition::value(*OpV, getCallBaseContext()),
8740         DepClassTy::REQUIRED);
8741     QuerriedAAs.push_back(&OpAA);
8742     T.unionAssumed(
8743         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8744     return T.isValidState();
8745   }
8746 
8747   bool
8748   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8749                    const Instruction *CtxI,
8750                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8751     Value *LHS = CmpI->getOperand(0);
8752     Value *RHS = CmpI->getOperand(1);
8753 
8754     // Simplify the operands first.
8755     bool UsedAssumedInformation = false;
8756     const auto &SimplifiedLHS =
8757         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8758                                *this, UsedAssumedInformation);
8759     if (!SimplifiedLHS)
8760       return true;
8761     if (!SimplifiedLHS.value())
8762       return false;
8763     LHS = *SimplifiedLHS;
8764 
8765     const auto &SimplifiedRHS =
8766         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8767                                *this, UsedAssumedInformation);
8768     if (!SimplifiedRHS)
8769       return true;
8770     if (!SimplifiedRHS.value())
8771       return false;
8772     RHS = *SimplifiedRHS;
8773 
8774     // TODO: Allow non integers as well.
8775     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8776       return false;
8777 
8778     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8779         *this, IRPosition::value(*LHS, getCallBaseContext()),
8780         DepClassTy::REQUIRED);
8781     QuerriedAAs.push_back(&LHSAA);
8782     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8783         *this, IRPosition::value(*RHS, getCallBaseContext()),
8784         DepClassTy::REQUIRED);
8785     QuerriedAAs.push_back(&RHSAA);
8786     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8787     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8788 
8789     // If one of them is empty set, we can't decide.
8790     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8791       return true;
8792 
8793     bool MustTrue = false, MustFalse = false;
8794 
8795     auto AllowedRegion =
8796         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8797 
8798     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8799       MustFalse = true;
8800 
8801     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8802       MustTrue = true;
8803 
8804     assert((!MustTrue || !MustFalse) &&
8805            "Either MustTrue or MustFalse should be false!");
8806 
8807     if (MustTrue)
8808       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8809     else if (MustFalse)
8810       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8811     else
8812       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8813 
8814     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8815                       << " " << RHSAA << "\n");
8816 
8817     // TODO: Track a known state too.
8818     return T.isValidState();
8819   }
8820 
8821   /// See AbstractAttribute::updateImpl(...).
8822   ChangeStatus updateImpl(Attributor &A) override {
8823     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8824                             IntegerRangeState &T, bool Stripped) -> bool {
8825       Instruction *I = dyn_cast<Instruction>(&V);
8826       if (!I || isa<CallBase>(I)) {
8827 
8828         // Simplify the operand first.
8829         bool UsedAssumedInformation = false;
8830         const auto &SimplifiedOpV =
8831             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8832                                    *this, UsedAssumedInformation);
8833         if (!SimplifiedOpV)
8834           return true;
8835         if (!SimplifiedOpV.value())
8836           return false;
8837         Value *VPtr = *SimplifiedOpV;
8838 
8839         // If the value is not instruction, we query AA to Attributor.
8840         const auto &AA = A.getAAFor<AAValueConstantRange>(
8841             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8842             DepClassTy::REQUIRED);
8843 
8844         // Clamp operator is not used to utilize a program point CtxI.
8845         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8846 
8847         return T.isValidState();
8848       }
8849 
8850       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8851       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8852         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8853           return false;
8854       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8855         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8856           return false;
8857       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8858         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8859           return false;
8860       } else {
8861         // Give up with other instructions.
8862         // TODO: Add other instructions
8863 
8864         T.indicatePessimisticFixpoint();
8865         return false;
8866       }
8867 
8868       // Catch circular reasoning in a pessimistic way for now.
8869       // TODO: Check how the range evolves and if we stripped anything, see also
8870       //       AADereferenceable or AAAlign for similar situations.
8871       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8872         if (QueriedAA != this)
8873           continue;
8874         // If we are in a stady state we do not need to worry.
8875         if (T.getAssumed() == getState().getAssumed())
8876           continue;
8877         T.indicatePessimisticFixpoint();
8878       }
8879 
8880       return T.isValidState();
8881     };
8882 
8883     IntegerRangeState T(getBitWidth());
8884 
8885     bool UsedAssumedInformation = false;
8886     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8887                                                   VisitValueCB, getCtxI(),
8888                                                   UsedAssumedInformation,
8889                                                   /* UseValueSimplify */ false))
8890       return indicatePessimisticFixpoint();
8891 
8892     // Ensure that long def-use chains can't cause circular reasoning either by
8893     // introducing a cutoff below.
8894     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8895       return ChangeStatus::UNCHANGED;
8896     if (++NumChanges > MaxNumChanges) {
8897       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8898                         << " but only " << MaxNumChanges
8899                         << " are allowed to avoid cyclic reasoning.");
8900       return indicatePessimisticFixpoint();
8901     }
8902     return ChangeStatus::CHANGED;
8903   }
8904 
8905   /// See AbstractAttribute::trackStatistics()
8906   void trackStatistics() const override {
8907     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8908   }
8909 
8910   /// Tracker to bail after too many widening steps of the constant range.
8911   int NumChanges = 0;
8912 
8913   /// Upper bound for the number of allowed changes (=widening steps) for the
8914   /// constant range before we give up.
8915   static constexpr int MaxNumChanges = 5;
8916 };
8917 
8918 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8919   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8920       : AAValueConstantRangeImpl(IRP, A) {}
8921 
8922   /// See AbstractAttribute::initialize(...).
8923   ChangeStatus updateImpl(Attributor &A) override {
8924     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8925                      "not be called");
8926   }
8927 
8928   /// See AbstractAttribute::trackStatistics()
8929   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8930 };
8931 
8932 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8933   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8934       : AAValueConstantRangeFunction(IRP, A) {}
8935 
8936   /// See AbstractAttribute::trackStatistics()
8937   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8938 };
8939 
8940 struct AAValueConstantRangeCallSiteReturned
8941     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8942                                      AAValueConstantRangeImpl,
8943                                      AAValueConstantRangeImpl::StateType,
8944                                      /* IntroduceCallBaseContext */ true> {
8945   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8946       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8947                                        AAValueConstantRangeImpl,
8948                                        AAValueConstantRangeImpl::StateType,
8949                                        /* IntroduceCallBaseContext */ true>(IRP,
8950                                                                             A) {
8951   }
8952 
8953   /// See AbstractAttribute::initialize(...).
8954   void initialize(Attributor &A) override {
8955     // If it is a load instruction with range metadata, use the metadata.
8956     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8957       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8958         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8959 
8960     AAValueConstantRangeImpl::initialize(A);
8961   }
8962 
8963   /// See AbstractAttribute::trackStatistics()
8964   void trackStatistics() const override {
8965     STATS_DECLTRACK_CSRET_ATTR(value_range)
8966   }
8967 };
8968 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8969   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8970       : AAValueConstantRangeFloating(IRP, A) {}
8971 
8972   /// See AbstractAttribute::manifest()
8973   ChangeStatus manifest(Attributor &A) override {
8974     return ChangeStatus::UNCHANGED;
8975   }
8976 
8977   /// See AbstractAttribute::trackStatistics()
8978   void trackStatistics() const override {
8979     STATS_DECLTRACK_CSARG_ATTR(value_range)
8980   }
8981 };
8982 } // namespace
8983 
8984 /// ------------------ Potential Values Attribute -------------------------
8985 
8986 namespace {
8987 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8988   using StateType = PotentialConstantIntValuesState;
8989 
8990   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8991       : AAPotentialConstantValues(IRP, A) {}
8992 
8993   /// See AbstractAttribute::initialize(..).
8994   void initialize(Attributor &A) override {
8995     if (A.hasSimplificationCallback(getIRPosition()))
8996       indicatePessimisticFixpoint();
8997     else
8998       AAPotentialConstantValues::initialize(A);
8999   }
9000 
9001   /// See AbstractAttribute::getAsStr().
9002   const std::string getAsStr() const override {
9003     std::string Str;
9004     llvm::raw_string_ostream OS(Str);
9005     OS << getState();
9006     return OS.str();
9007   }
9008 
9009   /// See AbstractAttribute::updateImpl(...).
9010   ChangeStatus updateImpl(Attributor &A) override {
9011     return indicatePessimisticFixpoint();
9012   }
9013 };
9014 
9015 struct AAPotentialConstantValuesArgument final
9016     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9017                                       AAPotentialConstantValuesImpl,
9018                                       PotentialConstantIntValuesState> {
9019   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
9020                                                AAPotentialConstantValuesImpl,
9021                                                PotentialConstantIntValuesState>;
9022   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
9023       : Base(IRP, A) {}
9024 
9025   /// See AbstractAttribute::initialize(..).
9026   void initialize(Attributor &A) override {
9027     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
9028       indicatePessimisticFixpoint();
9029     } else {
9030       Base::initialize(A);
9031     }
9032   }
9033 
9034   /// See AbstractAttribute::trackStatistics()
9035   void trackStatistics() const override {
9036     STATS_DECLTRACK_ARG_ATTR(potential_values)
9037   }
9038 };
9039 
9040 struct AAPotentialConstantValuesReturned
9041     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
9042                                    AAPotentialConstantValuesImpl> {
9043   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
9044                                             AAPotentialConstantValuesImpl>;
9045   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
9046       : Base(IRP, A) {}
9047 
9048   /// See AbstractAttribute::trackStatistics()
9049   void trackStatistics() const override {
9050     STATS_DECLTRACK_FNRET_ATTR(potential_values)
9051   }
9052 };
9053 
9054 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
9055   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
9056       : AAPotentialConstantValuesImpl(IRP, A) {}
9057 
9058   /// See AbstractAttribute::initialize(..).
9059   void initialize(Attributor &A) override {
9060     AAPotentialConstantValuesImpl::initialize(A);
9061     if (isAtFixpoint())
9062       return;
9063 
9064     Value &V = getAssociatedValue();
9065 
9066     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9067       unionAssumed(C->getValue());
9068       indicateOptimisticFixpoint();
9069       return;
9070     }
9071 
9072     if (isa<UndefValue>(&V)) {
9073       unionAssumedWithUndef();
9074       indicateOptimisticFixpoint();
9075       return;
9076     }
9077 
9078     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9079       return;
9080 
9081     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9082       return;
9083 
9084     indicatePessimisticFixpoint();
9085 
9086     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9087                       << getAssociatedValue() << "\n");
9088   }
9089 
9090   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9091                                 const APInt &RHS) {
9092     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9093   }
9094 
9095   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9096                                  uint32_t ResultBitWidth) {
9097     Instruction::CastOps CastOp = CI->getOpcode();
9098     switch (CastOp) {
9099     default:
9100       llvm_unreachable("unsupported or not integer cast");
9101     case Instruction::Trunc:
9102       return Src.trunc(ResultBitWidth);
9103     case Instruction::SExt:
9104       return Src.sext(ResultBitWidth);
9105     case Instruction::ZExt:
9106       return Src.zext(ResultBitWidth);
9107     case Instruction::BitCast:
9108       return Src;
9109     }
9110   }
9111 
9112   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9113                                        const APInt &LHS, const APInt &RHS,
9114                                        bool &SkipOperation, bool &Unsupported) {
9115     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9116     // Unsupported is set to true when the binary operator is not supported.
9117     // SkipOperation is set to true when UB occur with the given operand pair
9118     // (LHS, RHS).
9119     // TODO: we should look at nsw and nuw keywords to handle operations
9120     //       that create poison or undef value.
9121     switch (BinOpcode) {
9122     default:
9123       Unsupported = true;
9124       return LHS;
9125     case Instruction::Add:
9126       return LHS + RHS;
9127     case Instruction::Sub:
9128       return LHS - RHS;
9129     case Instruction::Mul:
9130       return LHS * RHS;
9131     case Instruction::UDiv:
9132       if (RHS.isZero()) {
9133         SkipOperation = true;
9134         return LHS;
9135       }
9136       return LHS.udiv(RHS);
9137     case Instruction::SDiv:
9138       if (RHS.isZero()) {
9139         SkipOperation = true;
9140         return LHS;
9141       }
9142       return LHS.sdiv(RHS);
9143     case Instruction::URem:
9144       if (RHS.isZero()) {
9145         SkipOperation = true;
9146         return LHS;
9147       }
9148       return LHS.urem(RHS);
9149     case Instruction::SRem:
9150       if (RHS.isZero()) {
9151         SkipOperation = true;
9152         return LHS;
9153       }
9154       return LHS.srem(RHS);
9155     case Instruction::Shl:
9156       return LHS.shl(RHS);
9157     case Instruction::LShr:
9158       return LHS.lshr(RHS);
9159     case Instruction::AShr:
9160       return LHS.ashr(RHS);
9161     case Instruction::And:
9162       return LHS & RHS;
9163     case Instruction::Or:
9164       return LHS | RHS;
9165     case Instruction::Xor:
9166       return LHS ^ RHS;
9167     }
9168   }
9169 
9170   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9171                                            const APInt &LHS, const APInt &RHS) {
9172     bool SkipOperation = false;
9173     bool Unsupported = false;
9174     APInt Result =
9175         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9176     if (Unsupported)
9177       return false;
9178     // If SkipOperation is true, we can ignore this operand pair (L, R).
9179     if (!SkipOperation)
9180       unionAssumed(Result);
9181     return isValidState();
9182   }
9183 
9184   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9185     auto AssumedBefore = getAssumed();
9186     Value *LHS = ICI->getOperand(0);
9187     Value *RHS = ICI->getOperand(1);
9188 
9189     // Simplify the operands first.
9190     bool UsedAssumedInformation = false;
9191     const auto &SimplifiedLHS =
9192         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9193                                *this, UsedAssumedInformation);
9194     if (!SimplifiedLHS)
9195       return ChangeStatus::UNCHANGED;
9196     if (!SimplifiedLHS.value())
9197       return indicatePessimisticFixpoint();
9198     LHS = *SimplifiedLHS;
9199 
9200     const auto &SimplifiedRHS =
9201         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9202                                *this, UsedAssumedInformation);
9203     if (!SimplifiedRHS)
9204       return ChangeStatus::UNCHANGED;
9205     if (!SimplifiedRHS.value())
9206       return indicatePessimisticFixpoint();
9207     RHS = *SimplifiedRHS;
9208 
9209     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9210       return indicatePessimisticFixpoint();
9211 
9212     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9213         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9214     if (!LHSAA.isValidState())
9215       return indicatePessimisticFixpoint();
9216 
9217     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9218         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9219     if (!RHSAA.isValidState())
9220       return indicatePessimisticFixpoint();
9221 
9222     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9223     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9224 
9225     // TODO: make use of undef flag to limit potential values aggressively.
9226     bool MaybeTrue = false, MaybeFalse = false;
9227     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9228     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9229       // The result of any comparison between undefs can be soundly replaced
9230       // with undef.
9231       unionAssumedWithUndef();
9232     } else if (LHSAA.undefIsContained()) {
9233       for (const APInt &R : RHSAAPVS) {
9234         bool CmpResult = calculateICmpInst(ICI, Zero, R);
9235         MaybeTrue |= CmpResult;
9236         MaybeFalse |= !CmpResult;
9237         if (MaybeTrue & MaybeFalse)
9238           return indicatePessimisticFixpoint();
9239       }
9240     } else if (RHSAA.undefIsContained()) {
9241       for (const APInt &L : LHSAAPVS) {
9242         bool CmpResult = calculateICmpInst(ICI, L, Zero);
9243         MaybeTrue |= CmpResult;
9244         MaybeFalse |= !CmpResult;
9245         if (MaybeTrue & MaybeFalse)
9246           return indicatePessimisticFixpoint();
9247       }
9248     } else {
9249       for (const APInt &L : LHSAAPVS) {
9250         for (const APInt &R : RHSAAPVS) {
9251           bool CmpResult = calculateICmpInst(ICI, L, R);
9252           MaybeTrue |= CmpResult;
9253           MaybeFalse |= !CmpResult;
9254           if (MaybeTrue & MaybeFalse)
9255             return indicatePessimisticFixpoint();
9256         }
9257       }
9258     }
9259     if (MaybeTrue)
9260       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9261     if (MaybeFalse)
9262       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9263     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9264                                          : ChangeStatus::CHANGED;
9265   }
9266 
9267   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9268     auto AssumedBefore = getAssumed();
9269     Value *LHS = SI->getTrueValue();
9270     Value *RHS = SI->getFalseValue();
9271 
9272     // Simplify the operands first.
9273     bool UsedAssumedInformation = false;
9274     const auto &SimplifiedLHS =
9275         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9276                                *this, UsedAssumedInformation);
9277     if (!SimplifiedLHS)
9278       return ChangeStatus::UNCHANGED;
9279     if (!SimplifiedLHS.value())
9280       return indicatePessimisticFixpoint();
9281     LHS = *SimplifiedLHS;
9282 
9283     const auto &SimplifiedRHS =
9284         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9285                                *this, UsedAssumedInformation);
9286     if (!SimplifiedRHS)
9287       return ChangeStatus::UNCHANGED;
9288     if (!SimplifiedRHS.value())
9289       return indicatePessimisticFixpoint();
9290     RHS = *SimplifiedRHS;
9291 
9292     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9293       return indicatePessimisticFixpoint();
9294 
9295     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9296                                                   UsedAssumedInformation);
9297 
9298     // Check if we only need one operand.
9299     bool OnlyLeft = false, OnlyRight = false;
9300     if (C && *C && (*C)->isOneValue())
9301       OnlyLeft = true;
9302     else if (C && *C && (*C)->isZeroValue())
9303       OnlyRight = true;
9304 
9305     const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
9306     if (!OnlyRight) {
9307       LHSAA = &A.getAAFor<AAPotentialConstantValues>(
9308           *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9309       if (!LHSAA->isValidState())
9310         return indicatePessimisticFixpoint();
9311     }
9312     if (!OnlyLeft) {
9313       RHSAA = &A.getAAFor<AAPotentialConstantValues>(
9314           *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9315       if (!RHSAA->isValidState())
9316         return indicatePessimisticFixpoint();
9317     }
9318 
9319     if (!LHSAA || !RHSAA) {
9320       // select (true/false), lhs, rhs
9321       auto *OpAA = LHSAA ? LHSAA : RHSAA;
9322 
9323       if (OpAA->undefIsContained())
9324         unionAssumedWithUndef();
9325       else
9326         unionAssumed(*OpAA);
9327 
9328     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9329       // select i1 *, undef , undef => undef
9330       unionAssumedWithUndef();
9331     } else {
9332       unionAssumed(*LHSAA);
9333       unionAssumed(*RHSAA);
9334     }
9335     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9336                                          : ChangeStatus::CHANGED;
9337   }
9338 
9339   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9340     auto AssumedBefore = getAssumed();
9341     if (!CI->isIntegerCast())
9342       return indicatePessimisticFixpoint();
9343     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9344     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9345     Value *Src = CI->getOperand(0);
9346 
9347     // Simplify the operand first.
9348     bool UsedAssumedInformation = false;
9349     const auto &SimplifiedSrc =
9350         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9351                                *this, UsedAssumedInformation);
9352     if (!SimplifiedSrc)
9353       return ChangeStatus::UNCHANGED;
9354     if (!SimplifiedSrc.value())
9355       return indicatePessimisticFixpoint();
9356     Src = *SimplifiedSrc;
9357 
9358     auto &SrcAA = A.getAAFor<AAPotentialConstantValues>(
9359         *this, IRPosition::value(*Src), DepClassTy::REQUIRED);
9360     if (!SrcAA.isValidState())
9361       return indicatePessimisticFixpoint();
9362     const SetTy &SrcAAPVS = SrcAA.getAssumedSet();
9363     if (SrcAA.undefIsContained())
9364       unionAssumedWithUndef();
9365     else {
9366       for (const APInt &S : SrcAAPVS) {
9367         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9368         unionAssumed(T);
9369       }
9370     }
9371     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9372                                          : ChangeStatus::CHANGED;
9373   }
9374 
9375   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9376     auto AssumedBefore = getAssumed();
9377     Value *LHS = BinOp->getOperand(0);
9378     Value *RHS = BinOp->getOperand(1);
9379 
9380     // Simplify the operands first.
9381     bool UsedAssumedInformation = false;
9382     const auto &SimplifiedLHS =
9383         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9384                                *this, UsedAssumedInformation);
9385     if (!SimplifiedLHS)
9386       return ChangeStatus::UNCHANGED;
9387     if (!SimplifiedLHS.value())
9388       return indicatePessimisticFixpoint();
9389     LHS = *SimplifiedLHS;
9390 
9391     const auto &SimplifiedRHS =
9392         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9393                                *this, UsedAssumedInformation);
9394     if (!SimplifiedRHS)
9395       return ChangeStatus::UNCHANGED;
9396     if (!SimplifiedRHS.value())
9397       return indicatePessimisticFixpoint();
9398     RHS = *SimplifiedRHS;
9399 
9400     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9401       return indicatePessimisticFixpoint();
9402 
9403     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9404         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9405     if (!LHSAA.isValidState())
9406       return indicatePessimisticFixpoint();
9407 
9408     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9409         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9410     if (!RHSAA.isValidState())
9411       return indicatePessimisticFixpoint();
9412 
9413     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9414     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9415     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9416 
9417     // TODO: make use of undef flag to limit potential values aggressively.
9418     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9419       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9420         return indicatePessimisticFixpoint();
9421     } else if (LHSAA.undefIsContained()) {
9422       for (const APInt &R : RHSAAPVS) {
9423         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9424           return indicatePessimisticFixpoint();
9425       }
9426     } else if (RHSAA.undefIsContained()) {
9427       for (const APInt &L : LHSAAPVS) {
9428         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9429           return indicatePessimisticFixpoint();
9430       }
9431     } else {
9432       for (const APInt &L : LHSAAPVS) {
9433         for (const APInt &R : RHSAAPVS) {
9434           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9435             return indicatePessimisticFixpoint();
9436         }
9437       }
9438     }
9439     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9440                                          : ChangeStatus::CHANGED;
9441   }
9442 
9443   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9444     auto AssumedBefore = getAssumed();
9445     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9446       Value *IncomingValue = PHI->getIncomingValue(u);
9447 
9448       // Simplify the operand first.
9449       bool UsedAssumedInformation = false;
9450       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9451           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9452           UsedAssumedInformation);
9453       if (!SimplifiedIncomingValue)
9454         continue;
9455       if (!SimplifiedIncomingValue.value())
9456         return indicatePessimisticFixpoint();
9457       IncomingValue = *SimplifiedIncomingValue;
9458 
9459       auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9460           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9461       if (!PotentialValuesAA.isValidState())
9462         return indicatePessimisticFixpoint();
9463       if (PotentialValuesAA.undefIsContained())
9464         unionAssumedWithUndef();
9465       else
9466         unionAssumed(PotentialValuesAA.getAssumed());
9467     }
9468     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9469                                          : ChangeStatus::CHANGED;
9470   }
9471 
9472   /// See AbstractAttribute::updateImpl(...).
9473   ChangeStatus updateImpl(Attributor &A) override {
9474     Value &V = getAssociatedValue();
9475     Instruction *I = dyn_cast<Instruction>(&V);
9476 
9477     if (auto *ICI = dyn_cast<ICmpInst>(I))
9478       return updateWithICmpInst(A, ICI);
9479 
9480     if (auto *SI = dyn_cast<SelectInst>(I))
9481       return updateWithSelectInst(A, SI);
9482 
9483     if (auto *CI = dyn_cast<CastInst>(I))
9484       return updateWithCastInst(A, CI);
9485 
9486     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9487       return updateWithBinaryOperator(A, BinOp);
9488 
9489     if (auto *PHI = dyn_cast<PHINode>(I))
9490       return updateWithPHINode(A, PHI);
9491 
9492     return indicatePessimisticFixpoint();
9493   }
9494 
9495   /// See AbstractAttribute::trackStatistics()
9496   void trackStatistics() const override {
9497     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9498   }
9499 };
9500 
9501 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9502   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9503       : AAPotentialConstantValuesImpl(IRP, A) {}
9504 
9505   /// See AbstractAttribute::initialize(...).
9506   ChangeStatus updateImpl(Attributor &A) override {
9507     llvm_unreachable(
9508         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9509         "not be called");
9510   }
9511 
9512   /// See AbstractAttribute::trackStatistics()
9513   void trackStatistics() const override {
9514     STATS_DECLTRACK_FN_ATTR(potential_values)
9515   }
9516 };
9517 
9518 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9519   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9520       : AAPotentialConstantValuesFunction(IRP, A) {}
9521 
9522   /// See AbstractAttribute::trackStatistics()
9523   void trackStatistics() const override {
9524     STATS_DECLTRACK_CS_ATTR(potential_values)
9525   }
9526 };
9527 
9528 struct AAPotentialConstantValuesCallSiteReturned
9529     : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9530                                      AAPotentialConstantValuesImpl> {
9531   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9532                                             Attributor &A)
9533       : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9534                                        AAPotentialConstantValuesImpl>(IRP, A) {}
9535 
9536   /// See AbstractAttribute::trackStatistics()
9537   void trackStatistics() const override {
9538     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9539   }
9540 };
9541 
9542 struct AAPotentialConstantValuesCallSiteArgument
9543     : AAPotentialConstantValuesFloating {
9544   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9545                                             Attributor &A)
9546       : AAPotentialConstantValuesFloating(IRP, A) {}
9547 
9548   /// See AbstractAttribute::initialize(..).
9549   void initialize(Attributor &A) override {
9550     AAPotentialConstantValuesImpl::initialize(A);
9551     if (isAtFixpoint())
9552       return;
9553 
9554     Value &V = getAssociatedValue();
9555 
9556     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9557       unionAssumed(C->getValue());
9558       indicateOptimisticFixpoint();
9559       return;
9560     }
9561 
9562     if (isa<UndefValue>(&V)) {
9563       unionAssumedWithUndef();
9564       indicateOptimisticFixpoint();
9565       return;
9566     }
9567   }
9568 
9569   /// See AbstractAttribute::updateImpl(...).
9570   ChangeStatus updateImpl(Attributor &A) override {
9571     Value &V = getAssociatedValue();
9572     auto AssumedBefore = getAssumed();
9573     auto &AA = A.getAAFor<AAPotentialConstantValues>(
9574         *this, IRPosition::value(V), DepClassTy::REQUIRED);
9575     const auto &S = AA.getAssumed();
9576     unionAssumed(S);
9577     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9578                                          : ChangeStatus::CHANGED;
9579   }
9580 
9581   /// See AbstractAttribute::trackStatistics()
9582   void trackStatistics() const override {
9583     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9584   }
9585 };
9586 
9587 /// ------------------------ NoUndef Attribute ---------------------------------
9588 struct AANoUndefImpl : AANoUndef {
9589   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9590 
9591   /// See AbstractAttribute::initialize(...).
9592   void initialize(Attributor &A) override {
9593     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9594       indicateOptimisticFixpoint();
9595       return;
9596     }
9597     Value &V = getAssociatedValue();
9598     if (isa<UndefValue>(V))
9599       indicatePessimisticFixpoint();
9600     else if (isa<FreezeInst>(V))
9601       indicateOptimisticFixpoint();
9602     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9603              isGuaranteedNotToBeUndefOrPoison(&V))
9604       indicateOptimisticFixpoint();
9605     else
9606       AANoUndef::initialize(A);
9607   }
9608 
9609   /// See followUsesInMBEC
9610   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9611                        AANoUndef::StateType &State) {
9612     const Value *UseV = U->get();
9613     const DominatorTree *DT = nullptr;
9614     AssumptionCache *AC = nullptr;
9615     InformationCache &InfoCache = A.getInfoCache();
9616     if (Function *F = getAnchorScope()) {
9617       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9618       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9619     }
9620     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9621     bool TrackUse = false;
9622     // Track use for instructions which must produce undef or poison bits when
9623     // at least one operand contains such bits.
9624     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9625       TrackUse = true;
9626     return TrackUse;
9627   }
9628 
9629   /// See AbstractAttribute::getAsStr().
9630   const std::string getAsStr() const override {
9631     return getAssumed() ? "noundef" : "may-undef-or-poison";
9632   }
9633 
9634   ChangeStatus manifest(Attributor &A) override {
9635     // We don't manifest noundef attribute for dead positions because the
9636     // associated values with dead positions would be replaced with undef
9637     // values.
9638     bool UsedAssumedInformation = false;
9639     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9640                         UsedAssumedInformation))
9641       return ChangeStatus::UNCHANGED;
9642     // A position whose simplified value does not have any value is
9643     // considered to be dead. We don't manifest noundef in such positions for
9644     // the same reason above.
9645     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9646              .has_value())
9647       return ChangeStatus::UNCHANGED;
9648     return AANoUndef::manifest(A);
9649   }
9650 };
9651 
9652 struct AANoUndefFloating : public AANoUndefImpl {
9653   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9654       : AANoUndefImpl(IRP, A) {}
9655 
9656   /// See AbstractAttribute::initialize(...).
9657   void initialize(Attributor &A) override {
9658     AANoUndefImpl::initialize(A);
9659     if (!getState().isAtFixpoint())
9660       if (Instruction *CtxI = getCtxI())
9661         followUsesInMBEC(*this, A, getState(), *CtxI);
9662   }
9663 
9664   /// See AbstractAttribute::updateImpl(...).
9665   ChangeStatus updateImpl(Attributor &A) override {
9666     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9667                             AANoUndef::StateType &T, bool Stripped) -> bool {
9668       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9669                                              DepClassTy::REQUIRED);
9670       if (!Stripped && this == &AA) {
9671         T.indicatePessimisticFixpoint();
9672       } else {
9673         const AANoUndef::StateType &S =
9674             static_cast<const AANoUndef::StateType &>(AA.getState());
9675         T ^= S;
9676       }
9677       return T.isValidState();
9678     };
9679 
9680     StateType T;
9681     bool UsedAssumedInformation = false;
9682     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9683                                           VisitValueCB, getCtxI(),
9684                                           UsedAssumedInformation))
9685       return indicatePessimisticFixpoint();
9686 
9687     return clampStateAndIndicateChange(getState(), T);
9688   }
9689 
9690   /// See AbstractAttribute::trackStatistics()
9691   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9692 };
9693 
9694 struct AANoUndefReturned final
9695     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9696   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9697       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9698 
9699   /// See AbstractAttribute::trackStatistics()
9700   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9701 };
9702 
9703 struct AANoUndefArgument final
9704     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9705   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9706       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9707 
9708   /// See AbstractAttribute::trackStatistics()
9709   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9710 };
9711 
9712 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9713   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9714       : AANoUndefFloating(IRP, A) {}
9715 
9716   /// See AbstractAttribute::trackStatistics()
9717   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9718 };
9719 
9720 struct AANoUndefCallSiteReturned final
9721     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9722   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9723       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9724 
9725   /// See AbstractAttribute::trackStatistics()
9726   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9727 };
9728 
9729 struct AACallEdgesImpl : public AACallEdges {
9730   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9731 
9732   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9733     return CalledFunctions;
9734   }
9735 
9736   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9737 
9738   virtual bool hasNonAsmUnknownCallee() const override {
9739     return HasUnknownCalleeNonAsm;
9740   }
9741 
9742   const std::string getAsStr() const override {
9743     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9744            std::to_string(CalledFunctions.size()) + "]";
9745   }
9746 
9747   void trackStatistics() const override {}
9748 
9749 protected:
9750   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9751     if (CalledFunctions.insert(Fn)) {
9752       Change = ChangeStatus::CHANGED;
9753       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9754                         << "\n");
9755     }
9756   }
9757 
9758   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9759     if (!HasUnknownCallee)
9760       Change = ChangeStatus::CHANGED;
9761     if (NonAsm && !HasUnknownCalleeNonAsm)
9762       Change = ChangeStatus::CHANGED;
9763     HasUnknownCalleeNonAsm |= NonAsm;
9764     HasUnknownCallee = true;
9765   }
9766 
9767 private:
9768   /// Optimistic set of functions that might be called by this position.
9769   SetVector<Function *> CalledFunctions;
9770 
9771   /// Is there any call with a unknown callee.
9772   bool HasUnknownCallee = false;
9773 
9774   /// Is there any call with a unknown callee, excluding any inline asm.
9775   bool HasUnknownCalleeNonAsm = false;
9776 };
9777 
9778 struct AACallEdgesCallSite : public AACallEdgesImpl {
9779   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9780       : AACallEdgesImpl(IRP, A) {}
9781   /// See AbstractAttribute::updateImpl(...).
9782   ChangeStatus updateImpl(Attributor &A) override {
9783     ChangeStatus Change = ChangeStatus::UNCHANGED;
9784 
9785     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9786                           bool Stripped) -> bool {
9787       if (Function *Fn = dyn_cast<Function>(&V)) {
9788         addCalledFunction(Fn, Change);
9789       } else {
9790         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9791         setHasUnknownCallee(true, Change);
9792       }
9793 
9794       // Explore all values.
9795       return true;
9796     };
9797 
9798     // Process any value that we might call.
9799     auto ProcessCalledOperand = [&](Value *V) {
9800       bool DummyValue = false;
9801       bool UsedAssumedInformation = false;
9802       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9803                                        DummyValue, VisitValue, nullptr,
9804                                        UsedAssumedInformation, false)) {
9805         // If we haven't gone through all values, assume that there are unknown
9806         // callees.
9807         setHasUnknownCallee(true, Change);
9808       }
9809     };
9810 
9811     CallBase *CB = cast<CallBase>(getCtxI());
9812 
9813     if (CB->isInlineAsm()) {
9814       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9815           !hasAssumption(*CB, "ompx_no_call_asm"))
9816         setHasUnknownCallee(false, Change);
9817       return Change;
9818     }
9819 
9820     // Process callee metadata if available.
9821     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9822       for (auto &Op : MD->operands()) {
9823         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9824         if (Callee)
9825           addCalledFunction(Callee, Change);
9826       }
9827       return Change;
9828     }
9829 
9830     // The most simple case.
9831     ProcessCalledOperand(CB->getCalledOperand());
9832 
9833     // Process callback functions.
9834     SmallVector<const Use *, 4u> CallbackUses;
9835     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9836     for (const Use *U : CallbackUses)
9837       ProcessCalledOperand(U->get());
9838 
9839     return Change;
9840   }
9841 };
9842 
9843 struct AACallEdgesFunction : public AACallEdgesImpl {
9844   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9845       : AACallEdgesImpl(IRP, A) {}
9846 
9847   /// See AbstractAttribute::updateImpl(...).
9848   ChangeStatus updateImpl(Attributor &A) override {
9849     ChangeStatus Change = ChangeStatus::UNCHANGED;
9850 
9851     auto ProcessCallInst = [&](Instruction &Inst) {
9852       CallBase &CB = cast<CallBase>(Inst);
9853 
9854       auto &CBEdges = A.getAAFor<AACallEdges>(
9855           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9856       if (CBEdges.hasNonAsmUnknownCallee())
9857         setHasUnknownCallee(true, Change);
9858       if (CBEdges.hasUnknownCallee())
9859         setHasUnknownCallee(false, Change);
9860 
9861       for (Function *F : CBEdges.getOptimisticEdges())
9862         addCalledFunction(F, Change);
9863 
9864       return true;
9865     };
9866 
9867     // Visit all callable instructions.
9868     bool UsedAssumedInformation = false;
9869     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9870                                            UsedAssumedInformation,
9871                                            /* CheckBBLivenessOnly */ true)) {
9872       // If we haven't looked at all call like instructions, assume that there
9873       // are unknown callees.
9874       setHasUnknownCallee(true, Change);
9875     }
9876 
9877     return Change;
9878   }
9879 };
9880 
9881 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9882 private:
9883   struct QuerySet {
9884     void markReachable(const Function &Fn) {
9885       Reachable.insert(&Fn);
9886       Unreachable.erase(&Fn);
9887     }
9888 
9889     /// If there is no information about the function None is returned.
9890     Optional<bool> isCachedReachable(const Function &Fn) {
9891       // Assume that we can reach the function.
9892       // TODO: Be more specific with the unknown callee.
9893       if (CanReachUnknownCallee)
9894         return true;
9895 
9896       if (Reachable.count(&Fn))
9897         return true;
9898 
9899       if (Unreachable.count(&Fn))
9900         return false;
9901 
9902       return llvm::None;
9903     }
9904 
9905     /// Set of functions that we know for sure is reachable.
9906     DenseSet<const Function *> Reachable;
9907 
9908     /// Set of functions that are unreachable, but might become reachable.
9909     DenseSet<const Function *> Unreachable;
9910 
9911     /// If we can reach a function with a call to a unknown function we assume
9912     /// that we can reach any function.
9913     bool CanReachUnknownCallee = false;
9914   };
9915 
9916   struct QueryResolver : public QuerySet {
9917     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9918                         ArrayRef<const AACallEdges *> AAEdgesList) {
9919       ChangeStatus Change = ChangeStatus::UNCHANGED;
9920 
9921       for (auto *AAEdges : AAEdgesList) {
9922         if (AAEdges->hasUnknownCallee()) {
9923           if (!CanReachUnknownCallee)
9924             Change = ChangeStatus::CHANGED;
9925           CanReachUnknownCallee = true;
9926           return Change;
9927         }
9928       }
9929 
9930       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9931         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9932           Change = ChangeStatus::CHANGED;
9933           markReachable(*Fn);
9934         }
9935       }
9936       return Change;
9937     }
9938 
9939     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9940                      ArrayRef<const AACallEdges *> AAEdgesList,
9941                      const Function &Fn) {
9942       Optional<bool> Cached = isCachedReachable(Fn);
9943       if (Cached)
9944         return Cached.value();
9945 
9946       // The query was not cached, thus it is new. We need to request an update
9947       // explicitly to make sure this the information is properly run to a
9948       // fixpoint.
9949       A.registerForUpdate(AA);
9950 
9951       // We need to assume that this function can't reach Fn to prevent
9952       // an infinite loop if this function is recursive.
9953       Unreachable.insert(&Fn);
9954 
9955       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9956       if (Result)
9957         markReachable(Fn);
9958       return Result;
9959     }
9960 
9961     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9962                           ArrayRef<const AACallEdges *> AAEdgesList,
9963                           const Function &Fn) const {
9964 
9965       // Handle the most trivial case first.
9966       for (auto *AAEdges : AAEdgesList) {
9967         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9968 
9969         if (Edges.count(const_cast<Function *>(&Fn)))
9970           return true;
9971       }
9972 
9973       SmallVector<const AAFunctionReachability *, 8> Deps;
9974       for (auto &AAEdges : AAEdgesList) {
9975         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9976 
9977         for (Function *Edge : Edges) {
9978           // Functions that do not call back into the module can be ignored.
9979           if (Edge->hasFnAttribute(Attribute::NoCallback))
9980             continue;
9981 
9982           // We don't need a dependency if the result is reachable.
9983           const AAFunctionReachability &EdgeReachability =
9984               A.getAAFor<AAFunctionReachability>(
9985                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9986           Deps.push_back(&EdgeReachability);
9987 
9988           if (EdgeReachability.canReach(A, Fn))
9989             return true;
9990         }
9991       }
9992 
9993       // The result is false for now, set dependencies and leave.
9994       for (auto *Dep : Deps)
9995         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9996 
9997       return false;
9998     }
9999   };
10000 
10001   /// Get call edges that can be reached by this instruction.
10002   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
10003                              const Instruction &Inst,
10004                              SmallVector<const AACallEdges *> &Result) const {
10005     // Determine call like instructions that we can reach from the inst.
10006     auto CheckCallBase = [&](Instruction &CBInst) {
10007       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
10008         return true;
10009 
10010       auto &CB = cast<CallBase>(CBInst);
10011       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10012           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
10013 
10014       Result.push_back(&AAEdges);
10015       return true;
10016     };
10017 
10018     bool UsedAssumedInformation = false;
10019     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
10020                                              UsedAssumedInformation,
10021                                              /* CheckBBLivenessOnly */ true);
10022   }
10023 
10024 public:
10025   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
10026       : AAFunctionReachability(IRP, A) {}
10027 
10028   bool canReach(Attributor &A, const Function &Fn) const override {
10029     if (!isValidState())
10030       return true;
10031 
10032     const AACallEdges &AAEdges =
10033         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10034 
10035     // Attributor returns attributes as const, so this function has to be
10036     // const for users of this attribute to use it without having to do
10037     // a const_cast.
10038     // This is a hack for us to be able to cache queries.
10039     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10040     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
10041                                                           {&AAEdges}, Fn);
10042 
10043     return Result;
10044   }
10045 
10046   /// Can \p CB reach \p Fn
10047   bool canReach(Attributor &A, CallBase &CB,
10048                 const Function &Fn) const override {
10049     if (!isValidState())
10050       return true;
10051 
10052     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10053         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
10054 
10055     // Attributor returns attributes as const, so this function has to be
10056     // const for users of this attribute to use it without having to do
10057     // a const_cast.
10058     // This is a hack for us to be able to cache queries.
10059     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10060     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
10061 
10062     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
10063 
10064     return Result;
10065   }
10066 
10067   bool instructionCanReach(Attributor &A, const Instruction &Inst,
10068                            const Function &Fn,
10069                            bool UseBackwards) const override {
10070     if (!isValidState())
10071       return true;
10072 
10073     if (UseBackwards)
10074       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
10075 
10076     const auto &Reachability = A.getAAFor<AAReachability>(
10077         *this, IRPosition::function(*getAssociatedFunction()),
10078         DepClassTy::REQUIRED);
10079 
10080     SmallVector<const AACallEdges *> CallEdges;
10081     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
10082     // Attributor returns attributes as const, so this function has to be
10083     // const for users of this attribute to use it without having to do
10084     // a const_cast.
10085     // This is a hack for us to be able to cache queries.
10086     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10087     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
10088     if (!AllKnown)
10089       InstQSet.CanReachUnknownCallee = true;
10090 
10091     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
10092   }
10093 
10094   /// See AbstractAttribute::updateImpl(...).
10095   ChangeStatus updateImpl(Attributor &A) override {
10096     const AACallEdges &AAEdges =
10097         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10098     ChangeStatus Change = ChangeStatus::UNCHANGED;
10099 
10100     Change |= WholeFunction.update(A, *this, {&AAEdges});
10101 
10102     for (auto &CBPair : CBQueries) {
10103       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10104           *this, IRPosition::callsite_function(*CBPair.first),
10105           DepClassTy::REQUIRED);
10106 
10107       Change |= CBPair.second.update(A, *this, {&AAEdges});
10108     }
10109 
10110     // Update the Instruction queries.
10111     if (!InstQueries.empty()) {
10112       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
10113           *this, IRPosition::function(*getAssociatedFunction()),
10114           DepClassTy::REQUIRED);
10115 
10116       // Check for local callbases first.
10117       for (auto &InstPair : InstQueries) {
10118         SmallVector<const AACallEdges *> CallEdges;
10119         bool AllKnown =
10120             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
10121         // Update will return change if we this effects any queries.
10122         if (!AllKnown)
10123           InstPair.second.CanReachUnknownCallee = true;
10124         Change |= InstPair.second.update(A, *this, CallEdges);
10125       }
10126     }
10127 
10128     return Change;
10129   }
10130 
10131   const std::string getAsStr() const override {
10132     size_t QueryCount =
10133         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
10134 
10135     return "FunctionReachability [" +
10136            std::to_string(WholeFunction.Reachable.size()) + "," +
10137            std::to_string(QueryCount) + "]";
10138   }
10139 
10140   void trackStatistics() const override {}
10141 
10142 private:
10143   bool canReachUnknownCallee() const override {
10144     return WholeFunction.CanReachUnknownCallee;
10145   }
10146 
10147   /// Used to answer if a the whole function can reacha a specific function.
10148   QueryResolver WholeFunction;
10149 
10150   /// Used to answer if a call base inside this function can reach a specific
10151   /// function.
10152   MapVector<const CallBase *, QueryResolver> CBQueries;
10153 
10154   /// This is for instruction queries than scan "forward".
10155   MapVector<const Instruction *, QueryResolver> InstQueries;
10156 };
10157 } // namespace
10158 
10159 /// ---------------------- Assumption Propagation ------------------------------
10160 namespace {
10161 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10162   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10163                        const DenseSet<StringRef> &Known)
10164       : AAAssumptionInfo(IRP, A, Known) {}
10165 
10166   bool hasAssumption(const StringRef Assumption) const override {
10167     return isValidState() && setContains(Assumption);
10168   }
10169 
10170   /// See AbstractAttribute::getAsStr()
10171   const std::string getAsStr() const override {
10172     const SetContents &Known = getKnown();
10173     const SetContents &Assumed = getAssumed();
10174 
10175     const std::string KnownStr =
10176         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10177     const std::string AssumedStr =
10178         (Assumed.isUniversal())
10179             ? "Universal"
10180             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10181 
10182     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10183   }
10184 };
10185 
10186 /// Propagates assumption information from parent functions to all of their
10187 /// successors. An assumption can be propagated if the containing function
10188 /// dominates the called function.
10189 ///
10190 /// We start with a "known" set of assumptions already valid for the associated
10191 /// function and an "assumed" set that initially contains all possible
10192 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10193 /// contents as concrete values are known. The concrete values are seeded by the
10194 /// first nodes that are either entries into the call graph, or contains no
10195 /// assumptions. Each node is updated as the intersection of the assumed state
10196 /// with all of its predecessors.
10197 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10198   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10199       : AAAssumptionInfoImpl(IRP, A,
10200                              getAssumptions(*IRP.getAssociatedFunction())) {}
10201 
10202   /// See AbstractAttribute::manifest(...).
10203   ChangeStatus manifest(Attributor &A) override {
10204     const auto &Assumptions = getKnown();
10205 
10206     // Don't manifest a universal set if it somehow made it here.
10207     if (Assumptions.isUniversal())
10208       return ChangeStatus::UNCHANGED;
10209 
10210     Function *AssociatedFunction = getAssociatedFunction();
10211 
10212     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10213 
10214     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10215   }
10216 
10217   /// See AbstractAttribute::updateImpl(...).
10218   ChangeStatus updateImpl(Attributor &A) override {
10219     bool Changed = false;
10220 
10221     auto CallSitePred = [&](AbstractCallSite ACS) {
10222       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10223           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10224           DepClassTy::REQUIRED);
10225       // Get the set of assumptions shared by all of this function's callers.
10226       Changed |= getIntersection(AssumptionAA.getAssumed());
10227       return !getAssumed().empty() || !getKnown().empty();
10228     };
10229 
10230     bool UsedAssumedInformation = false;
10231     // Get the intersection of all assumptions held by this node's predecessors.
10232     // If we don't know all the call sites then this is either an entry into the
10233     // call graph or an empty node. This node is known to only contain its own
10234     // assumptions and can be propagated to its successors.
10235     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10236                                 UsedAssumedInformation))
10237       return indicatePessimisticFixpoint();
10238 
10239     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10240   }
10241 
10242   void trackStatistics() const override {}
10243 };
10244 
10245 /// Assumption Info defined for call sites.
10246 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10247 
10248   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10249       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10250 
10251   /// See AbstractAttribute::initialize(...).
10252   void initialize(Attributor &A) override {
10253     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10254     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10255   }
10256 
10257   /// See AbstractAttribute::manifest(...).
10258   ChangeStatus manifest(Attributor &A) override {
10259     // Don't manifest a universal set if it somehow made it here.
10260     if (getKnown().isUniversal())
10261       return ChangeStatus::UNCHANGED;
10262 
10263     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10264     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10265 
10266     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10267   }
10268 
10269   /// See AbstractAttribute::updateImpl(...).
10270   ChangeStatus updateImpl(Attributor &A) override {
10271     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10272     auto &AssumptionAA =
10273         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10274     bool Changed = getIntersection(AssumptionAA.getAssumed());
10275     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10276   }
10277 
10278   /// See AbstractAttribute::trackStatistics()
10279   void trackStatistics() const override {}
10280 
10281 private:
10282   /// Helper to initialized the known set as all the assumptions this call and
10283   /// the callee contain.
10284   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10285     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10286     auto Assumptions = getAssumptions(CB);
10287     if (Function *F = IRP.getAssociatedFunction())
10288       set_union(Assumptions, getAssumptions(*F));
10289     if (Function *F = IRP.getAssociatedFunction())
10290       set_union(Assumptions, getAssumptions(*F));
10291     return Assumptions;
10292   }
10293 };
10294 } // namespace
10295 
10296 AACallGraphNode *AACallEdgeIterator::operator*() const {
10297   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10298       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10299 }
10300 
10301 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10302 
10303 const char AAReturnedValues::ID = 0;
10304 const char AANoUnwind::ID = 0;
10305 const char AANoSync::ID = 0;
10306 const char AANoFree::ID = 0;
10307 const char AANonNull::ID = 0;
10308 const char AANoRecurse::ID = 0;
10309 const char AAWillReturn::ID = 0;
10310 const char AAUndefinedBehavior::ID = 0;
10311 const char AANoAlias::ID = 0;
10312 const char AAReachability::ID = 0;
10313 const char AANoReturn::ID = 0;
10314 const char AAIsDead::ID = 0;
10315 const char AADereferenceable::ID = 0;
10316 const char AAAlign::ID = 0;
10317 const char AAInstanceInfo::ID = 0;
10318 const char AANoCapture::ID = 0;
10319 const char AAValueSimplify::ID = 0;
10320 const char AAHeapToStack::ID = 0;
10321 const char AAPrivatizablePtr::ID = 0;
10322 const char AAMemoryBehavior::ID = 0;
10323 const char AAMemoryLocation::ID = 0;
10324 const char AAValueConstantRange::ID = 0;
10325 const char AAPotentialConstantValues::ID = 0;
10326 const char AANoUndef::ID = 0;
10327 const char AACallEdges::ID = 0;
10328 const char AAFunctionReachability::ID = 0;
10329 const char AAPointerInfo::ID = 0;
10330 const char AAAssumptionInfo::ID = 0;
10331 
10332 // Macro magic to create the static generator function for attributes that
10333 // follow the naming scheme.
10334 
10335 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10336   case IRPosition::PK:                                                         \
10337     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10338 
10339 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10340   case IRPosition::PK:                                                         \
10341     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10342     ++NumAAs;                                                                  \
10343     break;
10344 
10345 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10346   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10347     CLASS *AA = nullptr;                                                       \
10348     switch (IRP.getPositionKind()) {                                           \
10349       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10350       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10351       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10352       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10353       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10354       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10355       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10356       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10357     }                                                                          \
10358     return *AA;                                                                \
10359   }
10360 
10361 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10362   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10363     CLASS *AA = nullptr;                                                       \
10364     switch (IRP.getPositionKind()) {                                           \
10365       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10366       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10367       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10368       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10369       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10370       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10371       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10372       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10373     }                                                                          \
10374     return *AA;                                                                \
10375   }
10376 
10377 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10378   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10379     CLASS *AA = nullptr;                                                       \
10380     switch (IRP.getPositionKind()) {                                           \
10381       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10382       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10383       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10384       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10385       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10386       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10387       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10388       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10389     }                                                                          \
10390     return *AA;                                                                \
10391   }
10392 
10393 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10394   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10395     CLASS *AA = nullptr;                                                       \
10396     switch (IRP.getPositionKind()) {                                           \
10397       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10398       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10399       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10400       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10401       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10402       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10403       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10404       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10405     }                                                                          \
10406     return *AA;                                                                \
10407   }
10408 
10409 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10410   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10411     CLASS *AA = nullptr;                                                       \
10412     switch (IRP.getPositionKind()) {                                           \
10413       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10414       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10415       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10416       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10417       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10418       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10419       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10420       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10421     }                                                                          \
10422     return *AA;                                                                \
10423   }
10424 
10425 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10426 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10427 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10428 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10429 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10430 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10431 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10432 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10433 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10434 
10435 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10436 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10437 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10438 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10439 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10440 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10441 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10442 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10443 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10444 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10445 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10446 
10447 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10448 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10449 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10450 
10451 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10452 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10453 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10454 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10455 
10456 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10457 
10458 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10459 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10460 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10461 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10462 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10463 #undef SWITCH_PK_CREATE
10464 #undef SWITCH_PK_INV
10465