1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetOperations.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/LazyValueInfo.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
32 #include "llvm/Analysis/ScalarEvolution.h"
33 #include "llvm/Analysis/TargetTransformInfo.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Assumptions.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/GlobalValue.h"
41 #include "llvm/IR/IRBuilder.h"
42 #include "llvm/IR/Instruction.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/NoFolder.h"
46 #include "llvm/IR/Value.h"
47 #include "llvm/IR/ValueHandle.h"
48 #include "llvm/Support/Alignment.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/CommandLine.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/GraphWriter.h"
53 #include "llvm/Support/MathExtras.h"
54 #include "llvm/Support/raw_ostream.h"
55 #include "llvm/Transforms/Utils/Local.h"
56 #include "llvm/Transforms/Utils/ValueMapper.h"
57 #include <cassert>
58 
59 using namespace llvm;
60 
61 #define DEBUG_TYPE "attributor"
62 
63 static cl::opt<bool> ManifestInternal(
64     "attributor-manifest-internal", cl::Hidden,
65     cl::desc("Manifest Attributor internal string attributes."),
66     cl::init(false));
67 
68 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
69                                        cl::Hidden);
70 
71 template <>
72 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
73 
74 static cl::opt<unsigned, true> MaxPotentialValues(
75     "attributor-max-potential-values", cl::Hidden,
76     cl::desc("Maximum number of potential values to be "
77              "tracked for each position."),
78     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
79     cl::init(7));
80 
81 static cl::opt<unsigned> MaxInterferingAccesses(
82     "attributor-max-interfering-accesses", cl::Hidden,
83     cl::desc("Maximum number of interfering accesses to "
84              "check before assuming all might interfere."),
85     cl::init(6));
86 
87 STATISTIC(NumAAs, "Number of abstract attributes created");
88 
89 // Some helper macros to deal with statistics tracking.
90 //
91 // Usage:
92 // For simple IR attribute tracking overload trackStatistics in the abstract
93 // attribute and choose the right STATS_DECLTRACK_********* macro,
94 // e.g.,:
95 //  void trackStatistics() const override {
96 //    STATS_DECLTRACK_ARG_ATTR(returned)
97 //  }
98 // If there is a single "increment" side one can use the macro
99 // STATS_DECLTRACK with a custom message. If there are multiple increment
100 // sides, STATS_DECL and STATS_TRACK can also be used separately.
101 //
102 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
103   ("Number of " #TYPE " marked '" #NAME "'")
104 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
105 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
106 #define STATS_DECL(NAME, TYPE, MSG)                                            \
107   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
108 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
109 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
110   {                                                                            \
111     STATS_DECL(NAME, TYPE, MSG)                                                \
112     STATS_TRACK(NAME, TYPE)                                                    \
113   }
114 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
115   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
116 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
117   STATS_DECLTRACK(NAME, CSArguments,                                           \
118                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
119 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
120   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
121 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
122   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
123 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
124   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
125                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
126 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
127   STATS_DECLTRACK(NAME, CSReturn,                                              \
128                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
129 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
130   STATS_DECLTRACK(NAME, Floating,                                              \
131                   ("Number of floating values known to be '" #NAME "'"))
132 
133 // Specialization of the operator<< for abstract attributes subclasses. This
134 // disambiguates situations where multiple operators are applicable.
135 namespace llvm {
136 #define PIPE_OPERATOR(CLASS)                                                   \
137   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
138     return OS << static_cast<const AbstractAttribute &>(AA);                   \
139   }
140 
141 PIPE_OPERATOR(AAIsDead)
142 PIPE_OPERATOR(AANoUnwind)
143 PIPE_OPERATOR(AANoSync)
144 PIPE_OPERATOR(AANoRecurse)
145 PIPE_OPERATOR(AAWillReturn)
146 PIPE_OPERATOR(AANoReturn)
147 PIPE_OPERATOR(AAReturnedValues)
148 PIPE_OPERATOR(AANonNull)
149 PIPE_OPERATOR(AANoAlias)
150 PIPE_OPERATOR(AADereferenceable)
151 PIPE_OPERATOR(AAAlign)
152 PIPE_OPERATOR(AAInstanceInfo)
153 PIPE_OPERATOR(AANoCapture)
154 PIPE_OPERATOR(AAValueSimplify)
155 PIPE_OPERATOR(AANoFree)
156 PIPE_OPERATOR(AAHeapToStack)
157 PIPE_OPERATOR(AAReachability)
158 PIPE_OPERATOR(AAMemoryBehavior)
159 PIPE_OPERATOR(AAMemoryLocation)
160 PIPE_OPERATOR(AAValueConstantRange)
161 PIPE_OPERATOR(AAPrivatizablePtr)
162 PIPE_OPERATOR(AAUndefinedBehavior)
163 PIPE_OPERATOR(AAPotentialConstantValues)
164 PIPE_OPERATOR(AANoUndef)
165 PIPE_OPERATOR(AACallEdges)
166 PIPE_OPERATOR(AAFunctionReachability)
167 PIPE_OPERATOR(AAPointerInfo)
168 PIPE_OPERATOR(AAAssumptionInfo)
169 
170 #undef PIPE_OPERATOR
171 
172 template <>
173 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
174                                                      const DerefState &R) {
175   ChangeStatus CS0 =
176       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
177   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
178   return CS0 | CS1;
179 }
180 
181 } // namespace llvm
182 
183 /// Checks if a type could have padding bytes.
184 static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
185   // There is no size information, so be conservative.
186   if (!Ty->isSized())
187     return false;
188 
189   // If the alloc size is not equal to the storage size, then there are padding
190   // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
191   if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
192     return false;
193 
194   // FIXME: This isn't the right way to check for padding in vectors with
195   // non-byte-size elements.
196   if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
197     return isDenselyPacked(SeqTy->getElementType(), DL);
198 
199   // For array types, check for padding within members.
200   if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
201     return isDenselyPacked(SeqTy->getElementType(), DL);
202 
203   if (!isa<StructType>(Ty))
204     return true;
205 
206   // Check for padding within and between elements of a struct.
207   StructType *StructTy = cast<StructType>(Ty);
208   const StructLayout *Layout = DL.getStructLayout(StructTy);
209   uint64_t StartPos = 0;
210   for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
211     Type *ElTy = StructTy->getElementType(I);
212     if (!isDenselyPacked(ElTy, DL))
213       return false;
214     if (StartPos != Layout->getElementOffsetInBits(I))
215       return false;
216     StartPos += DL.getTypeAllocSizeInBits(ElTy);
217   }
218 
219   return true;
220 }
221 
222 /// Get pointer operand of memory accessing instruction. If \p I is
223 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
224 /// is set to false and the instruction is volatile, return nullptr.
225 static const Value *getPointerOperand(const Instruction *I,
226                                       bool AllowVolatile) {
227   if (!AllowVolatile && I->isVolatile())
228     return nullptr;
229 
230   if (auto *LI = dyn_cast<LoadInst>(I)) {
231     return LI->getPointerOperand();
232   }
233 
234   if (auto *SI = dyn_cast<StoreInst>(I)) {
235     return SI->getPointerOperand();
236   }
237 
238   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
239     return CXI->getPointerOperand();
240   }
241 
242   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
243     return RMWI->getPointerOperand();
244   }
245 
246   return nullptr;
247 }
248 
249 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
250 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
251 /// getelement pointer instructions that traverse the natural type of \p Ptr if
252 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
253 /// through a cast to i8*.
254 ///
255 /// TODO: This could probably live somewhere more prominantly if it doesn't
256 ///       already exist.
257 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
258                                int64_t Offset, IRBuilder<NoFolder> &IRB,
259                                const DataLayout &DL) {
260   assert(Offset >= 0 && "Negative offset not supported yet!");
261   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
262                     << "-bytes as " << *ResTy << "\n");
263 
264   if (Offset) {
265     Type *Ty = PtrElemTy;
266     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
267     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
268 
269     SmallVector<Value *, 4> ValIndices;
270     std::string GEPName = Ptr->getName().str();
271     for (const APInt &Index : IntIndices) {
272       ValIndices.push_back(IRB.getInt(Index));
273       GEPName += "." + std::to_string(Index.getZExtValue());
274     }
275 
276     // Create a GEP for the indices collected above.
277     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
278 
279     // If an offset is left we use byte-wise adjustment.
280     if (IntOffset != 0) {
281       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
282       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
283                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
284     }
285   }
286 
287   // Ensure the result has the requested type.
288   Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
289                                                 Ptr->getName() + ".cast");
290 
291   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
292   return Ptr;
293 }
294 
295 /// Recursively visit all values that might become \p IRP at some point. This
296 /// will be done by looking through cast instructions, selects, phis, and calls
297 /// with the "returned" attribute. Once we cannot look through the value any
298 /// further, the callback \p VisitValueCB is invoked and passed the current
299 /// value, the \p State, and a flag to indicate if we stripped anything.
300 /// Stripped means that we unpacked the value associated with \p IRP at least
301 /// once. Note that the value used for the callback may still be the value
302 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
303 /// we will never visit more values than specified by \p MaxValues.
304 /// If \p VS does not contain the Interprocedural bit, only values valid in the
305 /// scope of \p CtxI will be visited and simplification into other scopes is
306 /// prevented.
307 template <typename StateTy>
308 static bool genericValueTraversal(
309     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
310     StateTy &State,
311     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
312         VisitValueCB,
313     const Instruction *CtxI, bool &UsedAssumedInformation,
314     bool UseValueSimplify = true, int MaxValues = 16,
315     function_ref<Value *(Value *)> StripCB = nullptr,
316     AA::ValueScope VS = AA::Interprocedural) {
317 
318   struct LivenessInfo {
319     const AAIsDead *LivenessAA = nullptr;
320     bool AnyDead = false;
321   };
322   SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
323   auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
324     LivenessInfo &LI = LivenessAAs[&F];
325     if (!LI.LivenessAA)
326       LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
327                                             DepClassTy::NONE);
328     return LI;
329   };
330 
331   Value *InitialV = &IRP.getAssociatedValue();
332   using Item = std::pair<Value *, const Instruction *>;
333   SmallSet<Item, 16> Visited;
334   SmallVector<Item, 16> Worklist;
335   Worklist.push_back({InitialV, CtxI});
336 
337   int Iteration = 0;
338   do {
339     Item I = Worklist.pop_back_val();
340     Value *V = I.first;
341     CtxI = I.second;
342     if (StripCB)
343       V = StripCB(V);
344 
345     // Check if we should process the current value. To prevent endless
346     // recursion keep a record of the values we followed!
347     if (!Visited.insert(I).second)
348       continue;
349 
350     // Make sure we limit the compile time for complex expressions.
351     if (Iteration++ >= MaxValues) {
352       LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
353                         << Iteration << "!\n");
354       return false;
355     }
356 
357     // Explicitly look through calls with a "returned" attribute if we do
358     // not have a pointer as stripPointerCasts only works on them.
359     Value *NewV = nullptr;
360     if (V->getType()->isPointerTy()) {
361       NewV = V->stripPointerCasts();
362     } else {
363       auto *CB = dyn_cast<CallBase>(V);
364       if (CB && CB->getCalledFunction()) {
365         for (Argument &Arg : CB->getCalledFunction()->args())
366           if (Arg.hasReturnedAttr()) {
367             NewV = CB->getArgOperand(Arg.getArgNo());
368             break;
369           }
370       }
371     }
372     if (NewV && NewV != V) {
373       Worklist.push_back({NewV, CtxI});
374       continue;
375     }
376 
377     // Look through select instructions, visit assumed potential values.
378     if (auto *SI = dyn_cast<SelectInst>(V)) {
379       Optional<Constant *> C = A.getAssumedConstant(
380           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
381       bool NoValueYet = !C;
382       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
383         continue;
384       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
385         if (CI->isZero())
386           Worklist.push_back({SI->getFalseValue(), CtxI});
387         else
388           Worklist.push_back({SI->getTrueValue(), CtxI});
389         continue;
390       }
391       // We could not simplify the condition, assume both values.(
392       Worklist.push_back({SI->getTrueValue(), CtxI});
393       Worklist.push_back({SI->getFalseValue(), CtxI});
394       continue;
395     }
396 
397     // Look through phi nodes, visit all live operands.
398     if (auto *PHI = dyn_cast<PHINode>(V)) {
399       LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
400       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
401         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
402         if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
403           LI.AnyDead = true;
404           UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
405           continue;
406         }
407         Worklist.push_back(
408             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
409       }
410       continue;
411     }
412 
413     if (auto *Arg = dyn_cast<Argument>(V)) {
414       if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
415         SmallVector<Item> CallSiteValues;
416         bool UsedAssumedInformation = false;
417         if (A.checkForAllCallSites(
418                 [&](AbstractCallSite ACS) {
419                   // Callbacks might not have a corresponding call site operand,
420                   // stick with the argument in that case.
421                   Value *CSOp = ACS.getCallArgOperand(*Arg);
422                   if (!CSOp)
423                     return false;
424                   CallSiteValues.push_back({CSOp, ACS.getInstruction()});
425                   return true;
426                 },
427                 *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
428           Worklist.append(CallSiteValues);
429           continue;
430         }
431       }
432     }
433 
434     if (UseValueSimplify && !isa<Constant>(V)) {
435       Optional<Value *> SimpleV =
436           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
437       if (!SimpleV)
438         continue;
439       Value *NewV = SimpleV.getValue();
440       if (NewV && NewV != V) {
441         if ((VS & AA::Interprocedural) || !CtxI ||
442             AA::isValidInScope(*NewV, CtxI->getFunction())) {
443           Worklist.push_back({NewV, CtxI});
444           continue;
445         }
446       }
447     }
448 
449     if (auto *LI = dyn_cast<LoadInst>(V)) {
450       bool UsedAssumedInformation = false;
451       // If we ask for the potentially loaded values from the initial pointer we
452       // will simply end up here again. The load is as far as we can make it.
453       if (LI->getPointerOperand() != InitialV) {
454         SmallSetVector<Value *, 4> PotentialCopies;
455         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
456         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
457                                            PotentialValueOrigins, QueryingAA,
458                                            UsedAssumedInformation,
459                                            /* OnlyExact */ true)) {
460           // Values have to be dynamically unique or we loose the fact that a
461           // single llvm::Value might represent two runtime values (e.g., stack
462           // locations in different recursive calls).
463           bool DynamicallyUnique =
464               llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
465                 return AA::isDynamicallyUnique(A, QueryingAA, *PC);
466               });
467           if (DynamicallyUnique &&
468               ((VS & AA::Interprocedural) || !CtxI ||
469                llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
470                  return AA::isValidInScope(*PC, CtxI->getFunction());
471                }))) {
472             for (auto *PotentialCopy : PotentialCopies)
473               Worklist.push_back({PotentialCopy, CtxI});
474             continue;
475           }
476         }
477       }
478     }
479 
480     // Once a leaf is reached we inform the user through the callback.
481     if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
482       LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
483                         << *V << "!\n");
484       return false;
485     }
486   } while (!Worklist.empty());
487 
488   // If we actually used liveness information so we have to record a dependence.
489   for (auto &It : LivenessAAs)
490     if (It.second.AnyDead)
491       A.recordDependence(*It.second.LivenessAA, QueryingAA,
492                          DepClassTy::OPTIONAL);
493 
494   // All values have been visited.
495   return true;
496 }
497 
498 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
499                                      SmallVectorImpl<Value *> &Objects,
500                                      const AbstractAttribute &QueryingAA,
501                                      const Instruction *CtxI,
502                                      bool &UsedAssumedInformation,
503                                      AA::ValueScope VS) {
504   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
505   SmallPtrSet<Value *, 8> SeenObjects;
506   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
507                                      SmallVectorImpl<Value *> &Objects,
508                                      bool) -> bool {
509     if (SeenObjects.insert(&Val).second)
510       Objects.push_back(&Val);
511     return true;
512   };
513   if (!genericValueTraversal<decltype(Objects)>(
514           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
515           UsedAssumedInformation, true, 32, StripCB, VS))
516     return false;
517   return true;
518 }
519 
520 static const Value *
521 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
522                           const Value *Val, const DataLayout &DL, APInt &Offset,
523                           bool GetMinOffset, bool AllowNonInbounds,
524                           bool UseAssumed = false) {
525 
526   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
527     const IRPosition &Pos = IRPosition::value(V);
528     // Only track dependence if we are going to use the assumed info.
529     const AAValueConstantRange &ValueConstantRangeAA =
530         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
531                                          UseAssumed ? DepClassTy::OPTIONAL
532                                                     : DepClassTy::NONE);
533     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
534                                      : ValueConstantRangeAA.getKnown();
535     if (Range.isFullSet())
536       return false;
537 
538     // We can only use the lower part of the range because the upper part can
539     // be higher than what the value can really be.
540     if (GetMinOffset)
541       ROffset = Range.getSignedMin();
542     else
543       ROffset = Range.getSignedMax();
544     return true;
545   };
546 
547   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
548                                                 /* AllowInvariant */ true,
549                                                 AttributorAnalysis);
550 }
551 
552 static const Value *
553 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
554                         const Value *Ptr, int64_t &BytesOffset,
555                         const DataLayout &DL, bool AllowNonInbounds = false) {
556   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
557   const Value *Base =
558       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
559                                 /* GetMinOffset */ true, AllowNonInbounds);
560 
561   BytesOffset = OffsetAPInt.getSExtValue();
562   return Base;
563 }
564 
565 /// Clamp the information known for all returned values of a function
566 /// (identified by \p QueryingAA) into \p S.
567 template <typename AAType, typename StateType = typename AAType::StateType>
568 static void clampReturnedValueStates(
569     Attributor &A, const AAType &QueryingAA, StateType &S,
570     const IRPosition::CallBaseContext *CBContext = nullptr) {
571   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
572                     << QueryingAA << " into " << S << "\n");
573 
574   assert((QueryingAA.getIRPosition().getPositionKind() ==
575               IRPosition::IRP_RETURNED ||
576           QueryingAA.getIRPosition().getPositionKind() ==
577               IRPosition::IRP_CALL_SITE_RETURNED) &&
578          "Can only clamp returned value states for a function returned or call "
579          "site returned position!");
580 
581   // Use an optional state as there might not be any return values and we want
582   // to join (IntegerState::operator&) the state of all there are.
583   Optional<StateType> T;
584 
585   // Callback for each possibly returned value.
586   auto CheckReturnValue = [&](Value &RV) -> bool {
587     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
588     const AAType &AA =
589         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
590     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
591                       << " @ " << RVPos << "\n");
592     const StateType &AAS = AA.getState();
593     if (!T)
594       T = StateType::getBestState(AAS);
595     *T &= AAS;
596     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
597                       << "\n");
598     return T->isValidState();
599   };
600 
601   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
602     S.indicatePessimisticFixpoint();
603   else if (T)
604     S ^= *T;
605 }
606 
607 namespace {
608 /// Helper class for generic deduction: return value -> returned position.
609 template <typename AAType, typename BaseType,
610           typename StateType = typename BaseType::StateType,
611           bool PropagateCallBaseContext = false>
612 struct AAReturnedFromReturnedValues : public BaseType {
613   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
614       : BaseType(IRP, A) {}
615 
616   /// See AbstractAttribute::updateImpl(...).
617   ChangeStatus updateImpl(Attributor &A) override {
618     StateType S(StateType::getBestState(this->getState()));
619     clampReturnedValueStates<AAType, StateType>(
620         A, *this, S,
621         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
622     // TODO: If we know we visited all returned values, thus no are assumed
623     // dead, we can take the known information from the state T.
624     return clampStateAndIndicateChange<StateType>(this->getState(), S);
625   }
626 };
627 
628 /// Clamp the information known at all call sites for a given argument
629 /// (identified by \p QueryingAA) into \p S.
630 template <typename AAType, typename StateType = typename AAType::StateType>
631 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
632                                         StateType &S) {
633   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
634                     << QueryingAA << " into " << S << "\n");
635 
636   assert(QueryingAA.getIRPosition().getPositionKind() ==
637              IRPosition::IRP_ARGUMENT &&
638          "Can only clamp call site argument states for an argument position!");
639 
640   // Use an optional state as there might not be any return values and we want
641   // to join (IntegerState::operator&) the state of all there are.
642   Optional<StateType> T;
643 
644   // The argument number which is also the call site argument number.
645   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
646 
647   auto CallSiteCheck = [&](AbstractCallSite ACS) {
648     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
649     // Check if a coresponding argument was found or if it is on not associated
650     // (which can happen for callback calls).
651     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
652       return false;
653 
654     const AAType &AA =
655         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
656     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
657                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
658     const StateType &AAS = AA.getState();
659     if (!T)
660       T = StateType::getBestState(AAS);
661     *T &= AAS;
662     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
663                       << "\n");
664     return T->isValidState();
665   };
666 
667   bool UsedAssumedInformation = false;
668   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
669                               UsedAssumedInformation))
670     S.indicatePessimisticFixpoint();
671   else if (T)
672     S ^= *T;
673 }
674 
675 /// This function is the bridge between argument position and the call base
676 /// context.
677 template <typename AAType, typename BaseType,
678           typename StateType = typename AAType::StateType>
679 bool getArgumentStateFromCallBaseContext(Attributor &A,
680                                          BaseType &QueryingAttribute,
681                                          IRPosition &Pos, StateType &State) {
682   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
683          "Expected an 'argument' position !");
684   const CallBase *CBContext = Pos.getCallBaseContext();
685   if (!CBContext)
686     return false;
687 
688   int ArgNo = Pos.getCallSiteArgNo();
689   assert(ArgNo >= 0 && "Invalid Arg No!");
690 
691   const auto &AA = A.getAAFor<AAType>(
692       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
693       DepClassTy::REQUIRED);
694   const StateType &CBArgumentState =
695       static_cast<const StateType &>(AA.getState());
696 
697   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
698                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
699                     << "\n");
700 
701   // NOTE: If we want to do call site grouping it should happen here.
702   State ^= CBArgumentState;
703   return true;
704 }
705 
706 /// Helper class for generic deduction: call site argument -> argument position.
707 template <typename AAType, typename BaseType,
708           typename StateType = typename AAType::StateType,
709           bool BridgeCallBaseContext = false>
710 struct AAArgumentFromCallSiteArguments : public BaseType {
711   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
712       : BaseType(IRP, A) {}
713 
714   /// See AbstractAttribute::updateImpl(...).
715   ChangeStatus updateImpl(Attributor &A) override {
716     StateType S = StateType::getBestState(this->getState());
717 
718     if (BridgeCallBaseContext) {
719       bool Success =
720           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
721               A, *this, this->getIRPosition(), S);
722       if (Success)
723         return clampStateAndIndicateChange<StateType>(this->getState(), S);
724     }
725     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
726 
727     // TODO: If we know we visited all incoming values, thus no are assumed
728     // dead, we can take the known information from the state T.
729     return clampStateAndIndicateChange<StateType>(this->getState(), S);
730   }
731 };
732 
733 /// Helper class for generic replication: function returned -> cs returned.
734 template <typename AAType, typename BaseType,
735           typename StateType = typename BaseType::StateType,
736           bool IntroduceCallBaseContext = false>
737 struct AACallSiteReturnedFromReturned : public BaseType {
738   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
739       : BaseType(IRP, A) {}
740 
741   /// See AbstractAttribute::updateImpl(...).
742   ChangeStatus updateImpl(Attributor &A) override {
743     assert(this->getIRPosition().getPositionKind() ==
744                IRPosition::IRP_CALL_SITE_RETURNED &&
745            "Can only wrap function returned positions for call site returned "
746            "positions!");
747     auto &S = this->getState();
748 
749     const Function *AssociatedFunction =
750         this->getIRPosition().getAssociatedFunction();
751     if (!AssociatedFunction)
752       return S.indicatePessimisticFixpoint();
753 
754     CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
755     if (IntroduceCallBaseContext)
756       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
757                         << CBContext << "\n");
758 
759     IRPosition FnPos = IRPosition::returned(
760         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
761     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
762     return clampStateAndIndicateChange(S, AA.getState());
763   }
764 };
765 
766 /// Helper function to accumulate uses.
767 template <class AAType, typename StateType = typename AAType::StateType>
768 static void followUsesInContext(AAType &AA, Attributor &A,
769                                 MustBeExecutedContextExplorer &Explorer,
770                                 const Instruction *CtxI,
771                                 SetVector<const Use *> &Uses,
772                                 StateType &State) {
773   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
774   for (unsigned u = 0; u < Uses.size(); ++u) {
775     const Use *U = Uses[u];
776     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
777       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
778       if (Found && AA.followUseInMBEC(A, U, UserI, State))
779         for (const Use &Us : UserI->uses())
780           Uses.insert(&Us);
781     }
782   }
783 }
784 
785 /// Use the must-be-executed-context around \p I to add information into \p S.
786 /// The AAType class is required to have `followUseInMBEC` method with the
787 /// following signature and behaviour:
788 ///
789 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
790 /// U - Underlying use.
791 /// I - The user of the \p U.
792 /// Returns true if the value should be tracked transitively.
793 ///
794 template <class AAType, typename StateType = typename AAType::StateType>
795 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
796                              Instruction &CtxI) {
797 
798   // Container for (transitive) uses of the associated value.
799   SetVector<const Use *> Uses;
800   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
801     Uses.insert(&U);
802 
803   MustBeExecutedContextExplorer &Explorer =
804       A.getInfoCache().getMustBeExecutedContextExplorer();
805 
806   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
807 
808   if (S.isAtFixpoint())
809     return;
810 
811   SmallVector<const BranchInst *, 4> BrInsts;
812   auto Pred = [&](const Instruction *I) {
813     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
814       if (Br->isConditional())
815         BrInsts.push_back(Br);
816     return true;
817   };
818 
819   // Here, accumulate conditional branch instructions in the context. We
820   // explore the child paths and collect the known states. The disjunction of
821   // those states can be merged to its own state. Let ParentState_i be a state
822   // to indicate the known information for an i-th branch instruction in the
823   // context. ChildStates are created for its successors respectively.
824   //
825   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
826   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
827   //      ...
828   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
829   //
830   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
831   //
832   // FIXME: Currently, recursive branches are not handled. For example, we
833   // can't deduce that ptr must be dereferenced in below function.
834   //
835   // void f(int a, int c, int *ptr) {
836   //    if(a)
837   //      if (b) {
838   //        *ptr = 0;
839   //      } else {
840   //        *ptr = 1;
841   //      }
842   //    else {
843   //      if (b) {
844   //        *ptr = 0;
845   //      } else {
846   //        *ptr = 1;
847   //      }
848   //    }
849   // }
850 
851   Explorer.checkForAllContext(&CtxI, Pred);
852   for (const BranchInst *Br : BrInsts) {
853     StateType ParentState;
854 
855     // The known state of the parent state is a conjunction of children's
856     // known states so it is initialized with a best state.
857     ParentState.indicateOptimisticFixpoint();
858 
859     for (const BasicBlock *BB : Br->successors()) {
860       StateType ChildState;
861 
862       size_t BeforeSize = Uses.size();
863       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
864 
865       // Erase uses which only appear in the child.
866       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
867         It = Uses.erase(It);
868 
869       ParentState &= ChildState;
870     }
871 
872     // Use only known state.
873     S += ParentState;
874   }
875 }
876 } // namespace
877 
878 /// ------------------------ PointerInfo ---------------------------------------
879 
880 namespace llvm {
881 namespace AA {
882 namespace PointerInfo {
883 
884 struct State;
885 
886 } // namespace PointerInfo
887 } // namespace AA
888 
889 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
890 template <>
891 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
892   using Access = AAPointerInfo::Access;
893   static inline Access getEmptyKey();
894   static inline Access getTombstoneKey();
895   static unsigned getHashValue(const Access &A);
896   static bool isEqual(const Access &LHS, const Access &RHS);
897 };
898 
899 /// Helper that allows OffsetAndSize as a key in a DenseMap.
900 template <>
901 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
902     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
903 
904 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
905 /// but the instruction
906 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
907   using Base = DenseMapInfo<Instruction *>;
908   using Access = AAPointerInfo::Access;
909   static inline Access getEmptyKey();
910   static inline Access getTombstoneKey();
911   static unsigned getHashValue(const Access &A);
912   static bool isEqual(const Access &LHS, const Access &RHS);
913 };
914 
915 } // namespace llvm
916 
917 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
918 struct AA::PointerInfo::State : public AbstractState {
919 
920   ~State() {
921     // We do not delete the Accesses objects but need to destroy them still.
922     for (auto &It : AccessBins)
923       It.second->~Accesses();
924   }
925 
926   /// Return the best possible representable state.
927   static State getBestState(const State &SIS) { return State(); }
928 
929   /// Return the worst possible representable state.
930   static State getWorstState(const State &SIS) {
931     State R;
932     R.indicatePessimisticFixpoint();
933     return R;
934   }
935 
936   State() = default;
937   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
938     SIS.AccessBins.clear();
939   }
940 
941   const State &getAssumed() const { return *this; }
942 
943   /// See AbstractState::isValidState().
944   bool isValidState() const override { return BS.isValidState(); }
945 
946   /// See AbstractState::isAtFixpoint().
947   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
948 
949   /// See AbstractState::indicateOptimisticFixpoint().
950   ChangeStatus indicateOptimisticFixpoint() override {
951     BS.indicateOptimisticFixpoint();
952     return ChangeStatus::UNCHANGED;
953   }
954 
955   /// See AbstractState::indicatePessimisticFixpoint().
956   ChangeStatus indicatePessimisticFixpoint() override {
957     BS.indicatePessimisticFixpoint();
958     return ChangeStatus::CHANGED;
959   }
960 
961   State &operator=(const State &R) {
962     if (this == &R)
963       return *this;
964     BS = R.BS;
965     AccessBins = R.AccessBins;
966     return *this;
967   }
968 
969   State &operator=(State &&R) {
970     if (this == &R)
971       return *this;
972     std::swap(BS, R.BS);
973     std::swap(AccessBins, R.AccessBins);
974     return *this;
975   }
976 
977   bool operator==(const State &R) const {
978     if (BS != R.BS)
979       return false;
980     if (AccessBins.size() != R.AccessBins.size())
981       return false;
982     auto It = begin(), RIt = R.begin(), E = end();
983     while (It != E) {
984       if (It->getFirst() != RIt->getFirst())
985         return false;
986       auto &Accs = It->getSecond();
987       auto &RAccs = RIt->getSecond();
988       if (Accs->size() != RAccs->size())
989         return false;
990       for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
991         if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
992           return false;
993       ++It;
994       ++RIt;
995     }
996     return true;
997   }
998   bool operator!=(const State &R) const { return !(*this == R); }
999 
1000   /// We store accesses in a set with the instruction as key.
1001   struct Accesses {
1002     SmallVector<AAPointerInfo::Access, 4> Accesses;
1003     DenseMap<const Instruction *, unsigned> Map;
1004 
1005     unsigned size() const { return Accesses.size(); }
1006 
1007     using vec_iterator = decltype(Accesses)::iterator;
1008     vec_iterator begin() { return Accesses.begin(); }
1009     vec_iterator end() { return Accesses.end(); }
1010 
1011     using iterator = decltype(Map)::const_iterator;
1012     iterator find(AAPointerInfo::Access &Acc) {
1013       return Map.find(Acc.getRemoteInst());
1014     }
1015     iterator find_end() { return Map.end(); }
1016 
1017     AAPointerInfo::Access &get(iterator &It) {
1018       return Accesses[It->getSecond()];
1019     }
1020 
1021     void insert(AAPointerInfo::Access &Acc) {
1022       Map[Acc.getRemoteInst()] = Accesses.size();
1023       Accesses.push_back(Acc);
1024     }
1025   };
1026 
1027   /// We store all accesses in bins denoted by their offset and size.
1028   using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
1029 
1030   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
1031   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
1032 
1033 protected:
1034   /// The bins with all the accesses for the associated pointer.
1035   AccessBinsTy AccessBins;
1036 
1037   /// Add a new access to the state at offset \p Offset and with size \p Size.
1038   /// The access is associated with \p I, writes \p Content (if anything), and
1039   /// is of kind \p Kind.
1040   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
1041   ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
1042                          Instruction &I, Optional<Value *> Content,
1043                          AAPointerInfo::AccessKind Kind, Type *Ty,
1044                          Instruction *RemoteI = nullptr,
1045                          Accesses *BinPtr = nullptr) {
1046     AAPointerInfo::OffsetAndSize Key{Offset, Size};
1047     Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
1048     if (!Bin)
1049       Bin = new (A.Allocator) Accesses;
1050     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1051     // Check if we have an access for this instruction in this bin, if not,
1052     // simply add it.
1053     auto It = Bin->find(Acc);
1054     if (It == Bin->find_end()) {
1055       Bin->insert(Acc);
1056       return ChangeStatus::CHANGED;
1057     }
1058     // If the existing access is the same as then new one, nothing changed.
1059     AAPointerInfo::Access &Current = Bin->get(It);
1060     AAPointerInfo::Access Before = Current;
1061     // The new one will be combined with the existing one.
1062     Current &= Acc;
1063     return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1064   }
1065 
1066   /// See AAPointerInfo::forallInterferingAccesses.
1067   bool forallInterferingAccesses(
1068       AAPointerInfo::OffsetAndSize OAS,
1069       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1070     if (!isValidState())
1071       return false;
1072 
1073     for (auto &It : AccessBins) {
1074       AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
1075       if (!OAS.mayOverlap(ItOAS))
1076         continue;
1077       bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1078       for (auto &Access : *It.getSecond())
1079         if (!CB(Access, IsExact))
1080           return false;
1081     }
1082     return true;
1083   }
1084 
1085   /// See AAPointerInfo::forallInterferingAccesses.
1086   bool forallInterferingAccesses(
1087       Instruction &I,
1088       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1089     if (!isValidState())
1090       return false;
1091 
1092     // First find the offset and size of I.
1093     AAPointerInfo::OffsetAndSize OAS(-1, -1);
1094     for (auto &It : AccessBins) {
1095       for (auto &Access : *It.getSecond()) {
1096         if (Access.getRemoteInst() == &I) {
1097           OAS = It.getFirst();
1098           break;
1099         }
1100       }
1101       if (OAS.getSize() != -1)
1102         break;
1103     }
1104     // No access for I was found, we are done.
1105     if (OAS.getSize() == -1)
1106       return true;
1107 
1108     // Now that we have an offset and size, find all overlapping ones and use
1109     // the callback on the accesses.
1110     return forallInterferingAccesses(OAS, CB);
1111   }
1112 
1113 private:
1114   /// State to track fixpoint and validity.
1115   BooleanState BS;
1116 };
1117 
1118 namespace {
1119 struct AAPointerInfoImpl
1120     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1121   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1122   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1123 
1124   /// See AbstractAttribute::initialize(...).
1125   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1126 
1127   /// See AbstractAttribute::getAsStr().
1128   const std::string getAsStr() const override {
1129     return std::string("PointerInfo ") +
1130            (isValidState() ? (std::string("#") +
1131                               std::to_string(AccessBins.size()) + " bins")
1132                            : "<invalid>");
1133   }
1134 
1135   /// See AbstractAttribute::manifest(...).
1136   ChangeStatus manifest(Attributor &A) override {
1137     return AAPointerInfo::manifest(A);
1138   }
1139 
1140   bool forallInterferingAccesses(
1141       OffsetAndSize OAS,
1142       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1143       const override {
1144     return State::forallInterferingAccesses(OAS, CB);
1145   }
1146   bool forallInterferingAccesses(
1147       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1148       function_ref<bool(const Access &, bool)> UserCB) const override {
1149     SmallPtrSet<const Access *, 8> DominatingWrites;
1150     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1151 
1152     Function &Scope = *I.getFunction();
1153     const auto &NoSyncAA = A.getAAFor<AANoSync>(
1154         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1155     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1156         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
1157     const bool NoSync = NoSyncAA.isAssumedNoSync();
1158 
1159     // Helper to determine if we need to consider threading, which we cannot
1160     // right now. However, if the function is (assumed) nosync or the thread
1161     // executing all instructions is the main thread only we can ignore
1162     // threading.
1163     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
1164       if (NoSync)
1165         return true;
1166       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
1167         return true;
1168       return false;
1169     };
1170 
1171     // Helper to determine if the access is executed by the same thread as the
1172     // load, for now it is sufficient to avoid any potential threading effects
1173     // as we cannot deal with them anyway.
1174     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
1175       return CanIgnoreThreading(*Acc.getLocalInst());
1176     };
1177 
1178     // TODO: Use inter-procedural reachability and dominance.
1179     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1180         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1181 
1182     const bool FindInterferingWrites = I.mayReadFromMemory();
1183     const bool FindInterferingReads = I.mayWriteToMemory();
1184     const bool UseDominanceReasoning = FindInterferingWrites;
1185     const bool CanUseCFGResoning = CanIgnoreThreading(I);
1186     InformationCache &InfoCache = A.getInfoCache();
1187     const DominatorTree *DT =
1188         NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1189             ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1190                   Scope)
1191             : nullptr;
1192 
1193     enum GPUAddressSpace : unsigned {
1194       Generic = 0,
1195       Global = 1,
1196       Shared = 3,
1197       Constant = 4,
1198       Local = 5,
1199     };
1200 
1201     // Helper to check if a value has "kernel lifetime", that is it will not
1202     // outlive a GPU kernel. This is true for shared, constant, and local
1203     // globals on AMD and NVIDIA GPUs.
1204     auto HasKernelLifetime = [&](Value *V, Module &M) {
1205       Triple T(M.getTargetTriple());
1206       if (!(T.isAMDGPU() || T.isNVPTX()))
1207         return false;
1208       switch (V->getType()->getPointerAddressSpace()) {
1209       case GPUAddressSpace::Shared:
1210       case GPUAddressSpace::Constant:
1211       case GPUAddressSpace::Local:
1212         return true;
1213       default:
1214         return false;
1215       };
1216     };
1217 
1218     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1219     // to determine if we should look at reachability from the callee. For
1220     // certain pointers we know the lifetime and we do not have to step into the
1221     // callee to determine reachability as the pointer would be dead in the
1222     // callee. See the conditional initialization below.
1223     std::function<bool(const Function &)> IsLiveInCalleeCB;
1224 
1225     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1226       // If the alloca containing function is not recursive the alloca
1227       // must be dead in the callee.
1228       const Function *AIFn = AI->getFunction();
1229       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1230           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1231       if (NoRecurseAA.isAssumedNoRecurse()) {
1232         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1233       }
1234     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1235       // If the global has kernel lifetime we can stop if we reach a kernel
1236       // as it is "dead" in the (unknown) callees.
1237       if (HasKernelLifetime(GV, *GV->getParent()))
1238         IsLiveInCalleeCB = [](const Function &Fn) {
1239           return !Fn.hasFnAttribute("kernel");
1240         };
1241     }
1242 
1243     auto AccessCB = [&](const Access &Acc, bool Exact) {
1244       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1245           (!FindInterferingReads || !Acc.isRead()))
1246         return true;
1247 
1248       // For now we only filter accesses based on CFG reasoning which does not
1249       // work yet if we have threading effects, or the access is complicated.
1250       if (CanUseCFGResoning) {
1251         if ((!Acc.isWrite() ||
1252              !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1253                                          IsLiveInCalleeCB)) &&
1254             (!Acc.isRead() ||
1255              !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1256                                          IsLiveInCalleeCB)))
1257           return true;
1258         if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1259             IsSameThreadAsLoad(Acc)) {
1260           if (DT->dominates(Acc.getLocalInst(), &I))
1261             DominatingWrites.insert(&Acc);
1262         }
1263       }
1264 
1265       InterferingAccesses.push_back({&Acc, Exact});
1266       return true;
1267     };
1268     if (!State::forallInterferingAccesses(I, AccessCB))
1269       return false;
1270 
1271     // If we cannot use CFG reasoning we only filter the non-write accesses
1272     // and are done here.
1273     if (!CanUseCFGResoning) {
1274       for (auto &It : InterferingAccesses)
1275         if (!UserCB(*It.first, It.second))
1276           return false;
1277       return true;
1278     }
1279 
1280     // Helper to determine if we can skip a specific write access. This is in
1281     // the worst case quadratic as we are looking for another write that will
1282     // hide the effect of this one.
1283     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1284       if (!IsSameThreadAsLoad(Acc))
1285         return false;
1286       if (!DominatingWrites.count(&Acc))
1287         return false;
1288       for (const Access *DomAcc : DominatingWrites) {
1289         assert(Acc.getLocalInst()->getFunction() ==
1290                    DomAcc->getLocalInst()->getFunction() &&
1291                "Expected dominating writes to be in the same function!");
1292 
1293         if (DomAcc != &Acc &&
1294             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1295           return true;
1296         }
1297       }
1298       return false;
1299     };
1300 
1301     // Run the user callback on all accesses we cannot skip and return if that
1302     // succeeded for all or not.
1303     unsigned NumInterferingAccesses = InterferingAccesses.size();
1304     for (auto &It : InterferingAccesses) {
1305       if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1306           !CanSkipAccess(*It.first, It.second)) {
1307         if (!UserCB(*It.first, It.second))
1308           return false;
1309       }
1310     }
1311     return true;
1312   }
1313 
1314   ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1315                                     int64_t Offset, CallBase &CB,
1316                                     bool FromCallee = false) {
1317     using namespace AA::PointerInfo;
1318     if (!OtherAA.getState().isValidState() || !isValidState())
1319       return indicatePessimisticFixpoint();
1320 
1321     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1322     bool IsByval =
1323         FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1324 
1325     // Combine the accesses bin by bin.
1326     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1327     for (auto &It : OtherAAImpl.getState()) {
1328       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1329       if (Offset != OffsetAndSize::Unknown)
1330         OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
1331       Accesses *Bin = AccessBins.lookup(OAS);
1332       for (const AAPointerInfo::Access &RAcc : *It.second) {
1333         if (IsByval && !RAcc.isRead())
1334           continue;
1335         bool UsedAssumedInformation = false;
1336         AccessKind AK = RAcc.getKind();
1337         Optional<Value *> Content = RAcc.getContent();
1338         if (FromCallee) {
1339           Content = A.translateArgumentToCallSiteContent(
1340               RAcc.getContent(), CB, *this, UsedAssumedInformation);
1341           AK = AccessKind(
1342               AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
1343         }
1344         Changed =
1345             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1346                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1347       }
1348     }
1349     return Changed;
1350   }
1351 
1352   /// Statistic tracking for all AAPointerInfo implementations.
1353   /// See AbstractAttribute::trackStatistics().
1354   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1355 };
1356 
1357 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1358   using AccessKind = AAPointerInfo::AccessKind;
1359   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1360       : AAPointerInfoImpl(IRP, A) {}
1361 
1362   /// See AbstractAttribute::initialize(...).
1363   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1364 
1365   /// Deal with an access and signal if it was handled successfully.
1366   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1367                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1368                     ChangeStatus &Changed, Type *Ty,
1369                     int64_t Size = OffsetAndSize::Unknown) {
1370     using namespace AA::PointerInfo;
1371     // No need to find a size if one is given or the offset is unknown.
1372     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1373         Ty) {
1374       const DataLayout &DL = A.getDataLayout();
1375       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1376       if (!AccessSize.isScalable())
1377         Size = AccessSize.getFixedSize();
1378     }
1379     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1380     return true;
1381   };
1382 
1383   /// Helper struct, will support ranges eventually.
1384   struct OffsetInfo {
1385     int64_t Offset = OffsetAndSize::Unknown;
1386 
1387     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1388   };
1389 
1390   /// See AbstractAttribute::updateImpl(...).
1391   ChangeStatus updateImpl(Attributor &A) override {
1392     using namespace AA::PointerInfo;
1393     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1394     Value &AssociatedValue = getAssociatedValue();
1395 
1396     const DataLayout &DL = A.getDataLayout();
1397     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1398     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1399 
1400     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1401                                      bool &Follow) {
1402       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1403       UsrOI = PtrOI;
1404       Follow = true;
1405       return true;
1406     };
1407 
1408     const auto *TLI = getAnchorScope()
1409                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1410                                 *getAnchorScope())
1411                           : nullptr;
1412     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1413       Value *CurPtr = U.get();
1414       User *Usr = U.getUser();
1415       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1416                         << *Usr << "\n");
1417       assert(OffsetInfoMap.count(CurPtr) &&
1418              "The current pointer offset should have been seeded!");
1419 
1420       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1421         if (CE->isCast())
1422           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1423         if (CE->isCompare())
1424           return true;
1425         if (!isa<GEPOperator>(CE)) {
1426           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1427                             << "\n");
1428           return false;
1429         }
1430       }
1431       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1432         // Note the order here, the Usr access might change the map, CurPtr is
1433         // already in it though.
1434         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1435         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1436         UsrOI = PtrOI;
1437 
1438         // TODO: Use range information.
1439         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1440             !GEP->hasAllConstantIndices()) {
1441           UsrOI.Offset = OffsetAndSize::Unknown;
1442           Follow = true;
1443           return true;
1444         }
1445 
1446         SmallVector<Value *, 8> Indices;
1447         for (Use &Idx : GEP->indices()) {
1448           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1449             Indices.push_back(CIdx);
1450             continue;
1451           }
1452 
1453           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1454                             << " : " << *Idx << "\n");
1455           return false;
1456         }
1457         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1458                                           GEP->getSourceElementType(), Indices);
1459         Follow = true;
1460         return true;
1461       }
1462       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1463         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1464 
1465       // For PHIs we need to take care of the recurrence explicitly as the value
1466       // might change while we iterate through a loop. For now, we give up if
1467       // the PHI is not invariant.
1468       if (isa<PHINode>(Usr)) {
1469         // Note the order here, the Usr access might change the map, CurPtr is
1470         // already in it though.
1471         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1472         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1473         // Check if the PHI is invariant (so far).
1474         if (UsrOI == PtrOI)
1475           return true;
1476 
1477         // Check if the PHI operand has already an unknown offset as we can't
1478         // improve on that anymore.
1479         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1480           UsrOI = PtrOI;
1481           Follow = true;
1482           return true;
1483         }
1484 
1485         // Check if the PHI operand is not dependent on the PHI itself.
1486         // TODO: This is not great as we look at the pointer type. However, it
1487         // is unclear where the Offset size comes from with typeless pointers.
1488         APInt Offset(
1489             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1490             0);
1491         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1492                                     DL, Offset, /* AllowNonInbounds */ true)) {
1493           if (Offset != PtrOI.Offset) {
1494             LLVM_DEBUG(dbgs()
1495                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1496                        << *CurPtr << " in " << *Usr << "\n");
1497             return false;
1498           }
1499           return HandlePassthroughUser(Usr, PtrOI, Follow);
1500         }
1501 
1502         // TODO: Approximate in case we know the direction of the recurrence.
1503         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1504                           << *CurPtr << " in " << *Usr << "\n");
1505         UsrOI = PtrOI;
1506         UsrOI.Offset = OffsetAndSize::Unknown;
1507         Follow = true;
1508         return true;
1509       }
1510 
1511       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1512         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1513                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1514                             Changed, LoadI->getType());
1515       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1516         if (StoreI->getValueOperand() == CurPtr) {
1517           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1518                             << *StoreI << "\n");
1519           return false;
1520         }
1521         bool UsedAssumedInformation = false;
1522         Optional<Value *> Content = A.getAssumedSimplified(
1523             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1524         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1525                             OffsetInfoMap[CurPtr].Offset, Changed,
1526                             StoreI->getValueOperand()->getType());
1527       }
1528       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1529         if (CB->isLifetimeStartOrEnd())
1530           return true;
1531         if (TLI && isFreeCall(CB, TLI))
1532           return true;
1533         if (CB->isArgOperand(&U)) {
1534           unsigned ArgNo = CB->getArgOperandNo(&U);
1535           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1536               *this, IRPosition::callsite_argument(*CB, ArgNo),
1537               DepClassTy::REQUIRED);
1538           Changed = translateAndAddState(A, CSArgPI,
1539                                          OffsetInfoMap[CurPtr].Offset, *CB) |
1540                     Changed;
1541           return true;
1542         }
1543         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1544                           << "\n");
1545         // TODO: Allow some call uses
1546         return false;
1547       }
1548 
1549       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1550       return false;
1551     };
1552     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1553       if (OffsetInfoMap.count(NewU))
1554         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1555       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1556       return true;
1557     };
1558     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1559                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1560                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
1561       return indicatePessimisticFixpoint();
1562 
1563     LLVM_DEBUG({
1564       dbgs() << "Accesses by bin after update:\n";
1565       for (auto &It : AccessBins) {
1566         dbgs() << "[" << It.first.getOffset() << "-"
1567                << It.first.getOffset() + It.first.getSize()
1568                << "] : " << It.getSecond()->size() << "\n";
1569         for (auto &Acc : *It.getSecond()) {
1570           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1571                  << "\n";
1572           if (Acc.getLocalInst() != Acc.getRemoteInst())
1573             dbgs() << "     -->                         "
1574                    << *Acc.getRemoteInst() << "\n";
1575           if (!Acc.isWrittenValueYetUndetermined()) {
1576             if (Acc.getWrittenValue())
1577               dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
1578             else
1579               dbgs() << "       - c: <unknown>\n";
1580           }
1581         }
1582       }
1583     });
1584 
1585     return Changed;
1586   }
1587 
1588   /// See AbstractAttribute::trackStatistics()
1589   void trackStatistics() const override {
1590     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1591   }
1592 };
1593 
1594 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1595   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1596       : AAPointerInfoImpl(IRP, A) {}
1597 
1598   /// See AbstractAttribute::updateImpl(...).
1599   ChangeStatus updateImpl(Attributor &A) override {
1600     return indicatePessimisticFixpoint();
1601   }
1602 
1603   /// See AbstractAttribute::trackStatistics()
1604   void trackStatistics() const override {
1605     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1606   }
1607 };
1608 
1609 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1610   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1611       : AAPointerInfoFloating(IRP, A) {}
1612 
1613   /// See AbstractAttribute::initialize(...).
1614   void initialize(Attributor &A) override {
1615     AAPointerInfoFloating::initialize(A);
1616     if (getAnchorScope()->isDeclaration())
1617       indicatePessimisticFixpoint();
1618   }
1619 
1620   /// See AbstractAttribute::trackStatistics()
1621   void trackStatistics() const override {
1622     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1623   }
1624 };
1625 
1626 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1627   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1628       : AAPointerInfoFloating(IRP, A) {}
1629 
1630   /// See AbstractAttribute::updateImpl(...).
1631   ChangeStatus updateImpl(Attributor &A) override {
1632     using namespace AA::PointerInfo;
1633     // We handle memory intrinsics explicitly, at least the first (=
1634     // destination) and second (=source) arguments as we know how they are
1635     // accessed.
1636     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1637       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1638       int64_t LengthVal = OffsetAndSize::Unknown;
1639       if (Length)
1640         LengthVal = Length->getSExtValue();
1641       Value &Ptr = getAssociatedValue();
1642       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1643       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1644       if (ArgNo == 0) {
1645         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1646                      nullptr, LengthVal);
1647       } else if (ArgNo == 1) {
1648         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1649                      nullptr, LengthVal);
1650       } else {
1651         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1652                           << *MI << "\n");
1653         return indicatePessimisticFixpoint();
1654       }
1655       return Changed;
1656     }
1657 
1658     // TODO: Once we have call site specific value information we can provide
1659     //       call site specific liveness information and then it makes
1660     //       sense to specialize attributes for call sites arguments instead of
1661     //       redirecting requests to the callee argument.
1662     Argument *Arg = getAssociatedArgument();
1663     if (!Arg)
1664       return indicatePessimisticFixpoint();
1665     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1666     auto &ArgAA =
1667         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1668     return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1669                                 /* FromCallee */ true);
1670   }
1671 
1672   /// See AbstractAttribute::trackStatistics()
1673   void trackStatistics() const override {
1674     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1675   }
1676 };
1677 
1678 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1679   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1680       : AAPointerInfoFloating(IRP, A) {}
1681 
1682   /// See AbstractAttribute::trackStatistics()
1683   void trackStatistics() const override {
1684     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1685   }
1686 };
1687 } // namespace
1688 
1689 /// -----------------------NoUnwind Function Attribute--------------------------
1690 
1691 namespace {
1692 struct AANoUnwindImpl : AANoUnwind {
1693   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1694 
1695   const std::string getAsStr() const override {
1696     return getAssumed() ? "nounwind" : "may-unwind";
1697   }
1698 
1699   /// See AbstractAttribute::updateImpl(...).
1700   ChangeStatus updateImpl(Attributor &A) override {
1701     auto Opcodes = {
1702         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1703         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1704         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1705 
1706     auto CheckForNoUnwind = [&](Instruction &I) {
1707       if (!I.mayThrow())
1708         return true;
1709 
1710       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1711         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1712             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1713         return NoUnwindAA.isAssumedNoUnwind();
1714       }
1715       return false;
1716     };
1717 
1718     bool UsedAssumedInformation = false;
1719     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1720                                    UsedAssumedInformation))
1721       return indicatePessimisticFixpoint();
1722 
1723     return ChangeStatus::UNCHANGED;
1724   }
1725 };
1726 
1727 struct AANoUnwindFunction final : public AANoUnwindImpl {
1728   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1729       : AANoUnwindImpl(IRP, A) {}
1730 
1731   /// See AbstractAttribute::trackStatistics()
1732   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1733 };
1734 
1735 /// NoUnwind attribute deduction for a call sites.
1736 struct AANoUnwindCallSite final : AANoUnwindImpl {
1737   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1738       : AANoUnwindImpl(IRP, A) {}
1739 
1740   /// See AbstractAttribute::initialize(...).
1741   void initialize(Attributor &A) override {
1742     AANoUnwindImpl::initialize(A);
1743     Function *F = getAssociatedFunction();
1744     if (!F || F->isDeclaration())
1745       indicatePessimisticFixpoint();
1746   }
1747 
1748   /// See AbstractAttribute::updateImpl(...).
1749   ChangeStatus updateImpl(Attributor &A) override {
1750     // TODO: Once we have call site specific value information we can provide
1751     //       call site specific liveness information and then it makes
1752     //       sense to specialize attributes for call sites arguments instead of
1753     //       redirecting requests to the callee argument.
1754     Function *F = getAssociatedFunction();
1755     const IRPosition &FnPos = IRPosition::function(*F);
1756     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1757     return clampStateAndIndicateChange(getState(), FnAA.getState());
1758   }
1759 
1760   /// See AbstractAttribute::trackStatistics()
1761   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1762 };
1763 } // namespace
1764 
1765 /// --------------------- Function Return Values -------------------------------
1766 
1767 namespace {
1768 /// "Attribute" that collects all potential returned values and the return
1769 /// instructions that they arise from.
1770 ///
1771 /// If there is a unique returned value R, the manifest method will:
1772 ///   - mark R with the "returned" attribute, if R is an argument.
1773 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1774 
1775   /// Mapping of values potentially returned by the associated function to the
1776   /// return instructions that might return them.
1777   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1778 
1779   /// State flags
1780   ///
1781   ///{
1782   bool IsFixed = false;
1783   bool IsValidState = true;
1784   ///}
1785 
1786 public:
1787   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1788       : AAReturnedValues(IRP, A) {}
1789 
1790   /// See AbstractAttribute::initialize(...).
1791   void initialize(Attributor &A) override {
1792     // Reset the state.
1793     IsFixed = false;
1794     IsValidState = true;
1795     ReturnedValues.clear();
1796 
1797     Function *F = getAssociatedFunction();
1798     if (!F || F->isDeclaration()) {
1799       indicatePessimisticFixpoint();
1800       return;
1801     }
1802     assert(!F->getReturnType()->isVoidTy() &&
1803            "Did not expect a void return type!");
1804 
1805     // The map from instruction opcodes to those instructions in the function.
1806     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1807 
1808     // Look through all arguments, if one is marked as returned we are done.
1809     for (Argument &Arg : F->args()) {
1810       if (Arg.hasReturnedAttr()) {
1811         auto &ReturnInstSet = ReturnedValues[&Arg];
1812         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1813           for (Instruction *RI : *Insts)
1814             ReturnInstSet.insert(cast<ReturnInst>(RI));
1815 
1816         indicateOptimisticFixpoint();
1817         return;
1818       }
1819     }
1820 
1821     if (!A.isFunctionIPOAmendable(*F))
1822       indicatePessimisticFixpoint();
1823   }
1824 
1825   /// See AbstractAttribute::manifest(...).
1826   ChangeStatus manifest(Attributor &A) override;
1827 
1828   /// See AbstractAttribute::getState(...).
1829   AbstractState &getState() override { return *this; }
1830 
1831   /// See AbstractAttribute::getState(...).
1832   const AbstractState &getState() const override { return *this; }
1833 
1834   /// See AbstractAttribute::updateImpl(Attributor &A).
1835   ChangeStatus updateImpl(Attributor &A) override;
1836 
1837   llvm::iterator_range<iterator> returned_values() override {
1838     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1839   }
1840 
1841   llvm::iterator_range<const_iterator> returned_values() const override {
1842     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1843   }
1844 
1845   /// Return the number of potential return values, -1 if unknown.
1846   size_t getNumReturnValues() const override {
1847     return isValidState() ? ReturnedValues.size() : -1;
1848   }
1849 
1850   /// Return an assumed unique return value if a single candidate is found. If
1851   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1852   /// Optional::NoneType.
1853   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1854 
1855   /// See AbstractState::checkForAllReturnedValues(...).
1856   bool checkForAllReturnedValuesAndReturnInsts(
1857       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1858       const override;
1859 
1860   /// Pretty print the attribute similar to the IR representation.
1861   const std::string getAsStr() const override;
1862 
1863   /// See AbstractState::isAtFixpoint().
1864   bool isAtFixpoint() const override { return IsFixed; }
1865 
1866   /// See AbstractState::isValidState().
1867   bool isValidState() const override { return IsValidState; }
1868 
1869   /// See AbstractState::indicateOptimisticFixpoint(...).
1870   ChangeStatus indicateOptimisticFixpoint() override {
1871     IsFixed = true;
1872     return ChangeStatus::UNCHANGED;
1873   }
1874 
1875   ChangeStatus indicatePessimisticFixpoint() override {
1876     IsFixed = true;
1877     IsValidState = false;
1878     return ChangeStatus::CHANGED;
1879   }
1880 };
1881 
1882 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1883   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1884 
1885   // Bookkeeping.
1886   assert(isValidState());
1887   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1888                   "Number of function with known return values");
1889 
1890   // Check if we have an assumed unique return value that we could manifest.
1891   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1892 
1893   if (!UniqueRV || !UniqueRV.getValue())
1894     return Changed;
1895 
1896   // Bookkeeping.
1897   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1898                   "Number of function with unique return");
1899   // If the assumed unique return value is an argument, annotate it.
1900   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1901     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1902             getAssociatedFunction()->getReturnType())) {
1903       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1904       Changed = IRAttribute::manifest(A);
1905     }
1906   }
1907   return Changed;
1908 }
1909 
1910 const std::string AAReturnedValuesImpl::getAsStr() const {
1911   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1912          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1913 }
1914 
1915 Optional<Value *>
1916 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1917   // If checkForAllReturnedValues provides a unique value, ignoring potential
1918   // undef values that can also be present, it is assumed to be the actual
1919   // return value and forwarded to the caller of this method. If there are
1920   // multiple, a nullptr is returned indicating there cannot be a unique
1921   // returned value.
1922   Optional<Value *> UniqueRV;
1923   Type *Ty = getAssociatedFunction()->getReturnType();
1924 
1925   auto Pred = [&](Value &RV) -> bool {
1926     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1927     return UniqueRV != Optional<Value *>(nullptr);
1928   };
1929 
1930   if (!A.checkForAllReturnedValues(Pred, *this))
1931     UniqueRV = nullptr;
1932 
1933   return UniqueRV;
1934 }
1935 
1936 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1937     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1938     const {
1939   if (!isValidState())
1940     return false;
1941 
1942   // Check all returned values but ignore call sites as long as we have not
1943   // encountered an overdefined one during an update.
1944   for (auto &It : ReturnedValues) {
1945     Value *RV = It.first;
1946     if (!Pred(*RV, It.second))
1947       return false;
1948   }
1949 
1950   return true;
1951 }
1952 
1953 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1954   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1955 
1956   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1957                            bool) -> bool {
1958     assert(AA::isValidInScope(V, Ret.getFunction()) &&
1959            "Assumed returned value should be valid in function scope!");
1960     if (ReturnedValues[&V].insert(&Ret))
1961       Changed = ChangeStatus::CHANGED;
1962     return true;
1963   };
1964 
1965   bool UsedAssumedInformation = false;
1966   auto ReturnInstCB = [&](Instruction &I) {
1967     ReturnInst &Ret = cast<ReturnInst>(I);
1968     return genericValueTraversal<ReturnInst>(
1969         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1970         &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1971         /* MaxValues */ 16,
1972         /* StripCB */ nullptr, AA::Intraprocedural);
1973   };
1974 
1975   // Discover returned values from all live returned instructions in the
1976   // associated function.
1977   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1978                                  UsedAssumedInformation))
1979     return indicatePessimisticFixpoint();
1980   return Changed;
1981 }
1982 
1983 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1984   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1985       : AAReturnedValuesImpl(IRP, A) {}
1986 
1987   /// See AbstractAttribute::trackStatistics()
1988   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1989 };
1990 
1991 /// Returned values information for a call sites.
1992 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1993   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1994       : AAReturnedValuesImpl(IRP, A) {}
1995 
1996   /// See AbstractAttribute::initialize(...).
1997   void initialize(Attributor &A) override {
1998     // TODO: Once we have call site specific value information we can provide
1999     //       call site specific liveness information and then it makes
2000     //       sense to specialize attributes for call sites instead of
2001     //       redirecting requests to the callee.
2002     llvm_unreachable("Abstract attributes for returned values are not "
2003                      "supported for call sites yet!");
2004   }
2005 
2006   /// See AbstractAttribute::updateImpl(...).
2007   ChangeStatus updateImpl(Attributor &A) override {
2008     return indicatePessimisticFixpoint();
2009   }
2010 
2011   /// See AbstractAttribute::trackStatistics()
2012   void trackStatistics() const override {}
2013 };
2014 } // namespace
2015 
2016 /// ------------------------ NoSync Function Attribute -------------------------
2017 
2018 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
2019   if (!I->isAtomic())
2020     return false;
2021 
2022   if (auto *FI = dyn_cast<FenceInst>(I))
2023     // All legal orderings for fence are stronger than monotonic.
2024     return FI->getSyncScopeID() != SyncScope::SingleThread;
2025   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
2026     // Unordered is not a legal ordering for cmpxchg.
2027     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
2028             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
2029   }
2030 
2031   AtomicOrdering Ordering;
2032   switch (I->getOpcode()) {
2033   case Instruction::AtomicRMW:
2034     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
2035     break;
2036   case Instruction::Store:
2037     Ordering = cast<StoreInst>(I)->getOrdering();
2038     break;
2039   case Instruction::Load:
2040     Ordering = cast<LoadInst>(I)->getOrdering();
2041     break;
2042   default:
2043     llvm_unreachable(
2044         "New atomic operations need to be known in the attributor.");
2045   }
2046 
2047   return (Ordering != AtomicOrdering::Unordered &&
2048           Ordering != AtomicOrdering::Monotonic);
2049 }
2050 
2051 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2052 /// which would be nosync except that they have a volatile flag.  All other
2053 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2054 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2055   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2056     return !MI->isVolatile();
2057   return false;
2058 }
2059 
2060 namespace {
2061 struct AANoSyncImpl : AANoSync {
2062   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2063 
2064   const std::string getAsStr() const override {
2065     return getAssumed() ? "nosync" : "may-sync";
2066   }
2067 
2068   /// See AbstractAttribute::updateImpl(...).
2069   ChangeStatus updateImpl(Attributor &A) override;
2070 };
2071 
2072 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2073 
2074   auto CheckRWInstForNoSync = [&](Instruction &I) {
2075     return AA::isNoSyncInst(A, I, *this);
2076   };
2077 
2078   auto CheckForNoSync = [&](Instruction &I) {
2079     // At this point we handled all read/write effects and they are all
2080     // nosync, so they can be skipped.
2081     if (I.mayReadOrWriteMemory())
2082       return true;
2083 
2084     // non-convergent and readnone imply nosync.
2085     return !cast<CallBase>(I).isConvergent();
2086   };
2087 
2088   bool UsedAssumedInformation = false;
2089   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2090                                           UsedAssumedInformation) ||
2091       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2092                                          UsedAssumedInformation))
2093     return indicatePessimisticFixpoint();
2094 
2095   return ChangeStatus::UNCHANGED;
2096 }
2097 
2098 struct AANoSyncFunction final : public AANoSyncImpl {
2099   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2100       : AANoSyncImpl(IRP, A) {}
2101 
2102   /// See AbstractAttribute::trackStatistics()
2103   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2104 };
2105 
2106 /// NoSync attribute deduction for a call sites.
2107 struct AANoSyncCallSite final : AANoSyncImpl {
2108   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2109       : AANoSyncImpl(IRP, A) {}
2110 
2111   /// See AbstractAttribute::initialize(...).
2112   void initialize(Attributor &A) override {
2113     AANoSyncImpl::initialize(A);
2114     Function *F = getAssociatedFunction();
2115     if (!F || F->isDeclaration())
2116       indicatePessimisticFixpoint();
2117   }
2118 
2119   /// See AbstractAttribute::updateImpl(...).
2120   ChangeStatus updateImpl(Attributor &A) override {
2121     // TODO: Once we have call site specific value information we can provide
2122     //       call site specific liveness information and then it makes
2123     //       sense to specialize attributes for call sites arguments instead of
2124     //       redirecting requests to the callee argument.
2125     Function *F = getAssociatedFunction();
2126     const IRPosition &FnPos = IRPosition::function(*F);
2127     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2128     return clampStateAndIndicateChange(getState(), FnAA.getState());
2129   }
2130 
2131   /// See AbstractAttribute::trackStatistics()
2132   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2133 };
2134 } // namespace
2135 
2136 /// ------------------------ No-Free Attributes ----------------------------
2137 
2138 namespace {
2139 struct AANoFreeImpl : public AANoFree {
2140   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2141 
2142   /// See AbstractAttribute::updateImpl(...).
2143   ChangeStatus updateImpl(Attributor &A) override {
2144     auto CheckForNoFree = [&](Instruction &I) {
2145       const auto &CB = cast<CallBase>(I);
2146       if (CB.hasFnAttr(Attribute::NoFree))
2147         return true;
2148 
2149       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2150           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2151       return NoFreeAA.isAssumedNoFree();
2152     };
2153 
2154     bool UsedAssumedInformation = false;
2155     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2156                                            UsedAssumedInformation))
2157       return indicatePessimisticFixpoint();
2158     return ChangeStatus::UNCHANGED;
2159   }
2160 
2161   /// See AbstractAttribute::getAsStr().
2162   const std::string getAsStr() const override {
2163     return getAssumed() ? "nofree" : "may-free";
2164   }
2165 };
2166 
2167 struct AANoFreeFunction final : public AANoFreeImpl {
2168   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2169       : AANoFreeImpl(IRP, A) {}
2170 
2171   /// See AbstractAttribute::trackStatistics()
2172   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2173 };
2174 
2175 /// NoFree attribute deduction for a call sites.
2176 struct AANoFreeCallSite final : AANoFreeImpl {
2177   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2178       : AANoFreeImpl(IRP, A) {}
2179 
2180   /// See AbstractAttribute::initialize(...).
2181   void initialize(Attributor &A) override {
2182     AANoFreeImpl::initialize(A);
2183     Function *F = getAssociatedFunction();
2184     if (!F || F->isDeclaration())
2185       indicatePessimisticFixpoint();
2186   }
2187 
2188   /// See AbstractAttribute::updateImpl(...).
2189   ChangeStatus updateImpl(Attributor &A) override {
2190     // TODO: Once we have call site specific value information we can provide
2191     //       call site specific liveness information and then it makes
2192     //       sense to specialize attributes for call sites arguments instead of
2193     //       redirecting requests to the callee argument.
2194     Function *F = getAssociatedFunction();
2195     const IRPosition &FnPos = IRPosition::function(*F);
2196     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2197     return clampStateAndIndicateChange(getState(), FnAA.getState());
2198   }
2199 
2200   /// See AbstractAttribute::trackStatistics()
2201   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2202 };
2203 
2204 /// NoFree attribute for floating values.
2205 struct AANoFreeFloating : AANoFreeImpl {
2206   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2207       : AANoFreeImpl(IRP, A) {}
2208 
2209   /// See AbstractAttribute::trackStatistics()
2210   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2211 
2212   /// See Abstract Attribute::updateImpl(...).
2213   ChangeStatus updateImpl(Attributor &A) override {
2214     const IRPosition &IRP = getIRPosition();
2215 
2216     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2217         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2218     if (NoFreeAA.isAssumedNoFree())
2219       return ChangeStatus::UNCHANGED;
2220 
2221     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2222     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2223       Instruction *UserI = cast<Instruction>(U.getUser());
2224       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2225         if (CB->isBundleOperand(&U))
2226           return false;
2227         if (!CB->isArgOperand(&U))
2228           return true;
2229         unsigned ArgNo = CB->getArgOperandNo(&U);
2230 
2231         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2232             *this, IRPosition::callsite_argument(*CB, ArgNo),
2233             DepClassTy::REQUIRED);
2234         return NoFreeArg.isAssumedNoFree();
2235       }
2236 
2237       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2238           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2239         Follow = true;
2240         return true;
2241       }
2242       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2243           isa<ReturnInst>(UserI))
2244         return true;
2245 
2246       // Unknown user.
2247       return false;
2248     };
2249     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2250       return indicatePessimisticFixpoint();
2251 
2252     return ChangeStatus::UNCHANGED;
2253   }
2254 };
2255 
2256 /// NoFree attribute for a call site argument.
2257 struct AANoFreeArgument final : AANoFreeFloating {
2258   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2259       : AANoFreeFloating(IRP, A) {}
2260 
2261   /// See AbstractAttribute::trackStatistics()
2262   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2263 };
2264 
2265 /// NoFree attribute for call site arguments.
2266 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2267   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2268       : AANoFreeFloating(IRP, A) {}
2269 
2270   /// See AbstractAttribute::updateImpl(...).
2271   ChangeStatus updateImpl(Attributor &A) override {
2272     // TODO: Once we have call site specific value information we can provide
2273     //       call site specific liveness information and then it makes
2274     //       sense to specialize attributes for call sites arguments instead of
2275     //       redirecting requests to the callee argument.
2276     Argument *Arg = getAssociatedArgument();
2277     if (!Arg)
2278       return indicatePessimisticFixpoint();
2279     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2280     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2281     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2282   }
2283 
2284   /// See AbstractAttribute::trackStatistics()
2285   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2286 };
2287 
2288 /// NoFree attribute for function return value.
2289 struct AANoFreeReturned final : AANoFreeFloating {
2290   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2291       : AANoFreeFloating(IRP, A) {
2292     llvm_unreachable("NoFree is not applicable to function returns!");
2293   }
2294 
2295   /// See AbstractAttribute::initialize(...).
2296   void initialize(Attributor &A) override {
2297     llvm_unreachable("NoFree is not applicable to function returns!");
2298   }
2299 
2300   /// See AbstractAttribute::updateImpl(...).
2301   ChangeStatus updateImpl(Attributor &A) override {
2302     llvm_unreachable("NoFree is not applicable to function returns!");
2303   }
2304 
2305   /// See AbstractAttribute::trackStatistics()
2306   void trackStatistics() const override {}
2307 };
2308 
2309 /// NoFree attribute deduction for a call site return value.
2310 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2311   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2312       : AANoFreeFloating(IRP, A) {}
2313 
2314   ChangeStatus manifest(Attributor &A) override {
2315     return ChangeStatus::UNCHANGED;
2316   }
2317   /// See AbstractAttribute::trackStatistics()
2318   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2319 };
2320 } // namespace
2321 
2322 /// ------------------------ NonNull Argument Attribute ------------------------
2323 namespace {
2324 static int64_t getKnownNonNullAndDerefBytesForUse(
2325     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2326     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2327   TrackUse = false;
2328 
2329   const Value *UseV = U->get();
2330   if (!UseV->getType()->isPointerTy())
2331     return 0;
2332 
2333   // We need to follow common pointer manipulation uses to the accesses they
2334   // feed into. We can try to be smart to avoid looking through things we do not
2335   // like for now, e.g., non-inbounds GEPs.
2336   if (isa<CastInst>(I)) {
2337     TrackUse = true;
2338     return 0;
2339   }
2340 
2341   if (isa<GetElementPtrInst>(I)) {
2342     TrackUse = true;
2343     return 0;
2344   }
2345 
2346   Type *PtrTy = UseV->getType();
2347   const Function *F = I->getFunction();
2348   bool NullPointerIsDefined =
2349       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2350   const DataLayout &DL = A.getInfoCache().getDL();
2351   if (const auto *CB = dyn_cast<CallBase>(I)) {
2352     if (CB->isBundleOperand(U)) {
2353       if (RetainedKnowledge RK = getKnowledgeFromUse(
2354               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2355         IsNonNull |=
2356             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2357         return RK.ArgValue;
2358       }
2359       return 0;
2360     }
2361 
2362     if (CB->isCallee(U)) {
2363       IsNonNull |= !NullPointerIsDefined;
2364       return 0;
2365     }
2366 
2367     unsigned ArgNo = CB->getArgOperandNo(U);
2368     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2369     // As long as we only use known information there is no need to track
2370     // dependences here.
2371     auto &DerefAA =
2372         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2373     IsNonNull |= DerefAA.isKnownNonNull();
2374     return DerefAA.getKnownDereferenceableBytes();
2375   }
2376 
2377   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2378   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2379     return 0;
2380 
2381   int64_t Offset;
2382   const Value *Base =
2383       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2384   if (Base && Base == &AssociatedValue) {
2385     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2386     IsNonNull |= !NullPointerIsDefined;
2387     return std::max(int64_t(0), DerefBytes);
2388   }
2389 
2390   /// Corner case when an offset is 0.
2391   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2392                                           /*AllowNonInbounds*/ true);
2393   if (Base && Base == &AssociatedValue && Offset == 0) {
2394     int64_t DerefBytes = Loc->Size.getValue();
2395     IsNonNull |= !NullPointerIsDefined;
2396     return std::max(int64_t(0), DerefBytes);
2397   }
2398 
2399   return 0;
2400 }
2401 
2402 struct AANonNullImpl : AANonNull {
2403   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2404       : AANonNull(IRP, A),
2405         NullIsDefined(NullPointerIsDefined(
2406             getAnchorScope(),
2407             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2408 
2409   /// See AbstractAttribute::initialize(...).
2410   void initialize(Attributor &A) override {
2411     Value &V = *getAssociatedValue().stripPointerCasts();
2412     if (!NullIsDefined &&
2413         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2414                 /* IgnoreSubsumingPositions */ false, &A)) {
2415       indicateOptimisticFixpoint();
2416       return;
2417     }
2418 
2419     if (isa<ConstantPointerNull>(V)) {
2420       indicatePessimisticFixpoint();
2421       return;
2422     }
2423 
2424     AANonNull::initialize(A);
2425 
2426     bool CanBeNull, CanBeFreed;
2427     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2428                                          CanBeFreed)) {
2429       if (!CanBeNull) {
2430         indicateOptimisticFixpoint();
2431         return;
2432       }
2433     }
2434 
2435     if (isa<GlobalValue>(V)) {
2436       indicatePessimisticFixpoint();
2437       return;
2438     }
2439 
2440     if (Instruction *CtxI = getCtxI())
2441       followUsesInMBEC(*this, A, getState(), *CtxI);
2442   }
2443 
2444   /// See followUsesInMBEC
2445   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2446                        AANonNull::StateType &State) {
2447     bool IsNonNull = false;
2448     bool TrackUse = false;
2449     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2450                                        IsNonNull, TrackUse);
2451     State.setKnown(IsNonNull);
2452     return TrackUse;
2453   }
2454 
2455   /// See AbstractAttribute::getAsStr().
2456   const std::string getAsStr() const override {
2457     return getAssumed() ? "nonnull" : "may-null";
2458   }
2459 
2460   /// Flag to determine if the underlying value can be null and still allow
2461   /// valid accesses.
2462   const bool NullIsDefined;
2463 };
2464 
2465 /// NonNull attribute for a floating value.
2466 struct AANonNullFloating : public AANonNullImpl {
2467   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2468       : AANonNullImpl(IRP, A) {}
2469 
2470   /// See AbstractAttribute::updateImpl(...).
2471   ChangeStatus updateImpl(Attributor &A) override {
2472     const DataLayout &DL = A.getDataLayout();
2473 
2474     DominatorTree *DT = nullptr;
2475     AssumptionCache *AC = nullptr;
2476     InformationCache &InfoCache = A.getInfoCache();
2477     if (const Function *Fn = getAnchorScope()) {
2478       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2479       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2480     }
2481 
2482     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2483                             AANonNull::StateType &T, bool Stripped) -> bool {
2484       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2485                                              DepClassTy::REQUIRED);
2486       if (!Stripped && this == &AA) {
2487         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2488           T.indicatePessimisticFixpoint();
2489       } else {
2490         // Use abstract attribute information.
2491         const AANonNull::StateType &NS = AA.getState();
2492         T ^= NS;
2493       }
2494       return T.isValidState();
2495     };
2496 
2497     StateType T;
2498     bool UsedAssumedInformation = false;
2499     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2500                                           VisitValueCB, getCtxI(),
2501                                           UsedAssumedInformation))
2502       return indicatePessimisticFixpoint();
2503 
2504     return clampStateAndIndicateChange(getState(), T);
2505   }
2506 
2507   /// See AbstractAttribute::trackStatistics()
2508   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2509 };
2510 
2511 /// NonNull attribute for function return value.
2512 struct AANonNullReturned final
2513     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2514   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2515       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2516 
2517   /// See AbstractAttribute::getAsStr().
2518   const std::string getAsStr() const override {
2519     return getAssumed() ? "nonnull" : "may-null";
2520   }
2521 
2522   /// See AbstractAttribute::trackStatistics()
2523   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2524 };
2525 
2526 /// NonNull attribute for function argument.
2527 struct AANonNullArgument final
2528     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2529   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2530       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2531 
2532   /// See AbstractAttribute::trackStatistics()
2533   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2534 };
2535 
2536 struct AANonNullCallSiteArgument final : AANonNullFloating {
2537   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2538       : AANonNullFloating(IRP, A) {}
2539 
2540   /// See AbstractAttribute::trackStatistics()
2541   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2542 };
2543 
2544 /// NonNull attribute for a call site return position.
2545 struct AANonNullCallSiteReturned final
2546     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2547   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2548       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2549 
2550   /// See AbstractAttribute::trackStatistics()
2551   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2552 };
2553 } // namespace
2554 
2555 /// ------------------------ No-Recurse Attributes ----------------------------
2556 
2557 namespace {
2558 struct AANoRecurseImpl : public AANoRecurse {
2559   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2560 
2561   /// See AbstractAttribute::getAsStr()
2562   const std::string getAsStr() const override {
2563     return getAssumed() ? "norecurse" : "may-recurse";
2564   }
2565 };
2566 
2567 struct AANoRecurseFunction final : AANoRecurseImpl {
2568   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2569       : AANoRecurseImpl(IRP, A) {}
2570 
2571   /// See AbstractAttribute::updateImpl(...).
2572   ChangeStatus updateImpl(Attributor &A) override {
2573 
2574     // If all live call sites are known to be no-recurse, we are as well.
2575     auto CallSitePred = [&](AbstractCallSite ACS) {
2576       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2577           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2578           DepClassTy::NONE);
2579       return NoRecurseAA.isKnownNoRecurse();
2580     };
2581     bool UsedAssumedInformation = false;
2582     if (A.checkForAllCallSites(CallSitePred, *this, true,
2583                                UsedAssumedInformation)) {
2584       // If we know all call sites and all are known no-recurse, we are done.
2585       // If all known call sites, which might not be all that exist, are known
2586       // to be no-recurse, we are not done but we can continue to assume
2587       // no-recurse. If one of the call sites we have not visited will become
2588       // live, another update is triggered.
2589       if (!UsedAssumedInformation)
2590         indicateOptimisticFixpoint();
2591       return ChangeStatus::UNCHANGED;
2592     }
2593 
2594     const AAFunctionReachability &EdgeReachability =
2595         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2596                                            DepClassTy::REQUIRED);
2597     if (EdgeReachability.canReach(A, *getAnchorScope()))
2598       return indicatePessimisticFixpoint();
2599     return ChangeStatus::UNCHANGED;
2600   }
2601 
2602   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2603 };
2604 
2605 /// NoRecurse attribute deduction for a call sites.
2606 struct AANoRecurseCallSite final : AANoRecurseImpl {
2607   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2608       : AANoRecurseImpl(IRP, A) {}
2609 
2610   /// See AbstractAttribute::initialize(...).
2611   void initialize(Attributor &A) override {
2612     AANoRecurseImpl::initialize(A);
2613     Function *F = getAssociatedFunction();
2614     if (!F || F->isDeclaration())
2615       indicatePessimisticFixpoint();
2616   }
2617 
2618   /// See AbstractAttribute::updateImpl(...).
2619   ChangeStatus updateImpl(Attributor &A) override {
2620     // TODO: Once we have call site specific value information we can provide
2621     //       call site specific liveness information and then it makes
2622     //       sense to specialize attributes for call sites arguments instead of
2623     //       redirecting requests to the callee argument.
2624     Function *F = getAssociatedFunction();
2625     const IRPosition &FnPos = IRPosition::function(*F);
2626     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2627     return clampStateAndIndicateChange(getState(), FnAA.getState());
2628   }
2629 
2630   /// See AbstractAttribute::trackStatistics()
2631   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2632 };
2633 } // namespace
2634 
2635 /// -------------------- Undefined-Behavior Attributes ------------------------
2636 
2637 namespace {
2638 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2639   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2640       : AAUndefinedBehavior(IRP, A) {}
2641 
2642   /// See AbstractAttribute::updateImpl(...).
2643   // through a pointer (i.e. also branches etc.)
2644   ChangeStatus updateImpl(Attributor &A) override {
2645     const size_t UBPrevSize = KnownUBInsts.size();
2646     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2647 
2648     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2649       // Lang ref now states volatile store is not UB, let's skip them.
2650       if (I.isVolatile() && I.mayWriteToMemory())
2651         return true;
2652 
2653       // Skip instructions that are already saved.
2654       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2655         return true;
2656 
2657       // If we reach here, we know we have an instruction
2658       // that accesses memory through a pointer operand,
2659       // for which getPointerOperand() should give it to us.
2660       Value *PtrOp =
2661           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2662       assert(PtrOp &&
2663              "Expected pointer operand of memory accessing instruction");
2664 
2665       // Either we stopped and the appropriate action was taken,
2666       // or we got back a simplified value to continue.
2667       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2668       if (!SimplifiedPtrOp || !SimplifiedPtrOp.getValue())
2669         return true;
2670       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2671 
2672       // A memory access through a pointer is considered UB
2673       // only if the pointer has constant null value.
2674       // TODO: Expand it to not only check constant values.
2675       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2676         AssumedNoUBInsts.insert(&I);
2677         return true;
2678       }
2679       const Type *PtrTy = PtrOpVal->getType();
2680 
2681       // Because we only consider instructions inside functions,
2682       // assume that a parent function exists.
2683       const Function *F = I.getFunction();
2684 
2685       // A memory access using constant null pointer is only considered UB
2686       // if null pointer is _not_ defined for the target platform.
2687       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2688         AssumedNoUBInsts.insert(&I);
2689       else
2690         KnownUBInsts.insert(&I);
2691       return true;
2692     };
2693 
2694     auto InspectBrInstForUB = [&](Instruction &I) {
2695       // A conditional branch instruction is considered UB if it has `undef`
2696       // condition.
2697 
2698       // Skip instructions that are already saved.
2699       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2700         return true;
2701 
2702       // We know we have a branch instruction.
2703       auto *BrInst = cast<BranchInst>(&I);
2704 
2705       // Unconditional branches are never considered UB.
2706       if (BrInst->isUnconditional())
2707         return true;
2708 
2709       // Either we stopped and the appropriate action was taken,
2710       // or we got back a simplified value to continue.
2711       Optional<Value *> SimplifiedCond =
2712           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2713       if (!SimplifiedCond || !*SimplifiedCond)
2714         return true;
2715       AssumedNoUBInsts.insert(&I);
2716       return true;
2717     };
2718 
2719     auto InspectCallSiteForUB = [&](Instruction &I) {
2720       // Check whether a callsite always cause UB or not
2721 
2722       // Skip instructions that are already saved.
2723       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2724         return true;
2725 
2726       // Check nonnull and noundef argument attribute violation for each
2727       // callsite.
2728       CallBase &CB = cast<CallBase>(I);
2729       Function *Callee = CB.getCalledFunction();
2730       if (!Callee)
2731         return true;
2732       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2733         // If current argument is known to be simplified to null pointer and the
2734         // corresponding argument position is known to have nonnull attribute,
2735         // the argument is poison. Furthermore, if the argument is poison and
2736         // the position is known to have noundef attriubte, this callsite is
2737         // considered UB.
2738         if (idx >= Callee->arg_size())
2739           break;
2740         Value *ArgVal = CB.getArgOperand(idx);
2741         if (!ArgVal)
2742           continue;
2743         // Here, we handle three cases.
2744         //   (1) Not having a value means it is dead. (we can replace the value
2745         //       with undef)
2746         //   (2) Simplified to undef. The argument violate noundef attriubte.
2747         //   (3) Simplified to null pointer where known to be nonnull.
2748         //       The argument is a poison value and violate noundef attribute.
2749         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2750         auto &NoUndefAA =
2751             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2752         if (!NoUndefAA.isKnownNoUndef())
2753           continue;
2754         bool UsedAssumedInformation = false;
2755         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2756             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2757         if (UsedAssumedInformation)
2758           continue;
2759         if (SimplifiedVal && !SimplifiedVal.getValue())
2760           return true;
2761         if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.getValue())) {
2762           KnownUBInsts.insert(&I);
2763           continue;
2764         }
2765         if (!ArgVal->getType()->isPointerTy() ||
2766             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2767           continue;
2768         auto &NonNullAA =
2769             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2770         if (NonNullAA.isKnownNonNull())
2771           KnownUBInsts.insert(&I);
2772       }
2773       return true;
2774     };
2775 
2776     auto InspectReturnInstForUB = [&](Instruction &I) {
2777       auto &RI = cast<ReturnInst>(I);
2778       // Either we stopped and the appropriate action was taken,
2779       // or we got back a simplified return value to continue.
2780       Optional<Value *> SimplifiedRetValue =
2781           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2782       if (!SimplifiedRetValue || !*SimplifiedRetValue)
2783         return true;
2784 
2785       // Check if a return instruction always cause UB or not
2786       // Note: It is guaranteed that the returned position of the anchor
2787       //       scope has noundef attribute when this is called.
2788       //       We also ensure the return position is not "assumed dead"
2789       //       because the returned value was then potentially simplified to
2790       //       `undef` in AAReturnedValues without removing the `noundef`
2791       //       attribute yet.
2792 
2793       // When the returned position has noundef attriubte, UB occurs in the
2794       // following cases.
2795       //   (1) Returned value is known to be undef.
2796       //   (2) The value is known to be a null pointer and the returned
2797       //       position has nonnull attribute (because the returned value is
2798       //       poison).
2799       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2800         auto &NonNullAA = A.getAAFor<AANonNull>(
2801             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2802         if (NonNullAA.isKnownNonNull())
2803           KnownUBInsts.insert(&I);
2804       }
2805 
2806       return true;
2807     };
2808 
2809     bool UsedAssumedInformation = false;
2810     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2811                               {Instruction::Load, Instruction::Store,
2812                                Instruction::AtomicCmpXchg,
2813                                Instruction::AtomicRMW},
2814                               UsedAssumedInformation,
2815                               /* CheckBBLivenessOnly */ true);
2816     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2817                               UsedAssumedInformation,
2818                               /* CheckBBLivenessOnly */ true);
2819     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2820                                       UsedAssumedInformation);
2821 
2822     // If the returned position of the anchor scope has noundef attriubte, check
2823     // all returned instructions.
2824     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2825       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2826       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2827         auto &RetPosNoUndefAA =
2828             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2829         if (RetPosNoUndefAA.isKnownNoUndef())
2830           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2831                                     {Instruction::Ret}, UsedAssumedInformation,
2832                                     /* CheckBBLivenessOnly */ true);
2833       }
2834     }
2835 
2836     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2837         UBPrevSize != KnownUBInsts.size())
2838       return ChangeStatus::CHANGED;
2839     return ChangeStatus::UNCHANGED;
2840   }
2841 
2842   bool isKnownToCauseUB(Instruction *I) const override {
2843     return KnownUBInsts.count(I);
2844   }
2845 
2846   bool isAssumedToCauseUB(Instruction *I) const override {
2847     // In simple words, if an instruction is not in the assumed to _not_
2848     // cause UB, then it is assumed UB (that includes those
2849     // in the KnownUBInsts set). The rest is boilerplate
2850     // is to ensure that it is one of the instructions we test
2851     // for UB.
2852 
2853     switch (I->getOpcode()) {
2854     case Instruction::Load:
2855     case Instruction::Store:
2856     case Instruction::AtomicCmpXchg:
2857     case Instruction::AtomicRMW:
2858       return !AssumedNoUBInsts.count(I);
2859     case Instruction::Br: {
2860       auto *BrInst = cast<BranchInst>(I);
2861       if (BrInst->isUnconditional())
2862         return false;
2863       return !AssumedNoUBInsts.count(I);
2864     } break;
2865     default:
2866       return false;
2867     }
2868     return false;
2869   }
2870 
2871   ChangeStatus manifest(Attributor &A) override {
2872     if (KnownUBInsts.empty())
2873       return ChangeStatus::UNCHANGED;
2874     for (Instruction *I : KnownUBInsts)
2875       A.changeToUnreachableAfterManifest(I);
2876     return ChangeStatus::CHANGED;
2877   }
2878 
2879   /// See AbstractAttribute::getAsStr()
2880   const std::string getAsStr() const override {
2881     return getAssumed() ? "undefined-behavior" : "no-ub";
2882   }
2883 
2884   /// Note: The correctness of this analysis depends on the fact that the
2885   /// following 2 sets will stop changing after some point.
2886   /// "Change" here means that their size changes.
2887   /// The size of each set is monotonically increasing
2888   /// (we only add items to them) and it is upper bounded by the number of
2889   /// instructions in the processed function (we can never save more
2890   /// elements in either set than this number). Hence, at some point,
2891   /// they will stop increasing.
2892   /// Consequently, at some point, both sets will have stopped
2893   /// changing, effectively making the analysis reach a fixpoint.
2894 
2895   /// Note: These 2 sets are disjoint and an instruction can be considered
2896   /// one of 3 things:
2897   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2898   ///    the KnownUBInsts set.
2899   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2900   ///    has a reason to assume it).
2901   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2902   ///    could not find a reason to assume or prove that it can cause UB,
2903   ///    hence it assumes it doesn't. We have a set for these instructions
2904   ///    so that we don't reprocess them in every update.
2905   ///    Note however that instructions in this set may cause UB.
2906 
2907 protected:
2908   /// A set of all live instructions _known_ to cause UB.
2909   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2910 
2911 private:
2912   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2913   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2914 
2915   // Should be called on updates in which if we're processing an instruction
2916   // \p I that depends on a value \p V, one of the following has to happen:
2917   // - If the value is assumed, then stop.
2918   // - If the value is known but undef, then consider it UB.
2919   // - Otherwise, do specific processing with the simplified value.
2920   // We return None in the first 2 cases to signify that an appropriate
2921   // action was taken and the caller should stop.
2922   // Otherwise, we return the simplified value that the caller should
2923   // use for specific processing.
2924   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2925                                          Instruction *I) {
2926     bool UsedAssumedInformation = false;
2927     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2928         IRPosition::value(*V), *this, UsedAssumedInformation);
2929     if (!UsedAssumedInformation) {
2930       // Don't depend on assumed values.
2931       if (!SimplifiedV) {
2932         // If it is known (which we tested above) but it doesn't have a value,
2933         // then we can assume `undef` and hence the instruction is UB.
2934         KnownUBInsts.insert(I);
2935         return llvm::None;
2936       }
2937       if (!*SimplifiedV)
2938         return nullptr;
2939       V = *SimplifiedV;
2940     }
2941     if (isa<UndefValue>(V)) {
2942       KnownUBInsts.insert(I);
2943       return llvm::None;
2944     }
2945     return V;
2946   }
2947 };
2948 
2949 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2950   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2951       : AAUndefinedBehaviorImpl(IRP, A) {}
2952 
2953   /// See AbstractAttribute::trackStatistics()
2954   void trackStatistics() const override {
2955     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2956                "Number of instructions known to have UB");
2957     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2958         KnownUBInsts.size();
2959   }
2960 };
2961 } // namespace
2962 
2963 /// ------------------------ Will-Return Attributes ----------------------------
2964 
2965 namespace {
2966 // Helper function that checks whether a function has any cycle which we don't
2967 // know if it is bounded or not.
2968 // Loops with maximum trip count are considered bounded, any other cycle not.
2969 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2970   ScalarEvolution *SE =
2971       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2972   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2973   // If either SCEV or LoopInfo is not available for the function then we assume
2974   // any cycle to be unbounded cycle.
2975   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2976   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2977   if (!SE || !LI) {
2978     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2979       if (SCCI.hasCycle())
2980         return true;
2981     return false;
2982   }
2983 
2984   // If there's irreducible control, the function may contain non-loop cycles.
2985   if (mayContainIrreducibleControl(F, LI))
2986     return true;
2987 
2988   // Any loop that does not have a max trip count is considered unbounded cycle.
2989   for (auto *L : LI->getLoopsInPreorder()) {
2990     if (!SE->getSmallConstantMaxTripCount(L))
2991       return true;
2992   }
2993   return false;
2994 }
2995 
2996 struct AAWillReturnImpl : public AAWillReturn {
2997   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2998       : AAWillReturn(IRP, A) {}
2999 
3000   /// See AbstractAttribute::initialize(...).
3001   void initialize(Attributor &A) override {
3002     AAWillReturn::initialize(A);
3003 
3004     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
3005       indicateOptimisticFixpoint();
3006       return;
3007     }
3008   }
3009 
3010   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
3011   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
3012     // Check for `mustprogress` in the scope and the associated function which
3013     // might be different if this is a call site.
3014     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
3015         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
3016       return false;
3017 
3018     bool IsKnown;
3019     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3020       return IsKnown || !KnownOnly;
3021     return false;
3022   }
3023 
3024   /// See AbstractAttribute::updateImpl(...).
3025   ChangeStatus updateImpl(Attributor &A) override {
3026     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3027       return ChangeStatus::UNCHANGED;
3028 
3029     auto CheckForWillReturn = [&](Instruction &I) {
3030       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
3031       const auto &WillReturnAA =
3032           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
3033       if (WillReturnAA.isKnownWillReturn())
3034         return true;
3035       if (!WillReturnAA.isAssumedWillReturn())
3036         return false;
3037       const auto &NoRecurseAA =
3038           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
3039       return NoRecurseAA.isAssumedNoRecurse();
3040     };
3041 
3042     bool UsedAssumedInformation = false;
3043     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3044                                            UsedAssumedInformation))
3045       return indicatePessimisticFixpoint();
3046 
3047     return ChangeStatus::UNCHANGED;
3048   }
3049 
3050   /// See AbstractAttribute::getAsStr()
3051   const std::string getAsStr() const override {
3052     return getAssumed() ? "willreturn" : "may-noreturn";
3053   }
3054 };
3055 
3056 struct AAWillReturnFunction final : AAWillReturnImpl {
3057   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3058       : AAWillReturnImpl(IRP, A) {}
3059 
3060   /// See AbstractAttribute::initialize(...).
3061   void initialize(Attributor &A) override {
3062     AAWillReturnImpl::initialize(A);
3063 
3064     Function *F = getAnchorScope();
3065     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3066       indicatePessimisticFixpoint();
3067   }
3068 
3069   /// See AbstractAttribute::trackStatistics()
3070   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3071 };
3072 
3073 /// WillReturn attribute deduction for a call sites.
3074 struct AAWillReturnCallSite final : AAWillReturnImpl {
3075   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3076       : AAWillReturnImpl(IRP, A) {}
3077 
3078   /// See AbstractAttribute::initialize(...).
3079   void initialize(Attributor &A) override {
3080     AAWillReturnImpl::initialize(A);
3081     Function *F = getAssociatedFunction();
3082     if (!F || !A.isFunctionIPOAmendable(*F))
3083       indicatePessimisticFixpoint();
3084   }
3085 
3086   /// See AbstractAttribute::updateImpl(...).
3087   ChangeStatus updateImpl(Attributor &A) override {
3088     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3089       return ChangeStatus::UNCHANGED;
3090 
3091     // TODO: Once we have call site specific value information we can provide
3092     //       call site specific liveness information and then it makes
3093     //       sense to specialize attributes for call sites arguments instead of
3094     //       redirecting requests to the callee argument.
3095     Function *F = getAssociatedFunction();
3096     const IRPosition &FnPos = IRPosition::function(*F);
3097     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3098     return clampStateAndIndicateChange(getState(), FnAA.getState());
3099   }
3100 
3101   /// See AbstractAttribute::trackStatistics()
3102   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3103 };
3104 } // namespace
3105 
3106 /// -------------------AAReachability Attribute--------------------------
3107 
3108 namespace {
3109 struct AAReachabilityImpl : AAReachability {
3110   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3111       : AAReachability(IRP, A) {}
3112 
3113   const std::string getAsStr() const override {
3114     // TODO: Return the number of reachable queries.
3115     return "reachable";
3116   }
3117 
3118   /// See AbstractAttribute::updateImpl(...).
3119   ChangeStatus updateImpl(Attributor &A) override {
3120     return ChangeStatus::UNCHANGED;
3121   }
3122 };
3123 
3124 struct AAReachabilityFunction final : public AAReachabilityImpl {
3125   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3126       : AAReachabilityImpl(IRP, A) {}
3127 
3128   /// See AbstractAttribute::trackStatistics()
3129   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3130 };
3131 } // namespace
3132 
3133 /// ------------------------ NoAlias Argument Attribute ------------------------
3134 
3135 namespace {
3136 struct AANoAliasImpl : AANoAlias {
3137   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3138     assert(getAssociatedType()->isPointerTy() &&
3139            "Noalias is a pointer attribute");
3140   }
3141 
3142   const std::string getAsStr() const override {
3143     return getAssumed() ? "noalias" : "may-alias";
3144   }
3145 };
3146 
3147 /// NoAlias attribute for a floating value.
3148 struct AANoAliasFloating final : AANoAliasImpl {
3149   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3150       : AANoAliasImpl(IRP, A) {}
3151 
3152   /// See AbstractAttribute::initialize(...).
3153   void initialize(Attributor &A) override {
3154     AANoAliasImpl::initialize(A);
3155     Value *Val = &getAssociatedValue();
3156     do {
3157       CastInst *CI = dyn_cast<CastInst>(Val);
3158       if (!CI)
3159         break;
3160       Value *Base = CI->getOperand(0);
3161       if (!Base->hasOneUse())
3162         break;
3163       Val = Base;
3164     } while (true);
3165 
3166     if (!Val->getType()->isPointerTy()) {
3167       indicatePessimisticFixpoint();
3168       return;
3169     }
3170 
3171     if (isa<AllocaInst>(Val))
3172       indicateOptimisticFixpoint();
3173     else if (isa<ConstantPointerNull>(Val) &&
3174              !NullPointerIsDefined(getAnchorScope(),
3175                                    Val->getType()->getPointerAddressSpace()))
3176       indicateOptimisticFixpoint();
3177     else if (Val != &getAssociatedValue()) {
3178       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3179           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3180       if (ValNoAliasAA.isKnownNoAlias())
3181         indicateOptimisticFixpoint();
3182     }
3183   }
3184 
3185   /// See AbstractAttribute::updateImpl(...).
3186   ChangeStatus updateImpl(Attributor &A) override {
3187     // TODO: Implement this.
3188     return indicatePessimisticFixpoint();
3189   }
3190 
3191   /// See AbstractAttribute::trackStatistics()
3192   void trackStatistics() const override {
3193     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3194   }
3195 };
3196 
3197 /// NoAlias attribute for an argument.
3198 struct AANoAliasArgument final
3199     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3200   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3201   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3202 
3203   /// See AbstractAttribute::initialize(...).
3204   void initialize(Attributor &A) override {
3205     Base::initialize(A);
3206     // See callsite argument attribute and callee argument attribute.
3207     if (hasAttr({Attribute::ByVal}))
3208       indicateOptimisticFixpoint();
3209   }
3210 
3211   /// See AbstractAttribute::update(...).
3212   ChangeStatus updateImpl(Attributor &A) override {
3213     // We have to make sure no-alias on the argument does not break
3214     // synchronization when this is a callback argument, see also [1] below.
3215     // If synchronization cannot be affected, we delegate to the base updateImpl
3216     // function, otherwise we give up for now.
3217 
3218     // If the function is no-sync, no-alias cannot break synchronization.
3219     const auto &NoSyncAA =
3220         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3221                              DepClassTy::OPTIONAL);
3222     if (NoSyncAA.isAssumedNoSync())
3223       return Base::updateImpl(A);
3224 
3225     // If the argument is read-only, no-alias cannot break synchronization.
3226     bool IsKnown;
3227     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3228       return Base::updateImpl(A);
3229 
3230     // If the argument is never passed through callbacks, no-alias cannot break
3231     // synchronization.
3232     bool UsedAssumedInformation = false;
3233     if (A.checkForAllCallSites(
3234             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3235             true, UsedAssumedInformation))
3236       return Base::updateImpl(A);
3237 
3238     // TODO: add no-alias but make sure it doesn't break synchronization by
3239     // introducing fake uses. See:
3240     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3241     //     International Workshop on OpenMP 2018,
3242     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3243 
3244     return indicatePessimisticFixpoint();
3245   }
3246 
3247   /// See AbstractAttribute::trackStatistics()
3248   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3249 };
3250 
3251 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3252   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3253       : AANoAliasImpl(IRP, A) {}
3254 
3255   /// See AbstractAttribute::initialize(...).
3256   void initialize(Attributor &A) override {
3257     // See callsite argument attribute and callee argument attribute.
3258     const auto &CB = cast<CallBase>(getAnchorValue());
3259     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3260       indicateOptimisticFixpoint();
3261     Value &Val = getAssociatedValue();
3262     if (isa<ConstantPointerNull>(Val) &&
3263         !NullPointerIsDefined(getAnchorScope(),
3264                               Val.getType()->getPointerAddressSpace()))
3265       indicateOptimisticFixpoint();
3266   }
3267 
3268   /// Determine if the underlying value may alias with the call site argument
3269   /// \p OtherArgNo of \p ICS (= the underlying call site).
3270   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3271                             const AAMemoryBehavior &MemBehaviorAA,
3272                             const CallBase &CB, unsigned OtherArgNo) {
3273     // We do not need to worry about aliasing with the underlying IRP.
3274     if (this->getCalleeArgNo() == (int)OtherArgNo)
3275       return false;
3276 
3277     // If it is not a pointer or pointer vector we do not alias.
3278     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3279     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3280       return false;
3281 
3282     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3283         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3284 
3285     // If the argument is readnone, there is no read-write aliasing.
3286     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3287       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3288       return false;
3289     }
3290 
3291     // If the argument is readonly and the underlying value is readonly, there
3292     // is no read-write aliasing.
3293     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3294     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3295       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3296       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3297       return false;
3298     }
3299 
3300     // We have to utilize actual alias analysis queries so we need the object.
3301     if (!AAR)
3302       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3303 
3304     // Try to rule it out at the call site.
3305     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3306     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3307                          "callsite arguments: "
3308                       << getAssociatedValue() << " " << *ArgOp << " => "
3309                       << (IsAliasing ? "" : "no-") << "alias \n");
3310 
3311     return IsAliasing;
3312   }
3313 
3314   bool
3315   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3316                                          const AAMemoryBehavior &MemBehaviorAA,
3317                                          const AANoAlias &NoAliasAA) {
3318     // We can deduce "noalias" if the following conditions hold.
3319     // (i)   Associated value is assumed to be noalias in the definition.
3320     // (ii)  Associated value is assumed to be no-capture in all the uses
3321     //       possibly executed before this callsite.
3322     // (iii) There is no other pointer argument which could alias with the
3323     //       value.
3324 
3325     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3326     if (!AssociatedValueIsNoAliasAtDef) {
3327       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3328                         << " is not no-alias at the definition\n");
3329       return false;
3330     }
3331 
3332     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3333       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3334           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3335       return DerefAA.getAssumedDereferenceableBytes();
3336     };
3337 
3338     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3339 
3340     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3341     const Function *ScopeFn = VIRP.getAnchorScope();
3342     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3343     // Check whether the value is captured in the scope using AANoCapture.
3344     // Look at CFG and check only uses possibly executed before this
3345     // callsite.
3346     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3347       Instruction *UserI = cast<Instruction>(U.getUser());
3348 
3349       // If UserI is the curr instruction and there is a single potential use of
3350       // the value in UserI we allow the use.
3351       // TODO: We should inspect the operands and allow those that cannot alias
3352       //       with the value.
3353       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3354         return true;
3355 
3356       if (ScopeFn) {
3357         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3358           if (CB->isArgOperand(&U)) {
3359 
3360             unsigned ArgNo = CB->getArgOperandNo(&U);
3361 
3362             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3363                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3364                 DepClassTy::OPTIONAL);
3365 
3366             if (NoCaptureAA.isAssumedNoCapture())
3367               return true;
3368           }
3369         }
3370 
3371         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3372           return true;
3373       }
3374 
3375       // TODO: We should track the capturing uses in AANoCapture but the problem
3376       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3377       //       a value in the module slice.
3378       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3379       case UseCaptureKind::NO_CAPTURE:
3380         return true;
3381       case UseCaptureKind::MAY_CAPTURE:
3382         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3383                           << "\n");
3384         return false;
3385       case UseCaptureKind::PASSTHROUGH:
3386         Follow = true;
3387         return true;
3388       }
3389       llvm_unreachable("unknown UseCaptureKind");
3390     };
3391 
3392     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3393       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3394         LLVM_DEBUG(
3395             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3396                    << " cannot be noalias as it is potentially captured\n");
3397         return false;
3398       }
3399     }
3400     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3401 
3402     // Check there is no other pointer argument which could alias with the
3403     // value passed at this call site.
3404     // TODO: AbstractCallSite
3405     const auto &CB = cast<CallBase>(getAnchorValue());
3406     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3407       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3408         return false;
3409 
3410     return true;
3411   }
3412 
3413   /// See AbstractAttribute::updateImpl(...).
3414   ChangeStatus updateImpl(Attributor &A) override {
3415     // If the argument is readnone we are done as there are no accesses via the
3416     // argument.
3417     auto &MemBehaviorAA =
3418         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3419     if (MemBehaviorAA.isAssumedReadNone()) {
3420       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3421       return ChangeStatus::UNCHANGED;
3422     }
3423 
3424     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3425     const auto &NoAliasAA =
3426         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3427 
3428     AAResults *AAR = nullptr;
3429     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3430                                                NoAliasAA)) {
3431       LLVM_DEBUG(
3432           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3433       return ChangeStatus::UNCHANGED;
3434     }
3435 
3436     return indicatePessimisticFixpoint();
3437   }
3438 
3439   /// See AbstractAttribute::trackStatistics()
3440   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3441 };
3442 
3443 /// NoAlias attribute for function return value.
3444 struct AANoAliasReturned final : AANoAliasImpl {
3445   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3446       : AANoAliasImpl(IRP, A) {}
3447 
3448   /// See AbstractAttribute::initialize(...).
3449   void initialize(Attributor &A) override {
3450     AANoAliasImpl::initialize(A);
3451     Function *F = getAssociatedFunction();
3452     if (!F || F->isDeclaration())
3453       indicatePessimisticFixpoint();
3454   }
3455 
3456   /// See AbstractAttribute::updateImpl(...).
3457   virtual ChangeStatus updateImpl(Attributor &A) override {
3458 
3459     auto CheckReturnValue = [&](Value &RV) -> bool {
3460       if (Constant *C = dyn_cast<Constant>(&RV))
3461         if (C->isNullValue() || isa<UndefValue>(C))
3462           return true;
3463 
3464       /// For now, we can only deduce noalias if we have call sites.
3465       /// FIXME: add more support.
3466       if (!isa<CallBase>(&RV))
3467         return false;
3468 
3469       const IRPosition &RVPos = IRPosition::value(RV);
3470       const auto &NoAliasAA =
3471           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3472       if (!NoAliasAA.isAssumedNoAlias())
3473         return false;
3474 
3475       const auto &NoCaptureAA =
3476           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3477       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3478     };
3479 
3480     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3481       return indicatePessimisticFixpoint();
3482 
3483     return ChangeStatus::UNCHANGED;
3484   }
3485 
3486   /// See AbstractAttribute::trackStatistics()
3487   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3488 };
3489 
3490 /// NoAlias attribute deduction for a call site return value.
3491 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3492   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3493       : AANoAliasImpl(IRP, A) {}
3494 
3495   /// See AbstractAttribute::initialize(...).
3496   void initialize(Attributor &A) override {
3497     AANoAliasImpl::initialize(A);
3498     Function *F = getAssociatedFunction();
3499     if (!F || F->isDeclaration())
3500       indicatePessimisticFixpoint();
3501   }
3502 
3503   /// See AbstractAttribute::updateImpl(...).
3504   ChangeStatus updateImpl(Attributor &A) override {
3505     // TODO: Once we have call site specific value information we can provide
3506     //       call site specific liveness information and then it makes
3507     //       sense to specialize attributes for call sites arguments instead of
3508     //       redirecting requests to the callee argument.
3509     Function *F = getAssociatedFunction();
3510     const IRPosition &FnPos = IRPosition::returned(*F);
3511     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3512     return clampStateAndIndicateChange(getState(), FnAA.getState());
3513   }
3514 
3515   /// See AbstractAttribute::trackStatistics()
3516   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3517 };
3518 } // namespace
3519 
3520 /// -------------------AAIsDead Function Attribute-----------------------
3521 
3522 namespace {
3523 struct AAIsDeadValueImpl : public AAIsDead {
3524   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3525 
3526   /// See AbstractAttribute::initialize(...).
3527   void initialize(Attributor &A) override {
3528     if (auto *Scope = getAnchorScope())
3529       if (!A.isRunOn(*Scope))
3530         indicatePessimisticFixpoint();
3531   }
3532 
3533   /// See AAIsDead::isAssumedDead().
3534   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3535 
3536   /// See AAIsDead::isKnownDead().
3537   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3538 
3539   /// See AAIsDead::isAssumedDead(BasicBlock *).
3540   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3541 
3542   /// See AAIsDead::isKnownDead(BasicBlock *).
3543   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3544 
3545   /// See AAIsDead::isAssumedDead(Instruction *I).
3546   bool isAssumedDead(const Instruction *I) const override {
3547     return I == getCtxI() && isAssumedDead();
3548   }
3549 
3550   /// See AAIsDead::isKnownDead(Instruction *I).
3551   bool isKnownDead(const Instruction *I) const override {
3552     return isAssumedDead(I) && isKnownDead();
3553   }
3554 
3555   /// See AbstractAttribute::getAsStr().
3556   virtual const std::string getAsStr() const override {
3557     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3558   }
3559 
3560   /// Check if all uses are assumed dead.
3561   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3562     // Callers might not check the type, void has no uses.
3563     if (V.getType()->isVoidTy() || V.use_empty())
3564       return true;
3565 
3566     // If we replace a value with a constant there are no uses left afterwards.
3567     if (!isa<Constant>(V)) {
3568       if (auto *I = dyn_cast<Instruction>(&V))
3569         if (!A.isRunOn(*I->getFunction()))
3570           return false;
3571       bool UsedAssumedInformation = false;
3572       Optional<Constant *> C =
3573           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3574       if (!C || *C)
3575         return true;
3576     }
3577 
3578     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3579     // Explicitly set the dependence class to required because we want a long
3580     // chain of N dependent instructions to be considered live as soon as one is
3581     // without going through N update cycles. This is not required for
3582     // correctness.
3583     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3584                              DepClassTy::REQUIRED,
3585                              /* IgnoreDroppableUses */ false);
3586   }
3587 
3588   /// Determine if \p I is assumed to be side-effect free.
3589   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3590     if (!I || wouldInstructionBeTriviallyDead(I))
3591       return true;
3592 
3593     auto *CB = dyn_cast<CallBase>(I);
3594     if (!CB || isa<IntrinsicInst>(CB))
3595       return false;
3596 
3597     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3598     const auto &NoUnwindAA =
3599         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3600     if (!NoUnwindAA.isAssumedNoUnwind())
3601       return false;
3602     if (!NoUnwindAA.isKnownNoUnwind())
3603       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3604 
3605     bool IsKnown;
3606     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3607   }
3608 };
3609 
3610 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3611   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3612       : AAIsDeadValueImpl(IRP, A) {}
3613 
3614   /// See AbstractAttribute::initialize(...).
3615   void initialize(Attributor &A) override {
3616     AAIsDeadValueImpl::initialize(A);
3617 
3618     if (isa<UndefValue>(getAssociatedValue())) {
3619       indicatePessimisticFixpoint();
3620       return;
3621     }
3622 
3623     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3624     if (!isAssumedSideEffectFree(A, I)) {
3625       if (!isa_and_nonnull<StoreInst>(I))
3626         indicatePessimisticFixpoint();
3627       else
3628         removeAssumedBits(HAS_NO_EFFECT);
3629     }
3630   }
3631 
3632   bool isDeadStore(Attributor &A, StoreInst &SI) {
3633     // Lang ref now states volatile store is not UB/dead, let's skip them.
3634     if (SI.isVolatile())
3635       return false;
3636 
3637     bool UsedAssumedInformation = false;
3638     SmallSetVector<Value *, 4> PotentialCopies;
3639     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3640                                              UsedAssumedInformation))
3641       return false;
3642     return llvm::all_of(PotentialCopies, [&](Value *V) {
3643       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3644                              UsedAssumedInformation);
3645     });
3646   }
3647 
3648   /// See AbstractAttribute::getAsStr().
3649   const std::string getAsStr() const override {
3650     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3651     if (isa_and_nonnull<StoreInst>(I))
3652       if (isValidState())
3653         return "assumed-dead-store";
3654     return AAIsDeadValueImpl::getAsStr();
3655   }
3656 
3657   /// See AbstractAttribute::updateImpl(...).
3658   ChangeStatus updateImpl(Attributor &A) override {
3659     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3660     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3661       if (!isDeadStore(A, *SI))
3662         return indicatePessimisticFixpoint();
3663     } else {
3664       if (!isAssumedSideEffectFree(A, I))
3665         return indicatePessimisticFixpoint();
3666       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3667         return indicatePessimisticFixpoint();
3668     }
3669     return ChangeStatus::UNCHANGED;
3670   }
3671 
3672   bool isRemovableStore() const override {
3673     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3674   }
3675 
3676   /// See AbstractAttribute::manifest(...).
3677   ChangeStatus manifest(Attributor &A) override {
3678     Value &V = getAssociatedValue();
3679     if (auto *I = dyn_cast<Instruction>(&V)) {
3680       // If we get here we basically know the users are all dead. We check if
3681       // isAssumedSideEffectFree returns true here again because it might not be
3682       // the case and only the users are dead but the instruction (=call) is
3683       // still needed.
3684       if (isa<StoreInst>(I) ||
3685           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3686         A.deleteAfterManifest(*I);
3687         return ChangeStatus::CHANGED;
3688       }
3689     }
3690     return ChangeStatus::UNCHANGED;
3691   }
3692 
3693   /// See AbstractAttribute::trackStatistics()
3694   void trackStatistics() const override {
3695     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3696   }
3697 };
3698 
3699 struct AAIsDeadArgument : public AAIsDeadFloating {
3700   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3701       : AAIsDeadFloating(IRP, A) {}
3702 
3703   /// See AbstractAttribute::initialize(...).
3704   void initialize(Attributor &A) override {
3705     AAIsDeadFloating::initialize(A);
3706     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3707       indicatePessimisticFixpoint();
3708   }
3709 
3710   /// See AbstractAttribute::manifest(...).
3711   ChangeStatus manifest(Attributor &A) override {
3712     Argument &Arg = *getAssociatedArgument();
3713     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3714       if (A.registerFunctionSignatureRewrite(
3715               Arg, /* ReplacementTypes */ {},
3716               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3717               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3718         return ChangeStatus::CHANGED;
3719       }
3720     return ChangeStatus::UNCHANGED;
3721   }
3722 
3723   /// See AbstractAttribute::trackStatistics()
3724   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3725 };
3726 
3727 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3728   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3729       : AAIsDeadValueImpl(IRP, A) {}
3730 
3731   /// See AbstractAttribute::initialize(...).
3732   void initialize(Attributor &A) override {
3733     AAIsDeadValueImpl::initialize(A);
3734     if (isa<UndefValue>(getAssociatedValue()))
3735       indicatePessimisticFixpoint();
3736   }
3737 
3738   /// See AbstractAttribute::updateImpl(...).
3739   ChangeStatus updateImpl(Attributor &A) override {
3740     // TODO: Once we have call site specific value information we can provide
3741     //       call site specific liveness information and then it makes
3742     //       sense to specialize attributes for call sites arguments instead of
3743     //       redirecting requests to the callee argument.
3744     Argument *Arg = getAssociatedArgument();
3745     if (!Arg)
3746       return indicatePessimisticFixpoint();
3747     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3748     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3749     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3750   }
3751 
3752   /// See AbstractAttribute::manifest(...).
3753   ChangeStatus manifest(Attributor &A) override {
3754     CallBase &CB = cast<CallBase>(getAnchorValue());
3755     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3756     assert(!isa<UndefValue>(U.get()) &&
3757            "Expected undef values to be filtered out!");
3758     UndefValue &UV = *UndefValue::get(U->getType());
3759     if (A.changeUseAfterManifest(U, UV))
3760       return ChangeStatus::CHANGED;
3761     return ChangeStatus::UNCHANGED;
3762   }
3763 
3764   /// See AbstractAttribute::trackStatistics()
3765   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3766 };
3767 
3768 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3769   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3770       : AAIsDeadFloating(IRP, A) {}
3771 
3772   /// See AAIsDead::isAssumedDead().
3773   bool isAssumedDead() const override {
3774     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3775   }
3776 
3777   /// See AbstractAttribute::initialize(...).
3778   void initialize(Attributor &A) override {
3779     AAIsDeadFloating::initialize(A);
3780     if (isa<UndefValue>(getAssociatedValue())) {
3781       indicatePessimisticFixpoint();
3782       return;
3783     }
3784 
3785     // We track this separately as a secondary state.
3786     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3787   }
3788 
3789   /// See AbstractAttribute::updateImpl(...).
3790   ChangeStatus updateImpl(Attributor &A) override {
3791     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3792     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3793       IsAssumedSideEffectFree = false;
3794       Changed = ChangeStatus::CHANGED;
3795     }
3796     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3797       return indicatePessimisticFixpoint();
3798     return Changed;
3799   }
3800 
3801   /// See AbstractAttribute::trackStatistics()
3802   void trackStatistics() const override {
3803     if (IsAssumedSideEffectFree)
3804       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3805     else
3806       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3807   }
3808 
3809   /// See AbstractAttribute::getAsStr().
3810   const std::string getAsStr() const override {
3811     return isAssumedDead()
3812                ? "assumed-dead"
3813                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3814   }
3815 
3816 private:
3817   bool IsAssumedSideEffectFree = true;
3818 };
3819 
3820 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3821   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3822       : AAIsDeadValueImpl(IRP, A) {}
3823 
3824   /// See AbstractAttribute::updateImpl(...).
3825   ChangeStatus updateImpl(Attributor &A) override {
3826 
3827     bool UsedAssumedInformation = false;
3828     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3829                               {Instruction::Ret}, UsedAssumedInformation);
3830 
3831     auto PredForCallSite = [&](AbstractCallSite ACS) {
3832       if (ACS.isCallbackCall() || !ACS.getInstruction())
3833         return false;
3834       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3835     };
3836 
3837     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3838                                 UsedAssumedInformation))
3839       return indicatePessimisticFixpoint();
3840 
3841     return ChangeStatus::UNCHANGED;
3842   }
3843 
3844   /// See AbstractAttribute::manifest(...).
3845   ChangeStatus manifest(Attributor &A) override {
3846     // TODO: Rewrite the signature to return void?
3847     bool AnyChange = false;
3848     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3849     auto RetInstPred = [&](Instruction &I) {
3850       ReturnInst &RI = cast<ReturnInst>(I);
3851       if (!isa<UndefValue>(RI.getReturnValue()))
3852         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3853       return true;
3854     };
3855     bool UsedAssumedInformation = false;
3856     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3857                               UsedAssumedInformation);
3858     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3859   }
3860 
3861   /// See AbstractAttribute::trackStatistics()
3862   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3863 };
3864 
3865 struct AAIsDeadFunction : public AAIsDead {
3866   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3867 
3868   /// See AbstractAttribute::initialize(...).
3869   void initialize(Attributor &A) override {
3870     Function *F = getAnchorScope();
3871     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3872       indicatePessimisticFixpoint();
3873       return;
3874     }
3875     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3876     assumeLive(A, F->getEntryBlock());
3877   }
3878 
3879   /// See AbstractAttribute::getAsStr().
3880   const std::string getAsStr() const override {
3881     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3882            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3883            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3884            std::to_string(KnownDeadEnds.size()) + "]";
3885   }
3886 
3887   /// See AbstractAttribute::manifest(...).
3888   ChangeStatus manifest(Attributor &A) override {
3889     assert(getState().isValidState() &&
3890            "Attempted to manifest an invalid state!");
3891 
3892     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3893     Function &F = *getAnchorScope();
3894 
3895     if (AssumedLiveBlocks.empty()) {
3896       A.deleteAfterManifest(F);
3897       return ChangeStatus::CHANGED;
3898     }
3899 
3900     // Flag to determine if we can change an invoke to a call assuming the
3901     // callee is nounwind. This is not possible if the personality of the
3902     // function allows to catch asynchronous exceptions.
3903     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3904 
3905     KnownDeadEnds.set_union(ToBeExploredFrom);
3906     for (const Instruction *DeadEndI : KnownDeadEnds) {
3907       auto *CB = dyn_cast<CallBase>(DeadEndI);
3908       if (!CB)
3909         continue;
3910       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3911           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3912       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3913       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3914         continue;
3915 
3916       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3917         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3918       else
3919         A.changeToUnreachableAfterManifest(
3920             const_cast<Instruction *>(DeadEndI->getNextNode()));
3921       HasChanged = ChangeStatus::CHANGED;
3922     }
3923 
3924     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3925     for (BasicBlock &BB : F)
3926       if (!AssumedLiveBlocks.count(&BB)) {
3927         A.deleteAfterManifest(BB);
3928         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3929         HasChanged = ChangeStatus::CHANGED;
3930       }
3931 
3932     return HasChanged;
3933   }
3934 
3935   /// See AbstractAttribute::updateImpl(...).
3936   ChangeStatus updateImpl(Attributor &A) override;
3937 
3938   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3939     assert(From->getParent() == getAnchorScope() &&
3940            To->getParent() == getAnchorScope() &&
3941            "Used AAIsDead of the wrong function");
3942     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3943   }
3944 
3945   /// See AbstractAttribute::trackStatistics()
3946   void trackStatistics() const override {}
3947 
3948   /// Returns true if the function is assumed dead.
3949   bool isAssumedDead() const override { return false; }
3950 
3951   /// See AAIsDead::isKnownDead().
3952   bool isKnownDead() const override { return false; }
3953 
3954   /// See AAIsDead::isAssumedDead(BasicBlock *).
3955   bool isAssumedDead(const BasicBlock *BB) const override {
3956     assert(BB->getParent() == getAnchorScope() &&
3957            "BB must be in the same anchor scope function.");
3958 
3959     if (!getAssumed())
3960       return false;
3961     return !AssumedLiveBlocks.count(BB);
3962   }
3963 
3964   /// See AAIsDead::isKnownDead(BasicBlock *).
3965   bool isKnownDead(const BasicBlock *BB) const override {
3966     return getKnown() && isAssumedDead(BB);
3967   }
3968 
3969   /// See AAIsDead::isAssumed(Instruction *I).
3970   bool isAssumedDead(const Instruction *I) const override {
3971     assert(I->getParent()->getParent() == getAnchorScope() &&
3972            "Instruction must be in the same anchor scope function.");
3973 
3974     if (!getAssumed())
3975       return false;
3976 
3977     // If it is not in AssumedLiveBlocks then it for sure dead.
3978     // Otherwise, it can still be after noreturn call in a live block.
3979     if (!AssumedLiveBlocks.count(I->getParent()))
3980       return true;
3981 
3982     // If it is not after a liveness barrier it is live.
3983     const Instruction *PrevI = I->getPrevNode();
3984     while (PrevI) {
3985       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3986         return true;
3987       PrevI = PrevI->getPrevNode();
3988     }
3989     return false;
3990   }
3991 
3992   /// See AAIsDead::isKnownDead(Instruction *I).
3993   bool isKnownDead(const Instruction *I) const override {
3994     return getKnown() && isAssumedDead(I);
3995   }
3996 
3997   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3998   /// that internal function called from \p BB should now be looked at.
3999   bool assumeLive(Attributor &A, const BasicBlock &BB) {
4000     if (!AssumedLiveBlocks.insert(&BB).second)
4001       return false;
4002 
4003     // We assume that all of BB is (probably) live now and if there are calls to
4004     // internal functions we will assume that those are now live as well. This
4005     // is a performance optimization for blocks with calls to a lot of internal
4006     // functions. It can however cause dead functions to be treated as live.
4007     for (const Instruction &I : BB)
4008       if (const auto *CB = dyn_cast<CallBase>(&I))
4009         if (const Function *F = CB->getCalledFunction())
4010           if (F->hasLocalLinkage())
4011             A.markLiveInternalFunction(*F);
4012     return true;
4013   }
4014 
4015   /// Collection of instructions that need to be explored again, e.g., we
4016   /// did assume they do not transfer control to (one of their) successors.
4017   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
4018 
4019   /// Collection of instructions that are known to not transfer control.
4020   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
4021 
4022   /// Collection of all assumed live edges
4023   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
4024 
4025   /// Collection of all assumed live BasicBlocks.
4026   DenseSet<const BasicBlock *> AssumedLiveBlocks;
4027 };
4028 
4029 static bool
4030 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
4031                         AbstractAttribute &AA,
4032                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4033   const IRPosition &IPos = IRPosition::callsite_function(CB);
4034 
4035   const auto &NoReturnAA =
4036       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
4037   if (NoReturnAA.isAssumedNoReturn())
4038     return !NoReturnAA.isKnownNoReturn();
4039   if (CB.isTerminator())
4040     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4041   else
4042     AliveSuccessors.push_back(CB.getNextNode());
4043   return false;
4044 }
4045 
4046 static bool
4047 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4048                         AbstractAttribute &AA,
4049                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4050   bool UsedAssumedInformation =
4051       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4052 
4053   // First, determine if we can change an invoke to a call assuming the
4054   // callee is nounwind. This is not possible if the personality of the
4055   // function allows to catch asynchronous exceptions.
4056   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4057     AliveSuccessors.push_back(&II.getUnwindDest()->front());
4058   } else {
4059     const IRPosition &IPos = IRPosition::callsite_function(II);
4060     const auto &AANoUnw =
4061         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4062     if (AANoUnw.isAssumedNoUnwind()) {
4063       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4064     } else {
4065       AliveSuccessors.push_back(&II.getUnwindDest()->front());
4066     }
4067   }
4068   return UsedAssumedInformation;
4069 }
4070 
4071 static bool
4072 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4073                         AbstractAttribute &AA,
4074                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4075   bool UsedAssumedInformation = false;
4076   if (BI.getNumSuccessors() == 1) {
4077     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4078   } else {
4079     Optional<Constant *> C =
4080         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4081     if (!C || isa_and_nonnull<UndefValue>(*C)) {
4082       // No value yet, assume both edges are dead.
4083     } else if (isa_and_nonnull<ConstantInt>(*C)) {
4084       const BasicBlock *SuccBB =
4085           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4086       AliveSuccessors.push_back(&SuccBB->front());
4087     } else {
4088       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4089       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4090       UsedAssumedInformation = false;
4091     }
4092   }
4093   return UsedAssumedInformation;
4094 }
4095 
4096 static bool
4097 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4098                         AbstractAttribute &AA,
4099                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4100   bool UsedAssumedInformation = false;
4101   Optional<Constant *> C =
4102       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4103   if (!C || isa_and_nonnull<UndefValue>(C.getValue())) {
4104     // No value yet, assume all edges are dead.
4105   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
4106     for (auto &CaseIt : SI.cases()) {
4107       if (CaseIt.getCaseValue() == C.getValue()) {
4108         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4109         return UsedAssumedInformation;
4110       }
4111     }
4112     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4113     return UsedAssumedInformation;
4114   } else {
4115     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4116       AliveSuccessors.push_back(&SuccBB->front());
4117   }
4118   return UsedAssumedInformation;
4119 }
4120 
4121 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4122   ChangeStatus Change = ChangeStatus::UNCHANGED;
4123 
4124   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4125                     << getAnchorScope()->size() << "] BBs and "
4126                     << ToBeExploredFrom.size() << " exploration points and "
4127                     << KnownDeadEnds.size() << " known dead ends\n");
4128 
4129   // Copy and clear the list of instructions we need to explore from. It is
4130   // refilled with instructions the next update has to look at.
4131   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4132                                                ToBeExploredFrom.end());
4133   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4134 
4135   SmallVector<const Instruction *, 8> AliveSuccessors;
4136   while (!Worklist.empty()) {
4137     const Instruction *I = Worklist.pop_back_val();
4138     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4139 
4140     // Fast forward for uninteresting instructions. We could look for UB here
4141     // though.
4142     while (!I->isTerminator() && !isa<CallBase>(I))
4143       I = I->getNextNode();
4144 
4145     AliveSuccessors.clear();
4146 
4147     bool UsedAssumedInformation = false;
4148     switch (I->getOpcode()) {
4149     // TODO: look for (assumed) UB to backwards propagate "deadness".
4150     default:
4151       assert(I->isTerminator() &&
4152              "Expected non-terminators to be handled already!");
4153       for (const BasicBlock *SuccBB : successors(I->getParent()))
4154         AliveSuccessors.push_back(&SuccBB->front());
4155       break;
4156     case Instruction::Call:
4157       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4158                                                        *this, AliveSuccessors);
4159       break;
4160     case Instruction::Invoke:
4161       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4162                                                        *this, AliveSuccessors);
4163       break;
4164     case Instruction::Br:
4165       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4166                                                        *this, AliveSuccessors);
4167       break;
4168     case Instruction::Switch:
4169       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4170                                                        *this, AliveSuccessors);
4171       break;
4172     }
4173 
4174     if (UsedAssumedInformation) {
4175       NewToBeExploredFrom.insert(I);
4176     } else if (AliveSuccessors.empty() ||
4177                (I->isTerminator() &&
4178                 AliveSuccessors.size() < I->getNumSuccessors())) {
4179       if (KnownDeadEnds.insert(I))
4180         Change = ChangeStatus::CHANGED;
4181     }
4182 
4183     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4184                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4185                       << UsedAssumedInformation << "\n");
4186 
4187     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4188       if (!I->isTerminator()) {
4189         assert(AliveSuccessors.size() == 1 &&
4190                "Non-terminator expected to have a single successor!");
4191         Worklist.push_back(AliveSuccessor);
4192       } else {
4193         // record the assumed live edge
4194         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4195         if (AssumedLiveEdges.insert(Edge).second)
4196           Change = ChangeStatus::CHANGED;
4197         if (assumeLive(A, *AliveSuccessor->getParent()))
4198           Worklist.push_back(AliveSuccessor);
4199       }
4200     }
4201   }
4202 
4203   // Check if the content of ToBeExploredFrom changed, ignore the order.
4204   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4205       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4206         return !ToBeExploredFrom.count(I);
4207       })) {
4208     Change = ChangeStatus::CHANGED;
4209     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4210   }
4211 
4212   // If we know everything is live there is no need to query for liveness.
4213   // Instead, indicating a pessimistic fixpoint will cause the state to be
4214   // "invalid" and all queries to be answered conservatively without lookups.
4215   // To be in this state we have to (1) finished the exploration and (3) not
4216   // discovered any non-trivial dead end and (2) not ruled unreachable code
4217   // dead.
4218   if (ToBeExploredFrom.empty() &&
4219       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4220       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4221         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4222       }))
4223     return indicatePessimisticFixpoint();
4224   return Change;
4225 }
4226 
4227 /// Liveness information for a call sites.
4228 struct AAIsDeadCallSite final : AAIsDeadFunction {
4229   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4230       : AAIsDeadFunction(IRP, A) {}
4231 
4232   /// See AbstractAttribute::initialize(...).
4233   void initialize(Attributor &A) override {
4234     // TODO: Once we have call site specific value information we can provide
4235     //       call site specific liveness information and then it makes
4236     //       sense to specialize attributes for call sites instead of
4237     //       redirecting requests to the callee.
4238     llvm_unreachable("Abstract attributes for liveness are not "
4239                      "supported for call sites yet!");
4240   }
4241 
4242   /// See AbstractAttribute::updateImpl(...).
4243   ChangeStatus updateImpl(Attributor &A) override {
4244     return indicatePessimisticFixpoint();
4245   }
4246 
4247   /// See AbstractAttribute::trackStatistics()
4248   void trackStatistics() const override {}
4249 };
4250 } // namespace
4251 
4252 /// -------------------- Dereferenceable Argument Attribute --------------------
4253 
4254 namespace {
4255 struct AADereferenceableImpl : AADereferenceable {
4256   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4257       : AADereferenceable(IRP, A) {}
4258   using StateType = DerefState;
4259 
4260   /// See AbstractAttribute::initialize(...).
4261   void initialize(Attributor &A) override {
4262     Value &V = *getAssociatedValue().stripPointerCasts();
4263     SmallVector<Attribute, 4> Attrs;
4264     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4265              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4266     for (const Attribute &Attr : Attrs)
4267       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4268 
4269     const IRPosition &IRP = this->getIRPosition();
4270     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4271 
4272     bool CanBeNull, CanBeFreed;
4273     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4274         A.getDataLayout(), CanBeNull, CanBeFreed));
4275 
4276     bool IsFnInterface = IRP.isFnInterfaceKind();
4277     Function *FnScope = IRP.getAnchorScope();
4278     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4279       indicatePessimisticFixpoint();
4280       return;
4281     }
4282 
4283     if (Instruction *CtxI = getCtxI())
4284       followUsesInMBEC(*this, A, getState(), *CtxI);
4285   }
4286 
4287   /// See AbstractAttribute::getState()
4288   /// {
4289   StateType &getState() override { return *this; }
4290   const StateType &getState() const override { return *this; }
4291   /// }
4292 
4293   /// Helper function for collecting accessed bytes in must-be-executed-context
4294   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4295                               DerefState &State) {
4296     const Value *UseV = U->get();
4297     if (!UseV->getType()->isPointerTy())
4298       return;
4299 
4300     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4301     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4302       return;
4303 
4304     int64_t Offset;
4305     const Value *Base = GetPointerBaseWithConstantOffset(
4306         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4307     if (Base && Base == &getAssociatedValue())
4308       State.addAccessedBytes(Offset, Loc->Size.getValue());
4309   }
4310 
4311   /// See followUsesInMBEC
4312   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4313                        AADereferenceable::StateType &State) {
4314     bool IsNonNull = false;
4315     bool TrackUse = false;
4316     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4317         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4318     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4319                       << " for instruction " << *I << "\n");
4320 
4321     addAccessedBytesForUse(A, U, I, State);
4322     State.takeKnownDerefBytesMaximum(DerefBytes);
4323     return TrackUse;
4324   }
4325 
4326   /// See AbstractAttribute::manifest(...).
4327   ChangeStatus manifest(Attributor &A) override {
4328     ChangeStatus Change = AADereferenceable::manifest(A);
4329     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4330       removeAttrs({Attribute::DereferenceableOrNull});
4331       return ChangeStatus::CHANGED;
4332     }
4333     return Change;
4334   }
4335 
4336   void getDeducedAttributes(LLVMContext &Ctx,
4337                             SmallVectorImpl<Attribute> &Attrs) const override {
4338     // TODO: Add *_globally support
4339     if (isAssumedNonNull())
4340       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4341           Ctx, getAssumedDereferenceableBytes()));
4342     else
4343       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4344           Ctx, getAssumedDereferenceableBytes()));
4345   }
4346 
4347   /// See AbstractAttribute::getAsStr().
4348   const std::string getAsStr() const override {
4349     if (!getAssumedDereferenceableBytes())
4350       return "unknown-dereferenceable";
4351     return std::string("dereferenceable") +
4352            (isAssumedNonNull() ? "" : "_or_null") +
4353            (isAssumedGlobal() ? "_globally" : "") + "<" +
4354            std::to_string(getKnownDereferenceableBytes()) + "-" +
4355            std::to_string(getAssumedDereferenceableBytes()) + ">";
4356   }
4357 };
4358 
4359 /// Dereferenceable attribute for a floating value.
4360 struct AADereferenceableFloating : AADereferenceableImpl {
4361   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4362       : AADereferenceableImpl(IRP, A) {}
4363 
4364   /// See AbstractAttribute::updateImpl(...).
4365   ChangeStatus updateImpl(Attributor &A) override {
4366     const DataLayout &DL = A.getDataLayout();
4367 
4368     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4369                             bool Stripped) -> bool {
4370       unsigned IdxWidth =
4371           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4372       APInt Offset(IdxWidth, 0);
4373       const Value *Base = stripAndAccumulateOffsets(
4374           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4375           /* AllowNonInbounds */ true);
4376 
4377       const auto &AA = A.getAAFor<AADereferenceable>(
4378           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4379       int64_t DerefBytes = 0;
4380       if (!Stripped && this == &AA) {
4381         // Use IR information if we did not strip anything.
4382         // TODO: track globally.
4383         bool CanBeNull, CanBeFreed;
4384         DerefBytes =
4385             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4386         T.GlobalState.indicatePessimisticFixpoint();
4387       } else {
4388         const DerefState &DS = AA.getState();
4389         DerefBytes = DS.DerefBytesState.getAssumed();
4390         T.GlobalState &= DS.GlobalState;
4391       }
4392 
4393       // For now we do not try to "increase" dereferenceability due to negative
4394       // indices as we first have to come up with code to deal with loops and
4395       // for overflows of the dereferenceable bytes.
4396       int64_t OffsetSExt = Offset.getSExtValue();
4397       if (OffsetSExt < 0)
4398         OffsetSExt = 0;
4399 
4400       T.takeAssumedDerefBytesMinimum(
4401           std::max(int64_t(0), DerefBytes - OffsetSExt));
4402 
4403       if (this == &AA) {
4404         if (!Stripped) {
4405           // If nothing was stripped IR information is all we got.
4406           T.takeKnownDerefBytesMaximum(
4407               std::max(int64_t(0), DerefBytes - OffsetSExt));
4408           T.indicatePessimisticFixpoint();
4409         } else if (OffsetSExt > 0) {
4410           // If something was stripped but there is circular reasoning we look
4411           // for the offset. If it is positive we basically decrease the
4412           // dereferenceable bytes in a circluar loop now, which will simply
4413           // drive them down to the known value in a very slow way which we
4414           // can accelerate.
4415           T.indicatePessimisticFixpoint();
4416         }
4417       }
4418 
4419       return T.isValidState();
4420     };
4421 
4422     DerefState T;
4423     bool UsedAssumedInformation = false;
4424     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4425                                            VisitValueCB, getCtxI(),
4426                                            UsedAssumedInformation))
4427       return indicatePessimisticFixpoint();
4428 
4429     return clampStateAndIndicateChange(getState(), T);
4430   }
4431 
4432   /// See AbstractAttribute::trackStatistics()
4433   void trackStatistics() const override {
4434     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4435   }
4436 };
4437 
4438 /// Dereferenceable attribute for a return value.
4439 struct AADereferenceableReturned final
4440     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4441   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4442       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4443             IRP, A) {}
4444 
4445   /// See AbstractAttribute::trackStatistics()
4446   void trackStatistics() const override {
4447     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4448   }
4449 };
4450 
4451 /// Dereferenceable attribute for an argument
4452 struct AADereferenceableArgument final
4453     : AAArgumentFromCallSiteArguments<AADereferenceable,
4454                                       AADereferenceableImpl> {
4455   using Base =
4456       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4457   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4458       : Base(IRP, A) {}
4459 
4460   /// See AbstractAttribute::trackStatistics()
4461   void trackStatistics() const override {
4462     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4463   }
4464 };
4465 
4466 /// Dereferenceable attribute for a call site argument.
4467 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4468   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4469       : AADereferenceableFloating(IRP, A) {}
4470 
4471   /// See AbstractAttribute::trackStatistics()
4472   void trackStatistics() const override {
4473     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4474   }
4475 };
4476 
4477 /// Dereferenceable attribute deduction for a call site return value.
4478 struct AADereferenceableCallSiteReturned final
4479     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4480   using Base =
4481       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4482   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4483       : Base(IRP, A) {}
4484 
4485   /// See AbstractAttribute::trackStatistics()
4486   void trackStatistics() const override {
4487     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4488   }
4489 };
4490 } // namespace
4491 
4492 // ------------------------ Align Argument Attribute ------------------------
4493 
4494 namespace {
4495 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4496                                     Value &AssociatedValue, const Use *U,
4497                                     const Instruction *I, bool &TrackUse) {
4498   // We need to follow common pointer manipulation uses to the accesses they
4499   // feed into.
4500   if (isa<CastInst>(I)) {
4501     // Follow all but ptr2int casts.
4502     TrackUse = !isa<PtrToIntInst>(I);
4503     return 0;
4504   }
4505   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4506     if (GEP->hasAllConstantIndices())
4507       TrackUse = true;
4508     return 0;
4509   }
4510 
4511   MaybeAlign MA;
4512   if (const auto *CB = dyn_cast<CallBase>(I)) {
4513     if (CB->isBundleOperand(U) || CB->isCallee(U))
4514       return 0;
4515 
4516     unsigned ArgNo = CB->getArgOperandNo(U);
4517     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4518     // As long as we only use known information there is no need to track
4519     // dependences here.
4520     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4521     MA = MaybeAlign(AlignAA.getKnownAlign());
4522   }
4523 
4524   const DataLayout &DL = A.getDataLayout();
4525   const Value *UseV = U->get();
4526   if (auto *SI = dyn_cast<StoreInst>(I)) {
4527     if (SI->getPointerOperand() == UseV)
4528       MA = SI->getAlign();
4529   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4530     if (LI->getPointerOperand() == UseV)
4531       MA = LI->getAlign();
4532   }
4533 
4534   if (!MA || *MA <= QueryingAA.getKnownAlign())
4535     return 0;
4536 
4537   unsigned Alignment = MA->value();
4538   int64_t Offset;
4539 
4540   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4541     if (Base == &AssociatedValue) {
4542       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4543       // So we can say that the maximum power of two which is a divisor of
4544       // gcd(Offset, Alignment) is an alignment.
4545 
4546       uint32_t gcd =
4547           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4548       Alignment = llvm::PowerOf2Floor(gcd);
4549     }
4550   }
4551 
4552   return Alignment;
4553 }
4554 
4555 struct AAAlignImpl : AAAlign {
4556   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4557 
4558   /// See AbstractAttribute::initialize(...).
4559   void initialize(Attributor &A) override {
4560     SmallVector<Attribute, 4> Attrs;
4561     getAttrs({Attribute::Alignment}, Attrs);
4562     for (const Attribute &Attr : Attrs)
4563       takeKnownMaximum(Attr.getValueAsInt());
4564 
4565     Value &V = *getAssociatedValue().stripPointerCasts();
4566     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4567 
4568     if (getIRPosition().isFnInterfaceKind() &&
4569         (!getAnchorScope() ||
4570          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4571       indicatePessimisticFixpoint();
4572       return;
4573     }
4574 
4575     if (Instruction *CtxI = getCtxI())
4576       followUsesInMBEC(*this, A, getState(), *CtxI);
4577   }
4578 
4579   /// See AbstractAttribute::manifest(...).
4580   ChangeStatus manifest(Attributor &A) override {
4581     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4582 
4583     // Check for users that allow alignment annotations.
4584     Value &AssociatedValue = getAssociatedValue();
4585     for (const Use &U : AssociatedValue.uses()) {
4586       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4587         if (SI->getPointerOperand() == &AssociatedValue)
4588           if (SI->getAlign() < getAssumedAlign()) {
4589             STATS_DECLTRACK(AAAlign, Store,
4590                             "Number of times alignment added to a store");
4591             SI->setAlignment(getAssumedAlign());
4592             LoadStoreChanged = ChangeStatus::CHANGED;
4593           }
4594       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4595         if (LI->getPointerOperand() == &AssociatedValue)
4596           if (LI->getAlign() < getAssumedAlign()) {
4597             LI->setAlignment(getAssumedAlign());
4598             STATS_DECLTRACK(AAAlign, Load,
4599                             "Number of times alignment added to a load");
4600             LoadStoreChanged = ChangeStatus::CHANGED;
4601           }
4602       }
4603     }
4604 
4605     ChangeStatus Changed = AAAlign::manifest(A);
4606 
4607     Align InheritAlign =
4608         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4609     if (InheritAlign >= getAssumedAlign())
4610       return LoadStoreChanged;
4611     return Changed | LoadStoreChanged;
4612   }
4613 
4614   // TODO: Provide a helper to determine the implied ABI alignment and check in
4615   //       the existing manifest method and a new one for AAAlignImpl that value
4616   //       to avoid making the alignment explicit if it did not improve.
4617 
4618   /// See AbstractAttribute::getDeducedAttributes
4619   virtual void
4620   getDeducedAttributes(LLVMContext &Ctx,
4621                        SmallVectorImpl<Attribute> &Attrs) const override {
4622     if (getAssumedAlign() > 1)
4623       Attrs.emplace_back(
4624           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4625   }
4626 
4627   /// See followUsesInMBEC
4628   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4629                        AAAlign::StateType &State) {
4630     bool TrackUse = false;
4631 
4632     unsigned int KnownAlign =
4633         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4634     State.takeKnownMaximum(KnownAlign);
4635 
4636     return TrackUse;
4637   }
4638 
4639   /// See AbstractAttribute::getAsStr().
4640   const std::string getAsStr() const override {
4641     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4642            std::to_string(getAssumedAlign().value()) + ">";
4643   }
4644 };
4645 
4646 /// Align attribute for a floating value.
4647 struct AAAlignFloating : AAAlignImpl {
4648   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4649 
4650   /// See AbstractAttribute::updateImpl(...).
4651   ChangeStatus updateImpl(Attributor &A) override {
4652     const DataLayout &DL = A.getDataLayout();
4653 
4654     auto VisitValueCB = [&](Value &V, const Instruction *,
4655                             AAAlign::StateType &T, bool Stripped) -> bool {
4656       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4657         return true;
4658       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4659                                            DepClassTy::REQUIRED);
4660       if (!Stripped && this == &AA) {
4661         int64_t Offset;
4662         unsigned Alignment = 1;
4663         if (const Value *Base =
4664                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4665           // TODO: Use AAAlign for the base too.
4666           Align PA = Base->getPointerAlignment(DL);
4667           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4668           // So we can say that the maximum power of two which is a divisor of
4669           // gcd(Offset, Alignment) is an alignment.
4670 
4671           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4672                                                uint32_t(PA.value()));
4673           Alignment = llvm::PowerOf2Floor(gcd);
4674         } else {
4675           Alignment = V.getPointerAlignment(DL).value();
4676         }
4677         // Use only IR information if we did not strip anything.
4678         T.takeKnownMaximum(Alignment);
4679         T.indicatePessimisticFixpoint();
4680       } else {
4681         // Use abstract attribute information.
4682         const AAAlign::StateType &DS = AA.getState();
4683         T ^= DS;
4684       }
4685       return T.isValidState();
4686     };
4687 
4688     StateType T;
4689     bool UsedAssumedInformation = false;
4690     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4691                                           VisitValueCB, getCtxI(),
4692                                           UsedAssumedInformation))
4693       return indicatePessimisticFixpoint();
4694 
4695     // TODO: If we know we visited all incoming values, thus no are assumed
4696     // dead, we can take the known information from the state T.
4697     return clampStateAndIndicateChange(getState(), T);
4698   }
4699 
4700   /// See AbstractAttribute::trackStatistics()
4701   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4702 };
4703 
4704 /// Align attribute for function return value.
4705 struct AAAlignReturned final
4706     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4707   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4708   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4709 
4710   /// See AbstractAttribute::initialize(...).
4711   void initialize(Attributor &A) override {
4712     Base::initialize(A);
4713     Function *F = getAssociatedFunction();
4714     if (!F || F->isDeclaration())
4715       indicatePessimisticFixpoint();
4716   }
4717 
4718   /// See AbstractAttribute::trackStatistics()
4719   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4720 };
4721 
4722 /// Align attribute for function argument.
4723 struct AAAlignArgument final
4724     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4725   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4726   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4727 
4728   /// See AbstractAttribute::manifest(...).
4729   ChangeStatus manifest(Attributor &A) override {
4730     // If the associated argument is involved in a must-tail call we give up
4731     // because we would need to keep the argument alignments of caller and
4732     // callee in-sync. Just does not seem worth the trouble right now.
4733     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4734       return ChangeStatus::UNCHANGED;
4735     return Base::manifest(A);
4736   }
4737 
4738   /// See AbstractAttribute::trackStatistics()
4739   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4740 };
4741 
4742 struct AAAlignCallSiteArgument final : AAAlignFloating {
4743   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4744       : AAAlignFloating(IRP, A) {}
4745 
4746   /// See AbstractAttribute::manifest(...).
4747   ChangeStatus manifest(Attributor &A) override {
4748     // If the associated argument is involved in a must-tail call we give up
4749     // because we would need to keep the argument alignments of caller and
4750     // callee in-sync. Just does not seem worth the trouble right now.
4751     if (Argument *Arg = getAssociatedArgument())
4752       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4753         return ChangeStatus::UNCHANGED;
4754     ChangeStatus Changed = AAAlignImpl::manifest(A);
4755     Align InheritAlign =
4756         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4757     if (InheritAlign >= getAssumedAlign())
4758       Changed = ChangeStatus::UNCHANGED;
4759     return Changed;
4760   }
4761 
4762   /// See AbstractAttribute::updateImpl(Attributor &A).
4763   ChangeStatus updateImpl(Attributor &A) override {
4764     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4765     if (Argument *Arg = getAssociatedArgument()) {
4766       // We only take known information from the argument
4767       // so we do not need to track a dependence.
4768       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4769           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4770       takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4771     }
4772     return Changed;
4773   }
4774 
4775   /// See AbstractAttribute::trackStatistics()
4776   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4777 };
4778 
4779 /// Align attribute deduction for a call site return value.
4780 struct AAAlignCallSiteReturned final
4781     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4782   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4783   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4784       : Base(IRP, A) {}
4785 
4786   /// See AbstractAttribute::initialize(...).
4787   void initialize(Attributor &A) override {
4788     Base::initialize(A);
4789     Function *F = getAssociatedFunction();
4790     if (!F || F->isDeclaration())
4791       indicatePessimisticFixpoint();
4792   }
4793 
4794   /// See AbstractAttribute::trackStatistics()
4795   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4796 };
4797 } // namespace
4798 
4799 /// ------------------ Function No-Return Attribute ----------------------------
4800 namespace {
4801 struct AANoReturnImpl : public AANoReturn {
4802   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4803 
4804   /// See AbstractAttribute::initialize(...).
4805   void initialize(Attributor &A) override {
4806     AANoReturn::initialize(A);
4807     Function *F = getAssociatedFunction();
4808     if (!F || F->isDeclaration())
4809       indicatePessimisticFixpoint();
4810   }
4811 
4812   /// See AbstractAttribute::getAsStr().
4813   const std::string getAsStr() const override {
4814     return getAssumed() ? "noreturn" : "may-return";
4815   }
4816 
4817   /// See AbstractAttribute::updateImpl(Attributor &A).
4818   virtual ChangeStatus updateImpl(Attributor &A) override {
4819     auto CheckForNoReturn = [](Instruction &) { return false; };
4820     bool UsedAssumedInformation = false;
4821     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4822                                    {(unsigned)Instruction::Ret},
4823                                    UsedAssumedInformation))
4824       return indicatePessimisticFixpoint();
4825     return ChangeStatus::UNCHANGED;
4826   }
4827 };
4828 
4829 struct AANoReturnFunction final : AANoReturnImpl {
4830   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4831       : AANoReturnImpl(IRP, A) {}
4832 
4833   /// See AbstractAttribute::trackStatistics()
4834   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4835 };
4836 
4837 /// NoReturn attribute deduction for a call sites.
4838 struct AANoReturnCallSite final : AANoReturnImpl {
4839   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4840       : AANoReturnImpl(IRP, A) {}
4841 
4842   /// See AbstractAttribute::initialize(...).
4843   void initialize(Attributor &A) override {
4844     AANoReturnImpl::initialize(A);
4845     if (Function *F = getAssociatedFunction()) {
4846       const IRPosition &FnPos = IRPosition::function(*F);
4847       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4848       if (!FnAA.isAssumedNoReturn())
4849         indicatePessimisticFixpoint();
4850     }
4851   }
4852 
4853   /// See AbstractAttribute::updateImpl(...).
4854   ChangeStatus updateImpl(Attributor &A) override {
4855     // TODO: Once we have call site specific value information we can provide
4856     //       call site specific liveness information and then it makes
4857     //       sense to specialize attributes for call sites arguments instead of
4858     //       redirecting requests to the callee argument.
4859     Function *F = getAssociatedFunction();
4860     const IRPosition &FnPos = IRPosition::function(*F);
4861     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4862     return clampStateAndIndicateChange(getState(), FnAA.getState());
4863   }
4864 
4865   /// See AbstractAttribute::trackStatistics()
4866   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4867 };
4868 } // namespace
4869 
4870 /// ----------------------- Instance Info ---------------------------------
4871 
4872 namespace {
4873 /// A class to hold the state of for no-capture attributes.
4874 struct AAInstanceInfoImpl : public AAInstanceInfo {
4875   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4876       : AAInstanceInfo(IRP, A) {}
4877 
4878   /// See AbstractAttribute::initialize(...).
4879   void initialize(Attributor &A) override {
4880     Value &V = getAssociatedValue();
4881     if (auto *C = dyn_cast<Constant>(&V)) {
4882       if (C->isThreadDependent())
4883         indicatePessimisticFixpoint();
4884       else
4885         indicateOptimisticFixpoint();
4886       return;
4887     }
4888     if (auto *CB = dyn_cast<CallBase>(&V))
4889       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4890           !CB->mayReadFromMemory()) {
4891         indicateOptimisticFixpoint();
4892         return;
4893       }
4894   }
4895 
4896   /// See AbstractAttribute::updateImpl(...).
4897   ChangeStatus updateImpl(Attributor &A) override {
4898     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4899 
4900     Value &V = getAssociatedValue();
4901     const Function *Scope = nullptr;
4902     if (auto *I = dyn_cast<Instruction>(&V))
4903       Scope = I->getFunction();
4904     if (auto *A = dyn_cast<Argument>(&V)) {
4905       Scope = A->getParent();
4906       if (!Scope->hasLocalLinkage())
4907         return Changed;
4908     }
4909     if (!Scope)
4910       return indicateOptimisticFixpoint();
4911 
4912     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4913         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4914     if (NoRecurseAA.isAssumedNoRecurse())
4915       return Changed;
4916 
4917     auto UsePred = [&](const Use &U, bool &Follow) {
4918       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4919       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4920           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4921         Follow = true;
4922         return true;
4923       }
4924       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4925           (isa<StoreInst>(UserI) &&
4926            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4927         return true;
4928       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4929         // This check is not guaranteeing uniqueness but for now that we cannot
4930         // end up with two versions of \p U thinking it was one.
4931         if (!CB->getCalledFunction() ||
4932             !CB->getCalledFunction()->hasLocalLinkage())
4933           return true;
4934         if (!CB->isArgOperand(&U))
4935           return false;
4936         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4937             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4938             DepClassTy::OPTIONAL);
4939         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4940           return false;
4941         // If this call base might reach the scope again we might forward the
4942         // argument back here. This is very conservative.
4943         if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4944           return false;
4945         return true;
4946       }
4947       return false;
4948     };
4949 
4950     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4951       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4952         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4953         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4954           return true;
4955         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4956             *SI->getFunction());
4957         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4958           return true;
4959       }
4960       return false;
4961     };
4962 
4963     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4964                            DepClassTy::OPTIONAL,
4965                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4966       return indicatePessimisticFixpoint();
4967 
4968     return Changed;
4969   }
4970 
4971   /// See AbstractState::getAsStr().
4972   const std::string getAsStr() const override {
4973     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4974   }
4975 
4976   /// See AbstractAttribute::trackStatistics()
4977   void trackStatistics() const override {}
4978 };
4979 
4980 /// InstanceInfo attribute for floating values.
4981 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4982   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4983       : AAInstanceInfoImpl(IRP, A) {}
4984 };
4985 
4986 /// NoCapture attribute for function arguments.
4987 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4988   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4989       : AAInstanceInfoFloating(IRP, A) {}
4990 };
4991 
4992 /// InstanceInfo attribute for call site arguments.
4993 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4994   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4995       : AAInstanceInfoImpl(IRP, A) {}
4996 
4997   /// See AbstractAttribute::updateImpl(...).
4998   ChangeStatus updateImpl(Attributor &A) override {
4999     // TODO: Once we have call site specific value information we can provide
5000     //       call site specific liveness information and then it makes
5001     //       sense to specialize attributes for call sites arguments instead of
5002     //       redirecting requests to the callee argument.
5003     Argument *Arg = getAssociatedArgument();
5004     if (!Arg)
5005       return indicatePessimisticFixpoint();
5006     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5007     auto &ArgAA =
5008         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
5009     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5010   }
5011 };
5012 
5013 /// InstanceInfo attribute for function return value.
5014 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
5015   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
5016       : AAInstanceInfoImpl(IRP, A) {
5017     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5018   }
5019 
5020   /// See AbstractAttribute::initialize(...).
5021   void initialize(Attributor &A) override {
5022     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5023   }
5024 
5025   /// See AbstractAttribute::updateImpl(...).
5026   ChangeStatus updateImpl(Attributor &A) override {
5027     llvm_unreachable("InstanceInfo is not applicable to function returns!");
5028   }
5029 };
5030 
5031 /// InstanceInfo attribute deduction for a call site return value.
5032 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
5033   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
5034       : AAInstanceInfoFloating(IRP, A) {}
5035 };
5036 } // namespace
5037 
5038 /// ----------------------- Variable Capturing ---------------------------------
5039 
5040 namespace {
5041 /// A class to hold the state of for no-capture attributes.
5042 struct AANoCaptureImpl : public AANoCapture {
5043   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5044 
5045   /// See AbstractAttribute::initialize(...).
5046   void initialize(Attributor &A) override {
5047     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
5048       indicateOptimisticFixpoint();
5049       return;
5050     }
5051     Function *AnchorScope = getAnchorScope();
5052     if (isFnInterfaceKind() &&
5053         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
5054       indicatePessimisticFixpoint();
5055       return;
5056     }
5057 
5058     // You cannot "capture" null in the default address space.
5059     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5060         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5061       indicateOptimisticFixpoint();
5062       return;
5063     }
5064 
5065     const Function *F =
5066         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5067 
5068     // Check what state the associated function can actually capture.
5069     if (F)
5070       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5071     else
5072       indicatePessimisticFixpoint();
5073   }
5074 
5075   /// See AbstractAttribute::updateImpl(...).
5076   ChangeStatus updateImpl(Attributor &A) override;
5077 
5078   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5079   virtual void
5080   getDeducedAttributes(LLVMContext &Ctx,
5081                        SmallVectorImpl<Attribute> &Attrs) const override {
5082     if (!isAssumedNoCaptureMaybeReturned())
5083       return;
5084 
5085     if (isArgumentPosition()) {
5086       if (isAssumedNoCapture())
5087         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5088       else if (ManifestInternal)
5089         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5090     }
5091   }
5092 
5093   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5094   /// depending on the ability of the function associated with \p IRP to capture
5095   /// state in memory and through "returning/throwing", respectively.
5096   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5097                                                    const Function &F,
5098                                                    BitIntegerState &State) {
5099     // TODO: Once we have memory behavior attributes we should use them here.
5100 
5101     // If we know we cannot communicate or write to memory, we do not care about
5102     // ptr2int anymore.
5103     if (F.onlyReadsMemory() && F.doesNotThrow() &&
5104         F.getReturnType()->isVoidTy()) {
5105       State.addKnownBits(NO_CAPTURE);
5106       return;
5107     }
5108 
5109     // A function cannot capture state in memory if it only reads memory, it can
5110     // however return/throw state and the state might be influenced by the
5111     // pointer value, e.g., loading from a returned pointer might reveal a bit.
5112     if (F.onlyReadsMemory())
5113       State.addKnownBits(NOT_CAPTURED_IN_MEM);
5114 
5115     // A function cannot communicate state back if it does not through
5116     // exceptions and doesn not return values.
5117     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5118       State.addKnownBits(NOT_CAPTURED_IN_RET);
5119 
5120     // Check existing "returned" attributes.
5121     int ArgNo = IRP.getCalleeArgNo();
5122     if (F.doesNotThrow() && ArgNo >= 0) {
5123       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5124         if (F.hasParamAttribute(u, Attribute::Returned)) {
5125           if (u == unsigned(ArgNo))
5126             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5127           else if (F.onlyReadsMemory())
5128             State.addKnownBits(NO_CAPTURE);
5129           else
5130             State.addKnownBits(NOT_CAPTURED_IN_RET);
5131           break;
5132         }
5133     }
5134   }
5135 
5136   /// See AbstractState::getAsStr().
5137   const std::string getAsStr() const override {
5138     if (isKnownNoCapture())
5139       return "known not-captured";
5140     if (isAssumedNoCapture())
5141       return "assumed not-captured";
5142     if (isKnownNoCaptureMaybeReturned())
5143       return "known not-captured-maybe-returned";
5144     if (isAssumedNoCaptureMaybeReturned())
5145       return "assumed not-captured-maybe-returned";
5146     return "assumed-captured";
5147   }
5148 
5149   /// Check the use \p U and update \p State accordingly. Return true if we
5150   /// should continue to update the state.
5151   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5152                 bool &Follow) {
5153     Instruction *UInst = cast<Instruction>(U.getUser());
5154     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5155                       << *UInst << "\n");
5156 
5157     // Deal with ptr2int by following uses.
5158     if (isa<PtrToIntInst>(UInst)) {
5159       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5160       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5161                           /* Return */ true);
5162     }
5163 
5164     // For stores we already checked if we can follow them, if they make it
5165     // here we give up.
5166     if (isa<StoreInst>(UInst))
5167       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5168                           /* Return */ false);
5169 
5170     // Explicitly catch return instructions.
5171     if (isa<ReturnInst>(UInst)) {
5172       if (UInst->getFunction() == getAnchorScope())
5173         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5174                             /* Return */ true);
5175       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5176                           /* Return */ true);
5177     }
5178 
5179     // For now we only use special logic for call sites. However, the tracker
5180     // itself knows about a lot of other non-capturing cases already.
5181     auto *CB = dyn_cast<CallBase>(UInst);
5182     if (!CB || !CB->isArgOperand(&U))
5183       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5184                           /* Return */ true);
5185 
5186     unsigned ArgNo = CB->getArgOperandNo(&U);
5187     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5188     // If we have a abstract no-capture attribute for the argument we can use
5189     // it to justify a non-capture attribute here. This allows recursion!
5190     auto &ArgNoCaptureAA =
5191         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5192     if (ArgNoCaptureAA.isAssumedNoCapture())
5193       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5194                           /* Return */ false);
5195     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5196       Follow = true;
5197       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5198                           /* Return */ false);
5199     }
5200 
5201     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5202     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5203                         /* Return */ true);
5204   }
5205 
5206   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5207   /// \p CapturedInRet, then return true if we should continue updating the
5208   /// state.
5209   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5210                            bool CapturedInInt, bool CapturedInRet) {
5211     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5212                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5213     if (CapturedInMem)
5214       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5215     if (CapturedInInt)
5216       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5217     if (CapturedInRet)
5218       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5219     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5220   }
5221 };
5222 
5223 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5224   const IRPosition &IRP = getIRPosition();
5225   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5226                                   : &IRP.getAssociatedValue();
5227   if (!V)
5228     return indicatePessimisticFixpoint();
5229 
5230   const Function *F =
5231       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5232   assert(F && "Expected a function!");
5233   const IRPosition &FnPos = IRPosition::function(*F);
5234 
5235   AANoCapture::StateType T;
5236 
5237   // Readonly means we cannot capture through memory.
5238   bool IsKnown;
5239   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5240     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5241     if (IsKnown)
5242       addKnownBits(NOT_CAPTURED_IN_MEM);
5243   }
5244 
5245   // Make sure all returned values are different than the underlying value.
5246   // TODO: we could do this in a more sophisticated way inside
5247   //       AAReturnedValues, e.g., track all values that escape through returns
5248   //       directly somehow.
5249   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5250     if (!RVAA.getState().isValidState())
5251       return false;
5252     bool SeenConstant = false;
5253     for (auto &It : RVAA.returned_values()) {
5254       if (isa<Constant>(It.first)) {
5255         if (SeenConstant)
5256           return false;
5257         SeenConstant = true;
5258       } else if (!isa<Argument>(It.first) ||
5259                  It.first == getAssociatedArgument())
5260         return false;
5261     }
5262     return true;
5263   };
5264 
5265   const auto &NoUnwindAA =
5266       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5267   if (NoUnwindAA.isAssumedNoUnwind()) {
5268     bool IsVoidTy = F->getReturnType()->isVoidTy();
5269     const AAReturnedValues *RVAA =
5270         IsVoidTy ? nullptr
5271                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5272 
5273                                                  DepClassTy::OPTIONAL);
5274     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5275       T.addKnownBits(NOT_CAPTURED_IN_RET);
5276       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5277         return ChangeStatus::UNCHANGED;
5278       if (NoUnwindAA.isKnownNoUnwind() &&
5279           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5280         addKnownBits(NOT_CAPTURED_IN_RET);
5281         if (isKnown(NOT_CAPTURED_IN_MEM))
5282           return indicateOptimisticFixpoint();
5283       }
5284     }
5285   }
5286 
5287   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5288     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5289         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5290     return DerefAA.getAssumedDereferenceableBytes();
5291   };
5292 
5293   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5294     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5295     case UseCaptureKind::NO_CAPTURE:
5296       return true;
5297     case UseCaptureKind::MAY_CAPTURE:
5298       return checkUse(A, T, U, Follow);
5299     case UseCaptureKind::PASSTHROUGH:
5300       Follow = true;
5301       return true;
5302     }
5303     llvm_unreachable("Unexpected use capture kind!");
5304   };
5305 
5306   if (!A.checkForAllUses(UseCheck, *this, *V))
5307     return indicatePessimisticFixpoint();
5308 
5309   AANoCapture::StateType &S = getState();
5310   auto Assumed = S.getAssumed();
5311   S.intersectAssumedBits(T.getAssumed());
5312   if (!isAssumedNoCaptureMaybeReturned())
5313     return indicatePessimisticFixpoint();
5314   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5315                                    : ChangeStatus::CHANGED;
5316 }
5317 
5318 /// NoCapture attribute for function arguments.
5319 struct AANoCaptureArgument final : AANoCaptureImpl {
5320   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5321       : AANoCaptureImpl(IRP, A) {}
5322 
5323   /// See AbstractAttribute::trackStatistics()
5324   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5325 };
5326 
5327 /// NoCapture attribute for call site arguments.
5328 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5329   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5330       : AANoCaptureImpl(IRP, A) {}
5331 
5332   /// See AbstractAttribute::initialize(...).
5333   void initialize(Attributor &A) override {
5334     if (Argument *Arg = getAssociatedArgument())
5335       if (Arg->hasByValAttr())
5336         indicateOptimisticFixpoint();
5337     AANoCaptureImpl::initialize(A);
5338   }
5339 
5340   /// See AbstractAttribute::updateImpl(...).
5341   ChangeStatus updateImpl(Attributor &A) override {
5342     // TODO: Once we have call site specific value information we can provide
5343     //       call site specific liveness information and then it makes
5344     //       sense to specialize attributes for call sites arguments instead of
5345     //       redirecting requests to the callee argument.
5346     Argument *Arg = getAssociatedArgument();
5347     if (!Arg)
5348       return indicatePessimisticFixpoint();
5349     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5350     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5351     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5352   }
5353 
5354   /// See AbstractAttribute::trackStatistics()
5355   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5356 };
5357 
5358 /// NoCapture attribute for floating values.
5359 struct AANoCaptureFloating final : AANoCaptureImpl {
5360   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5361       : AANoCaptureImpl(IRP, A) {}
5362 
5363   /// See AbstractAttribute::trackStatistics()
5364   void trackStatistics() const override {
5365     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5366   }
5367 };
5368 
5369 /// NoCapture attribute for function return value.
5370 struct AANoCaptureReturned final : AANoCaptureImpl {
5371   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5372       : AANoCaptureImpl(IRP, A) {
5373     llvm_unreachable("NoCapture is not applicable to function returns!");
5374   }
5375 
5376   /// See AbstractAttribute::initialize(...).
5377   void initialize(Attributor &A) override {
5378     llvm_unreachable("NoCapture is not applicable to function returns!");
5379   }
5380 
5381   /// See AbstractAttribute::updateImpl(...).
5382   ChangeStatus updateImpl(Attributor &A) override {
5383     llvm_unreachable("NoCapture is not applicable to function returns!");
5384   }
5385 
5386   /// See AbstractAttribute::trackStatistics()
5387   void trackStatistics() const override {}
5388 };
5389 
5390 /// NoCapture attribute deduction for a call site return value.
5391 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5392   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5393       : AANoCaptureImpl(IRP, A) {}
5394 
5395   /// See AbstractAttribute::initialize(...).
5396   void initialize(Attributor &A) override {
5397     const Function *F = getAnchorScope();
5398     // Check what state the associated function can actually capture.
5399     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5400   }
5401 
5402   /// See AbstractAttribute::trackStatistics()
5403   void trackStatistics() const override {
5404     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5405   }
5406 };
5407 } // namespace
5408 
5409 /// ------------------ Value Simplify Attribute ----------------------------
5410 
5411 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5412   // FIXME: Add a typecast support.
5413   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5414       SimplifiedAssociatedValue, Other, Ty);
5415   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5416     return false;
5417 
5418   LLVM_DEBUG({
5419     if (SimplifiedAssociatedValue)
5420       dbgs() << "[ValueSimplify] is assumed to be "
5421              << **SimplifiedAssociatedValue << "\n";
5422     else
5423       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5424   });
5425   return true;
5426 }
5427 
5428 namespace {
5429 struct AAValueSimplifyImpl : AAValueSimplify {
5430   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5431       : AAValueSimplify(IRP, A) {}
5432 
5433   /// See AbstractAttribute::initialize(...).
5434   void initialize(Attributor &A) override {
5435     if (getAssociatedValue().getType()->isVoidTy())
5436       indicatePessimisticFixpoint();
5437     if (A.hasSimplificationCallback(getIRPosition()))
5438       indicatePessimisticFixpoint();
5439   }
5440 
5441   /// See AbstractAttribute::getAsStr().
5442   const std::string getAsStr() const override {
5443     LLVM_DEBUG({
5444       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5445       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5446         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5447     });
5448     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5449                           : "not-simple";
5450   }
5451 
5452   /// See AbstractAttribute::trackStatistics()
5453   void trackStatistics() const override {}
5454 
5455   /// See AAValueSimplify::getAssumedSimplifiedValue()
5456   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5457     return SimplifiedAssociatedValue;
5458   }
5459 
5460   /// Ensure the return value is \p V with type \p Ty, if not possible return
5461   /// nullptr. If \p Check is true we will only verify such an operation would
5462   /// suceed and return a non-nullptr value if that is the case. No IR is
5463   /// generated or modified.
5464   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5465                            bool Check) {
5466     if (auto *TypedV = AA::getWithType(V, Ty))
5467       return TypedV;
5468     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5469       return Check ? &V
5470                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5471                                                                       "", CtxI);
5472     return nullptr;
5473   }
5474 
5475   /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble.
5476   /// If \p Check is true we will only verify such an operation would suceed and
5477   /// return a non-nullptr value if that is the case. No IR is generated or
5478   /// modified.
5479   static Value *reproduceInst(Attributor &A,
5480                               const AbstractAttribute &QueryingAA,
5481                               Instruction &I, Type &Ty, Instruction *CtxI,
5482                               bool Check, ValueToValueMapTy &VMap) {
5483     assert(CtxI && "Cannot reproduce an instruction without context!");
5484     if (Check && (I.mayReadFromMemory() ||
5485                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5486                                                 /* TLI */ nullptr)))
5487       return nullptr;
5488     for (Value *Op : I.operands()) {
5489       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5490       if (!NewOp) {
5491         assert(Check && "Manifest of new value unexpectedly failed!");
5492         return nullptr;
5493       }
5494       if (!Check)
5495         VMap[Op] = NewOp;
5496     }
5497     if (Check)
5498       return &I;
5499 
5500     Instruction *CloneI = I.clone();
5501     // TODO: Try to salvage debug information here.
5502     CloneI->setDebugLoc(DebugLoc());
5503     VMap[&I] = CloneI;
5504     CloneI->insertBefore(CtxI);
5505     RemapInstruction(CloneI, VMap);
5506     return CloneI;
5507   }
5508 
5509   /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble.
5510   /// If \p Check is true we will only verify such an operation would suceed and
5511   /// return a non-nullptr value if that is the case. No IR is generated or
5512   /// modified.
5513   static Value *reproduceValue(Attributor &A,
5514                                const AbstractAttribute &QueryingAA, Value &V,
5515                                Type &Ty, Instruction *CtxI, bool Check,
5516                                ValueToValueMapTy &VMap) {
5517     if (const auto &NewV = VMap.lookup(&V))
5518       return NewV;
5519     bool UsedAssumedInformation = false;
5520     Optional<Value *> SimpleV =
5521         A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
5522     if (!SimpleV)
5523       return PoisonValue::get(&Ty);
5524     Value *EffectiveV = &V;
5525     if (SimpleV.getValue())
5526       EffectiveV = SimpleV.getValue();
5527     if (auto *C = dyn_cast<Constant>(EffectiveV))
5528       if (!C->canTrap())
5529         return C;
5530     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5531                                       A.getInfoCache()))
5532       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5533     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5534       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5535         return ensureType(A, *NewV, Ty, CtxI, Check);
5536     return nullptr;
5537   }
5538 
5539   /// Return a value we can use as replacement for the associated one, or
5540   /// nullptr if we don't have one that makes sense.
5541   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5542     Value *NewV = SimplifiedAssociatedValue
5543                       ? SimplifiedAssociatedValue.getValue()
5544                       : UndefValue::get(getAssociatedType());
5545     if (NewV && NewV != &getAssociatedValue()) {
5546       ValueToValueMapTy VMap;
5547       // First verify we can reprduce the value with the required type at the
5548       // context location before we actually start modifying the IR.
5549       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5550                          /* CheckOnly */ true, VMap))
5551         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5552                               /* CheckOnly */ false, VMap);
5553     }
5554     return nullptr;
5555   }
5556 
5557   /// Helper function for querying AAValueSimplify and updating candicate.
5558   /// \param IRP The value position we are trying to unify with SimplifiedValue
5559   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5560                       const IRPosition &IRP, bool Simplify = true) {
5561     bool UsedAssumedInformation = false;
5562     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5563     if (Simplify)
5564       QueryingValueSimplified =
5565           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5566     return unionAssumed(QueryingValueSimplified);
5567   }
5568 
5569   /// Returns a candidate is found or not
5570   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5571     if (!getAssociatedValue().getType()->isIntegerTy())
5572       return false;
5573 
5574     // This will also pass the call base context.
5575     const auto &AA =
5576         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5577 
5578     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5579 
5580     if (!COpt) {
5581       SimplifiedAssociatedValue = llvm::None;
5582       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5583       return true;
5584     }
5585     if (auto *C = *COpt) {
5586       SimplifiedAssociatedValue = C;
5587       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5588       return true;
5589     }
5590     return false;
5591   }
5592 
5593   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5594     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5595       return true;
5596     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5597       return true;
5598     return false;
5599   }
5600 
5601   /// See AbstractAttribute::manifest(...).
5602   ChangeStatus manifest(Attributor &A) override {
5603     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5604     for (auto &U : getAssociatedValue().uses()) {
5605       // Check if we need to adjust the insertion point to make sure the IR is
5606       // valid.
5607       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5608       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5609         IP = PHI->getIncomingBlock(U)->getTerminator();
5610       if (auto *NewV = manifestReplacementValue(A, IP)) {
5611         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5612                           << " -> " << *NewV << " :: " << *this << "\n");
5613         if (A.changeUseAfterManifest(U, *NewV))
5614           Changed = ChangeStatus::CHANGED;
5615       }
5616     }
5617 
5618     return Changed | AAValueSimplify::manifest(A);
5619   }
5620 
5621   /// See AbstractState::indicatePessimisticFixpoint(...).
5622   ChangeStatus indicatePessimisticFixpoint() override {
5623     SimplifiedAssociatedValue = &getAssociatedValue();
5624     return AAValueSimplify::indicatePessimisticFixpoint();
5625   }
5626 };
5627 
5628 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5629   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5630       : AAValueSimplifyImpl(IRP, A) {}
5631 
5632   void initialize(Attributor &A) override {
5633     AAValueSimplifyImpl::initialize(A);
5634     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5635       indicatePessimisticFixpoint();
5636     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5637                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5638                 /* IgnoreSubsumingPositions */ true))
5639       indicatePessimisticFixpoint();
5640   }
5641 
5642   /// See AbstractAttribute::updateImpl(...).
5643   ChangeStatus updateImpl(Attributor &A) override {
5644     // Byval is only replacable if it is readonly otherwise we would write into
5645     // the replaced value and not the copy that byval creates implicitly.
5646     Argument *Arg = getAssociatedArgument();
5647     if (Arg->hasByValAttr()) {
5648       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5649       //       there is no race by not copying a constant byval.
5650       bool IsKnown;
5651       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5652         return indicatePessimisticFixpoint();
5653     }
5654 
5655     auto Before = SimplifiedAssociatedValue;
5656 
5657     auto PredForCallSite = [&](AbstractCallSite ACS) {
5658       const IRPosition &ACSArgPos =
5659           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5660       // Check if a coresponding argument was found or if it is on not
5661       // associated (which can happen for callback calls).
5662       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5663         return false;
5664 
5665       // Simplify the argument operand explicitly and check if the result is
5666       // valid in the current scope. This avoids refering to simplified values
5667       // in other functions, e.g., we don't want to say a an argument in a
5668       // static function is actually an argument in a different function.
5669       bool UsedAssumedInformation = false;
5670       Optional<Constant *> SimpleArgOp =
5671           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5672       if (!SimpleArgOp)
5673         return true;
5674       if (!SimpleArgOp.getValue())
5675         return false;
5676       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5677         return false;
5678       return unionAssumed(*SimpleArgOp);
5679     };
5680 
5681     // Generate a answer specific to a call site context.
5682     bool Success;
5683     bool UsedAssumedInformation = false;
5684     if (hasCallBaseContext() &&
5685         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5686       Success = PredForCallSite(
5687           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5688     else
5689       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5690                                        UsedAssumedInformation);
5691 
5692     if (!Success)
5693       if (!askSimplifiedValueForOtherAAs(A))
5694         return indicatePessimisticFixpoint();
5695 
5696     // If a candicate was found in this update, return CHANGED.
5697     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5698                                                : ChangeStatus ::CHANGED;
5699   }
5700 
5701   /// See AbstractAttribute::trackStatistics()
5702   void trackStatistics() const override {
5703     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5704   }
5705 };
5706 
5707 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5708   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5709       : AAValueSimplifyImpl(IRP, A) {}
5710 
5711   /// See AAValueSimplify::getAssumedSimplifiedValue()
5712   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5713     if (!isValidState())
5714       return nullptr;
5715     return SimplifiedAssociatedValue;
5716   }
5717 
5718   /// See AbstractAttribute::updateImpl(...).
5719   ChangeStatus updateImpl(Attributor &A) override {
5720     auto Before = SimplifiedAssociatedValue;
5721 
5722     auto ReturnInstCB = [&](Instruction &I) {
5723       auto &RI = cast<ReturnInst>(I);
5724       return checkAndUpdate(
5725           A, *this,
5726           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5727     };
5728 
5729     bool UsedAssumedInformation = false;
5730     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5731                                    UsedAssumedInformation))
5732       if (!askSimplifiedValueForOtherAAs(A))
5733         return indicatePessimisticFixpoint();
5734 
5735     // If a candicate was found in this update, return CHANGED.
5736     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5737                                                : ChangeStatus ::CHANGED;
5738   }
5739 
5740   ChangeStatus manifest(Attributor &A) override {
5741     // We queried AAValueSimplify for the returned values so they will be
5742     // replaced if a simplified form was found. Nothing to do here.
5743     return ChangeStatus::UNCHANGED;
5744   }
5745 
5746   /// See AbstractAttribute::trackStatistics()
5747   void trackStatistics() const override {
5748     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5749   }
5750 };
5751 
5752 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5753   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5754       : AAValueSimplifyImpl(IRP, A) {}
5755 
5756   /// See AbstractAttribute::initialize(...).
5757   void initialize(Attributor &A) override {
5758     AAValueSimplifyImpl::initialize(A);
5759     Value &V = getAnchorValue();
5760 
5761     // TODO: add other stuffs
5762     if (isa<Constant>(V))
5763       indicatePessimisticFixpoint();
5764   }
5765 
5766   /// Check if \p Cmp is a comparison we can simplify.
5767   ///
5768   /// We handle multiple cases, one in which at least one operand is an
5769   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5770   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5771   /// will be updated.
5772   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5773     auto Union = [&](Value &V) {
5774       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5775           SimplifiedAssociatedValue, &V, V.getType());
5776       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5777     };
5778 
5779     Value *LHS = Cmp.getOperand(0);
5780     Value *RHS = Cmp.getOperand(1);
5781 
5782     // Simplify the operands first.
5783     bool UsedAssumedInformation = false;
5784     const auto &SimplifiedLHS =
5785         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5786                                *this, UsedAssumedInformation);
5787     if (!SimplifiedLHS)
5788       return true;
5789     if (!SimplifiedLHS.getValue())
5790       return false;
5791     LHS = *SimplifiedLHS;
5792 
5793     const auto &SimplifiedRHS =
5794         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5795                                *this, UsedAssumedInformation);
5796     if (!SimplifiedRHS)
5797       return true;
5798     if (!SimplifiedRHS.getValue())
5799       return false;
5800     RHS = *SimplifiedRHS;
5801 
5802     LLVMContext &Ctx = Cmp.getContext();
5803     // Handle the trivial case first in which we don't even need to think about
5804     // null or non-null.
5805     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5806       Constant *NewVal =
5807           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5808       if (!Union(*NewVal))
5809         return false;
5810       if (!UsedAssumedInformation)
5811         indicateOptimisticFixpoint();
5812       return true;
5813     }
5814 
5815     // From now on we only handle equalities (==, !=).
5816     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5817     if (!ICmp || !ICmp->isEquality())
5818       return false;
5819 
5820     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5821     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5822     if (!LHSIsNull && !RHSIsNull)
5823       return false;
5824 
5825     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5826     // non-nullptr operand and if we assume it's non-null we can conclude the
5827     // result of the comparison.
5828     assert((LHSIsNull || RHSIsNull) &&
5829            "Expected nullptr versus non-nullptr comparison at this point");
5830 
5831     // The index is the operand that we assume is not null.
5832     unsigned PtrIdx = LHSIsNull;
5833     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5834         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5835         DepClassTy::REQUIRED);
5836     if (!PtrNonNullAA.isAssumedNonNull())
5837       return false;
5838     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5839 
5840     // The new value depends on the predicate, true for != and false for ==.
5841     Constant *NewVal = ConstantInt::get(
5842         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5843     if (!Union(*NewVal))
5844       return false;
5845 
5846     if (!UsedAssumedInformation)
5847       indicateOptimisticFixpoint();
5848 
5849     return true;
5850   }
5851 
5852   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5853   /// simplify any operand of the instruction \p I. Return true if successful,
5854   /// in that case SimplifiedAssociatedValue will be updated.
5855   bool handleGenericInst(Attributor &A, Instruction &I) {
5856     bool SomeSimplified = false;
5857     bool UsedAssumedInformation = false;
5858 
5859     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5860     int Idx = 0;
5861     for (Value *Op : I.operands()) {
5862       const auto &SimplifiedOp =
5863           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5864                                  *this, UsedAssumedInformation);
5865       // If we are not sure about any operand we are not sure about the entire
5866       // instruction, we'll wait.
5867       if (!SimplifiedOp)
5868         return true;
5869 
5870       if (SimplifiedOp.getValue())
5871         NewOps[Idx] = SimplifiedOp.getValue();
5872       else
5873         NewOps[Idx] = Op;
5874 
5875       SomeSimplified |= (NewOps[Idx] != Op);
5876       ++Idx;
5877     }
5878 
5879     // We won't bother with the InstSimplify interface if we didn't simplify any
5880     // operand ourselves.
5881     if (!SomeSimplified)
5882       return false;
5883 
5884     InformationCache &InfoCache = A.getInfoCache();
5885     Function *F = I.getFunction();
5886     const auto *DT =
5887         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5888     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5889     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5890     OptimizationRemarkEmitter *ORE = nullptr;
5891 
5892     const DataLayout &DL = I.getModule()->getDataLayout();
5893     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5894     if (Value *SimplifiedI =
5895             simplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5896       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5897           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5898       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5899     }
5900     return false;
5901   }
5902 
5903   /// See AbstractAttribute::updateImpl(...).
5904   ChangeStatus updateImpl(Attributor &A) override {
5905     auto Before = SimplifiedAssociatedValue;
5906 
5907     // Do not simplify loads that are only used in llvm.assume if we cannot also
5908     // remove all stores that may feed into the load. The reason is that the
5909     // assume is probably worth something as long as the stores are around.
5910     if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
5911       InformationCache &InfoCache = A.getInfoCache();
5912       if (InfoCache.isOnlyUsedByAssume(*LI)) {
5913         SmallSetVector<Value *, 4> PotentialCopies;
5914         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
5915         bool UsedAssumedInformation = false;
5916         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
5917                                            PotentialValueOrigins, *this,
5918                                            UsedAssumedInformation,
5919                                            /* OnlyExact */ true)) {
5920           if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
5921                 if (!I)
5922                   return true;
5923                 if (auto *SI = dyn_cast<StoreInst>(I))
5924                   return A.isAssumedDead(SI->getOperandUse(0), this,
5925                                          /* LivenessAA */ nullptr,
5926                                          UsedAssumedInformation,
5927                                          /* CheckBBLivenessOnly */ false);
5928                 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
5929                                        UsedAssumedInformation,
5930                                        /* CheckBBLivenessOnly */ false);
5931               }))
5932             return indicatePessimisticFixpoint();
5933         }
5934       }
5935     }
5936 
5937     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5938                             bool Stripped) -> bool {
5939       auto &AA = A.getAAFor<AAValueSimplify>(
5940           *this, IRPosition::value(V, getCallBaseContext()),
5941           DepClassTy::REQUIRED);
5942       if (!Stripped && this == &AA) {
5943 
5944         if (auto *I = dyn_cast<Instruction>(&V)) {
5945           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5946             if (handleCmp(A, *Cmp))
5947               return true;
5948           if (handleGenericInst(A, *I))
5949             return true;
5950         }
5951         // TODO: Look the instruction and check recursively.
5952 
5953         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5954                           << "\n");
5955         return false;
5956       }
5957       return checkAndUpdate(A, *this,
5958                             IRPosition::value(V, getCallBaseContext()));
5959     };
5960 
5961     bool Dummy = false;
5962     bool UsedAssumedInformation = false;
5963     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5964                                      VisitValueCB, getCtxI(),
5965                                      UsedAssumedInformation,
5966                                      /* UseValueSimplify */ false))
5967       if (!askSimplifiedValueForOtherAAs(A))
5968         return indicatePessimisticFixpoint();
5969 
5970     // If a candicate was found in this update, return CHANGED.
5971     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5972                                                : ChangeStatus ::CHANGED;
5973   }
5974 
5975   /// See AbstractAttribute::trackStatistics()
5976   void trackStatistics() const override {
5977     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5978   }
5979 };
5980 
5981 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5982   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5983       : AAValueSimplifyImpl(IRP, A) {}
5984 
5985   /// See AbstractAttribute::initialize(...).
5986   void initialize(Attributor &A) override {
5987     SimplifiedAssociatedValue = nullptr;
5988     indicateOptimisticFixpoint();
5989   }
5990   /// See AbstractAttribute::initialize(...).
5991   ChangeStatus updateImpl(Attributor &A) override {
5992     llvm_unreachable(
5993         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5994   }
5995   /// See AbstractAttribute::trackStatistics()
5996   void trackStatistics() const override {
5997     STATS_DECLTRACK_FN_ATTR(value_simplify)
5998   }
5999 };
6000 
6001 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
6002   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
6003       : AAValueSimplifyFunction(IRP, A) {}
6004   /// See AbstractAttribute::trackStatistics()
6005   void trackStatistics() const override {
6006     STATS_DECLTRACK_CS_ATTR(value_simplify)
6007   }
6008 };
6009 
6010 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
6011   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
6012       : AAValueSimplifyImpl(IRP, A) {}
6013 
6014   void initialize(Attributor &A) override {
6015     AAValueSimplifyImpl::initialize(A);
6016     Function *Fn = getAssociatedFunction();
6017     if (!Fn) {
6018       indicatePessimisticFixpoint();
6019       return;
6020     }
6021     for (Argument &Arg : Fn->args()) {
6022       if (Arg.hasReturnedAttr()) {
6023         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
6024                                                  Arg.getArgNo());
6025         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
6026             checkAndUpdate(A, *this, IRP))
6027           indicateOptimisticFixpoint();
6028         else
6029           indicatePessimisticFixpoint();
6030         return;
6031       }
6032     }
6033   }
6034 
6035   /// See AbstractAttribute::updateImpl(...).
6036   ChangeStatus updateImpl(Attributor &A) override {
6037     auto Before = SimplifiedAssociatedValue;
6038     auto &RetAA = A.getAAFor<AAReturnedValues>(
6039         *this, IRPosition::function(*getAssociatedFunction()),
6040         DepClassTy::REQUIRED);
6041     auto PredForReturned =
6042         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
6043           bool UsedAssumedInformation = false;
6044           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
6045               &RetVal, *cast<CallBase>(getCtxI()), *this,
6046               UsedAssumedInformation);
6047           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6048               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
6049           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
6050         };
6051     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
6052       if (!askSimplifiedValueForOtherAAs(A))
6053         return indicatePessimisticFixpoint();
6054     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6055                                                : ChangeStatus ::CHANGED;
6056   }
6057 
6058   void trackStatistics() const override {
6059     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6060   }
6061 };
6062 
6063 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6064   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6065       : AAValueSimplifyFloating(IRP, A) {}
6066 
6067   /// See AbstractAttribute::manifest(...).
6068   ChangeStatus manifest(Attributor &A) override {
6069     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6070     // TODO: We should avoid simplification duplication to begin with.
6071     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6072         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6073     if (FloatAA && FloatAA->getState().isValidState())
6074       return Changed;
6075 
6076     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6077       Use &U = cast<CallBase>(&getAnchorValue())
6078                    ->getArgOperandUse(getCallSiteArgNo());
6079       if (A.changeUseAfterManifest(U, *NewV))
6080         Changed = ChangeStatus::CHANGED;
6081     }
6082 
6083     return Changed | AAValueSimplify::manifest(A);
6084   }
6085 
6086   void trackStatistics() const override {
6087     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6088   }
6089 };
6090 } // namespace
6091 
6092 /// ----------------------- Heap-To-Stack Conversion ---------------------------
6093 namespace {
6094 struct AAHeapToStackFunction final : public AAHeapToStack {
6095 
6096   struct AllocationInfo {
6097     /// The call that allocates the memory.
6098     CallBase *const CB;
6099 
6100     /// The library function id for the allocation.
6101     LibFunc LibraryFunctionId = NotLibFunc;
6102 
6103     /// The status wrt. a rewrite.
6104     enum {
6105       STACK_DUE_TO_USE,
6106       STACK_DUE_TO_FREE,
6107       INVALID,
6108     } Status = STACK_DUE_TO_USE;
6109 
6110     /// Flag to indicate if we encountered a use that might free this allocation
6111     /// but which is not in the deallocation infos.
6112     bool HasPotentiallyFreeingUnknownUses = false;
6113 
6114     /// The set of free calls that use this allocation.
6115     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6116   };
6117 
6118   struct DeallocationInfo {
6119     /// The call that deallocates the memory.
6120     CallBase *const CB;
6121 
6122     /// Flag to indicate if we don't know all objects this deallocation might
6123     /// free.
6124     bool MightFreeUnknownObjects = false;
6125 
6126     /// The set of allocation calls that are potentially freed.
6127     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6128   };
6129 
6130   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6131       : AAHeapToStack(IRP, A) {}
6132 
6133   ~AAHeapToStackFunction() {
6134     // Ensure we call the destructor so we release any memory allocated in the
6135     // sets.
6136     for (auto &It : AllocationInfos)
6137       It.second->~AllocationInfo();
6138     for (auto &It : DeallocationInfos)
6139       It.second->~DeallocationInfo();
6140   }
6141 
6142   void initialize(Attributor &A) override {
6143     AAHeapToStack::initialize(A);
6144 
6145     const Function *F = getAnchorScope();
6146     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6147 
6148     auto AllocationIdentifierCB = [&](Instruction &I) {
6149       CallBase *CB = dyn_cast<CallBase>(&I);
6150       if (!CB)
6151         return true;
6152       if (isFreeCall(CB, TLI)) {
6153         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
6154         return true;
6155       }
6156       // To do heap to stack, we need to know that the allocation itself is
6157       // removable once uses are rewritten, and that we can initialize the
6158       // alloca to the same pattern as the original allocation result.
6159       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
6160         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6161         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6162           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6163           AllocationInfos[CB] = AI;
6164           if (TLI)
6165             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6166         }
6167       }
6168       return true;
6169     };
6170 
6171     bool UsedAssumedInformation = false;
6172     bool Success = A.checkForAllCallLikeInstructions(
6173         AllocationIdentifierCB, *this, UsedAssumedInformation,
6174         /* CheckBBLivenessOnly */ false,
6175         /* CheckPotentiallyDead */ true);
6176     (void)Success;
6177     assert(Success && "Did not expect the call base visit callback to fail!");
6178 
6179     Attributor::SimplifictionCallbackTy SCB =
6180         [](const IRPosition &, const AbstractAttribute *,
6181            bool &) -> Optional<Value *> { return nullptr; };
6182     for (const auto &It : AllocationInfos)
6183       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6184                                        SCB);
6185     for (const auto &It : DeallocationInfos)
6186       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6187                                        SCB);
6188   }
6189 
6190   const std::string getAsStr() const override {
6191     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6192     for (const auto &It : AllocationInfos) {
6193       if (It.second->Status == AllocationInfo::INVALID)
6194         ++NumInvalidMallocs;
6195       else
6196         ++NumH2SMallocs;
6197     }
6198     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6199            std::to_string(NumInvalidMallocs);
6200   }
6201 
6202   /// See AbstractAttribute::trackStatistics().
6203   void trackStatistics() const override {
6204     STATS_DECL(
6205         MallocCalls, Function,
6206         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6207     for (auto &It : AllocationInfos)
6208       if (It.second->Status != AllocationInfo::INVALID)
6209         ++BUILD_STAT_NAME(MallocCalls, Function);
6210   }
6211 
6212   bool isAssumedHeapToStack(const CallBase &CB) const override {
6213     if (isValidState())
6214       if (AllocationInfo *AI =
6215               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6216         return AI->Status != AllocationInfo::INVALID;
6217     return false;
6218   }
6219 
6220   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6221     if (!isValidState())
6222       return false;
6223 
6224     for (auto &It : AllocationInfos) {
6225       AllocationInfo &AI = *It.second;
6226       if (AI.Status == AllocationInfo::INVALID)
6227         continue;
6228 
6229       if (AI.PotentialFreeCalls.count(&CB))
6230         return true;
6231     }
6232 
6233     return false;
6234   }
6235 
6236   ChangeStatus manifest(Attributor &A) override {
6237     assert(getState().isValidState() &&
6238            "Attempted to manifest an invalid state!");
6239 
6240     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6241     Function *F = getAnchorScope();
6242     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6243 
6244     for (auto &It : AllocationInfos) {
6245       AllocationInfo &AI = *It.second;
6246       if (AI.Status == AllocationInfo::INVALID)
6247         continue;
6248 
6249       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6250         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6251         A.deleteAfterManifest(*FreeCall);
6252         HasChanged = ChangeStatus::CHANGED;
6253       }
6254 
6255       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6256                         << "\n");
6257 
6258       auto Remark = [&](OptimizationRemark OR) {
6259         LibFunc IsAllocShared;
6260         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6261           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6262             return OR << "Moving globalized variable to the stack.";
6263         return OR << "Moving memory allocation from the heap to the stack.";
6264       };
6265       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6266         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6267       else
6268         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6269 
6270       const DataLayout &DL = A.getInfoCache().getDL();
6271       Value *Size;
6272       Optional<APInt> SizeAPI = getSize(A, *this, AI);
6273       if (SizeAPI) {
6274         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6275       } else {
6276         LLVMContext &Ctx = AI.CB->getContext();
6277         ObjectSizeOpts Opts;
6278         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6279         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6280         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6281                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6282         Size = SizeOffsetPair.first;
6283       }
6284 
6285       Align Alignment(1);
6286       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6287         Alignment = std::max(Alignment, *RetAlign);
6288       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6289         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6290         assert(AlignmentAPI && AlignmentAPI.getValue().getZExtValue() > 0 &&
6291                "Expected an alignment during manifest!");
6292         Alignment = std::max(
6293             Alignment, assumeAligned(AlignmentAPI.getValue().getZExtValue()));
6294       }
6295 
6296       // TODO: Hoist the alloca towards the function entry.
6297       unsigned AS = DL.getAllocaAddrSpace();
6298       Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
6299                                            Size, Alignment, "", AI.CB);
6300 
6301       if (Alloca->getType() != AI.CB->getType())
6302         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6303             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6304 
6305       auto *I8Ty = Type::getInt8Ty(F->getContext());
6306       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6307       assert(InitVal &&
6308              "Must be able to materialize initial memory state of allocation");
6309 
6310       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6311 
6312       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6313         auto *NBB = II->getNormalDest();
6314         BranchInst::Create(NBB, AI.CB->getParent());
6315         A.deleteAfterManifest(*AI.CB);
6316       } else {
6317         A.deleteAfterManifest(*AI.CB);
6318       }
6319 
6320       // Initialize the alloca with the same value as used by the allocation
6321       // function.  We can skip undef as the initial value of an alloc is
6322       // undef, and the memset would simply end up being DSEd.
6323       if (!isa<UndefValue>(InitVal)) {
6324         IRBuilder<> Builder(Alloca->getNextNode());
6325         // TODO: Use alignment above if align!=1
6326         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6327       }
6328       HasChanged = ChangeStatus::CHANGED;
6329     }
6330 
6331     return HasChanged;
6332   }
6333 
6334   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6335                            Value &V) {
6336     bool UsedAssumedInformation = false;
6337     Optional<Constant *> SimpleV =
6338         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6339     if (!SimpleV)
6340       return APInt(64, 0);
6341     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6342       return CI->getValue();
6343     return llvm::None;
6344   }
6345 
6346   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6347                           AllocationInfo &AI) {
6348     auto Mapper = [&](const Value *V) -> const Value * {
6349       bool UsedAssumedInformation = false;
6350       if (Optional<Constant *> SimpleV =
6351               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6352         if (*SimpleV)
6353           return *SimpleV;
6354       return V;
6355     };
6356 
6357     const Function *F = getAnchorScope();
6358     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6359     return getAllocSize(AI.CB, TLI, Mapper);
6360   }
6361 
6362   /// Collection of all malloc-like calls in a function with associated
6363   /// information.
6364   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6365 
6366   /// Collection of all free-like calls in a function with associated
6367   /// information.
6368   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6369 
6370   ChangeStatus updateImpl(Attributor &A) override;
6371 };
6372 
6373 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6374   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6375   const Function *F = getAnchorScope();
6376   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6377 
6378   const auto &LivenessAA =
6379       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6380 
6381   MustBeExecutedContextExplorer &Explorer =
6382       A.getInfoCache().getMustBeExecutedContextExplorer();
6383 
6384   bool StackIsAccessibleByOtherThreads =
6385       A.getInfoCache().stackIsAccessibleByOtherThreads();
6386 
6387   // Flag to ensure we update our deallocation information at most once per
6388   // updateImpl call and only if we use the free check reasoning.
6389   bool HasUpdatedFrees = false;
6390 
6391   auto UpdateFrees = [&]() {
6392     HasUpdatedFrees = true;
6393 
6394     for (auto &It : DeallocationInfos) {
6395       DeallocationInfo &DI = *It.second;
6396       // For now we cannot use deallocations that have unknown inputs, skip
6397       // them.
6398       if (DI.MightFreeUnknownObjects)
6399         continue;
6400 
6401       // No need to analyze dead calls, ignore them instead.
6402       bool UsedAssumedInformation = false;
6403       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6404                           /* CheckBBLivenessOnly */ true))
6405         continue;
6406 
6407       // Use the optimistic version to get the freed objects, ignoring dead
6408       // branches etc.
6409       SmallVector<Value *, 8> Objects;
6410       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6411                                            *this, DI.CB,
6412                                            UsedAssumedInformation)) {
6413         LLVM_DEBUG(
6414             dbgs()
6415             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6416         DI.MightFreeUnknownObjects = true;
6417         continue;
6418       }
6419 
6420       // Check each object explicitly.
6421       for (auto *Obj : Objects) {
6422         // Free of null and undef can be ignored as no-ops (or UB in the latter
6423         // case).
6424         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6425           continue;
6426 
6427         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6428         if (!ObjCB) {
6429           LLVM_DEBUG(dbgs()
6430                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6431           DI.MightFreeUnknownObjects = true;
6432           continue;
6433         }
6434 
6435         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6436         if (!AI) {
6437           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6438                             << "\n");
6439           DI.MightFreeUnknownObjects = true;
6440           continue;
6441         }
6442 
6443         DI.PotentialAllocationCalls.insert(ObjCB);
6444       }
6445     }
6446   };
6447 
6448   auto FreeCheck = [&](AllocationInfo &AI) {
6449     // If the stack is not accessible by other threads, the "must-free" logic
6450     // doesn't apply as the pointer could be shared and needs to be places in
6451     // "shareable" memory.
6452     if (!StackIsAccessibleByOtherThreads) {
6453       auto &NoSyncAA =
6454           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6455       if (!NoSyncAA.isAssumedNoSync()) {
6456         LLVM_DEBUG(
6457             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6458                       "other threads and function is not nosync:\n");
6459         return false;
6460       }
6461     }
6462     if (!HasUpdatedFrees)
6463       UpdateFrees();
6464 
6465     // TODO: Allow multi exit functions that have different free calls.
6466     if (AI.PotentialFreeCalls.size() != 1) {
6467       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6468                         << AI.PotentialFreeCalls.size() << "\n");
6469       return false;
6470     }
6471     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6472     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6473     if (!DI) {
6474       LLVM_DEBUG(
6475           dbgs() << "[H2S] unique free call was not known as deallocation call "
6476                  << *UniqueFree << "\n");
6477       return false;
6478     }
6479     if (DI->MightFreeUnknownObjects) {
6480       LLVM_DEBUG(
6481           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6482       return false;
6483     }
6484     if (DI->PotentialAllocationCalls.empty())
6485       return true;
6486     if (DI->PotentialAllocationCalls.size() > 1) {
6487       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6488                         << DI->PotentialAllocationCalls.size()
6489                         << " different allocations\n");
6490       return false;
6491     }
6492     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6493       LLVM_DEBUG(
6494           dbgs()
6495           << "[H2S] unique free call not known to free this allocation but "
6496           << **DI->PotentialAllocationCalls.begin() << "\n");
6497       return false;
6498     }
6499     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6500     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6501       LLVM_DEBUG(
6502           dbgs()
6503           << "[H2S] unique free call might not be executed with the allocation "
6504           << *UniqueFree << "\n");
6505       return false;
6506     }
6507     return true;
6508   };
6509 
6510   auto UsesCheck = [&](AllocationInfo &AI) {
6511     bool ValidUsesOnly = true;
6512 
6513     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6514       Instruction *UserI = cast<Instruction>(U.getUser());
6515       if (isa<LoadInst>(UserI))
6516         return true;
6517       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6518         if (SI->getValueOperand() == U.get()) {
6519           LLVM_DEBUG(dbgs()
6520                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6521           ValidUsesOnly = false;
6522         } else {
6523           // A store into the malloc'ed memory is fine.
6524         }
6525         return true;
6526       }
6527       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6528         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6529           return true;
6530         if (DeallocationInfos.count(CB)) {
6531           AI.PotentialFreeCalls.insert(CB);
6532           return true;
6533         }
6534 
6535         unsigned ArgNo = CB->getArgOperandNo(&U);
6536 
6537         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6538             *this, IRPosition::callsite_argument(*CB, ArgNo),
6539             DepClassTy::OPTIONAL);
6540 
6541         // If a call site argument use is nofree, we are fine.
6542         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6543             *this, IRPosition::callsite_argument(*CB, ArgNo),
6544             DepClassTy::OPTIONAL);
6545 
6546         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6547         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6548         if (MaybeCaptured ||
6549             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6550              MaybeFreed)) {
6551           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6552 
6553           // Emit a missed remark if this is missed OpenMP globalization.
6554           auto Remark = [&](OptimizationRemarkMissed ORM) {
6555             return ORM
6556                    << "Could not move globalized variable to the stack. "
6557                       "Variable is potentially captured in call. Mark "
6558                       "parameter as `__attribute__((noescape))` to override.";
6559           };
6560 
6561           if (ValidUsesOnly &&
6562               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6563             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6564 
6565           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6566           ValidUsesOnly = false;
6567         }
6568         return true;
6569       }
6570 
6571       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6572           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6573         Follow = true;
6574         return true;
6575       }
6576       // Unknown user for which we can not track uses further (in a way that
6577       // makes sense).
6578       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6579       ValidUsesOnly = false;
6580       return true;
6581     };
6582     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6583       return false;
6584     return ValidUsesOnly;
6585   };
6586 
6587   // The actual update starts here. We look at all allocations and depending on
6588   // their status perform the appropriate check(s).
6589   for (auto &It : AllocationInfos) {
6590     AllocationInfo &AI = *It.second;
6591     if (AI.Status == AllocationInfo::INVALID)
6592       continue;
6593 
6594     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6595       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6596       if (!APAlign) {
6597         // Can't generate an alloca which respects the required alignment
6598         // on the allocation.
6599         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6600                           << "\n");
6601         AI.Status = AllocationInfo::INVALID;
6602         Changed = ChangeStatus::CHANGED;
6603         continue;
6604       } else {
6605         if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6606             !APAlign->isPowerOf2()) {
6607           LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6608                             << "\n");
6609           AI.Status = AllocationInfo::INVALID;
6610           Changed = ChangeStatus::CHANGED;
6611           continue;
6612         }
6613       }
6614     }
6615 
6616     if (MaxHeapToStackSize != -1) {
6617       Optional<APInt> Size = getSize(A, *this, AI);
6618       if (!Size || Size.getValue().ugt(MaxHeapToStackSize)) {
6619         LLVM_DEBUG({
6620           if (!Size)
6621             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6622           else
6623             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6624                    << MaxHeapToStackSize << "\n";
6625         });
6626 
6627         AI.Status = AllocationInfo::INVALID;
6628         Changed = ChangeStatus::CHANGED;
6629         continue;
6630       }
6631     }
6632 
6633     switch (AI.Status) {
6634     case AllocationInfo::STACK_DUE_TO_USE:
6635       if (UsesCheck(AI))
6636         continue;
6637       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6638       LLVM_FALLTHROUGH;
6639     case AllocationInfo::STACK_DUE_TO_FREE:
6640       if (FreeCheck(AI))
6641         continue;
6642       AI.Status = AllocationInfo::INVALID;
6643       Changed = ChangeStatus::CHANGED;
6644       continue;
6645     case AllocationInfo::INVALID:
6646       llvm_unreachable("Invalid allocations should never reach this point!");
6647     };
6648   }
6649 
6650   return Changed;
6651 }
6652 } // namespace
6653 
6654 /// ----------------------- Privatizable Pointers ------------------------------
6655 namespace {
6656 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6657   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6658       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6659 
6660   ChangeStatus indicatePessimisticFixpoint() override {
6661     AAPrivatizablePtr::indicatePessimisticFixpoint();
6662     PrivatizableType = nullptr;
6663     return ChangeStatus::CHANGED;
6664   }
6665 
6666   /// Identify the type we can chose for a private copy of the underlying
6667   /// argument. None means it is not clear yet, nullptr means there is none.
6668   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6669 
6670   /// Return a privatizable type that encloses both T0 and T1.
6671   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6672   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6673     if (!T0)
6674       return T1;
6675     if (!T1)
6676       return T0;
6677     if (T0 == T1)
6678       return T0;
6679     return nullptr;
6680   }
6681 
6682   Optional<Type *> getPrivatizableType() const override {
6683     return PrivatizableType;
6684   }
6685 
6686   const std::string getAsStr() const override {
6687     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6688   }
6689 
6690 protected:
6691   Optional<Type *> PrivatizableType;
6692 };
6693 
6694 // TODO: Do this for call site arguments (probably also other values) as well.
6695 
6696 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6697   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6698       : AAPrivatizablePtrImpl(IRP, A) {}
6699 
6700   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6701   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6702     // If this is a byval argument and we know all the call sites (so we can
6703     // rewrite them), there is no need to check them explicitly.
6704     bool UsedAssumedInformation = false;
6705     SmallVector<Attribute, 1> Attrs;
6706     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6707     if (!Attrs.empty() &&
6708         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6709                                true, UsedAssumedInformation))
6710       return Attrs[0].getValueAsType();
6711 
6712     Optional<Type *> Ty;
6713     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6714 
6715     // Make sure the associated call site argument has the same type at all call
6716     // sites and it is an allocation we know is safe to privatize, for now that
6717     // means we only allow alloca instructions.
6718     // TODO: We can additionally analyze the accesses in the callee to  create
6719     //       the type from that information instead. That is a little more
6720     //       involved and will be done in a follow up patch.
6721     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6722       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6723       // Check if a coresponding argument was found or if it is one not
6724       // associated (which can happen for callback calls).
6725       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6726         return false;
6727 
6728       // Check that all call sites agree on a type.
6729       auto &PrivCSArgAA =
6730           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6731       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6732 
6733       LLVM_DEBUG({
6734         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6735         if (CSTy && CSTy.getValue())
6736           CSTy.getValue()->print(dbgs());
6737         else if (CSTy)
6738           dbgs() << "<nullptr>";
6739         else
6740           dbgs() << "<none>";
6741       });
6742 
6743       Ty = combineTypes(Ty, CSTy);
6744 
6745       LLVM_DEBUG({
6746         dbgs() << " : New Type: ";
6747         if (Ty && Ty.getValue())
6748           Ty.getValue()->print(dbgs());
6749         else if (Ty)
6750           dbgs() << "<nullptr>";
6751         else
6752           dbgs() << "<none>";
6753         dbgs() << "\n";
6754       });
6755 
6756       return !Ty || Ty.getValue();
6757     };
6758 
6759     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6760                                 UsedAssumedInformation))
6761       return nullptr;
6762     return Ty;
6763   }
6764 
6765   /// See AbstractAttribute::updateImpl(...).
6766   ChangeStatus updateImpl(Attributor &A) override {
6767     PrivatizableType = identifyPrivatizableType(A);
6768     if (!PrivatizableType)
6769       return ChangeStatus::UNCHANGED;
6770     if (!PrivatizableType.getValue())
6771       return indicatePessimisticFixpoint();
6772 
6773     // The dependence is optional so we don't give up once we give up on the
6774     // alignment.
6775     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6776                         DepClassTy::OPTIONAL);
6777 
6778     // Avoid arguments with padding for now.
6779     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6780         !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
6781       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6782       return indicatePessimisticFixpoint();
6783     }
6784 
6785     // Collect the types that will replace the privatizable type in the function
6786     // signature.
6787     SmallVector<Type *, 16> ReplacementTypes;
6788     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6789 
6790     // Verify callee and caller agree on how the promoted argument would be
6791     // passed.
6792     Function &Fn = *getIRPosition().getAnchorScope();
6793     const auto *TTI =
6794         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6795     if (!TTI) {
6796       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6797                         << Fn.getName() << "\n");
6798       return indicatePessimisticFixpoint();
6799     }
6800 
6801     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6802       CallBase *CB = ACS.getInstruction();
6803       return TTI->areTypesABICompatible(
6804           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6805     };
6806     bool UsedAssumedInformation = false;
6807     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6808                                 UsedAssumedInformation)) {
6809       LLVM_DEBUG(
6810           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6811                  << Fn.getName() << "\n");
6812       return indicatePessimisticFixpoint();
6813     }
6814 
6815     // Register a rewrite of the argument.
6816     Argument *Arg = getAssociatedArgument();
6817     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6818       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6819       return indicatePessimisticFixpoint();
6820     }
6821 
6822     unsigned ArgNo = Arg->getArgNo();
6823 
6824     // Helper to check if for the given call site the associated argument is
6825     // passed to a callback where the privatization would be different.
6826     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6827       SmallVector<const Use *, 4> CallbackUses;
6828       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6829       for (const Use *U : CallbackUses) {
6830         AbstractCallSite CBACS(U);
6831         assert(CBACS && CBACS.isCallbackCall());
6832         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6833           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6834 
6835           LLVM_DEBUG({
6836             dbgs()
6837                 << "[AAPrivatizablePtr] Argument " << *Arg
6838                 << "check if can be privatized in the context of its parent ("
6839                 << Arg->getParent()->getName()
6840                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6841                    "callback ("
6842                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6843                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6844                 << CBACS.getCallArgOperand(CBArg) << " vs "
6845                 << CB.getArgOperand(ArgNo) << "\n"
6846                 << "[AAPrivatizablePtr] " << CBArg << " : "
6847                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6848           });
6849 
6850           if (CBArgNo != int(ArgNo))
6851             continue;
6852           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6853               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6854           if (CBArgPrivAA.isValidState()) {
6855             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6856             if (!CBArgPrivTy)
6857               continue;
6858             if (CBArgPrivTy.getValue() == PrivatizableType)
6859               continue;
6860           }
6861 
6862           LLVM_DEBUG({
6863             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6864                    << " cannot be privatized in the context of its parent ("
6865                    << Arg->getParent()->getName()
6866                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6867                       "callback ("
6868                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6869                    << ").\n[AAPrivatizablePtr] for which the argument "
6870                       "privatization is not compatible.\n";
6871           });
6872           return false;
6873         }
6874       }
6875       return true;
6876     };
6877 
6878     // Helper to check if for the given call site the associated argument is
6879     // passed to a direct call where the privatization would be different.
6880     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6881       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6882       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6883       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6884              "Expected a direct call operand for callback call operand");
6885 
6886       LLVM_DEBUG({
6887         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6888                << " check if be privatized in the context of its parent ("
6889                << Arg->getParent()->getName()
6890                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6891                   "direct call of ("
6892                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6893                << ").\n";
6894       });
6895 
6896       Function *DCCallee = DC->getCalledFunction();
6897       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6898         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6899             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6900             DepClassTy::REQUIRED);
6901         if (DCArgPrivAA.isValidState()) {
6902           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6903           if (!DCArgPrivTy)
6904             return true;
6905           if (DCArgPrivTy.getValue() == PrivatizableType)
6906             return true;
6907         }
6908       }
6909 
6910       LLVM_DEBUG({
6911         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6912                << " cannot be privatized in the context of its parent ("
6913                << Arg->getParent()->getName()
6914                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6915                   "direct call of ("
6916                << ACS.getInstruction()->getCalledFunction()->getName()
6917                << ").\n[AAPrivatizablePtr] for which the argument "
6918                   "privatization is not compatible.\n";
6919       });
6920       return false;
6921     };
6922 
6923     // Helper to check if the associated argument is used at the given abstract
6924     // call site in a way that is incompatible with the privatization assumed
6925     // here.
6926     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6927       if (ACS.isDirectCall())
6928         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6929       if (ACS.isCallbackCall())
6930         return IsCompatiblePrivArgOfDirectCS(ACS);
6931       return false;
6932     };
6933 
6934     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6935                                 UsedAssumedInformation))
6936       return indicatePessimisticFixpoint();
6937 
6938     return ChangeStatus::UNCHANGED;
6939   }
6940 
6941   /// Given a type to private \p PrivType, collect the constituates (which are
6942   /// used) in \p ReplacementTypes.
6943   static void
6944   identifyReplacementTypes(Type *PrivType,
6945                            SmallVectorImpl<Type *> &ReplacementTypes) {
6946     // TODO: For now we expand the privatization type to the fullest which can
6947     //       lead to dead arguments that need to be removed later.
6948     assert(PrivType && "Expected privatizable type!");
6949 
6950     // Traverse the type, extract constituate types on the outermost level.
6951     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6952       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6953         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6954     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6955       ReplacementTypes.append(PrivArrayType->getNumElements(),
6956                               PrivArrayType->getElementType());
6957     } else {
6958       ReplacementTypes.push_back(PrivType);
6959     }
6960   }
6961 
6962   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6963   /// The values needed are taken from the arguments of \p F starting at
6964   /// position \p ArgNo.
6965   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6966                                    unsigned ArgNo, Instruction &IP) {
6967     assert(PrivType && "Expected privatizable type!");
6968 
6969     IRBuilder<NoFolder> IRB(&IP);
6970     const DataLayout &DL = F.getParent()->getDataLayout();
6971 
6972     // Traverse the type, build GEPs and stores.
6973     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6974       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6975       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6976         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6977         Value *Ptr =
6978             constructPointer(PointeeTy, PrivType, &Base,
6979                              PrivStructLayout->getElementOffset(u), IRB, DL);
6980         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6981       }
6982     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6983       Type *PointeeTy = PrivArrayType->getElementType();
6984       Type *PointeePtrTy = PointeeTy->getPointerTo();
6985       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6986       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6987         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6988                                       u * PointeeTySize, IRB, DL);
6989         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6990       }
6991     } else {
6992       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6993     }
6994   }
6995 
6996   /// Extract values from \p Base according to the type \p PrivType at the
6997   /// call position \p ACS. The values are appended to \p ReplacementValues.
6998   void createReplacementValues(Align Alignment, Type *PrivType,
6999                                AbstractCallSite ACS, Value *Base,
7000                                SmallVectorImpl<Value *> &ReplacementValues) {
7001     assert(Base && "Expected base value!");
7002     assert(PrivType && "Expected privatizable type!");
7003     Instruction *IP = ACS.getInstruction();
7004 
7005     IRBuilder<NoFolder> IRB(IP);
7006     const DataLayout &DL = IP->getModule()->getDataLayout();
7007 
7008     Type *PrivPtrType = PrivType->getPointerTo();
7009     if (Base->getType() != PrivPtrType)
7010       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7011           Base, PrivPtrType, "", ACS.getInstruction());
7012 
7013     // Traverse the type, build GEPs and loads.
7014     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
7015       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
7016       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
7017         Type *PointeeTy = PrivStructType->getElementType(u);
7018         Value *Ptr =
7019             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
7020                              PrivStructLayout->getElementOffset(u), IRB, DL);
7021         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
7022         L->setAlignment(Alignment);
7023         ReplacementValues.push_back(L);
7024       }
7025     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
7026       Type *PointeeTy = PrivArrayType->getElementType();
7027       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
7028       Type *PointeePtrTy = PointeeTy->getPointerTo();
7029       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
7030         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
7031                                       u * PointeeTySize, IRB, DL);
7032         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
7033         L->setAlignment(Alignment);
7034         ReplacementValues.push_back(L);
7035       }
7036     } else {
7037       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
7038       L->setAlignment(Alignment);
7039       ReplacementValues.push_back(L);
7040     }
7041   }
7042 
7043   /// See AbstractAttribute::manifest(...)
7044   ChangeStatus manifest(Attributor &A) override {
7045     if (!PrivatizableType)
7046       return ChangeStatus::UNCHANGED;
7047     assert(PrivatizableType.getValue() && "Expected privatizable type!");
7048 
7049     // Collect all tail calls in the function as we cannot allow new allocas to
7050     // escape into tail recursion.
7051     // TODO: Be smarter about new allocas escaping into tail calls.
7052     SmallVector<CallInst *, 16> TailCalls;
7053     bool UsedAssumedInformation = false;
7054     if (!A.checkForAllInstructions(
7055             [&](Instruction &I) {
7056               CallInst &CI = cast<CallInst>(I);
7057               if (CI.isTailCall())
7058                 TailCalls.push_back(&CI);
7059               return true;
7060             },
7061             *this, {Instruction::Call}, UsedAssumedInformation))
7062       return ChangeStatus::UNCHANGED;
7063 
7064     Argument *Arg = getAssociatedArgument();
7065     // Query AAAlign attribute for alignment of associated argument to
7066     // determine the best alignment of loads.
7067     const auto &AlignAA =
7068         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7069 
7070     // Callback to repair the associated function. A new alloca is placed at the
7071     // beginning and initialized with the values passed through arguments. The
7072     // new alloca replaces the use of the old pointer argument.
7073     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7074         [=](const Attributor::ArgumentReplacementInfo &ARI,
7075             Function &ReplacementFn, Function::arg_iterator ArgIt) {
7076           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7077           Instruction *IP = &*EntryBB.getFirstInsertionPt();
7078           const DataLayout &DL = IP->getModule()->getDataLayout();
7079           unsigned AS = DL.getAllocaAddrSpace();
7080           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
7081                                            Arg->getName() + ".priv", IP);
7082           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
7083                                ArgIt->getArgNo(), *IP);
7084 
7085           if (AI->getType() != Arg->getType())
7086             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7087                 AI, Arg->getType(), "", IP);
7088           Arg->replaceAllUsesWith(AI);
7089 
7090           for (CallInst *CI : TailCalls)
7091             CI->setTailCall(false);
7092         };
7093 
7094     // Callback to repair a call site of the associated function. The elements
7095     // of the privatizable type are loaded prior to the call and passed to the
7096     // new function version.
7097     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7098         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
7099                       AbstractCallSite ACS,
7100                       SmallVectorImpl<Value *> &NewArgOperands) {
7101           // When no alignment is specified for the load instruction,
7102           // natural alignment is assumed.
7103           createReplacementValues(
7104               AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
7105               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7106               NewArgOperands);
7107         };
7108 
7109     // Collect the types that will replace the privatizable type in the function
7110     // signature.
7111     SmallVector<Type *, 16> ReplacementTypes;
7112     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7113 
7114     // Register a rewrite of the argument.
7115     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7116                                            std::move(FnRepairCB),
7117                                            std::move(ACSRepairCB)))
7118       return ChangeStatus::CHANGED;
7119     return ChangeStatus::UNCHANGED;
7120   }
7121 
7122   /// See AbstractAttribute::trackStatistics()
7123   void trackStatistics() const override {
7124     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7125   }
7126 };
7127 
7128 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7129   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7130       : AAPrivatizablePtrImpl(IRP, A) {}
7131 
7132   /// See AbstractAttribute::initialize(...).
7133   virtual void initialize(Attributor &A) override {
7134     // TODO: We can privatize more than arguments.
7135     indicatePessimisticFixpoint();
7136   }
7137 
7138   ChangeStatus updateImpl(Attributor &A) override {
7139     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7140                      "updateImpl will not be called");
7141   }
7142 
7143   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7144   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
7145     Value *Obj = getUnderlyingObject(&getAssociatedValue());
7146     if (!Obj) {
7147       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7148       return nullptr;
7149     }
7150 
7151     if (auto *AI = dyn_cast<AllocaInst>(Obj))
7152       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7153         if (CI->isOne())
7154           return AI->getAllocatedType();
7155     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7156       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7157           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7158       if (PrivArgAA.isAssumedPrivatizablePtr())
7159         return PrivArgAA.getPrivatizableType();
7160     }
7161 
7162     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7163                          "alloca nor privatizable argument: "
7164                       << *Obj << "!\n");
7165     return nullptr;
7166   }
7167 
7168   /// See AbstractAttribute::trackStatistics()
7169   void trackStatistics() const override {
7170     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7171   }
7172 };
7173 
7174 struct AAPrivatizablePtrCallSiteArgument final
7175     : public AAPrivatizablePtrFloating {
7176   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7177       : AAPrivatizablePtrFloating(IRP, A) {}
7178 
7179   /// See AbstractAttribute::initialize(...).
7180   void initialize(Attributor &A) override {
7181     if (getIRPosition().hasAttr(Attribute::ByVal))
7182       indicateOptimisticFixpoint();
7183   }
7184 
7185   /// See AbstractAttribute::updateImpl(...).
7186   ChangeStatus updateImpl(Attributor &A) override {
7187     PrivatizableType = identifyPrivatizableType(A);
7188     if (!PrivatizableType)
7189       return ChangeStatus::UNCHANGED;
7190     if (!PrivatizableType.getValue())
7191       return indicatePessimisticFixpoint();
7192 
7193     const IRPosition &IRP = getIRPosition();
7194     auto &NoCaptureAA =
7195         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
7196     if (!NoCaptureAA.isAssumedNoCapture()) {
7197       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7198       return indicatePessimisticFixpoint();
7199     }
7200 
7201     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
7202     if (!NoAliasAA.isAssumedNoAlias()) {
7203       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7204       return indicatePessimisticFixpoint();
7205     }
7206 
7207     bool IsKnown;
7208     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7209       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7210       return indicatePessimisticFixpoint();
7211     }
7212 
7213     return ChangeStatus::UNCHANGED;
7214   }
7215 
7216   /// See AbstractAttribute::trackStatistics()
7217   void trackStatistics() const override {
7218     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7219   }
7220 };
7221 
7222 struct AAPrivatizablePtrCallSiteReturned final
7223     : public AAPrivatizablePtrFloating {
7224   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7225       : AAPrivatizablePtrFloating(IRP, A) {}
7226 
7227   /// See AbstractAttribute::initialize(...).
7228   void initialize(Attributor &A) override {
7229     // TODO: We can privatize more than arguments.
7230     indicatePessimisticFixpoint();
7231   }
7232 
7233   /// See AbstractAttribute::trackStatistics()
7234   void trackStatistics() const override {
7235     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7236   }
7237 };
7238 
7239 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7240   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7241       : AAPrivatizablePtrFloating(IRP, A) {}
7242 
7243   /// See AbstractAttribute::initialize(...).
7244   void initialize(Attributor &A) override {
7245     // TODO: We can privatize more than arguments.
7246     indicatePessimisticFixpoint();
7247   }
7248 
7249   /// See AbstractAttribute::trackStatistics()
7250   void trackStatistics() const override {
7251     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7252   }
7253 };
7254 } // namespace
7255 
7256 /// -------------------- Memory Behavior Attributes ----------------------------
7257 /// Includes read-none, read-only, and write-only.
7258 /// ----------------------------------------------------------------------------
7259 namespace {
7260 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7261   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7262       : AAMemoryBehavior(IRP, A) {}
7263 
7264   /// See AbstractAttribute::initialize(...).
7265   void initialize(Attributor &A) override {
7266     intersectAssumedBits(BEST_STATE);
7267     getKnownStateFromValue(getIRPosition(), getState());
7268     AAMemoryBehavior::initialize(A);
7269   }
7270 
7271   /// Return the memory behavior information encoded in the IR for \p IRP.
7272   static void getKnownStateFromValue(const IRPosition &IRP,
7273                                      BitIntegerState &State,
7274                                      bool IgnoreSubsumingPositions = false) {
7275     SmallVector<Attribute, 2> Attrs;
7276     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7277     for (const Attribute &Attr : Attrs) {
7278       switch (Attr.getKindAsEnum()) {
7279       case Attribute::ReadNone:
7280         State.addKnownBits(NO_ACCESSES);
7281         break;
7282       case Attribute::ReadOnly:
7283         State.addKnownBits(NO_WRITES);
7284         break;
7285       case Attribute::WriteOnly:
7286         State.addKnownBits(NO_READS);
7287         break;
7288       default:
7289         llvm_unreachable("Unexpected attribute!");
7290       }
7291     }
7292 
7293     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7294       if (!I->mayReadFromMemory())
7295         State.addKnownBits(NO_READS);
7296       if (!I->mayWriteToMemory())
7297         State.addKnownBits(NO_WRITES);
7298     }
7299   }
7300 
7301   /// See AbstractAttribute::getDeducedAttributes(...).
7302   void getDeducedAttributes(LLVMContext &Ctx,
7303                             SmallVectorImpl<Attribute> &Attrs) const override {
7304     assert(Attrs.size() == 0);
7305     if (isAssumedReadNone())
7306       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7307     else if (isAssumedReadOnly())
7308       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7309     else if (isAssumedWriteOnly())
7310       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7311     assert(Attrs.size() <= 1);
7312   }
7313 
7314   /// See AbstractAttribute::manifest(...).
7315   ChangeStatus manifest(Attributor &A) override {
7316     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7317       return ChangeStatus::UNCHANGED;
7318 
7319     const IRPosition &IRP = getIRPosition();
7320 
7321     // Check if we would improve the existing attributes first.
7322     SmallVector<Attribute, 4> DeducedAttrs;
7323     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7324     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7325           return IRP.hasAttr(Attr.getKindAsEnum(),
7326                              /* IgnoreSubsumingPositions */ true);
7327         }))
7328       return ChangeStatus::UNCHANGED;
7329 
7330     // Clear existing attributes.
7331     IRP.removeAttrs(AttrKinds);
7332 
7333     // Use the generic manifest method.
7334     return IRAttribute::manifest(A);
7335   }
7336 
7337   /// See AbstractState::getAsStr().
7338   const std::string getAsStr() const override {
7339     if (isAssumedReadNone())
7340       return "readnone";
7341     if (isAssumedReadOnly())
7342       return "readonly";
7343     if (isAssumedWriteOnly())
7344       return "writeonly";
7345     return "may-read/write";
7346   }
7347 
7348   /// The set of IR attributes AAMemoryBehavior deals with.
7349   static const Attribute::AttrKind AttrKinds[3];
7350 };
7351 
7352 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7353     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7354 
7355 /// Memory behavior attribute for a floating value.
7356 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7357   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7358       : AAMemoryBehaviorImpl(IRP, A) {}
7359 
7360   /// See AbstractAttribute::updateImpl(...).
7361   ChangeStatus updateImpl(Attributor &A) override;
7362 
7363   /// See AbstractAttribute::trackStatistics()
7364   void trackStatistics() const override {
7365     if (isAssumedReadNone())
7366       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7367     else if (isAssumedReadOnly())
7368       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7369     else if (isAssumedWriteOnly())
7370       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7371   }
7372 
7373 private:
7374   /// Return true if users of \p UserI might access the underlying
7375   /// variable/location described by \p U and should therefore be analyzed.
7376   bool followUsersOfUseIn(Attributor &A, const Use &U,
7377                           const Instruction *UserI);
7378 
7379   /// Update the state according to the effect of use \p U in \p UserI.
7380   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7381 };
7382 
7383 /// Memory behavior attribute for function argument.
7384 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7385   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7386       : AAMemoryBehaviorFloating(IRP, A) {}
7387 
7388   /// See AbstractAttribute::initialize(...).
7389   void initialize(Attributor &A) override {
7390     intersectAssumedBits(BEST_STATE);
7391     const IRPosition &IRP = getIRPosition();
7392     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7393     // can query it when we use has/getAttr. That would allow us to reuse the
7394     // initialize of the base class here.
7395     bool HasByVal =
7396         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7397     getKnownStateFromValue(IRP, getState(),
7398                            /* IgnoreSubsumingPositions */ HasByVal);
7399 
7400     // Initialize the use vector with all direct uses of the associated value.
7401     Argument *Arg = getAssociatedArgument();
7402     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7403       indicatePessimisticFixpoint();
7404   }
7405 
7406   ChangeStatus manifest(Attributor &A) override {
7407     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7408     if (!getAssociatedValue().getType()->isPointerTy())
7409       return ChangeStatus::UNCHANGED;
7410 
7411     // TODO: From readattrs.ll: "inalloca parameters are always
7412     //                           considered written"
7413     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7414       removeKnownBits(NO_WRITES);
7415       removeAssumedBits(NO_WRITES);
7416     }
7417     return AAMemoryBehaviorFloating::manifest(A);
7418   }
7419 
7420   /// See AbstractAttribute::trackStatistics()
7421   void trackStatistics() const override {
7422     if (isAssumedReadNone())
7423       STATS_DECLTRACK_ARG_ATTR(readnone)
7424     else if (isAssumedReadOnly())
7425       STATS_DECLTRACK_ARG_ATTR(readonly)
7426     else if (isAssumedWriteOnly())
7427       STATS_DECLTRACK_ARG_ATTR(writeonly)
7428   }
7429 };
7430 
7431 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7432   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7433       : AAMemoryBehaviorArgument(IRP, A) {}
7434 
7435   /// See AbstractAttribute::initialize(...).
7436   void initialize(Attributor &A) override {
7437     // If we don't have an associated attribute this is either a variadic call
7438     // or an indirect call, either way, nothing to do here.
7439     Argument *Arg = getAssociatedArgument();
7440     if (!Arg) {
7441       indicatePessimisticFixpoint();
7442       return;
7443     }
7444     if (Arg->hasByValAttr()) {
7445       addKnownBits(NO_WRITES);
7446       removeKnownBits(NO_READS);
7447       removeAssumedBits(NO_READS);
7448     }
7449     AAMemoryBehaviorArgument::initialize(A);
7450     if (getAssociatedFunction()->isDeclaration())
7451       indicatePessimisticFixpoint();
7452   }
7453 
7454   /// See AbstractAttribute::updateImpl(...).
7455   ChangeStatus updateImpl(Attributor &A) override {
7456     // TODO: Once we have call site specific value information we can provide
7457     //       call site specific liveness liveness information and then it makes
7458     //       sense to specialize attributes for call sites arguments instead of
7459     //       redirecting requests to the callee argument.
7460     Argument *Arg = getAssociatedArgument();
7461     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7462     auto &ArgAA =
7463         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7464     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7465   }
7466 
7467   /// See AbstractAttribute::trackStatistics()
7468   void trackStatistics() const override {
7469     if (isAssumedReadNone())
7470       STATS_DECLTRACK_CSARG_ATTR(readnone)
7471     else if (isAssumedReadOnly())
7472       STATS_DECLTRACK_CSARG_ATTR(readonly)
7473     else if (isAssumedWriteOnly())
7474       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7475   }
7476 };
7477 
7478 /// Memory behavior attribute for a call site return position.
7479 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7480   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7481       : AAMemoryBehaviorFloating(IRP, A) {}
7482 
7483   /// See AbstractAttribute::initialize(...).
7484   void initialize(Attributor &A) override {
7485     AAMemoryBehaviorImpl::initialize(A);
7486     Function *F = getAssociatedFunction();
7487     if (!F || F->isDeclaration())
7488       indicatePessimisticFixpoint();
7489   }
7490 
7491   /// See AbstractAttribute::manifest(...).
7492   ChangeStatus manifest(Attributor &A) override {
7493     // We do not annotate returned values.
7494     return ChangeStatus::UNCHANGED;
7495   }
7496 
7497   /// See AbstractAttribute::trackStatistics()
7498   void trackStatistics() const override {}
7499 };
7500 
7501 /// An AA to represent the memory behavior function attributes.
7502 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7503   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7504       : AAMemoryBehaviorImpl(IRP, A) {}
7505 
7506   /// See AbstractAttribute::updateImpl(Attributor &A).
7507   virtual ChangeStatus updateImpl(Attributor &A) override;
7508 
7509   /// See AbstractAttribute::manifest(...).
7510   ChangeStatus manifest(Attributor &A) override {
7511     Function &F = cast<Function>(getAnchorValue());
7512     if (isAssumedReadNone()) {
7513       F.removeFnAttr(Attribute::ArgMemOnly);
7514       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7515       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7516     }
7517     return AAMemoryBehaviorImpl::manifest(A);
7518   }
7519 
7520   /// See AbstractAttribute::trackStatistics()
7521   void trackStatistics() const override {
7522     if (isAssumedReadNone())
7523       STATS_DECLTRACK_FN_ATTR(readnone)
7524     else if (isAssumedReadOnly())
7525       STATS_DECLTRACK_FN_ATTR(readonly)
7526     else if (isAssumedWriteOnly())
7527       STATS_DECLTRACK_FN_ATTR(writeonly)
7528   }
7529 };
7530 
7531 /// AAMemoryBehavior attribute for call sites.
7532 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7533   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7534       : AAMemoryBehaviorImpl(IRP, A) {}
7535 
7536   /// See AbstractAttribute::initialize(...).
7537   void initialize(Attributor &A) override {
7538     AAMemoryBehaviorImpl::initialize(A);
7539     Function *F = getAssociatedFunction();
7540     if (!F || F->isDeclaration())
7541       indicatePessimisticFixpoint();
7542   }
7543 
7544   /// See AbstractAttribute::updateImpl(...).
7545   ChangeStatus updateImpl(Attributor &A) override {
7546     // TODO: Once we have call site specific value information we can provide
7547     //       call site specific liveness liveness information and then it makes
7548     //       sense to specialize attributes for call sites arguments instead of
7549     //       redirecting requests to the callee argument.
7550     Function *F = getAssociatedFunction();
7551     const IRPosition &FnPos = IRPosition::function(*F);
7552     auto &FnAA =
7553         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7554     return clampStateAndIndicateChange(getState(), FnAA.getState());
7555   }
7556 
7557   /// See AbstractAttribute::trackStatistics()
7558   void trackStatistics() const override {
7559     if (isAssumedReadNone())
7560       STATS_DECLTRACK_CS_ATTR(readnone)
7561     else if (isAssumedReadOnly())
7562       STATS_DECLTRACK_CS_ATTR(readonly)
7563     else if (isAssumedWriteOnly())
7564       STATS_DECLTRACK_CS_ATTR(writeonly)
7565   }
7566 };
7567 
7568 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7569 
7570   // The current assumed state used to determine a change.
7571   auto AssumedState = getAssumed();
7572 
7573   auto CheckRWInst = [&](Instruction &I) {
7574     // If the instruction has an own memory behavior state, use it to restrict
7575     // the local state. No further analysis is required as the other memory
7576     // state is as optimistic as it gets.
7577     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7578       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7579           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7580       intersectAssumedBits(MemBehaviorAA.getAssumed());
7581       return !isAtFixpoint();
7582     }
7583 
7584     // Remove access kind modifiers if necessary.
7585     if (I.mayReadFromMemory())
7586       removeAssumedBits(NO_READS);
7587     if (I.mayWriteToMemory())
7588       removeAssumedBits(NO_WRITES);
7589     return !isAtFixpoint();
7590   };
7591 
7592   bool UsedAssumedInformation = false;
7593   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7594                                           UsedAssumedInformation))
7595     return indicatePessimisticFixpoint();
7596 
7597   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7598                                         : ChangeStatus::UNCHANGED;
7599 }
7600 
7601 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7602 
7603   const IRPosition &IRP = getIRPosition();
7604   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7605   AAMemoryBehavior::StateType &S = getState();
7606 
7607   // First, check the function scope. We take the known information and we avoid
7608   // work if the assumed information implies the current assumed information for
7609   // this attribute. This is a valid for all but byval arguments.
7610   Argument *Arg = IRP.getAssociatedArgument();
7611   AAMemoryBehavior::base_t FnMemAssumedState =
7612       AAMemoryBehavior::StateType::getWorstState();
7613   if (!Arg || !Arg->hasByValAttr()) {
7614     const auto &FnMemAA =
7615         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7616     FnMemAssumedState = FnMemAA.getAssumed();
7617     S.addKnownBits(FnMemAA.getKnown());
7618     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7619       return ChangeStatus::UNCHANGED;
7620   }
7621 
7622   // The current assumed state used to determine a change.
7623   auto AssumedState = S.getAssumed();
7624 
7625   // Make sure the value is not captured (except through "return"), if
7626   // it is, any information derived would be irrelevant anyway as we cannot
7627   // check the potential aliases introduced by the capture. However, no need
7628   // to fall back to anythign less optimistic than the function state.
7629   const auto &ArgNoCaptureAA =
7630       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7631   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7632     S.intersectAssumedBits(FnMemAssumedState);
7633     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7634                                           : ChangeStatus::UNCHANGED;
7635   }
7636 
7637   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7638   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7639     Instruction *UserI = cast<Instruction>(U.getUser());
7640     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7641                       << " \n");
7642 
7643     // Droppable users, e.g., llvm::assume does not actually perform any action.
7644     if (UserI->isDroppable())
7645       return true;
7646 
7647     // Check if the users of UserI should also be visited.
7648     Follow = followUsersOfUseIn(A, U, UserI);
7649 
7650     // If UserI might touch memory we analyze the use in detail.
7651     if (UserI->mayReadOrWriteMemory())
7652       analyzeUseIn(A, U, UserI);
7653 
7654     return !isAtFixpoint();
7655   };
7656 
7657   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7658     return indicatePessimisticFixpoint();
7659 
7660   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7661                                         : ChangeStatus::UNCHANGED;
7662 }
7663 
7664 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7665                                                   const Instruction *UserI) {
7666   // The loaded value is unrelated to the pointer argument, no need to
7667   // follow the users of the load.
7668   if (isa<LoadInst>(UserI))
7669     return false;
7670 
7671   // By default we follow all uses assuming UserI might leak information on U,
7672   // we have special handling for call sites operands though.
7673   const auto *CB = dyn_cast<CallBase>(UserI);
7674   if (!CB || !CB->isArgOperand(&U))
7675     return true;
7676 
7677   // If the use is a call argument known not to be captured, the users of
7678   // the call do not need to be visited because they have to be unrelated to
7679   // the input. Note that this check is not trivial even though we disallow
7680   // general capturing of the underlying argument. The reason is that the
7681   // call might the argument "through return", which we allow and for which we
7682   // need to check call users.
7683   if (U.get()->getType()->isPointerTy()) {
7684     unsigned ArgNo = CB->getArgOperandNo(&U);
7685     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7686         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7687     return !ArgNoCaptureAA.isAssumedNoCapture();
7688   }
7689 
7690   return true;
7691 }
7692 
7693 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7694                                             const Instruction *UserI) {
7695   assert(UserI->mayReadOrWriteMemory());
7696 
7697   switch (UserI->getOpcode()) {
7698   default:
7699     // TODO: Handle all atomics and other side-effect operations we know of.
7700     break;
7701   case Instruction::Load:
7702     // Loads cause the NO_READS property to disappear.
7703     removeAssumedBits(NO_READS);
7704     return;
7705 
7706   case Instruction::Store:
7707     // Stores cause the NO_WRITES property to disappear if the use is the
7708     // pointer operand. Note that while capturing was taken care of somewhere
7709     // else we need to deal with stores of the value that is not looked through.
7710     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7711       removeAssumedBits(NO_WRITES);
7712     else
7713       indicatePessimisticFixpoint();
7714     return;
7715 
7716   case Instruction::Call:
7717   case Instruction::CallBr:
7718   case Instruction::Invoke: {
7719     // For call sites we look at the argument memory behavior attribute (this
7720     // could be recursive!) in order to restrict our own state.
7721     const auto *CB = cast<CallBase>(UserI);
7722 
7723     // Give up on operand bundles.
7724     if (CB->isBundleOperand(&U)) {
7725       indicatePessimisticFixpoint();
7726       return;
7727     }
7728 
7729     // Calling a function does read the function pointer, maybe write it if the
7730     // function is self-modifying.
7731     if (CB->isCallee(&U)) {
7732       removeAssumedBits(NO_READS);
7733       break;
7734     }
7735 
7736     // Adjust the possible access behavior based on the information on the
7737     // argument.
7738     IRPosition Pos;
7739     if (U.get()->getType()->isPointerTy())
7740       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7741     else
7742       Pos = IRPosition::callsite_function(*CB);
7743     const auto &MemBehaviorAA =
7744         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7745     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7746     // and at least "known".
7747     intersectAssumedBits(MemBehaviorAA.getAssumed());
7748     return;
7749   }
7750   };
7751 
7752   // Generally, look at the "may-properties" and adjust the assumed state if we
7753   // did not trigger special handling before.
7754   if (UserI->mayReadFromMemory())
7755     removeAssumedBits(NO_READS);
7756   if (UserI->mayWriteToMemory())
7757     removeAssumedBits(NO_WRITES);
7758 }
7759 } // namespace
7760 
7761 /// -------------------- Memory Locations Attributes ---------------------------
7762 /// Includes read-none, argmemonly, inaccessiblememonly,
7763 /// inaccessiblememorargmemonly
7764 /// ----------------------------------------------------------------------------
7765 
7766 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7767     AAMemoryLocation::MemoryLocationsKind MLK) {
7768   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7769     return "all memory";
7770   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7771     return "no memory";
7772   std::string S = "memory:";
7773   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7774     S += "stack,";
7775   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7776     S += "constant,";
7777   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7778     S += "internal global,";
7779   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7780     S += "external global,";
7781   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7782     S += "argument,";
7783   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7784     S += "inaccessible,";
7785   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7786     S += "malloced,";
7787   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7788     S += "unknown,";
7789   S.pop_back();
7790   return S;
7791 }
7792 
7793 namespace {
7794 struct AAMemoryLocationImpl : public AAMemoryLocation {
7795 
7796   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7797       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7798     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7799       AccessKind2Accesses[u] = nullptr;
7800   }
7801 
7802   ~AAMemoryLocationImpl() {
7803     // The AccessSets are allocated via a BumpPtrAllocator, we call
7804     // the destructor manually.
7805     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7806       if (AccessKind2Accesses[u])
7807         AccessKind2Accesses[u]->~AccessSet();
7808   }
7809 
7810   /// See AbstractAttribute::initialize(...).
7811   void initialize(Attributor &A) override {
7812     intersectAssumedBits(BEST_STATE);
7813     getKnownStateFromValue(A, getIRPosition(), getState());
7814     AAMemoryLocation::initialize(A);
7815   }
7816 
7817   /// Return the memory behavior information encoded in the IR for \p IRP.
7818   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7819                                      BitIntegerState &State,
7820                                      bool IgnoreSubsumingPositions = false) {
7821     // For internal functions we ignore `argmemonly` and
7822     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7823     // constant propagation. It is unclear if this is the best way but it is
7824     // unlikely this will cause real performance problems. If we are deriving
7825     // attributes for the anchor function we even remove the attribute in
7826     // addition to ignoring it.
7827     bool UseArgMemOnly = true;
7828     Function *AnchorFn = IRP.getAnchorScope();
7829     if (AnchorFn && A.isRunOn(*AnchorFn))
7830       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7831 
7832     SmallVector<Attribute, 2> Attrs;
7833     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7834     for (const Attribute &Attr : Attrs) {
7835       switch (Attr.getKindAsEnum()) {
7836       case Attribute::ReadNone:
7837         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7838         break;
7839       case Attribute::InaccessibleMemOnly:
7840         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7841         break;
7842       case Attribute::ArgMemOnly:
7843         if (UseArgMemOnly)
7844           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7845         else
7846           IRP.removeAttrs({Attribute::ArgMemOnly});
7847         break;
7848       case Attribute::InaccessibleMemOrArgMemOnly:
7849         if (UseArgMemOnly)
7850           State.addKnownBits(inverseLocation(
7851               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7852         else
7853           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7854         break;
7855       default:
7856         llvm_unreachable("Unexpected attribute!");
7857       }
7858     }
7859   }
7860 
7861   /// See AbstractAttribute::getDeducedAttributes(...).
7862   void getDeducedAttributes(LLVMContext &Ctx,
7863                             SmallVectorImpl<Attribute> &Attrs) const override {
7864     assert(Attrs.size() == 0);
7865     if (isAssumedReadNone()) {
7866       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7867     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7868       if (isAssumedInaccessibleMemOnly())
7869         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7870       else if (isAssumedArgMemOnly())
7871         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7872       else if (isAssumedInaccessibleOrArgMemOnly())
7873         Attrs.push_back(
7874             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7875     }
7876     assert(Attrs.size() <= 1);
7877   }
7878 
7879   /// See AbstractAttribute::manifest(...).
7880   ChangeStatus manifest(Attributor &A) override {
7881     const IRPosition &IRP = getIRPosition();
7882 
7883     // Check if we would improve the existing attributes first.
7884     SmallVector<Attribute, 4> DeducedAttrs;
7885     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7886     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7887           return IRP.hasAttr(Attr.getKindAsEnum(),
7888                              /* IgnoreSubsumingPositions */ true);
7889         }))
7890       return ChangeStatus::UNCHANGED;
7891 
7892     // Clear existing attributes.
7893     IRP.removeAttrs(AttrKinds);
7894     if (isAssumedReadNone())
7895       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7896 
7897     // Use the generic manifest method.
7898     return IRAttribute::manifest(A);
7899   }
7900 
7901   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7902   bool checkForAllAccessesToMemoryKind(
7903       function_ref<bool(const Instruction *, const Value *, AccessKind,
7904                         MemoryLocationsKind)>
7905           Pred,
7906       MemoryLocationsKind RequestedMLK) const override {
7907     if (!isValidState())
7908       return false;
7909 
7910     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7911     if (AssumedMLK == NO_LOCATIONS)
7912       return true;
7913 
7914     unsigned Idx = 0;
7915     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7916          CurMLK *= 2, ++Idx) {
7917       if (CurMLK & RequestedMLK)
7918         continue;
7919 
7920       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7921         for (const AccessInfo &AI : *Accesses)
7922           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7923             return false;
7924     }
7925 
7926     return true;
7927   }
7928 
7929   ChangeStatus indicatePessimisticFixpoint() override {
7930     // If we give up and indicate a pessimistic fixpoint this instruction will
7931     // become an access for all potential access kinds:
7932     // TODO: Add pointers for argmemonly and globals to improve the results of
7933     //       checkForAllAccessesToMemoryKind.
7934     bool Changed = false;
7935     MemoryLocationsKind KnownMLK = getKnown();
7936     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7937     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7938       if (!(CurMLK & KnownMLK))
7939         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7940                                   getAccessKindFromInst(I));
7941     return AAMemoryLocation::indicatePessimisticFixpoint();
7942   }
7943 
7944 protected:
7945   /// Helper struct to tie together an instruction that has a read or write
7946   /// effect with the pointer it accesses (if any).
7947   struct AccessInfo {
7948 
7949     /// The instruction that caused the access.
7950     const Instruction *I;
7951 
7952     /// The base pointer that is accessed, or null if unknown.
7953     const Value *Ptr;
7954 
7955     /// The kind of access (read/write/read+write).
7956     AccessKind Kind;
7957 
7958     bool operator==(const AccessInfo &RHS) const {
7959       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7960     }
7961     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7962       if (LHS.I != RHS.I)
7963         return LHS.I < RHS.I;
7964       if (LHS.Ptr != RHS.Ptr)
7965         return LHS.Ptr < RHS.Ptr;
7966       if (LHS.Kind != RHS.Kind)
7967         return LHS.Kind < RHS.Kind;
7968       return false;
7969     }
7970   };
7971 
7972   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7973   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7974   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7975   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7976 
7977   /// Categorize the pointer arguments of CB that might access memory in
7978   /// AccessedLoc and update the state and access map accordingly.
7979   void
7980   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7981                                      AAMemoryLocation::StateType &AccessedLocs,
7982                                      bool &Changed);
7983 
7984   /// Return the kind(s) of location that may be accessed by \p V.
7985   AAMemoryLocation::MemoryLocationsKind
7986   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7987 
7988   /// Return the access kind as determined by \p I.
7989   AccessKind getAccessKindFromInst(const Instruction *I) {
7990     AccessKind AK = READ_WRITE;
7991     if (I) {
7992       AK = I->mayReadFromMemory() ? READ : NONE;
7993       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7994     }
7995     return AK;
7996   }
7997 
7998   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7999   /// an access of kind \p AK to a \p MLK memory location with the access
8000   /// pointer \p Ptr.
8001   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
8002                                  MemoryLocationsKind MLK, const Instruction *I,
8003                                  const Value *Ptr, bool &Changed,
8004                                  AccessKind AK = READ_WRITE) {
8005 
8006     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
8007     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
8008     if (!Accesses)
8009       Accesses = new (Allocator) AccessSet();
8010     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
8011     State.removeAssumedBits(MLK);
8012   }
8013 
8014   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
8015   /// arguments, and update the state and access map accordingly.
8016   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
8017                           AAMemoryLocation::StateType &State, bool &Changed);
8018 
8019   /// Used to allocate access sets.
8020   BumpPtrAllocator &Allocator;
8021 
8022   /// The set of IR attributes AAMemoryLocation deals with.
8023   static const Attribute::AttrKind AttrKinds[4];
8024 };
8025 
8026 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
8027     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
8028     Attribute::InaccessibleMemOrArgMemOnly};
8029 
8030 void AAMemoryLocationImpl::categorizePtrValue(
8031     Attributor &A, const Instruction &I, const Value &Ptr,
8032     AAMemoryLocation::StateType &State, bool &Changed) {
8033   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
8034                     << Ptr << " ["
8035                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
8036 
8037   SmallVector<Value *, 8> Objects;
8038   bool UsedAssumedInformation = false;
8039   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
8040                                        UsedAssumedInformation,
8041                                        AA::Intraprocedural)) {
8042     LLVM_DEBUG(
8043         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8044     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8045                               getAccessKindFromInst(&I));
8046     return;
8047   }
8048 
8049   for (Value *Obj : Objects) {
8050     // TODO: recognize the TBAA used for constant accesses.
8051     MemoryLocationsKind MLK = NO_LOCATIONS;
8052     if (isa<UndefValue>(Obj))
8053       continue;
8054     if (isa<Argument>(Obj)) {
8055       // TODO: For now we do not treat byval arguments as local copies performed
8056       // on the call edge, though, we should. To make that happen we need to
8057       // teach various passes, e.g., DSE, about the copy effect of a byval. That
8058       // would also allow us to mark functions only accessing byval arguments as
8059       // readnone again, atguably their acceses have no effect outside of the
8060       // function, like accesses to allocas.
8061       MLK = NO_ARGUMENT_MEM;
8062     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
8063       // Reading constant memory is not treated as a read "effect" by the
8064       // function attr pass so we won't neither. Constants defined by TBAA are
8065       // similar. (We know we do not write it because it is constant.)
8066       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8067         if (GVar->isConstant())
8068           continue;
8069 
8070       if (GV->hasLocalLinkage())
8071         MLK = NO_GLOBAL_INTERNAL_MEM;
8072       else
8073         MLK = NO_GLOBAL_EXTERNAL_MEM;
8074     } else if (isa<ConstantPointerNull>(Obj) &&
8075                !NullPointerIsDefined(getAssociatedFunction(),
8076                                      Ptr.getType()->getPointerAddressSpace())) {
8077       continue;
8078     } else if (isa<AllocaInst>(Obj)) {
8079       MLK = NO_LOCAL_MEM;
8080     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
8081       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
8082           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
8083       if (NoAliasAA.isAssumedNoAlias())
8084         MLK = NO_MALLOCED_MEM;
8085       else
8086         MLK = NO_UNKOWN_MEM;
8087     } else {
8088       MLK = NO_UNKOWN_MEM;
8089     }
8090 
8091     assert(MLK != NO_LOCATIONS && "No location specified!");
8092     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8093                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
8094                       << "\n");
8095     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
8096                               getAccessKindFromInst(&I));
8097   }
8098 
8099   LLVM_DEBUG(
8100       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8101              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8102 }
8103 
8104 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8105     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8106     bool &Changed) {
8107   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8108 
8109     // Skip non-pointer arguments.
8110     const Value *ArgOp = CB.getArgOperand(ArgNo);
8111     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8112       continue;
8113 
8114     // Skip readnone arguments.
8115     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8116     const auto &ArgOpMemLocationAA =
8117         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8118 
8119     if (ArgOpMemLocationAA.isAssumedReadNone())
8120       continue;
8121 
8122     // Categorize potentially accessed pointer arguments as if there was an
8123     // access instruction with them as pointer.
8124     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8125   }
8126 }
8127 
8128 AAMemoryLocation::MemoryLocationsKind
8129 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8130                                                   bool &Changed) {
8131   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8132                     << I << "\n");
8133 
8134   AAMemoryLocation::StateType AccessedLocs;
8135   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8136 
8137   if (auto *CB = dyn_cast<CallBase>(&I)) {
8138 
8139     // First check if we assume any memory is access is visible.
8140     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8141         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8142     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8143                       << " [" << CBMemLocationAA << "]\n");
8144 
8145     if (CBMemLocationAA.isAssumedReadNone())
8146       return NO_LOCATIONS;
8147 
8148     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
8149       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8150                                 Changed, getAccessKindFromInst(&I));
8151       return AccessedLocs.getAssumed();
8152     }
8153 
8154     uint32_t CBAssumedNotAccessedLocs =
8155         CBMemLocationAA.getAssumedNotAccessedLocation();
8156 
8157     // Set the argmemonly and global bit as we handle them separately below.
8158     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8159         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8160 
8161     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8162       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8163         continue;
8164       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8165                                 getAccessKindFromInst(&I));
8166     }
8167 
8168     // Now handle global memory if it might be accessed. This is slightly tricky
8169     // as NO_GLOBAL_MEM has multiple bits set.
8170     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8171     if (HasGlobalAccesses) {
8172       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8173                             AccessKind Kind, MemoryLocationsKind MLK) {
8174         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8175                                   getAccessKindFromInst(&I));
8176         return true;
8177       };
8178       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8179               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8180         return AccessedLocs.getWorstState();
8181     }
8182 
8183     LLVM_DEBUG(
8184         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8185                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8186 
8187     // Now handle argument memory if it might be accessed.
8188     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8189     if (HasArgAccesses)
8190       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8191 
8192     LLVM_DEBUG(
8193         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8194                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8195 
8196     return AccessedLocs.getAssumed();
8197   }
8198 
8199   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8200     LLVM_DEBUG(
8201         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8202                << I << " [" << *Ptr << "]\n");
8203     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8204     return AccessedLocs.getAssumed();
8205   }
8206 
8207   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8208                     << I << "\n");
8209   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8210                             getAccessKindFromInst(&I));
8211   return AccessedLocs.getAssumed();
8212 }
8213 
8214 /// An AA to represent the memory behavior function attributes.
8215 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8216   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8217       : AAMemoryLocationImpl(IRP, A) {}
8218 
8219   /// See AbstractAttribute::updateImpl(Attributor &A).
8220   virtual ChangeStatus updateImpl(Attributor &A) override {
8221 
8222     const auto &MemBehaviorAA =
8223         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8224     if (MemBehaviorAA.isAssumedReadNone()) {
8225       if (MemBehaviorAA.isKnownReadNone())
8226         return indicateOptimisticFixpoint();
8227       assert(isAssumedReadNone() &&
8228              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8229       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8230       return ChangeStatus::UNCHANGED;
8231     }
8232 
8233     // The current assumed state used to determine a change.
8234     auto AssumedState = getAssumed();
8235     bool Changed = false;
8236 
8237     auto CheckRWInst = [&](Instruction &I) {
8238       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8239       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8240                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8241       removeAssumedBits(inverseLocation(MLK, false, false));
8242       // Stop once only the valid bit set in the *not assumed location*, thus
8243       // once we don't actually exclude any memory locations in the state.
8244       return getAssumedNotAccessedLocation() != VALID_STATE;
8245     };
8246 
8247     bool UsedAssumedInformation = false;
8248     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8249                                             UsedAssumedInformation))
8250       return indicatePessimisticFixpoint();
8251 
8252     Changed |= AssumedState != getAssumed();
8253     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8254   }
8255 
8256   /// See AbstractAttribute::trackStatistics()
8257   void trackStatistics() const override {
8258     if (isAssumedReadNone())
8259       STATS_DECLTRACK_FN_ATTR(readnone)
8260     else if (isAssumedArgMemOnly())
8261       STATS_DECLTRACK_FN_ATTR(argmemonly)
8262     else if (isAssumedInaccessibleMemOnly())
8263       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8264     else if (isAssumedInaccessibleOrArgMemOnly())
8265       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8266   }
8267 };
8268 
8269 /// AAMemoryLocation attribute for call sites.
8270 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8271   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8272       : AAMemoryLocationImpl(IRP, A) {}
8273 
8274   /// See AbstractAttribute::initialize(...).
8275   void initialize(Attributor &A) override {
8276     AAMemoryLocationImpl::initialize(A);
8277     Function *F = getAssociatedFunction();
8278     if (!F || F->isDeclaration())
8279       indicatePessimisticFixpoint();
8280   }
8281 
8282   /// See AbstractAttribute::updateImpl(...).
8283   ChangeStatus updateImpl(Attributor &A) override {
8284     // TODO: Once we have call site specific value information we can provide
8285     //       call site specific liveness liveness information and then it makes
8286     //       sense to specialize attributes for call sites arguments instead of
8287     //       redirecting requests to the callee argument.
8288     Function *F = getAssociatedFunction();
8289     const IRPosition &FnPos = IRPosition::function(*F);
8290     auto &FnAA =
8291         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8292     bool Changed = false;
8293     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8294                           AccessKind Kind, MemoryLocationsKind MLK) {
8295       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8296                                 getAccessKindFromInst(I));
8297       return true;
8298     };
8299     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8300       return indicatePessimisticFixpoint();
8301     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8302   }
8303 
8304   /// See AbstractAttribute::trackStatistics()
8305   void trackStatistics() const override {
8306     if (isAssumedReadNone())
8307       STATS_DECLTRACK_CS_ATTR(readnone)
8308   }
8309 };
8310 } // namespace
8311 
8312 /// ------------------ Value Constant Range Attribute -------------------------
8313 
8314 namespace {
8315 struct AAValueConstantRangeImpl : AAValueConstantRange {
8316   using StateType = IntegerRangeState;
8317   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8318       : AAValueConstantRange(IRP, A) {}
8319 
8320   /// See AbstractAttribute::initialize(..).
8321   void initialize(Attributor &A) override {
8322     if (A.hasSimplificationCallback(getIRPosition())) {
8323       indicatePessimisticFixpoint();
8324       return;
8325     }
8326 
8327     // Intersect a range given by SCEV.
8328     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8329 
8330     // Intersect a range given by LVI.
8331     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8332   }
8333 
8334   /// See AbstractAttribute::getAsStr().
8335   const std::string getAsStr() const override {
8336     std::string Str;
8337     llvm::raw_string_ostream OS(Str);
8338     OS << "range(" << getBitWidth() << ")<";
8339     getKnown().print(OS);
8340     OS << " / ";
8341     getAssumed().print(OS);
8342     OS << ">";
8343     return OS.str();
8344   }
8345 
8346   /// Helper function to get a SCEV expr for the associated value at program
8347   /// point \p I.
8348   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8349     if (!getAnchorScope())
8350       return nullptr;
8351 
8352     ScalarEvolution *SE =
8353         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8354             *getAnchorScope());
8355 
8356     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8357         *getAnchorScope());
8358 
8359     if (!SE || !LI)
8360       return nullptr;
8361 
8362     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8363     if (!I)
8364       return S;
8365 
8366     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8367   }
8368 
8369   /// Helper function to get a range from SCEV for the associated value at
8370   /// program point \p I.
8371   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8372                                          const Instruction *I = nullptr) const {
8373     if (!getAnchorScope())
8374       return getWorstState(getBitWidth());
8375 
8376     ScalarEvolution *SE =
8377         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8378             *getAnchorScope());
8379 
8380     const SCEV *S = getSCEV(A, I);
8381     if (!SE || !S)
8382       return getWorstState(getBitWidth());
8383 
8384     return SE->getUnsignedRange(S);
8385   }
8386 
8387   /// Helper function to get a range from LVI for the associated value at
8388   /// program point \p I.
8389   ConstantRange
8390   getConstantRangeFromLVI(Attributor &A,
8391                           const Instruction *CtxI = nullptr) const {
8392     if (!getAnchorScope())
8393       return getWorstState(getBitWidth());
8394 
8395     LazyValueInfo *LVI =
8396         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8397             *getAnchorScope());
8398 
8399     if (!LVI || !CtxI)
8400       return getWorstState(getBitWidth());
8401     return LVI->getConstantRange(&getAssociatedValue(),
8402                                  const_cast<Instruction *>(CtxI));
8403   }
8404 
8405   /// Return true if \p CtxI is valid for querying outside analyses.
8406   /// This basically makes sure we do not ask intra-procedural analysis
8407   /// about a context in the wrong function or a context that violates
8408   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8409   /// if the original context of this AA is OK or should be considered invalid.
8410   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8411                                                const Instruction *CtxI,
8412                                                bool AllowAACtxI) const {
8413     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8414       return false;
8415 
8416     // Our context might be in a different function, neither intra-procedural
8417     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8418     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8419       return false;
8420 
8421     // If the context is not dominated by the value there are paths to the
8422     // context that do not define the value. This cannot be handled by
8423     // LazyValueInfo so we need to bail.
8424     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8425       InformationCache &InfoCache = A.getInfoCache();
8426       const DominatorTree *DT =
8427           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8428               *I->getFunction());
8429       return DT && DT->dominates(I, CtxI);
8430     }
8431 
8432     return true;
8433   }
8434 
8435   /// See AAValueConstantRange::getKnownConstantRange(..).
8436   ConstantRange
8437   getKnownConstantRange(Attributor &A,
8438                         const Instruction *CtxI = nullptr) const override {
8439     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8440                                                  /* AllowAACtxI */ false))
8441       return getKnown();
8442 
8443     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8444     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8445     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8446   }
8447 
8448   /// See AAValueConstantRange::getAssumedConstantRange(..).
8449   ConstantRange
8450   getAssumedConstantRange(Attributor &A,
8451                           const Instruction *CtxI = nullptr) const override {
8452     // TODO: Make SCEV use Attributor assumption.
8453     //       We may be able to bound a variable range via assumptions in
8454     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8455     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8456     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8457                                                  /* AllowAACtxI */ false))
8458       return getAssumed();
8459 
8460     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8461     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8462     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8463   }
8464 
8465   /// Helper function to create MDNode for range metadata.
8466   static MDNode *
8467   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8468                             const ConstantRange &AssumedConstantRange) {
8469     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8470                                   Ty, AssumedConstantRange.getLower())),
8471                               ConstantAsMetadata::get(ConstantInt::get(
8472                                   Ty, AssumedConstantRange.getUpper()))};
8473     return MDNode::get(Ctx, LowAndHigh);
8474   }
8475 
8476   /// Return true if \p Assumed is included in \p KnownRanges.
8477   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8478 
8479     if (Assumed.isFullSet())
8480       return false;
8481 
8482     if (!KnownRanges)
8483       return true;
8484 
8485     // If multiple ranges are annotated in IR, we give up to annotate assumed
8486     // range for now.
8487 
8488     // TODO:  If there exists a known range which containts assumed range, we
8489     // can say assumed range is better.
8490     if (KnownRanges->getNumOperands() > 2)
8491       return false;
8492 
8493     ConstantInt *Lower =
8494         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8495     ConstantInt *Upper =
8496         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8497 
8498     ConstantRange Known(Lower->getValue(), Upper->getValue());
8499     return Known.contains(Assumed) && Known != Assumed;
8500   }
8501 
8502   /// Helper function to set range metadata.
8503   static bool
8504   setRangeMetadataIfisBetterRange(Instruction *I,
8505                                   const ConstantRange &AssumedConstantRange) {
8506     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8507     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8508       if (!AssumedConstantRange.isEmptySet()) {
8509         I->setMetadata(LLVMContext::MD_range,
8510                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8511                                                  AssumedConstantRange));
8512         return true;
8513       }
8514     }
8515     return false;
8516   }
8517 
8518   /// See AbstractAttribute::manifest()
8519   ChangeStatus manifest(Attributor &A) override {
8520     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8521     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8522     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8523 
8524     auto &V = getAssociatedValue();
8525     if (!AssumedConstantRange.isEmptySet() &&
8526         !AssumedConstantRange.isSingleElement()) {
8527       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8528         assert(I == getCtxI() && "Should not annotate an instruction which is "
8529                                  "not the context instruction");
8530         if (isa<CallInst>(I) || isa<LoadInst>(I))
8531           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8532             Changed = ChangeStatus::CHANGED;
8533       }
8534     }
8535 
8536     return Changed;
8537   }
8538 };
8539 
8540 struct AAValueConstantRangeArgument final
8541     : AAArgumentFromCallSiteArguments<
8542           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8543           true /* BridgeCallBaseContext */> {
8544   using Base = AAArgumentFromCallSiteArguments<
8545       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8546       true /* BridgeCallBaseContext */>;
8547   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8548       : Base(IRP, A) {}
8549 
8550   /// See AbstractAttribute::initialize(..).
8551   void initialize(Attributor &A) override {
8552     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8553       indicatePessimisticFixpoint();
8554     } else {
8555       Base::initialize(A);
8556     }
8557   }
8558 
8559   /// See AbstractAttribute::trackStatistics()
8560   void trackStatistics() const override {
8561     STATS_DECLTRACK_ARG_ATTR(value_range)
8562   }
8563 };
8564 
8565 struct AAValueConstantRangeReturned
8566     : AAReturnedFromReturnedValues<AAValueConstantRange,
8567                                    AAValueConstantRangeImpl,
8568                                    AAValueConstantRangeImpl::StateType,
8569                                    /* PropogateCallBaseContext */ true> {
8570   using Base =
8571       AAReturnedFromReturnedValues<AAValueConstantRange,
8572                                    AAValueConstantRangeImpl,
8573                                    AAValueConstantRangeImpl::StateType,
8574                                    /* PropogateCallBaseContext */ true>;
8575   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8576       : Base(IRP, A) {}
8577 
8578   /// See AbstractAttribute::initialize(...).
8579   void initialize(Attributor &A) override {}
8580 
8581   /// See AbstractAttribute::trackStatistics()
8582   void trackStatistics() const override {
8583     STATS_DECLTRACK_FNRET_ATTR(value_range)
8584   }
8585 };
8586 
8587 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8588   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8589       : AAValueConstantRangeImpl(IRP, A) {}
8590 
8591   /// See AbstractAttribute::initialize(...).
8592   void initialize(Attributor &A) override {
8593     AAValueConstantRangeImpl::initialize(A);
8594     if (isAtFixpoint())
8595       return;
8596 
8597     Value &V = getAssociatedValue();
8598 
8599     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8600       unionAssumed(ConstantRange(C->getValue()));
8601       indicateOptimisticFixpoint();
8602       return;
8603     }
8604 
8605     if (isa<UndefValue>(&V)) {
8606       // Collapse the undef state to 0.
8607       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8608       indicateOptimisticFixpoint();
8609       return;
8610     }
8611 
8612     if (isa<CallBase>(&V))
8613       return;
8614 
8615     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8616       return;
8617 
8618     // If it is a load instruction with range metadata, use it.
8619     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8620       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8621         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8622         return;
8623       }
8624 
8625     // We can work with PHI and select instruction as we traverse their operands
8626     // during update.
8627     if (isa<SelectInst>(V) || isa<PHINode>(V))
8628       return;
8629 
8630     // Otherwise we give up.
8631     indicatePessimisticFixpoint();
8632 
8633     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8634                       << getAssociatedValue() << "\n");
8635   }
8636 
8637   bool calculateBinaryOperator(
8638       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8639       const Instruction *CtxI,
8640       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8641     Value *LHS = BinOp->getOperand(0);
8642     Value *RHS = BinOp->getOperand(1);
8643 
8644     // Simplify the operands first.
8645     bool UsedAssumedInformation = false;
8646     const auto &SimplifiedLHS =
8647         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8648                                *this, UsedAssumedInformation);
8649     if (!SimplifiedLHS)
8650       return true;
8651     if (!SimplifiedLHS.getValue())
8652       return false;
8653     LHS = *SimplifiedLHS;
8654 
8655     const auto &SimplifiedRHS =
8656         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8657                                *this, UsedAssumedInformation);
8658     if (!SimplifiedRHS)
8659       return true;
8660     if (!SimplifiedRHS.getValue())
8661       return false;
8662     RHS = *SimplifiedRHS;
8663 
8664     // TODO: Allow non integers as well.
8665     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8666       return false;
8667 
8668     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8669         *this, IRPosition::value(*LHS, getCallBaseContext()),
8670         DepClassTy::REQUIRED);
8671     QuerriedAAs.push_back(&LHSAA);
8672     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8673 
8674     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8675         *this, IRPosition::value(*RHS, getCallBaseContext()),
8676         DepClassTy::REQUIRED);
8677     QuerriedAAs.push_back(&RHSAA);
8678     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8679 
8680     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8681 
8682     T.unionAssumed(AssumedRange);
8683 
8684     // TODO: Track a known state too.
8685 
8686     return T.isValidState();
8687   }
8688 
8689   bool calculateCastInst(
8690       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8691       const Instruction *CtxI,
8692       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8693     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8694     // TODO: Allow non integers as well.
8695     Value *OpV = CastI->getOperand(0);
8696 
8697     // Simplify the operand first.
8698     bool UsedAssumedInformation = false;
8699     const auto &SimplifiedOpV =
8700         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8701                                *this, UsedAssumedInformation);
8702     if (!SimplifiedOpV)
8703       return true;
8704     if (!SimplifiedOpV.getValue())
8705       return false;
8706     OpV = *SimplifiedOpV;
8707 
8708     if (!OpV->getType()->isIntegerTy())
8709       return false;
8710 
8711     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8712         *this, IRPosition::value(*OpV, getCallBaseContext()),
8713         DepClassTy::REQUIRED);
8714     QuerriedAAs.push_back(&OpAA);
8715     T.unionAssumed(
8716         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8717     return T.isValidState();
8718   }
8719 
8720   bool
8721   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8722                    const Instruction *CtxI,
8723                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8724     Value *LHS = CmpI->getOperand(0);
8725     Value *RHS = CmpI->getOperand(1);
8726 
8727     // Simplify the operands first.
8728     bool UsedAssumedInformation = false;
8729     const auto &SimplifiedLHS =
8730         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8731                                *this, UsedAssumedInformation);
8732     if (!SimplifiedLHS)
8733       return true;
8734     if (!SimplifiedLHS.getValue())
8735       return false;
8736     LHS = *SimplifiedLHS;
8737 
8738     const auto &SimplifiedRHS =
8739         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8740                                *this, UsedAssumedInformation);
8741     if (!SimplifiedRHS)
8742       return true;
8743     if (!SimplifiedRHS.getValue())
8744       return false;
8745     RHS = *SimplifiedRHS;
8746 
8747     // TODO: Allow non integers as well.
8748     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8749       return false;
8750 
8751     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8752         *this, IRPosition::value(*LHS, getCallBaseContext()),
8753         DepClassTy::REQUIRED);
8754     QuerriedAAs.push_back(&LHSAA);
8755     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8756         *this, IRPosition::value(*RHS, getCallBaseContext()),
8757         DepClassTy::REQUIRED);
8758     QuerriedAAs.push_back(&RHSAA);
8759     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8760     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8761 
8762     // If one of them is empty set, we can't decide.
8763     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8764       return true;
8765 
8766     bool MustTrue = false, MustFalse = false;
8767 
8768     auto AllowedRegion =
8769         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8770 
8771     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8772       MustFalse = true;
8773 
8774     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8775       MustTrue = true;
8776 
8777     assert((!MustTrue || !MustFalse) &&
8778            "Either MustTrue or MustFalse should be false!");
8779 
8780     if (MustTrue)
8781       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8782     else if (MustFalse)
8783       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8784     else
8785       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8786 
8787     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8788                       << " " << RHSAA << "\n");
8789 
8790     // TODO: Track a known state too.
8791     return T.isValidState();
8792   }
8793 
8794   /// See AbstractAttribute::updateImpl(...).
8795   ChangeStatus updateImpl(Attributor &A) override {
8796     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8797                             IntegerRangeState &T, bool Stripped) -> bool {
8798       Instruction *I = dyn_cast<Instruction>(&V);
8799       if (!I || isa<CallBase>(I)) {
8800 
8801         // Simplify the operand first.
8802         bool UsedAssumedInformation = false;
8803         const auto &SimplifiedOpV =
8804             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8805                                    *this, UsedAssumedInformation);
8806         if (!SimplifiedOpV)
8807           return true;
8808         if (!SimplifiedOpV.getValue())
8809           return false;
8810         Value *VPtr = *SimplifiedOpV;
8811 
8812         // If the value is not instruction, we query AA to Attributor.
8813         const auto &AA = A.getAAFor<AAValueConstantRange>(
8814             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8815             DepClassTy::REQUIRED);
8816 
8817         // Clamp operator is not used to utilize a program point CtxI.
8818         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8819 
8820         return T.isValidState();
8821       }
8822 
8823       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8824       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8825         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8826           return false;
8827       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8828         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8829           return false;
8830       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8831         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8832           return false;
8833       } else {
8834         // Give up with other instructions.
8835         // TODO: Add other instructions
8836 
8837         T.indicatePessimisticFixpoint();
8838         return false;
8839       }
8840 
8841       // Catch circular reasoning in a pessimistic way for now.
8842       // TODO: Check how the range evolves and if we stripped anything, see also
8843       //       AADereferenceable or AAAlign for similar situations.
8844       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8845         if (QueriedAA != this)
8846           continue;
8847         // If we are in a stady state we do not need to worry.
8848         if (T.getAssumed() == getState().getAssumed())
8849           continue;
8850         T.indicatePessimisticFixpoint();
8851       }
8852 
8853       return T.isValidState();
8854     };
8855 
8856     IntegerRangeState T(getBitWidth());
8857 
8858     bool UsedAssumedInformation = false;
8859     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8860                                                   VisitValueCB, getCtxI(),
8861                                                   UsedAssumedInformation,
8862                                                   /* UseValueSimplify */ false))
8863       return indicatePessimisticFixpoint();
8864 
8865     // Ensure that long def-use chains can't cause circular reasoning either by
8866     // introducing a cutoff below.
8867     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8868       return ChangeStatus::UNCHANGED;
8869     if (++NumChanges > MaxNumChanges) {
8870       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8871                         << " but only " << MaxNumChanges
8872                         << " are allowed to avoid cyclic reasoning.");
8873       return indicatePessimisticFixpoint();
8874     }
8875     return ChangeStatus::CHANGED;
8876   }
8877 
8878   /// See AbstractAttribute::trackStatistics()
8879   void trackStatistics() const override {
8880     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8881   }
8882 
8883   /// Tracker to bail after too many widening steps of the constant range.
8884   int NumChanges = 0;
8885 
8886   /// Upper bound for the number of allowed changes (=widening steps) for the
8887   /// constant range before we give up.
8888   static constexpr int MaxNumChanges = 5;
8889 };
8890 
8891 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8892   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8893       : AAValueConstantRangeImpl(IRP, A) {}
8894 
8895   /// See AbstractAttribute::initialize(...).
8896   ChangeStatus updateImpl(Attributor &A) override {
8897     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8898                      "not be called");
8899   }
8900 
8901   /// See AbstractAttribute::trackStatistics()
8902   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8903 };
8904 
8905 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8906   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8907       : AAValueConstantRangeFunction(IRP, A) {}
8908 
8909   /// See AbstractAttribute::trackStatistics()
8910   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8911 };
8912 
8913 struct AAValueConstantRangeCallSiteReturned
8914     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8915                                      AAValueConstantRangeImpl,
8916                                      AAValueConstantRangeImpl::StateType,
8917                                      /* IntroduceCallBaseContext */ true> {
8918   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8919       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8920                                        AAValueConstantRangeImpl,
8921                                        AAValueConstantRangeImpl::StateType,
8922                                        /* IntroduceCallBaseContext */ true>(IRP,
8923                                                                             A) {
8924   }
8925 
8926   /// See AbstractAttribute::initialize(...).
8927   void initialize(Attributor &A) override {
8928     // If it is a load instruction with range metadata, use the metadata.
8929     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8930       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8931         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8932 
8933     AAValueConstantRangeImpl::initialize(A);
8934   }
8935 
8936   /// See AbstractAttribute::trackStatistics()
8937   void trackStatistics() const override {
8938     STATS_DECLTRACK_CSRET_ATTR(value_range)
8939   }
8940 };
8941 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8942   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8943       : AAValueConstantRangeFloating(IRP, A) {}
8944 
8945   /// See AbstractAttribute::manifest()
8946   ChangeStatus manifest(Attributor &A) override {
8947     return ChangeStatus::UNCHANGED;
8948   }
8949 
8950   /// See AbstractAttribute::trackStatistics()
8951   void trackStatistics() const override {
8952     STATS_DECLTRACK_CSARG_ATTR(value_range)
8953   }
8954 };
8955 } // namespace
8956 
8957 /// ------------------ Potential Values Attribute -------------------------
8958 
8959 namespace {
8960 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8961   using StateType = PotentialConstantIntValuesState;
8962 
8963   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8964       : AAPotentialConstantValues(IRP, A) {}
8965 
8966   /// See AbstractAttribute::initialize(..).
8967   void initialize(Attributor &A) override {
8968     if (A.hasSimplificationCallback(getIRPosition()))
8969       indicatePessimisticFixpoint();
8970     else
8971       AAPotentialConstantValues::initialize(A);
8972   }
8973 
8974   /// See AbstractAttribute::getAsStr().
8975   const std::string getAsStr() const override {
8976     std::string Str;
8977     llvm::raw_string_ostream OS(Str);
8978     OS << getState();
8979     return OS.str();
8980   }
8981 
8982   /// See AbstractAttribute::updateImpl(...).
8983   ChangeStatus updateImpl(Attributor &A) override {
8984     return indicatePessimisticFixpoint();
8985   }
8986 };
8987 
8988 struct AAPotentialConstantValuesArgument final
8989     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8990                                       AAPotentialConstantValuesImpl,
8991                                       PotentialConstantIntValuesState> {
8992   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8993                                                AAPotentialConstantValuesImpl,
8994                                                PotentialConstantIntValuesState>;
8995   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
8996       : Base(IRP, A) {}
8997 
8998   /// See AbstractAttribute::initialize(..).
8999   void initialize(Attributor &A) override {
9000     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
9001       indicatePessimisticFixpoint();
9002     } else {
9003       Base::initialize(A);
9004     }
9005   }
9006 
9007   /// See AbstractAttribute::trackStatistics()
9008   void trackStatistics() const override {
9009     STATS_DECLTRACK_ARG_ATTR(potential_values)
9010   }
9011 };
9012 
9013 struct AAPotentialConstantValuesReturned
9014     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
9015                                    AAPotentialConstantValuesImpl> {
9016   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
9017                                             AAPotentialConstantValuesImpl>;
9018   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
9019       : Base(IRP, A) {}
9020 
9021   /// See AbstractAttribute::trackStatistics()
9022   void trackStatistics() const override {
9023     STATS_DECLTRACK_FNRET_ATTR(potential_values)
9024   }
9025 };
9026 
9027 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
9028   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
9029       : AAPotentialConstantValuesImpl(IRP, A) {}
9030 
9031   /// See AbstractAttribute::initialize(..).
9032   void initialize(Attributor &A) override {
9033     AAPotentialConstantValuesImpl::initialize(A);
9034     if (isAtFixpoint())
9035       return;
9036 
9037     Value &V = getAssociatedValue();
9038 
9039     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9040       unionAssumed(C->getValue());
9041       indicateOptimisticFixpoint();
9042       return;
9043     }
9044 
9045     if (isa<UndefValue>(&V)) {
9046       unionAssumedWithUndef();
9047       indicateOptimisticFixpoint();
9048       return;
9049     }
9050 
9051     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9052       return;
9053 
9054     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9055       return;
9056 
9057     indicatePessimisticFixpoint();
9058 
9059     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9060                       << getAssociatedValue() << "\n");
9061   }
9062 
9063   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9064                                 const APInt &RHS) {
9065     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9066   }
9067 
9068   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9069                                  uint32_t ResultBitWidth) {
9070     Instruction::CastOps CastOp = CI->getOpcode();
9071     switch (CastOp) {
9072     default:
9073       llvm_unreachable("unsupported or not integer cast");
9074     case Instruction::Trunc:
9075       return Src.trunc(ResultBitWidth);
9076     case Instruction::SExt:
9077       return Src.sext(ResultBitWidth);
9078     case Instruction::ZExt:
9079       return Src.zext(ResultBitWidth);
9080     case Instruction::BitCast:
9081       return Src;
9082     }
9083   }
9084 
9085   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9086                                        const APInt &LHS, const APInt &RHS,
9087                                        bool &SkipOperation, bool &Unsupported) {
9088     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9089     // Unsupported is set to true when the binary operator is not supported.
9090     // SkipOperation is set to true when UB occur with the given operand pair
9091     // (LHS, RHS).
9092     // TODO: we should look at nsw and nuw keywords to handle operations
9093     //       that create poison or undef value.
9094     switch (BinOpcode) {
9095     default:
9096       Unsupported = true;
9097       return LHS;
9098     case Instruction::Add:
9099       return LHS + RHS;
9100     case Instruction::Sub:
9101       return LHS - RHS;
9102     case Instruction::Mul:
9103       return LHS * RHS;
9104     case Instruction::UDiv:
9105       if (RHS.isZero()) {
9106         SkipOperation = true;
9107         return LHS;
9108       }
9109       return LHS.udiv(RHS);
9110     case Instruction::SDiv:
9111       if (RHS.isZero()) {
9112         SkipOperation = true;
9113         return LHS;
9114       }
9115       return LHS.sdiv(RHS);
9116     case Instruction::URem:
9117       if (RHS.isZero()) {
9118         SkipOperation = true;
9119         return LHS;
9120       }
9121       return LHS.urem(RHS);
9122     case Instruction::SRem:
9123       if (RHS.isZero()) {
9124         SkipOperation = true;
9125         return LHS;
9126       }
9127       return LHS.srem(RHS);
9128     case Instruction::Shl:
9129       return LHS.shl(RHS);
9130     case Instruction::LShr:
9131       return LHS.lshr(RHS);
9132     case Instruction::AShr:
9133       return LHS.ashr(RHS);
9134     case Instruction::And:
9135       return LHS & RHS;
9136     case Instruction::Or:
9137       return LHS | RHS;
9138     case Instruction::Xor:
9139       return LHS ^ RHS;
9140     }
9141   }
9142 
9143   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9144                                            const APInt &LHS, const APInt &RHS) {
9145     bool SkipOperation = false;
9146     bool Unsupported = false;
9147     APInt Result =
9148         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9149     if (Unsupported)
9150       return false;
9151     // If SkipOperation is true, we can ignore this operand pair (L, R).
9152     if (!SkipOperation)
9153       unionAssumed(Result);
9154     return isValidState();
9155   }
9156 
9157   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9158     auto AssumedBefore = getAssumed();
9159     Value *LHS = ICI->getOperand(0);
9160     Value *RHS = ICI->getOperand(1);
9161 
9162     // Simplify the operands first.
9163     bool UsedAssumedInformation = false;
9164     const auto &SimplifiedLHS =
9165         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9166                                *this, UsedAssumedInformation);
9167     if (!SimplifiedLHS)
9168       return ChangeStatus::UNCHANGED;
9169     if (!SimplifiedLHS.getValue())
9170       return indicatePessimisticFixpoint();
9171     LHS = *SimplifiedLHS;
9172 
9173     const auto &SimplifiedRHS =
9174         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9175                                *this, UsedAssumedInformation);
9176     if (!SimplifiedRHS)
9177       return ChangeStatus::UNCHANGED;
9178     if (!SimplifiedRHS.getValue())
9179       return indicatePessimisticFixpoint();
9180     RHS = *SimplifiedRHS;
9181 
9182     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9183       return indicatePessimisticFixpoint();
9184 
9185     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9186         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9187     if (!LHSAA.isValidState())
9188       return indicatePessimisticFixpoint();
9189 
9190     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9191         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9192     if (!RHSAA.isValidState())
9193       return indicatePessimisticFixpoint();
9194 
9195     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9196     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9197 
9198     // TODO: make use of undef flag to limit potential values aggressively.
9199     bool MaybeTrue = false, MaybeFalse = false;
9200     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9201     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9202       // The result of any comparison between undefs can be soundly replaced
9203       // with undef.
9204       unionAssumedWithUndef();
9205     } else if (LHSAA.undefIsContained()) {
9206       for (const APInt &R : RHSAAPVS) {
9207         bool CmpResult = calculateICmpInst(ICI, Zero, R);
9208         MaybeTrue |= CmpResult;
9209         MaybeFalse |= !CmpResult;
9210         if (MaybeTrue & MaybeFalse)
9211           return indicatePessimisticFixpoint();
9212       }
9213     } else if (RHSAA.undefIsContained()) {
9214       for (const APInt &L : LHSAAPVS) {
9215         bool CmpResult = calculateICmpInst(ICI, L, Zero);
9216         MaybeTrue |= CmpResult;
9217         MaybeFalse |= !CmpResult;
9218         if (MaybeTrue & MaybeFalse)
9219           return indicatePessimisticFixpoint();
9220       }
9221     } else {
9222       for (const APInt &L : LHSAAPVS) {
9223         for (const APInt &R : RHSAAPVS) {
9224           bool CmpResult = calculateICmpInst(ICI, L, R);
9225           MaybeTrue |= CmpResult;
9226           MaybeFalse |= !CmpResult;
9227           if (MaybeTrue & MaybeFalse)
9228             return indicatePessimisticFixpoint();
9229         }
9230       }
9231     }
9232     if (MaybeTrue)
9233       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9234     if (MaybeFalse)
9235       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9236     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9237                                          : ChangeStatus::CHANGED;
9238   }
9239 
9240   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9241     auto AssumedBefore = getAssumed();
9242     Value *LHS = SI->getTrueValue();
9243     Value *RHS = SI->getFalseValue();
9244 
9245     // Simplify the operands first.
9246     bool UsedAssumedInformation = false;
9247     const auto &SimplifiedLHS =
9248         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9249                                *this, UsedAssumedInformation);
9250     if (!SimplifiedLHS)
9251       return ChangeStatus::UNCHANGED;
9252     if (!SimplifiedLHS.getValue())
9253       return indicatePessimisticFixpoint();
9254     LHS = *SimplifiedLHS;
9255 
9256     const auto &SimplifiedRHS =
9257         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9258                                *this, UsedAssumedInformation);
9259     if (!SimplifiedRHS)
9260       return ChangeStatus::UNCHANGED;
9261     if (!SimplifiedRHS.getValue())
9262       return indicatePessimisticFixpoint();
9263     RHS = *SimplifiedRHS;
9264 
9265     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9266       return indicatePessimisticFixpoint();
9267 
9268     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9269                                                   UsedAssumedInformation);
9270 
9271     // Check if we only need one operand.
9272     bool OnlyLeft = false, OnlyRight = false;
9273     if (C && *C && (*C)->isOneValue())
9274       OnlyLeft = true;
9275     else if (C && *C && (*C)->isZeroValue())
9276       OnlyRight = true;
9277 
9278     const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
9279     if (!OnlyRight) {
9280       LHSAA = &A.getAAFor<AAPotentialConstantValues>(
9281           *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9282       if (!LHSAA->isValidState())
9283         return indicatePessimisticFixpoint();
9284     }
9285     if (!OnlyLeft) {
9286       RHSAA = &A.getAAFor<AAPotentialConstantValues>(
9287           *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9288       if (!RHSAA->isValidState())
9289         return indicatePessimisticFixpoint();
9290     }
9291 
9292     if (!LHSAA || !RHSAA) {
9293       // select (true/false), lhs, rhs
9294       auto *OpAA = LHSAA ? LHSAA : RHSAA;
9295 
9296       if (OpAA->undefIsContained())
9297         unionAssumedWithUndef();
9298       else
9299         unionAssumed(*OpAA);
9300 
9301     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9302       // select i1 *, undef , undef => undef
9303       unionAssumedWithUndef();
9304     } else {
9305       unionAssumed(*LHSAA);
9306       unionAssumed(*RHSAA);
9307     }
9308     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9309                                          : ChangeStatus::CHANGED;
9310   }
9311 
9312   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9313     auto AssumedBefore = getAssumed();
9314     if (!CI->isIntegerCast())
9315       return indicatePessimisticFixpoint();
9316     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9317     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9318     Value *Src = CI->getOperand(0);
9319 
9320     // Simplify the operand first.
9321     bool UsedAssumedInformation = false;
9322     const auto &SimplifiedSrc =
9323         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9324                                *this, UsedAssumedInformation);
9325     if (!SimplifiedSrc)
9326       return ChangeStatus::UNCHANGED;
9327     if (!SimplifiedSrc.getValue())
9328       return indicatePessimisticFixpoint();
9329     Src = *SimplifiedSrc;
9330 
9331     auto &SrcAA = A.getAAFor<AAPotentialConstantValues>(
9332         *this, IRPosition::value(*Src), DepClassTy::REQUIRED);
9333     if (!SrcAA.isValidState())
9334       return indicatePessimisticFixpoint();
9335     const SetTy &SrcAAPVS = SrcAA.getAssumedSet();
9336     if (SrcAA.undefIsContained())
9337       unionAssumedWithUndef();
9338     else {
9339       for (const APInt &S : SrcAAPVS) {
9340         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9341         unionAssumed(T);
9342       }
9343     }
9344     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9345                                          : ChangeStatus::CHANGED;
9346   }
9347 
9348   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9349     auto AssumedBefore = getAssumed();
9350     Value *LHS = BinOp->getOperand(0);
9351     Value *RHS = BinOp->getOperand(1);
9352 
9353     // Simplify the operands first.
9354     bool UsedAssumedInformation = false;
9355     const auto &SimplifiedLHS =
9356         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9357                                *this, UsedAssumedInformation);
9358     if (!SimplifiedLHS)
9359       return ChangeStatus::UNCHANGED;
9360     if (!SimplifiedLHS.getValue())
9361       return indicatePessimisticFixpoint();
9362     LHS = *SimplifiedLHS;
9363 
9364     const auto &SimplifiedRHS =
9365         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9366                                *this, UsedAssumedInformation);
9367     if (!SimplifiedRHS)
9368       return ChangeStatus::UNCHANGED;
9369     if (!SimplifiedRHS.getValue())
9370       return indicatePessimisticFixpoint();
9371     RHS = *SimplifiedRHS;
9372 
9373     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9374       return indicatePessimisticFixpoint();
9375 
9376     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9377         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9378     if (!LHSAA.isValidState())
9379       return indicatePessimisticFixpoint();
9380 
9381     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9382         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9383     if (!RHSAA.isValidState())
9384       return indicatePessimisticFixpoint();
9385 
9386     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9387     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9388     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9389 
9390     // TODO: make use of undef flag to limit potential values aggressively.
9391     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9392       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9393         return indicatePessimisticFixpoint();
9394     } else if (LHSAA.undefIsContained()) {
9395       for (const APInt &R : RHSAAPVS) {
9396         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9397           return indicatePessimisticFixpoint();
9398       }
9399     } else if (RHSAA.undefIsContained()) {
9400       for (const APInt &L : LHSAAPVS) {
9401         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9402           return indicatePessimisticFixpoint();
9403       }
9404     } else {
9405       for (const APInt &L : LHSAAPVS) {
9406         for (const APInt &R : RHSAAPVS) {
9407           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9408             return indicatePessimisticFixpoint();
9409         }
9410       }
9411     }
9412     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9413                                          : ChangeStatus::CHANGED;
9414   }
9415 
9416   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9417     auto AssumedBefore = getAssumed();
9418     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9419       Value *IncomingValue = PHI->getIncomingValue(u);
9420 
9421       // Simplify the operand first.
9422       bool UsedAssumedInformation = false;
9423       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9424           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9425           UsedAssumedInformation);
9426       if (!SimplifiedIncomingValue)
9427         continue;
9428       if (!SimplifiedIncomingValue.getValue())
9429         return indicatePessimisticFixpoint();
9430       IncomingValue = *SimplifiedIncomingValue;
9431 
9432       auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9433           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9434       if (!PotentialValuesAA.isValidState())
9435         return indicatePessimisticFixpoint();
9436       if (PotentialValuesAA.undefIsContained())
9437         unionAssumedWithUndef();
9438       else
9439         unionAssumed(PotentialValuesAA.getAssumed());
9440     }
9441     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9442                                          : ChangeStatus::CHANGED;
9443   }
9444 
9445   /// See AbstractAttribute::updateImpl(...).
9446   ChangeStatus updateImpl(Attributor &A) override {
9447     Value &V = getAssociatedValue();
9448     Instruction *I = dyn_cast<Instruction>(&V);
9449 
9450     if (auto *ICI = dyn_cast<ICmpInst>(I))
9451       return updateWithICmpInst(A, ICI);
9452 
9453     if (auto *SI = dyn_cast<SelectInst>(I))
9454       return updateWithSelectInst(A, SI);
9455 
9456     if (auto *CI = dyn_cast<CastInst>(I))
9457       return updateWithCastInst(A, CI);
9458 
9459     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9460       return updateWithBinaryOperator(A, BinOp);
9461 
9462     if (auto *PHI = dyn_cast<PHINode>(I))
9463       return updateWithPHINode(A, PHI);
9464 
9465     return indicatePessimisticFixpoint();
9466   }
9467 
9468   /// See AbstractAttribute::trackStatistics()
9469   void trackStatistics() const override {
9470     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9471   }
9472 };
9473 
9474 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9475   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9476       : AAPotentialConstantValuesImpl(IRP, A) {}
9477 
9478   /// See AbstractAttribute::initialize(...).
9479   ChangeStatus updateImpl(Attributor &A) override {
9480     llvm_unreachable(
9481         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9482         "not be called");
9483   }
9484 
9485   /// See AbstractAttribute::trackStatistics()
9486   void trackStatistics() const override {
9487     STATS_DECLTRACK_FN_ATTR(potential_values)
9488   }
9489 };
9490 
9491 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9492   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9493       : AAPotentialConstantValuesFunction(IRP, A) {}
9494 
9495   /// See AbstractAttribute::trackStatistics()
9496   void trackStatistics() const override {
9497     STATS_DECLTRACK_CS_ATTR(potential_values)
9498   }
9499 };
9500 
9501 struct AAPotentialConstantValuesCallSiteReturned
9502     : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9503                                      AAPotentialConstantValuesImpl> {
9504   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9505                                             Attributor &A)
9506       : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9507                                        AAPotentialConstantValuesImpl>(IRP, A) {}
9508 
9509   /// See AbstractAttribute::trackStatistics()
9510   void trackStatistics() const override {
9511     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9512   }
9513 };
9514 
9515 struct AAPotentialConstantValuesCallSiteArgument
9516     : AAPotentialConstantValuesFloating {
9517   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9518                                             Attributor &A)
9519       : AAPotentialConstantValuesFloating(IRP, A) {}
9520 
9521   /// See AbstractAttribute::initialize(..).
9522   void initialize(Attributor &A) override {
9523     AAPotentialConstantValuesImpl::initialize(A);
9524     if (isAtFixpoint())
9525       return;
9526 
9527     Value &V = getAssociatedValue();
9528 
9529     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9530       unionAssumed(C->getValue());
9531       indicateOptimisticFixpoint();
9532       return;
9533     }
9534 
9535     if (isa<UndefValue>(&V)) {
9536       unionAssumedWithUndef();
9537       indicateOptimisticFixpoint();
9538       return;
9539     }
9540   }
9541 
9542   /// See AbstractAttribute::updateImpl(...).
9543   ChangeStatus updateImpl(Attributor &A) override {
9544     Value &V = getAssociatedValue();
9545     auto AssumedBefore = getAssumed();
9546     auto &AA = A.getAAFor<AAPotentialConstantValues>(
9547         *this, IRPosition::value(V), DepClassTy::REQUIRED);
9548     const auto &S = AA.getAssumed();
9549     unionAssumed(S);
9550     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9551                                          : ChangeStatus::CHANGED;
9552   }
9553 
9554   /// See AbstractAttribute::trackStatistics()
9555   void trackStatistics() const override {
9556     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9557   }
9558 };
9559 
9560 /// ------------------------ NoUndef Attribute ---------------------------------
9561 struct AANoUndefImpl : AANoUndef {
9562   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9563 
9564   /// See AbstractAttribute::initialize(...).
9565   void initialize(Attributor &A) override {
9566     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9567       indicateOptimisticFixpoint();
9568       return;
9569     }
9570     Value &V = getAssociatedValue();
9571     if (isa<UndefValue>(V))
9572       indicatePessimisticFixpoint();
9573     else if (isa<FreezeInst>(V))
9574       indicateOptimisticFixpoint();
9575     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9576              isGuaranteedNotToBeUndefOrPoison(&V))
9577       indicateOptimisticFixpoint();
9578     else
9579       AANoUndef::initialize(A);
9580   }
9581 
9582   /// See followUsesInMBEC
9583   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9584                        AANoUndef::StateType &State) {
9585     const Value *UseV = U->get();
9586     const DominatorTree *DT = nullptr;
9587     AssumptionCache *AC = nullptr;
9588     InformationCache &InfoCache = A.getInfoCache();
9589     if (Function *F = getAnchorScope()) {
9590       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9591       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9592     }
9593     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9594     bool TrackUse = false;
9595     // Track use for instructions which must produce undef or poison bits when
9596     // at least one operand contains such bits.
9597     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9598       TrackUse = true;
9599     return TrackUse;
9600   }
9601 
9602   /// See AbstractAttribute::getAsStr().
9603   const std::string getAsStr() const override {
9604     return getAssumed() ? "noundef" : "may-undef-or-poison";
9605   }
9606 
9607   ChangeStatus manifest(Attributor &A) override {
9608     // We don't manifest noundef attribute for dead positions because the
9609     // associated values with dead positions would be replaced with undef
9610     // values.
9611     bool UsedAssumedInformation = false;
9612     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9613                         UsedAssumedInformation))
9614       return ChangeStatus::UNCHANGED;
9615     // A position whose simplified value does not have any value is
9616     // considered to be dead. We don't manifest noundef in such positions for
9617     // the same reason above.
9618     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9619              .has_value())
9620       return ChangeStatus::UNCHANGED;
9621     return AANoUndef::manifest(A);
9622   }
9623 };
9624 
9625 struct AANoUndefFloating : public AANoUndefImpl {
9626   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9627       : AANoUndefImpl(IRP, A) {}
9628 
9629   /// See AbstractAttribute::initialize(...).
9630   void initialize(Attributor &A) override {
9631     AANoUndefImpl::initialize(A);
9632     if (!getState().isAtFixpoint())
9633       if (Instruction *CtxI = getCtxI())
9634         followUsesInMBEC(*this, A, getState(), *CtxI);
9635   }
9636 
9637   /// See AbstractAttribute::updateImpl(...).
9638   ChangeStatus updateImpl(Attributor &A) override {
9639     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9640                             AANoUndef::StateType &T, bool Stripped) -> bool {
9641       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9642                                              DepClassTy::REQUIRED);
9643       if (!Stripped && this == &AA) {
9644         T.indicatePessimisticFixpoint();
9645       } else {
9646         const AANoUndef::StateType &S =
9647             static_cast<const AANoUndef::StateType &>(AA.getState());
9648         T ^= S;
9649       }
9650       return T.isValidState();
9651     };
9652 
9653     StateType T;
9654     bool UsedAssumedInformation = false;
9655     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9656                                           VisitValueCB, getCtxI(),
9657                                           UsedAssumedInformation))
9658       return indicatePessimisticFixpoint();
9659 
9660     return clampStateAndIndicateChange(getState(), T);
9661   }
9662 
9663   /// See AbstractAttribute::trackStatistics()
9664   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9665 };
9666 
9667 struct AANoUndefReturned final
9668     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9669   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9670       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9671 
9672   /// See AbstractAttribute::trackStatistics()
9673   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9674 };
9675 
9676 struct AANoUndefArgument final
9677     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9678   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9679       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9680 
9681   /// See AbstractAttribute::trackStatistics()
9682   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9683 };
9684 
9685 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9686   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9687       : AANoUndefFloating(IRP, A) {}
9688 
9689   /// See AbstractAttribute::trackStatistics()
9690   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9691 };
9692 
9693 struct AANoUndefCallSiteReturned final
9694     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9695   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9696       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9697 
9698   /// See AbstractAttribute::trackStatistics()
9699   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9700 };
9701 
9702 struct AACallEdgesImpl : public AACallEdges {
9703   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9704 
9705   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9706     return CalledFunctions;
9707   }
9708 
9709   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9710 
9711   virtual bool hasNonAsmUnknownCallee() const override {
9712     return HasUnknownCalleeNonAsm;
9713   }
9714 
9715   const std::string getAsStr() const override {
9716     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9717            std::to_string(CalledFunctions.size()) + "]";
9718   }
9719 
9720   void trackStatistics() const override {}
9721 
9722 protected:
9723   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9724     if (CalledFunctions.insert(Fn)) {
9725       Change = ChangeStatus::CHANGED;
9726       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9727                         << "\n");
9728     }
9729   }
9730 
9731   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9732     if (!HasUnknownCallee)
9733       Change = ChangeStatus::CHANGED;
9734     if (NonAsm && !HasUnknownCalleeNonAsm)
9735       Change = ChangeStatus::CHANGED;
9736     HasUnknownCalleeNonAsm |= NonAsm;
9737     HasUnknownCallee = true;
9738   }
9739 
9740 private:
9741   /// Optimistic set of functions that might be called by this position.
9742   SetVector<Function *> CalledFunctions;
9743 
9744   /// Is there any call with a unknown callee.
9745   bool HasUnknownCallee = false;
9746 
9747   /// Is there any call with a unknown callee, excluding any inline asm.
9748   bool HasUnknownCalleeNonAsm = false;
9749 };
9750 
9751 struct AACallEdgesCallSite : public AACallEdgesImpl {
9752   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9753       : AACallEdgesImpl(IRP, A) {}
9754   /// See AbstractAttribute::updateImpl(...).
9755   ChangeStatus updateImpl(Attributor &A) override {
9756     ChangeStatus Change = ChangeStatus::UNCHANGED;
9757 
9758     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9759                           bool Stripped) -> bool {
9760       if (Function *Fn = dyn_cast<Function>(&V)) {
9761         addCalledFunction(Fn, Change);
9762       } else {
9763         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9764         setHasUnknownCallee(true, Change);
9765       }
9766 
9767       // Explore all values.
9768       return true;
9769     };
9770 
9771     // Process any value that we might call.
9772     auto ProcessCalledOperand = [&](Value *V) {
9773       bool DummyValue = false;
9774       bool UsedAssumedInformation = false;
9775       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9776                                        DummyValue, VisitValue, nullptr,
9777                                        UsedAssumedInformation, false)) {
9778         // If we haven't gone through all values, assume that there are unknown
9779         // callees.
9780         setHasUnknownCallee(true, Change);
9781       }
9782     };
9783 
9784     CallBase *CB = cast<CallBase>(getCtxI());
9785 
9786     if (CB->isInlineAsm()) {
9787       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9788           !hasAssumption(*CB, "ompx_no_call_asm"))
9789         setHasUnknownCallee(false, Change);
9790       return Change;
9791     }
9792 
9793     // Process callee metadata if available.
9794     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9795       for (auto &Op : MD->operands()) {
9796         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9797         if (Callee)
9798           addCalledFunction(Callee, Change);
9799       }
9800       return Change;
9801     }
9802 
9803     // The most simple case.
9804     ProcessCalledOperand(CB->getCalledOperand());
9805 
9806     // Process callback functions.
9807     SmallVector<const Use *, 4u> CallbackUses;
9808     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9809     for (const Use *U : CallbackUses)
9810       ProcessCalledOperand(U->get());
9811 
9812     return Change;
9813   }
9814 };
9815 
9816 struct AACallEdgesFunction : public AACallEdgesImpl {
9817   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9818       : AACallEdgesImpl(IRP, A) {}
9819 
9820   /// See AbstractAttribute::updateImpl(...).
9821   ChangeStatus updateImpl(Attributor &A) override {
9822     ChangeStatus Change = ChangeStatus::UNCHANGED;
9823 
9824     auto ProcessCallInst = [&](Instruction &Inst) {
9825       CallBase &CB = cast<CallBase>(Inst);
9826 
9827       auto &CBEdges = A.getAAFor<AACallEdges>(
9828           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9829       if (CBEdges.hasNonAsmUnknownCallee())
9830         setHasUnknownCallee(true, Change);
9831       if (CBEdges.hasUnknownCallee())
9832         setHasUnknownCallee(false, Change);
9833 
9834       for (Function *F : CBEdges.getOptimisticEdges())
9835         addCalledFunction(F, Change);
9836 
9837       return true;
9838     };
9839 
9840     // Visit all callable instructions.
9841     bool UsedAssumedInformation = false;
9842     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9843                                            UsedAssumedInformation,
9844                                            /* CheckBBLivenessOnly */ true)) {
9845       // If we haven't looked at all call like instructions, assume that there
9846       // are unknown callees.
9847       setHasUnknownCallee(true, Change);
9848     }
9849 
9850     return Change;
9851   }
9852 };
9853 
9854 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9855 private:
9856   struct QuerySet {
9857     void markReachable(const Function &Fn) {
9858       Reachable.insert(&Fn);
9859       Unreachable.erase(&Fn);
9860     }
9861 
9862     /// If there is no information about the function None is returned.
9863     Optional<bool> isCachedReachable(const Function &Fn) {
9864       // Assume that we can reach the function.
9865       // TODO: Be more specific with the unknown callee.
9866       if (CanReachUnknownCallee)
9867         return true;
9868 
9869       if (Reachable.count(&Fn))
9870         return true;
9871 
9872       if (Unreachable.count(&Fn))
9873         return false;
9874 
9875       return llvm::None;
9876     }
9877 
9878     /// Set of functions that we know for sure is reachable.
9879     DenseSet<const Function *> Reachable;
9880 
9881     /// Set of functions that are unreachable, but might become reachable.
9882     DenseSet<const Function *> Unreachable;
9883 
9884     /// If we can reach a function with a call to a unknown function we assume
9885     /// that we can reach any function.
9886     bool CanReachUnknownCallee = false;
9887   };
9888 
9889   struct QueryResolver : public QuerySet {
9890     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9891                         ArrayRef<const AACallEdges *> AAEdgesList) {
9892       ChangeStatus Change = ChangeStatus::UNCHANGED;
9893 
9894       for (auto *AAEdges : AAEdgesList) {
9895         if (AAEdges->hasUnknownCallee()) {
9896           if (!CanReachUnknownCallee)
9897             Change = ChangeStatus::CHANGED;
9898           CanReachUnknownCallee = true;
9899           return Change;
9900         }
9901       }
9902 
9903       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9904         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9905           Change = ChangeStatus::CHANGED;
9906           markReachable(*Fn);
9907         }
9908       }
9909       return Change;
9910     }
9911 
9912     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9913                      ArrayRef<const AACallEdges *> AAEdgesList,
9914                      const Function &Fn) {
9915       Optional<bool> Cached = isCachedReachable(Fn);
9916       if (Cached)
9917         return Cached.getValue();
9918 
9919       // The query was not cached, thus it is new. We need to request an update
9920       // explicitly to make sure this the information is properly run to a
9921       // fixpoint.
9922       A.registerForUpdate(AA);
9923 
9924       // We need to assume that this function can't reach Fn to prevent
9925       // an infinite loop if this function is recursive.
9926       Unreachable.insert(&Fn);
9927 
9928       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9929       if (Result)
9930         markReachable(Fn);
9931       return Result;
9932     }
9933 
9934     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9935                           ArrayRef<const AACallEdges *> AAEdgesList,
9936                           const Function &Fn) const {
9937 
9938       // Handle the most trivial case first.
9939       for (auto *AAEdges : AAEdgesList) {
9940         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9941 
9942         if (Edges.count(const_cast<Function *>(&Fn)))
9943           return true;
9944       }
9945 
9946       SmallVector<const AAFunctionReachability *, 8> Deps;
9947       for (auto &AAEdges : AAEdgesList) {
9948         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9949 
9950         for (Function *Edge : Edges) {
9951           // Functions that do not call back into the module can be ignored.
9952           if (Edge->hasFnAttribute(Attribute::NoCallback))
9953             continue;
9954 
9955           // We don't need a dependency if the result is reachable.
9956           const AAFunctionReachability &EdgeReachability =
9957               A.getAAFor<AAFunctionReachability>(
9958                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9959           Deps.push_back(&EdgeReachability);
9960 
9961           if (EdgeReachability.canReach(A, Fn))
9962             return true;
9963         }
9964       }
9965 
9966       // The result is false for now, set dependencies and leave.
9967       for (auto *Dep : Deps)
9968         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9969 
9970       return false;
9971     }
9972   };
9973 
9974   /// Get call edges that can be reached by this instruction.
9975   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9976                              const Instruction &Inst,
9977                              SmallVector<const AACallEdges *> &Result) const {
9978     // Determine call like instructions that we can reach from the inst.
9979     auto CheckCallBase = [&](Instruction &CBInst) {
9980       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9981         return true;
9982 
9983       auto &CB = cast<CallBase>(CBInst);
9984       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9985           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9986 
9987       Result.push_back(&AAEdges);
9988       return true;
9989     };
9990 
9991     bool UsedAssumedInformation = false;
9992     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9993                                              UsedAssumedInformation,
9994                                              /* CheckBBLivenessOnly */ true);
9995   }
9996 
9997 public:
9998   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9999       : AAFunctionReachability(IRP, A) {}
10000 
10001   bool canReach(Attributor &A, const Function &Fn) const override {
10002     if (!isValidState())
10003       return true;
10004 
10005     const AACallEdges &AAEdges =
10006         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10007 
10008     // Attributor returns attributes as const, so this function has to be
10009     // const for users of this attribute to use it without having to do
10010     // a const_cast.
10011     // This is a hack for us to be able to cache queries.
10012     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10013     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
10014                                                           {&AAEdges}, Fn);
10015 
10016     return Result;
10017   }
10018 
10019   /// Can \p CB reach \p Fn
10020   bool canReach(Attributor &A, CallBase &CB,
10021                 const Function &Fn) const override {
10022     if (!isValidState())
10023       return true;
10024 
10025     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10026         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
10027 
10028     // Attributor returns attributes as const, so this function has to be
10029     // const for users of this attribute to use it without having to do
10030     // a const_cast.
10031     // This is a hack for us to be able to cache queries.
10032     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10033     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
10034 
10035     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
10036 
10037     return Result;
10038   }
10039 
10040   bool instructionCanReach(Attributor &A, const Instruction &Inst,
10041                            const Function &Fn,
10042                            bool UseBackwards) const override {
10043     if (!isValidState())
10044       return true;
10045 
10046     if (UseBackwards)
10047       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
10048 
10049     const auto &Reachability = A.getAAFor<AAReachability>(
10050         *this, IRPosition::function(*getAssociatedFunction()),
10051         DepClassTy::REQUIRED);
10052 
10053     SmallVector<const AACallEdges *> CallEdges;
10054     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
10055     // Attributor returns attributes as const, so this function has to be
10056     // const for users of this attribute to use it without having to do
10057     // a const_cast.
10058     // This is a hack for us to be able to cache queries.
10059     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10060     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
10061     if (!AllKnown)
10062       InstQSet.CanReachUnknownCallee = true;
10063 
10064     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
10065   }
10066 
10067   /// See AbstractAttribute::updateImpl(...).
10068   ChangeStatus updateImpl(Attributor &A) override {
10069     const AACallEdges &AAEdges =
10070         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10071     ChangeStatus Change = ChangeStatus::UNCHANGED;
10072 
10073     Change |= WholeFunction.update(A, *this, {&AAEdges});
10074 
10075     for (auto &CBPair : CBQueries) {
10076       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10077           *this, IRPosition::callsite_function(*CBPair.first),
10078           DepClassTy::REQUIRED);
10079 
10080       Change |= CBPair.second.update(A, *this, {&AAEdges});
10081     }
10082 
10083     // Update the Instruction queries.
10084     if (!InstQueries.empty()) {
10085       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
10086           *this, IRPosition::function(*getAssociatedFunction()),
10087           DepClassTy::REQUIRED);
10088 
10089       // Check for local callbases first.
10090       for (auto &InstPair : InstQueries) {
10091         SmallVector<const AACallEdges *> CallEdges;
10092         bool AllKnown =
10093             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
10094         // Update will return change if we this effects any queries.
10095         if (!AllKnown)
10096           InstPair.second.CanReachUnknownCallee = true;
10097         Change |= InstPair.second.update(A, *this, CallEdges);
10098       }
10099     }
10100 
10101     return Change;
10102   }
10103 
10104   const std::string getAsStr() const override {
10105     size_t QueryCount =
10106         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
10107 
10108     return "FunctionReachability [" +
10109            std::to_string(WholeFunction.Reachable.size()) + "," +
10110            std::to_string(QueryCount) + "]";
10111   }
10112 
10113   void trackStatistics() const override {}
10114 
10115 private:
10116   bool canReachUnknownCallee() const override {
10117     return WholeFunction.CanReachUnknownCallee;
10118   }
10119 
10120   /// Used to answer if a the whole function can reacha a specific function.
10121   QueryResolver WholeFunction;
10122 
10123   /// Used to answer if a call base inside this function can reach a specific
10124   /// function.
10125   MapVector<const CallBase *, QueryResolver> CBQueries;
10126 
10127   /// This is for instruction queries than scan "forward".
10128   MapVector<const Instruction *, QueryResolver> InstQueries;
10129 };
10130 } // namespace
10131 
10132 /// ---------------------- Assumption Propagation ------------------------------
10133 namespace {
10134 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10135   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10136                        const DenseSet<StringRef> &Known)
10137       : AAAssumptionInfo(IRP, A, Known) {}
10138 
10139   bool hasAssumption(const StringRef Assumption) const override {
10140     return isValidState() && setContains(Assumption);
10141   }
10142 
10143   /// See AbstractAttribute::getAsStr()
10144   const std::string getAsStr() const override {
10145     const SetContents &Known = getKnown();
10146     const SetContents &Assumed = getAssumed();
10147 
10148     const std::string KnownStr =
10149         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10150     const std::string AssumedStr =
10151         (Assumed.isUniversal())
10152             ? "Universal"
10153             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10154 
10155     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10156   }
10157 };
10158 
10159 /// Propagates assumption information from parent functions to all of their
10160 /// successors. An assumption can be propagated if the containing function
10161 /// dominates the called function.
10162 ///
10163 /// We start with a "known" set of assumptions already valid for the associated
10164 /// function and an "assumed" set that initially contains all possible
10165 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10166 /// contents as concrete values are known. The concrete values are seeded by the
10167 /// first nodes that are either entries into the call graph, or contains no
10168 /// assumptions. Each node is updated as the intersection of the assumed state
10169 /// with all of its predecessors.
10170 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10171   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10172       : AAAssumptionInfoImpl(IRP, A,
10173                              getAssumptions(*IRP.getAssociatedFunction())) {}
10174 
10175   /// See AbstractAttribute::manifest(...).
10176   ChangeStatus manifest(Attributor &A) override {
10177     const auto &Assumptions = getKnown();
10178 
10179     // Don't manifest a universal set if it somehow made it here.
10180     if (Assumptions.isUniversal())
10181       return ChangeStatus::UNCHANGED;
10182 
10183     Function *AssociatedFunction = getAssociatedFunction();
10184 
10185     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10186 
10187     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10188   }
10189 
10190   /// See AbstractAttribute::updateImpl(...).
10191   ChangeStatus updateImpl(Attributor &A) override {
10192     bool Changed = false;
10193 
10194     auto CallSitePred = [&](AbstractCallSite ACS) {
10195       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10196           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10197           DepClassTy::REQUIRED);
10198       // Get the set of assumptions shared by all of this function's callers.
10199       Changed |= getIntersection(AssumptionAA.getAssumed());
10200       return !getAssumed().empty() || !getKnown().empty();
10201     };
10202 
10203     bool UsedAssumedInformation = false;
10204     // Get the intersection of all assumptions held by this node's predecessors.
10205     // If we don't know all the call sites then this is either an entry into the
10206     // call graph or an empty node. This node is known to only contain its own
10207     // assumptions and can be propagated to its successors.
10208     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10209                                 UsedAssumedInformation))
10210       return indicatePessimisticFixpoint();
10211 
10212     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10213   }
10214 
10215   void trackStatistics() const override {}
10216 };
10217 
10218 /// Assumption Info defined for call sites.
10219 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10220 
10221   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10222       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10223 
10224   /// See AbstractAttribute::initialize(...).
10225   void initialize(Attributor &A) override {
10226     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10227     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10228   }
10229 
10230   /// See AbstractAttribute::manifest(...).
10231   ChangeStatus manifest(Attributor &A) override {
10232     // Don't manifest a universal set if it somehow made it here.
10233     if (getKnown().isUniversal())
10234       return ChangeStatus::UNCHANGED;
10235 
10236     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10237     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10238 
10239     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10240   }
10241 
10242   /// See AbstractAttribute::updateImpl(...).
10243   ChangeStatus updateImpl(Attributor &A) override {
10244     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10245     auto &AssumptionAA =
10246         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10247     bool Changed = getIntersection(AssumptionAA.getAssumed());
10248     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10249   }
10250 
10251   /// See AbstractAttribute::trackStatistics()
10252   void trackStatistics() const override {}
10253 
10254 private:
10255   /// Helper to initialized the known set as all the assumptions this call and
10256   /// the callee contain.
10257   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10258     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10259     auto Assumptions = getAssumptions(CB);
10260     if (Function *F = IRP.getAssociatedFunction())
10261       set_union(Assumptions, getAssumptions(*F));
10262     if (Function *F = IRP.getAssociatedFunction())
10263       set_union(Assumptions, getAssumptions(*F));
10264     return Assumptions;
10265   }
10266 };
10267 } // namespace
10268 
10269 AACallGraphNode *AACallEdgeIterator::operator*() const {
10270   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10271       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10272 }
10273 
10274 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10275 
10276 const char AAReturnedValues::ID = 0;
10277 const char AANoUnwind::ID = 0;
10278 const char AANoSync::ID = 0;
10279 const char AANoFree::ID = 0;
10280 const char AANonNull::ID = 0;
10281 const char AANoRecurse::ID = 0;
10282 const char AAWillReturn::ID = 0;
10283 const char AAUndefinedBehavior::ID = 0;
10284 const char AANoAlias::ID = 0;
10285 const char AAReachability::ID = 0;
10286 const char AANoReturn::ID = 0;
10287 const char AAIsDead::ID = 0;
10288 const char AADereferenceable::ID = 0;
10289 const char AAAlign::ID = 0;
10290 const char AAInstanceInfo::ID = 0;
10291 const char AANoCapture::ID = 0;
10292 const char AAValueSimplify::ID = 0;
10293 const char AAHeapToStack::ID = 0;
10294 const char AAPrivatizablePtr::ID = 0;
10295 const char AAMemoryBehavior::ID = 0;
10296 const char AAMemoryLocation::ID = 0;
10297 const char AAValueConstantRange::ID = 0;
10298 const char AAPotentialConstantValues::ID = 0;
10299 const char AANoUndef::ID = 0;
10300 const char AACallEdges::ID = 0;
10301 const char AAFunctionReachability::ID = 0;
10302 const char AAPointerInfo::ID = 0;
10303 const char AAAssumptionInfo::ID = 0;
10304 
10305 // Macro magic to create the static generator function for attributes that
10306 // follow the naming scheme.
10307 
10308 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10309   case IRPosition::PK:                                                         \
10310     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10311 
10312 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10313   case IRPosition::PK:                                                         \
10314     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10315     ++NumAAs;                                                                  \
10316     break;
10317 
10318 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10319   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10320     CLASS *AA = nullptr;                                                       \
10321     switch (IRP.getPositionKind()) {                                           \
10322       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10323       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10324       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10325       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10326       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10327       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10328       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10329       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10330     }                                                                          \
10331     return *AA;                                                                \
10332   }
10333 
10334 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10335   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10336     CLASS *AA = nullptr;                                                       \
10337     switch (IRP.getPositionKind()) {                                           \
10338       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10339       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10340       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10341       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10342       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10343       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10344       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10345       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10346     }                                                                          \
10347     return *AA;                                                                \
10348   }
10349 
10350 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10351   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10352     CLASS *AA = nullptr;                                                       \
10353     switch (IRP.getPositionKind()) {                                           \
10354       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10355       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10356       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10357       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10358       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10359       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10360       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10361       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10362     }                                                                          \
10363     return *AA;                                                                \
10364   }
10365 
10366 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10367   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10368     CLASS *AA = nullptr;                                                       \
10369     switch (IRP.getPositionKind()) {                                           \
10370       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10371       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10372       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10373       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10374       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10375       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10376       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10377       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10378     }                                                                          \
10379     return *AA;                                                                \
10380   }
10381 
10382 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10383   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10384     CLASS *AA = nullptr;                                                       \
10385     switch (IRP.getPositionKind()) {                                           \
10386       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10387       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10388       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10389       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10390       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10391       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10392       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10393       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10394     }                                                                          \
10395     return *AA;                                                                \
10396   }
10397 
10398 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10399 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10400 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10401 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10402 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10403 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10404 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10405 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10406 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10407 
10408 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10409 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10410 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10411 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10412 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10413 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10414 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10415 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10416 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10417 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10418 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10419 
10420 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10421 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10422 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10423 
10424 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10425 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10426 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10427 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10428 
10429 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10430 
10431 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10432 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10433 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10434 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10435 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10436 #undef SWITCH_PK_CREATE
10437 #undef SWITCH_PK_INV
10438