1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/SCCIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetOperations.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/LazyValueInfo.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
32 #include "llvm/Analysis/ScalarEvolution.h"
33 #include "llvm/Analysis/TargetTransformInfo.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Assumptions.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/GlobalValue.h"
40 #include "llvm/IR/IRBuilder.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/NoFolder.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/IR/ValueHandle.h"
47 #include "llvm/Support/Alignment.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/GraphWriter.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
55 #include "llvm/Transforms/Utils/Local.h"
56 #include "llvm/Transforms/Utils/ValueMapper.h"
57 #include <cassert>
58 
59 using namespace llvm;
60 
61 #define DEBUG_TYPE "attributor"
62 
63 static cl::opt<bool> ManifestInternal(
64     "attributor-manifest-internal", cl::Hidden,
65     cl::desc("Manifest Attributor internal string attributes."),
66     cl::init(false));
67 
68 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
69                                        cl::Hidden);
70 
71 template <>
72 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
73 
74 static cl::opt<unsigned, true> MaxPotentialValues(
75     "attributor-max-potential-values", cl::Hidden,
76     cl::desc("Maximum number of potential values to be "
77              "tracked for each position."),
78     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
79     cl::init(7));
80 
81 static cl::opt<unsigned> MaxInterferingAccesses(
82     "attributor-max-interfering-accesses", cl::Hidden,
83     cl::desc("Maximum number of interfering accesses to "
84              "check before assuming all might interfere."),
85     cl::init(6));
86 
87 STATISTIC(NumAAs, "Number of abstract attributes created");
88 
89 // Some helper macros to deal with statistics tracking.
90 //
91 // Usage:
92 // For simple IR attribute tracking overload trackStatistics in the abstract
93 // attribute and choose the right STATS_DECLTRACK_********* macro,
94 // e.g.,:
95 //  void trackStatistics() const override {
96 //    STATS_DECLTRACK_ARG_ATTR(returned)
97 //  }
98 // If there is a single "increment" side one can use the macro
99 // STATS_DECLTRACK with a custom message. If there are multiple increment
100 // sides, STATS_DECL and STATS_TRACK can also be used separately.
101 //
102 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
103   ("Number of " #TYPE " marked '" #NAME "'")
104 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
105 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
106 #define STATS_DECL(NAME, TYPE, MSG)                                            \
107   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
108 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
109 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
110   {                                                                            \
111     STATS_DECL(NAME, TYPE, MSG)                                                \
112     STATS_TRACK(NAME, TYPE)                                                    \
113   }
114 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
115   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
116 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
117   STATS_DECLTRACK(NAME, CSArguments,                                           \
118                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
119 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
120   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
121 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
122   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
123 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
124   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
125                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
126 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
127   STATS_DECLTRACK(NAME, CSReturn,                                              \
128                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
129 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
130   STATS_DECLTRACK(NAME, Floating,                                              \
131                   ("Number of floating values known to be '" #NAME "'"))
132 
133 // Specialization of the operator<< for abstract attributes subclasses. This
134 // disambiguates situations where multiple operators are applicable.
135 namespace llvm {
136 #define PIPE_OPERATOR(CLASS)                                                   \
137   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
138     return OS << static_cast<const AbstractAttribute &>(AA);                   \
139   }
140 
141 PIPE_OPERATOR(AAIsDead)
142 PIPE_OPERATOR(AANoUnwind)
143 PIPE_OPERATOR(AANoSync)
144 PIPE_OPERATOR(AANoRecurse)
145 PIPE_OPERATOR(AAWillReturn)
146 PIPE_OPERATOR(AANoReturn)
147 PIPE_OPERATOR(AAReturnedValues)
148 PIPE_OPERATOR(AANonNull)
149 PIPE_OPERATOR(AANoAlias)
150 PIPE_OPERATOR(AADereferenceable)
151 PIPE_OPERATOR(AAAlign)
152 PIPE_OPERATOR(AAInstanceInfo)
153 PIPE_OPERATOR(AANoCapture)
154 PIPE_OPERATOR(AAValueSimplify)
155 PIPE_OPERATOR(AANoFree)
156 PIPE_OPERATOR(AAHeapToStack)
157 PIPE_OPERATOR(AAReachability)
158 PIPE_OPERATOR(AAMemoryBehavior)
159 PIPE_OPERATOR(AAMemoryLocation)
160 PIPE_OPERATOR(AAValueConstantRange)
161 PIPE_OPERATOR(AAPrivatizablePtr)
162 PIPE_OPERATOR(AAUndefinedBehavior)
163 PIPE_OPERATOR(AAPotentialConstantValues)
164 PIPE_OPERATOR(AANoUndef)
165 PIPE_OPERATOR(AACallEdges)
166 PIPE_OPERATOR(AAFunctionReachability)
167 PIPE_OPERATOR(AAPointerInfo)
168 PIPE_OPERATOR(AAAssumptionInfo)
169 
170 #undef PIPE_OPERATOR
171 
172 template <>
173 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
174                                                      const DerefState &R) {
175   ChangeStatus CS0 =
176       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
177   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
178   return CS0 | CS1;
179 }
180 
181 } // namespace llvm
182 
183 /// Get pointer operand of memory accessing instruction. If \p I is
184 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
185 /// is set to false and the instruction is volatile, return nullptr.
186 static const Value *getPointerOperand(const Instruction *I,
187                                       bool AllowVolatile) {
188   if (!AllowVolatile && I->isVolatile())
189     return nullptr;
190 
191   if (auto *LI = dyn_cast<LoadInst>(I)) {
192     return LI->getPointerOperand();
193   }
194 
195   if (auto *SI = dyn_cast<StoreInst>(I)) {
196     return SI->getPointerOperand();
197   }
198 
199   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
200     return CXI->getPointerOperand();
201   }
202 
203   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
204     return RMWI->getPointerOperand();
205   }
206 
207   return nullptr;
208 }
209 
210 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
211 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
212 /// getelement pointer instructions that traverse the natural type of \p Ptr if
213 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
214 /// through a cast to i8*.
215 ///
216 /// TODO: This could probably live somewhere more prominantly if it doesn't
217 ///       already exist.
218 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
219                                int64_t Offset, IRBuilder<NoFolder> &IRB,
220                                const DataLayout &DL) {
221   assert(Offset >= 0 && "Negative offset not supported yet!");
222   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
223                     << "-bytes as " << *ResTy << "\n");
224 
225   if (Offset) {
226     Type *Ty = PtrElemTy;
227     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
228     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
229 
230     SmallVector<Value *, 4> ValIndices;
231     std::string GEPName = Ptr->getName().str();
232     for (const APInt &Index : IntIndices) {
233       ValIndices.push_back(IRB.getInt(Index));
234       GEPName += "." + std::to_string(Index.getZExtValue());
235     }
236 
237     // Create a GEP for the indices collected above.
238     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
239 
240     // If an offset is left we use byte-wise adjustment.
241     if (IntOffset != 0) {
242       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
243       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
244                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
245     }
246   }
247 
248   // Ensure the result has the requested type.
249   Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
250                                                 Ptr->getName() + ".cast");
251 
252   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
253   return Ptr;
254 }
255 
256 /// Recursively visit all values that might become \p IRP at some point. This
257 /// will be done by looking through cast instructions, selects, phis, and calls
258 /// with the "returned" attribute. Once we cannot look through the value any
259 /// further, the callback \p VisitValueCB is invoked and passed the current
260 /// value, the \p State, and a flag to indicate if we stripped anything.
261 /// Stripped means that we unpacked the value associated with \p IRP at least
262 /// once. Note that the value used for the callback may still be the value
263 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
264 /// we will never visit more values than specified by \p MaxValues.
265 /// If \p VS does not contain the Interprocedural bit, only values valid in the
266 /// scope of \p CtxI will be visited and simplification into other scopes is
267 /// prevented.
268 template <typename StateTy>
269 static bool genericValueTraversal(
270     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
271     StateTy &State,
272     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
273         VisitValueCB,
274     const Instruction *CtxI, bool &UsedAssumedInformation,
275     bool UseValueSimplify = true, int MaxValues = 16,
276     function_ref<Value *(Value *)> StripCB = nullptr,
277     AA::ValueScope VS = AA::Interprocedural) {
278 
279   struct LivenessInfo {
280     const AAIsDead *LivenessAA = nullptr;
281     bool AnyDead = false;
282   };
283   SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
284   auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
285     LivenessInfo &LI = LivenessAAs[&F];
286     if (!LI.LivenessAA)
287       LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
288                                             DepClassTy::NONE);
289     return LI;
290   };
291 
292   Value *InitialV = &IRP.getAssociatedValue();
293   using Item = std::pair<Value *, const Instruction *>;
294   SmallSet<Item, 16> Visited;
295   SmallVector<Item, 16> Worklist;
296   Worklist.push_back({InitialV, CtxI});
297 
298   int Iteration = 0;
299   do {
300     Item I = Worklist.pop_back_val();
301     Value *V = I.first;
302     CtxI = I.second;
303     if (StripCB)
304       V = StripCB(V);
305 
306     // Check if we should process the current value. To prevent endless
307     // recursion keep a record of the values we followed!
308     if (!Visited.insert(I).second)
309       continue;
310 
311     // Make sure we limit the compile time for complex expressions.
312     if (Iteration++ >= MaxValues) {
313       LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
314                         << Iteration << "!\n");
315       return false;
316     }
317 
318     // Explicitly look through calls with a "returned" attribute if we do
319     // not have a pointer as stripPointerCasts only works on them.
320     Value *NewV = nullptr;
321     if (V->getType()->isPointerTy()) {
322       NewV = V->stripPointerCasts();
323     } else {
324       auto *CB = dyn_cast<CallBase>(V);
325       if (CB && CB->getCalledFunction()) {
326         for (Argument &Arg : CB->getCalledFunction()->args())
327           if (Arg.hasReturnedAttr()) {
328             NewV = CB->getArgOperand(Arg.getArgNo());
329             break;
330           }
331       }
332     }
333     if (NewV && NewV != V) {
334       Worklist.push_back({NewV, CtxI});
335       continue;
336     }
337 
338     // Look through select instructions, visit assumed potential values.
339     if (auto *SI = dyn_cast<SelectInst>(V)) {
340       Optional<Constant *> C = A.getAssumedConstant(
341           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
342       bool NoValueYet = !C;
343       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
344         continue;
345       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
346         if (CI->isZero())
347           Worklist.push_back({SI->getFalseValue(), CtxI});
348         else
349           Worklist.push_back({SI->getTrueValue(), CtxI});
350         continue;
351       }
352       // We could not simplify the condition, assume both values.(
353       Worklist.push_back({SI->getTrueValue(), CtxI});
354       Worklist.push_back({SI->getFalseValue(), CtxI});
355       continue;
356     }
357 
358     // Look through phi nodes, visit all live operands.
359     if (auto *PHI = dyn_cast<PHINode>(V)) {
360       LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
361       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
362         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
363         if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
364           LI.AnyDead = true;
365           UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
366           continue;
367         }
368         Worklist.push_back(
369             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
370       }
371       continue;
372     }
373 
374     if (auto *Arg = dyn_cast<Argument>(V)) {
375       if ((VS & AA::Interprocedural) && !Arg->hasPassPointeeByValueCopyAttr()) {
376         SmallVector<Item> CallSiteValues;
377         bool UsedAssumedInformation = false;
378         if (A.checkForAllCallSites(
379                 [&](AbstractCallSite ACS) {
380                   // Callbacks might not have a corresponding call site operand,
381                   // stick with the argument in that case.
382                   Value *CSOp = ACS.getCallArgOperand(*Arg);
383                   if (!CSOp)
384                     return false;
385                   CallSiteValues.push_back({CSOp, ACS.getInstruction()});
386                   return true;
387                 },
388                 *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
389           Worklist.append(CallSiteValues);
390           continue;
391         }
392       }
393     }
394 
395     if (UseValueSimplify && !isa<Constant>(V)) {
396       Optional<Value *> SimpleV =
397           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
398       if (!SimpleV.hasValue())
399         continue;
400       Value *NewV = SimpleV.getValue();
401       if (NewV && NewV != V) {
402         if ((VS & AA::Interprocedural) || !CtxI ||
403             AA::isValidInScope(*NewV, CtxI->getFunction())) {
404           Worklist.push_back({NewV, CtxI});
405           continue;
406         }
407       }
408     }
409 
410     if (auto *LI = dyn_cast<LoadInst>(V)) {
411       bool UsedAssumedInformation = false;
412       // If we ask for the potentially loaded values from the initial pointer we
413       // will simply end up here again. The load is as far as we can make it.
414       if (LI->getPointerOperand() != InitialV) {
415         SmallSetVector<Value *, 4> PotentialCopies;
416         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
417         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
418                                            PotentialValueOrigins, QueryingAA,
419                                            UsedAssumedInformation,
420                                            /* OnlyExact */ true)) {
421           // Values have to be dynamically unique or we loose the fact that a
422           // single llvm::Value might represent two runtime values (e.g., stack
423           // locations in different recursive calls).
424           bool DynamicallyUnique =
425               llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
426                 return AA::isDynamicallyUnique(A, QueryingAA, *PC);
427               });
428           if (DynamicallyUnique &&
429               ((VS & AA::Interprocedural) || !CtxI ||
430                llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
431                  return AA::isValidInScope(*PC, CtxI->getFunction());
432                }))) {
433             for (auto *PotentialCopy : PotentialCopies)
434               Worklist.push_back({PotentialCopy, CtxI});
435             continue;
436           }
437         }
438       }
439     }
440 
441     // Once a leaf is reached we inform the user through the callback.
442     if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
443       LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
444                         << *V << "!\n");
445       return false;
446     }
447   } while (!Worklist.empty());
448 
449   // If we actually used liveness information so we have to record a dependence.
450   for (auto &It : LivenessAAs)
451     if (It.second.AnyDead)
452       A.recordDependence(*It.second.LivenessAA, QueryingAA,
453                          DepClassTy::OPTIONAL);
454 
455   // All values have been visited.
456   return true;
457 }
458 
459 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
460                                      SmallVectorImpl<Value *> &Objects,
461                                      const AbstractAttribute &QueryingAA,
462                                      const Instruction *CtxI,
463                                      bool &UsedAssumedInformation,
464                                      AA::ValueScope VS) {
465   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
466   SmallPtrSet<Value *, 8> SeenObjects;
467   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
468                                      SmallVectorImpl<Value *> &Objects,
469                                      bool) -> bool {
470     if (SeenObjects.insert(&Val).second)
471       Objects.push_back(&Val);
472     return true;
473   };
474   if (!genericValueTraversal<decltype(Objects)>(
475           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
476           UsedAssumedInformation, true, 32, StripCB, VS))
477     return false;
478   return true;
479 }
480 
481 static const Value *
482 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
483                           const Value *Val, const DataLayout &DL, APInt &Offset,
484                           bool GetMinOffset, bool AllowNonInbounds,
485                           bool UseAssumed = false) {
486 
487   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
488     const IRPosition &Pos = IRPosition::value(V);
489     // Only track dependence if we are going to use the assumed info.
490     const AAValueConstantRange &ValueConstantRangeAA =
491         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
492                                          UseAssumed ? DepClassTy::OPTIONAL
493                                                     : DepClassTy::NONE);
494     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
495                                      : ValueConstantRangeAA.getKnown();
496     if (Range.isFullSet())
497       return false;
498 
499     // We can only use the lower part of the range because the upper part can
500     // be higher than what the value can really be.
501     if (GetMinOffset)
502       ROffset = Range.getSignedMin();
503     else
504       ROffset = Range.getSignedMax();
505     return true;
506   };
507 
508   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
509                                                 /* AllowInvariant */ true,
510                                                 AttributorAnalysis);
511 }
512 
513 static const Value *
514 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
515                         const Value *Ptr, int64_t &BytesOffset,
516                         const DataLayout &DL, bool AllowNonInbounds = false) {
517   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
518   const Value *Base =
519       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
520                                 /* GetMinOffset */ true, AllowNonInbounds);
521 
522   BytesOffset = OffsetAPInt.getSExtValue();
523   return Base;
524 }
525 
526 /// Clamp the information known for all returned values of a function
527 /// (identified by \p QueryingAA) into \p S.
528 template <typename AAType, typename StateType = typename AAType::StateType>
529 static void clampReturnedValueStates(
530     Attributor &A, const AAType &QueryingAA, StateType &S,
531     const IRPosition::CallBaseContext *CBContext = nullptr) {
532   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
533                     << QueryingAA << " into " << S << "\n");
534 
535   assert((QueryingAA.getIRPosition().getPositionKind() ==
536               IRPosition::IRP_RETURNED ||
537           QueryingAA.getIRPosition().getPositionKind() ==
538               IRPosition::IRP_CALL_SITE_RETURNED) &&
539          "Can only clamp returned value states for a function returned or call "
540          "site returned position!");
541 
542   // Use an optional state as there might not be any return values and we want
543   // to join (IntegerState::operator&) the state of all there are.
544   Optional<StateType> T;
545 
546   // Callback for each possibly returned value.
547   auto CheckReturnValue = [&](Value &RV) -> bool {
548     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
549     const AAType &AA =
550         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
551     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
552                       << " @ " << RVPos << "\n");
553     const StateType &AAS = AA.getState();
554     if (!T)
555       T = StateType::getBestState(AAS);
556     *T &= AAS;
557     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
558                       << "\n");
559     return T->isValidState();
560   };
561 
562   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
563     S.indicatePessimisticFixpoint();
564   else if (T)
565     S ^= *T;
566 }
567 
568 namespace {
569 /// Helper class for generic deduction: return value -> returned position.
570 template <typename AAType, typename BaseType,
571           typename StateType = typename BaseType::StateType,
572           bool PropagateCallBaseContext = false>
573 struct AAReturnedFromReturnedValues : public BaseType {
574   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
575       : BaseType(IRP, A) {}
576 
577   /// See AbstractAttribute::updateImpl(...).
578   ChangeStatus updateImpl(Attributor &A) override {
579     StateType S(StateType::getBestState(this->getState()));
580     clampReturnedValueStates<AAType, StateType>(
581         A, *this, S,
582         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
583     // TODO: If we know we visited all returned values, thus no are assumed
584     // dead, we can take the known information from the state T.
585     return clampStateAndIndicateChange<StateType>(this->getState(), S);
586   }
587 };
588 
589 /// Clamp the information known at all call sites for a given argument
590 /// (identified by \p QueryingAA) into \p S.
591 template <typename AAType, typename StateType = typename AAType::StateType>
592 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
593                                         StateType &S) {
594   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
595                     << QueryingAA << " into " << S << "\n");
596 
597   assert(QueryingAA.getIRPosition().getPositionKind() ==
598              IRPosition::IRP_ARGUMENT &&
599          "Can only clamp call site argument states for an argument position!");
600 
601   // Use an optional state as there might not be any return values and we want
602   // to join (IntegerState::operator&) the state of all there are.
603   Optional<StateType> T;
604 
605   // The argument number which is also the call site argument number.
606   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
607 
608   auto CallSiteCheck = [&](AbstractCallSite ACS) {
609     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
610     // Check if a coresponding argument was found or if it is on not associated
611     // (which can happen for callback calls).
612     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
613       return false;
614 
615     const AAType &AA =
616         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
617     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
618                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
619     const StateType &AAS = AA.getState();
620     if (!T)
621       T = StateType::getBestState(AAS);
622     *T &= AAS;
623     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
624                       << "\n");
625     return T->isValidState();
626   };
627 
628   bool UsedAssumedInformation = false;
629   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
630                               UsedAssumedInformation))
631     S.indicatePessimisticFixpoint();
632   else if (T)
633     S ^= *T;
634 }
635 
636 /// This function is the bridge between argument position and the call base
637 /// context.
638 template <typename AAType, typename BaseType,
639           typename StateType = typename AAType::StateType>
640 bool getArgumentStateFromCallBaseContext(Attributor &A,
641                                          BaseType &QueryingAttribute,
642                                          IRPosition &Pos, StateType &State) {
643   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
644          "Expected an 'argument' position !");
645   const CallBase *CBContext = Pos.getCallBaseContext();
646   if (!CBContext)
647     return false;
648 
649   int ArgNo = Pos.getCallSiteArgNo();
650   assert(ArgNo >= 0 && "Invalid Arg No!");
651 
652   const auto &AA = A.getAAFor<AAType>(
653       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
654       DepClassTy::REQUIRED);
655   const StateType &CBArgumentState =
656       static_cast<const StateType &>(AA.getState());
657 
658   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
659                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
660                     << "\n");
661 
662   // NOTE: If we want to do call site grouping it should happen here.
663   State ^= CBArgumentState;
664   return true;
665 }
666 
667 /// Helper class for generic deduction: call site argument -> argument position.
668 template <typename AAType, typename BaseType,
669           typename StateType = typename AAType::StateType,
670           bool BridgeCallBaseContext = false>
671 struct AAArgumentFromCallSiteArguments : public BaseType {
672   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
673       : BaseType(IRP, A) {}
674 
675   /// See AbstractAttribute::updateImpl(...).
676   ChangeStatus updateImpl(Attributor &A) override {
677     StateType S = StateType::getBestState(this->getState());
678 
679     if (BridgeCallBaseContext) {
680       bool Success =
681           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
682               A, *this, this->getIRPosition(), S);
683       if (Success)
684         return clampStateAndIndicateChange<StateType>(this->getState(), S);
685     }
686     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
687 
688     // TODO: If we know we visited all incoming values, thus no are assumed
689     // dead, we can take the known information from the state T.
690     return clampStateAndIndicateChange<StateType>(this->getState(), S);
691   }
692 };
693 
694 /// Helper class for generic replication: function returned -> cs returned.
695 template <typename AAType, typename BaseType,
696           typename StateType = typename BaseType::StateType,
697           bool IntroduceCallBaseContext = false>
698 struct AACallSiteReturnedFromReturned : public BaseType {
699   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
700       : BaseType(IRP, A) {}
701 
702   /// See AbstractAttribute::updateImpl(...).
703   ChangeStatus updateImpl(Attributor &A) override {
704     assert(this->getIRPosition().getPositionKind() ==
705                IRPosition::IRP_CALL_SITE_RETURNED &&
706            "Can only wrap function returned positions for call site returned "
707            "positions!");
708     auto &S = this->getState();
709 
710     const Function *AssociatedFunction =
711         this->getIRPosition().getAssociatedFunction();
712     if (!AssociatedFunction)
713       return S.indicatePessimisticFixpoint();
714 
715     CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
716     if (IntroduceCallBaseContext)
717       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
718                         << CBContext << "\n");
719 
720     IRPosition FnPos = IRPosition::returned(
721         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
722     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
723     return clampStateAndIndicateChange(S, AA.getState());
724   }
725 };
726 
727 /// Helper function to accumulate uses.
728 template <class AAType, typename StateType = typename AAType::StateType>
729 static void followUsesInContext(AAType &AA, Attributor &A,
730                                 MustBeExecutedContextExplorer &Explorer,
731                                 const Instruction *CtxI,
732                                 SetVector<const Use *> &Uses,
733                                 StateType &State) {
734   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
735   for (unsigned u = 0; u < Uses.size(); ++u) {
736     const Use *U = Uses[u];
737     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
738       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
739       if (Found && AA.followUseInMBEC(A, U, UserI, State))
740         for (const Use &Us : UserI->uses())
741           Uses.insert(&Us);
742     }
743   }
744 }
745 
746 /// Use the must-be-executed-context around \p I to add information into \p S.
747 /// The AAType class is required to have `followUseInMBEC` method with the
748 /// following signature and behaviour:
749 ///
750 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
751 /// U - Underlying use.
752 /// I - The user of the \p U.
753 /// Returns true if the value should be tracked transitively.
754 ///
755 template <class AAType, typename StateType = typename AAType::StateType>
756 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
757                              Instruction &CtxI) {
758 
759   // Container for (transitive) uses of the associated value.
760   SetVector<const Use *> Uses;
761   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
762     Uses.insert(&U);
763 
764   MustBeExecutedContextExplorer &Explorer =
765       A.getInfoCache().getMustBeExecutedContextExplorer();
766 
767   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
768 
769   if (S.isAtFixpoint())
770     return;
771 
772   SmallVector<const BranchInst *, 4> BrInsts;
773   auto Pred = [&](const Instruction *I) {
774     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
775       if (Br->isConditional())
776         BrInsts.push_back(Br);
777     return true;
778   };
779 
780   // Here, accumulate conditional branch instructions in the context. We
781   // explore the child paths and collect the known states. The disjunction of
782   // those states can be merged to its own state. Let ParentState_i be a state
783   // to indicate the known information for an i-th branch instruction in the
784   // context. ChildStates are created for its successors respectively.
785   //
786   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
787   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
788   //      ...
789   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
790   //
791   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
792   //
793   // FIXME: Currently, recursive branches are not handled. For example, we
794   // can't deduce that ptr must be dereferenced in below function.
795   //
796   // void f(int a, int c, int *ptr) {
797   //    if(a)
798   //      if (b) {
799   //        *ptr = 0;
800   //      } else {
801   //        *ptr = 1;
802   //      }
803   //    else {
804   //      if (b) {
805   //        *ptr = 0;
806   //      } else {
807   //        *ptr = 1;
808   //      }
809   //    }
810   // }
811 
812   Explorer.checkForAllContext(&CtxI, Pred);
813   for (const BranchInst *Br : BrInsts) {
814     StateType ParentState;
815 
816     // The known state of the parent state is a conjunction of children's
817     // known states so it is initialized with a best state.
818     ParentState.indicateOptimisticFixpoint();
819 
820     for (const BasicBlock *BB : Br->successors()) {
821       StateType ChildState;
822 
823       size_t BeforeSize = Uses.size();
824       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
825 
826       // Erase uses which only appear in the child.
827       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
828         It = Uses.erase(It);
829 
830       ParentState &= ChildState;
831     }
832 
833     // Use only known state.
834     S += ParentState;
835   }
836 }
837 } // namespace
838 
839 /// ------------------------ PointerInfo ---------------------------------------
840 
841 namespace llvm {
842 namespace AA {
843 namespace PointerInfo {
844 
845 struct State;
846 
847 } // namespace PointerInfo
848 } // namespace AA
849 
850 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
851 template <>
852 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
853   using Access = AAPointerInfo::Access;
854   static inline Access getEmptyKey();
855   static inline Access getTombstoneKey();
856   static unsigned getHashValue(const Access &A);
857   static bool isEqual(const Access &LHS, const Access &RHS);
858 };
859 
860 /// Helper that allows OffsetAndSize as a key in a DenseMap.
861 template <>
862 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
863     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
864 
865 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
866 /// but the instruction
867 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
868   using Base = DenseMapInfo<Instruction *>;
869   using Access = AAPointerInfo::Access;
870   static inline Access getEmptyKey();
871   static inline Access getTombstoneKey();
872   static unsigned getHashValue(const Access &A);
873   static bool isEqual(const Access &LHS, const Access &RHS);
874 };
875 
876 } // namespace llvm
877 
878 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
879 struct AA::PointerInfo::State : public AbstractState {
880 
881   ~State() {
882     // We do not delete the Accesses objects but need to destroy them still.
883     for (auto &It : AccessBins)
884       It.second->~Accesses();
885   }
886 
887   /// Return the best possible representable state.
888   static State getBestState(const State &SIS) { return State(); }
889 
890   /// Return the worst possible representable state.
891   static State getWorstState(const State &SIS) {
892     State R;
893     R.indicatePessimisticFixpoint();
894     return R;
895   }
896 
897   State() = default;
898   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
899     SIS.AccessBins.clear();
900   }
901 
902   const State &getAssumed() const { return *this; }
903 
904   /// See AbstractState::isValidState().
905   bool isValidState() const override { return BS.isValidState(); }
906 
907   /// See AbstractState::isAtFixpoint().
908   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
909 
910   /// See AbstractState::indicateOptimisticFixpoint().
911   ChangeStatus indicateOptimisticFixpoint() override {
912     BS.indicateOptimisticFixpoint();
913     return ChangeStatus::UNCHANGED;
914   }
915 
916   /// See AbstractState::indicatePessimisticFixpoint().
917   ChangeStatus indicatePessimisticFixpoint() override {
918     BS.indicatePessimisticFixpoint();
919     return ChangeStatus::CHANGED;
920   }
921 
922   State &operator=(const State &R) {
923     if (this == &R)
924       return *this;
925     BS = R.BS;
926     AccessBins = R.AccessBins;
927     return *this;
928   }
929 
930   State &operator=(State &&R) {
931     if (this == &R)
932       return *this;
933     std::swap(BS, R.BS);
934     std::swap(AccessBins, R.AccessBins);
935     return *this;
936   }
937 
938   bool operator==(const State &R) const {
939     if (BS != R.BS)
940       return false;
941     if (AccessBins.size() != R.AccessBins.size())
942       return false;
943     auto It = begin(), RIt = R.begin(), E = end();
944     while (It != E) {
945       if (It->getFirst() != RIt->getFirst())
946         return false;
947       auto &Accs = It->getSecond();
948       auto &RAccs = RIt->getSecond();
949       if (Accs->size() != RAccs->size())
950         return false;
951       for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
952         if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
953           return false;
954       ++It;
955       ++RIt;
956     }
957     return true;
958   }
959   bool operator!=(const State &R) const { return !(*this == R); }
960 
961   /// We store accesses in a set with the instruction as key.
962   struct Accesses {
963     SmallVector<AAPointerInfo::Access, 4> Accesses;
964     DenseMap<const Instruction *, unsigned> Map;
965 
966     unsigned size() const { return Accesses.size(); }
967 
968     using vec_iterator = decltype(Accesses)::iterator;
969     vec_iterator begin() { return Accesses.begin(); }
970     vec_iterator end() { return Accesses.end(); }
971 
972     using iterator = decltype(Map)::const_iterator;
973     iterator find(AAPointerInfo::Access &Acc) {
974       return Map.find(Acc.getRemoteInst());
975     }
976     iterator find_end() { return Map.end(); }
977 
978     AAPointerInfo::Access &get(iterator &It) {
979       return Accesses[It->getSecond()];
980     }
981 
982     void insert(AAPointerInfo::Access &Acc) {
983       Map[Acc.getRemoteInst()] = Accesses.size();
984       Accesses.push_back(Acc);
985     }
986   };
987 
988   /// We store all accesses in bins denoted by their offset and size.
989   using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
990 
991   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
992   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
993 
994 protected:
995   /// The bins with all the accesses for the associated pointer.
996   AccessBinsTy AccessBins;
997 
998   /// Add a new access to the state at offset \p Offset and with size \p Size.
999   /// The access is associated with \p I, writes \p Content (if anything), and
1000   /// is of kind \p Kind.
1001   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
1002   ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
1003                          Instruction &I, Optional<Value *> Content,
1004                          AAPointerInfo::AccessKind Kind, Type *Ty,
1005                          Instruction *RemoteI = nullptr,
1006                          Accesses *BinPtr = nullptr) {
1007     AAPointerInfo::OffsetAndSize Key{Offset, Size};
1008     Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
1009     if (!Bin)
1010       Bin = new (A.Allocator) Accesses;
1011     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1012     // Check if we have an access for this instruction in this bin, if not,
1013     // simply add it.
1014     auto It = Bin->find(Acc);
1015     if (It == Bin->find_end()) {
1016       Bin->insert(Acc);
1017       return ChangeStatus::CHANGED;
1018     }
1019     // If the existing access is the same as then new one, nothing changed.
1020     AAPointerInfo::Access &Current = Bin->get(It);
1021     AAPointerInfo::Access Before = Current;
1022     // The new one will be combined with the existing one.
1023     Current &= Acc;
1024     return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1025   }
1026 
1027   /// See AAPointerInfo::forallInterferingAccesses.
1028   bool forallInterferingAccesses(
1029       AAPointerInfo::OffsetAndSize OAS,
1030       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1031     if (!isValidState())
1032       return false;
1033 
1034     for (auto &It : AccessBins) {
1035       AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
1036       if (!OAS.mayOverlap(ItOAS))
1037         continue;
1038       bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
1039       for (auto &Access : *It.getSecond())
1040         if (!CB(Access, IsExact))
1041           return false;
1042     }
1043     return true;
1044   }
1045 
1046   /// See AAPointerInfo::forallInterferingAccesses.
1047   bool forallInterferingAccesses(
1048       Instruction &I,
1049       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1050     if (!isValidState())
1051       return false;
1052 
1053     // First find the offset and size of I.
1054     AAPointerInfo::OffsetAndSize OAS(-1, -1);
1055     for (auto &It : AccessBins) {
1056       for (auto &Access : *It.getSecond()) {
1057         if (Access.getRemoteInst() == &I) {
1058           OAS = It.getFirst();
1059           break;
1060         }
1061       }
1062       if (OAS.getSize() != -1)
1063         break;
1064     }
1065     // No access for I was found, we are done.
1066     if (OAS.getSize() == -1)
1067       return true;
1068 
1069     // Now that we have an offset and size, find all overlapping ones and use
1070     // the callback on the accesses.
1071     return forallInterferingAccesses(OAS, CB);
1072   }
1073 
1074 private:
1075   /// State to track fixpoint and validity.
1076   BooleanState BS;
1077 };
1078 
1079 namespace {
1080 struct AAPointerInfoImpl
1081     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1082   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1083   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1084 
1085   /// See AbstractAttribute::initialize(...).
1086   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1087 
1088   /// See AbstractAttribute::getAsStr().
1089   const std::string getAsStr() const override {
1090     return std::string("PointerInfo ") +
1091            (isValidState() ? (std::string("#") +
1092                               std::to_string(AccessBins.size()) + " bins")
1093                            : "<invalid>");
1094   }
1095 
1096   /// See AbstractAttribute::manifest(...).
1097   ChangeStatus manifest(Attributor &A) override {
1098     return AAPointerInfo::manifest(A);
1099   }
1100 
1101   bool forallInterferingAccesses(
1102       OffsetAndSize OAS,
1103       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1104       const override {
1105     return State::forallInterferingAccesses(OAS, CB);
1106   }
1107   bool forallInterferingAccesses(
1108       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
1109       function_ref<bool(const Access &, bool)> UserCB) const override {
1110     SmallPtrSet<const Access *, 8> DominatingWrites;
1111     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
1112 
1113     Function &Scope = *I.getFunction();
1114     const auto &NoSyncAA = A.getAAFor<AANoSync>(
1115         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1116     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
1117         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
1118     const bool NoSync = NoSyncAA.isAssumedNoSync();
1119 
1120     // Helper to determine if we need to consider threading, which we cannot
1121     // right now. However, if the function is (assumed) nosync or the thread
1122     // executing all instructions is the main thread only we can ignore
1123     // threading.
1124     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
1125       if (NoSync)
1126         return true;
1127       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
1128         return true;
1129       return false;
1130     };
1131 
1132     // Helper to determine if the access is executed by the same thread as the
1133     // load, for now it is sufficient to avoid any potential threading effects
1134     // as we cannot deal with them anyway.
1135     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
1136       return CanIgnoreThreading(*Acc.getLocalInst());
1137     };
1138 
1139     // TODO: Use inter-procedural reachability and dominance.
1140     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1141         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1142 
1143     const bool FindInterferingWrites = I.mayReadFromMemory();
1144     const bool FindInterferingReads = I.mayWriteToMemory();
1145     const bool UseDominanceReasoning = FindInterferingWrites;
1146     const bool CanUseCFGResoning = CanIgnoreThreading(I);
1147     InformationCache &InfoCache = A.getInfoCache();
1148     const DominatorTree *DT =
1149         NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1150             ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1151                   Scope)
1152             : nullptr;
1153 
1154     enum GPUAddressSpace : unsigned {
1155       Generic = 0,
1156       Global = 1,
1157       Shared = 3,
1158       Constant = 4,
1159       Local = 5,
1160     };
1161 
1162     // Helper to check if a value has "kernel lifetime", that is it will not
1163     // outlive a GPU kernel. This is true for shared, constant, and local
1164     // globals on AMD and NVIDIA GPUs.
1165     auto HasKernelLifetime = [&](Value *V, Module &M) {
1166       Triple T(M.getTargetTriple());
1167       if (!(T.isAMDGPU() || T.isNVPTX()))
1168         return false;
1169       switch (V->getType()->getPointerAddressSpace()) {
1170       case GPUAddressSpace::Shared:
1171       case GPUAddressSpace::Constant:
1172       case GPUAddressSpace::Local:
1173         return true;
1174       default:
1175         return false;
1176       };
1177     };
1178 
1179     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1180     // to determine if we should look at reachability from the callee. For
1181     // certain pointers we know the lifetime and we do not have to step into the
1182     // callee to determine reachability as the pointer would be dead in the
1183     // callee. See the conditional initialization below.
1184     std::function<bool(const Function &)> IsLiveInCalleeCB;
1185 
1186     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1187       // If the alloca containing function is not recursive the alloca
1188       // must be dead in the callee.
1189       const Function *AIFn = AI->getFunction();
1190       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1191           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1192       if (NoRecurseAA.isAssumedNoRecurse()) {
1193         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1194       }
1195     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1196       // If the global has kernel lifetime we can stop if we reach a kernel
1197       // as it is "dead" in the (unknown) callees.
1198       if (HasKernelLifetime(GV, *GV->getParent()))
1199         IsLiveInCalleeCB = [](const Function &Fn) {
1200           return !Fn.hasFnAttribute("kernel");
1201         };
1202     }
1203 
1204     auto AccessCB = [&](const Access &Acc, bool Exact) {
1205       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1206           (!FindInterferingReads || !Acc.isRead()))
1207         return true;
1208 
1209       // For now we only filter accesses based on CFG reasoning which does not
1210       // work yet if we have threading effects, or the access is complicated.
1211       if (CanUseCFGResoning) {
1212         if ((!Acc.isWrite() ||
1213              !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1214                                          IsLiveInCalleeCB)) &&
1215             (!Acc.isRead() ||
1216              !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1217                                          IsLiveInCalleeCB)))
1218           return true;
1219         if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1220             IsSameThreadAsLoad(Acc)) {
1221           if (DT->dominates(Acc.getLocalInst(), &I))
1222             DominatingWrites.insert(&Acc);
1223         }
1224       }
1225 
1226       InterferingAccesses.push_back({&Acc, Exact});
1227       return true;
1228     };
1229     if (!State::forallInterferingAccesses(I, AccessCB))
1230       return false;
1231 
1232     // If we cannot use CFG reasoning we only filter the non-write accesses
1233     // and are done here.
1234     if (!CanUseCFGResoning) {
1235       for (auto &It : InterferingAccesses)
1236         if (!UserCB(*It.first, It.second))
1237           return false;
1238       return true;
1239     }
1240 
1241     // Helper to determine if we can skip a specific write access. This is in
1242     // the worst case quadratic as we are looking for another write that will
1243     // hide the effect of this one.
1244     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1245       if (!IsSameThreadAsLoad(Acc))
1246         return false;
1247       if (!DominatingWrites.count(&Acc))
1248         return false;
1249       for (const Access *DomAcc : DominatingWrites) {
1250         assert(Acc.getLocalInst()->getFunction() ==
1251                    DomAcc->getLocalInst()->getFunction() &&
1252                "Expected dominating writes to be in the same function!");
1253 
1254         if (DomAcc != &Acc &&
1255             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1256           return true;
1257         }
1258       }
1259       return false;
1260     };
1261 
1262     // Run the user callback on all accesses we cannot skip and return if that
1263     // succeeded for all or not.
1264     unsigned NumInterferingAccesses = InterferingAccesses.size();
1265     for (auto &It : InterferingAccesses) {
1266       if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1267           !CanSkipAccess(*It.first, It.second)) {
1268         if (!UserCB(*It.first, It.second))
1269           return false;
1270       }
1271     }
1272     return true;
1273   }
1274 
1275   ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1276                                     int64_t Offset, CallBase &CB,
1277                                     bool FromCallee = false) {
1278     using namespace AA::PointerInfo;
1279     if (!OtherAA.getState().isValidState() || !isValidState())
1280       return indicatePessimisticFixpoint();
1281 
1282     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1283     bool IsByval =
1284         FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1285 
1286     // Combine the accesses bin by bin.
1287     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1288     for (auto &It : OtherAAImpl.getState()) {
1289       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1290       if (Offset != OffsetAndSize::Unknown)
1291         OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
1292       Accesses *Bin = AccessBins.lookup(OAS);
1293       for (const AAPointerInfo::Access &RAcc : *It.second) {
1294         if (IsByval && !RAcc.isRead())
1295           continue;
1296         bool UsedAssumedInformation = false;
1297         AccessKind AK = RAcc.getKind();
1298         Optional<Value *> Content = RAcc.getContent();
1299         if (FromCallee) {
1300           Content = A.translateArgumentToCallSiteContent(
1301               RAcc.getContent(), CB, *this, UsedAssumedInformation);
1302           AK = AccessKind(
1303               AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
1304         }
1305         Changed =
1306             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1307                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1308       }
1309     }
1310     return Changed;
1311   }
1312 
1313   /// Statistic tracking for all AAPointerInfo implementations.
1314   /// See AbstractAttribute::trackStatistics().
1315   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1316 };
1317 
1318 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1319   using AccessKind = AAPointerInfo::AccessKind;
1320   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1321       : AAPointerInfoImpl(IRP, A) {}
1322 
1323   /// See AbstractAttribute::initialize(...).
1324   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1325 
1326   /// Deal with an access and signal if it was handled successfully.
1327   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1328                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1329                     ChangeStatus &Changed, Type *Ty,
1330                     int64_t Size = OffsetAndSize::Unknown) {
1331     using namespace AA::PointerInfo;
1332     // No need to find a size if one is given or the offset is unknown.
1333     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1334         Ty) {
1335       const DataLayout &DL = A.getDataLayout();
1336       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1337       if (!AccessSize.isScalable())
1338         Size = AccessSize.getFixedSize();
1339     }
1340     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1341     return true;
1342   };
1343 
1344   /// Helper struct, will support ranges eventually.
1345   struct OffsetInfo {
1346     int64_t Offset = OffsetAndSize::Unknown;
1347 
1348     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1349   };
1350 
1351   /// See AbstractAttribute::updateImpl(...).
1352   ChangeStatus updateImpl(Attributor &A) override {
1353     using namespace AA::PointerInfo;
1354     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1355     Value &AssociatedValue = getAssociatedValue();
1356 
1357     const DataLayout &DL = A.getDataLayout();
1358     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1359     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1360 
1361     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1362                                      bool &Follow) {
1363       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1364       UsrOI = PtrOI;
1365       Follow = true;
1366       return true;
1367     };
1368 
1369     const auto *TLI = getAnchorScope()
1370                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1371                                 *getAnchorScope())
1372                           : nullptr;
1373     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1374       Value *CurPtr = U.get();
1375       User *Usr = U.getUser();
1376       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1377                         << *Usr << "\n");
1378       assert(OffsetInfoMap.count(CurPtr) &&
1379              "The current pointer offset should have been seeded!");
1380 
1381       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1382         if (CE->isCast())
1383           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1384         if (CE->isCompare())
1385           return true;
1386         if (!isa<GEPOperator>(CE)) {
1387           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1388                             << "\n");
1389           return false;
1390         }
1391       }
1392       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1393         // Note the order here, the Usr access might change the map, CurPtr is
1394         // already in it though.
1395         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1396         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1397         UsrOI = PtrOI;
1398 
1399         // TODO: Use range information.
1400         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1401             !GEP->hasAllConstantIndices()) {
1402           UsrOI.Offset = OffsetAndSize::Unknown;
1403           Follow = true;
1404           return true;
1405         }
1406 
1407         SmallVector<Value *, 8> Indices;
1408         for (Use &Idx : GEP->indices()) {
1409           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1410             Indices.push_back(CIdx);
1411             continue;
1412           }
1413 
1414           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1415                             << " : " << *Idx << "\n");
1416           return false;
1417         }
1418         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1419                                           GEP->getSourceElementType(), Indices);
1420         Follow = true;
1421         return true;
1422       }
1423       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1424         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1425 
1426       // For PHIs we need to take care of the recurrence explicitly as the value
1427       // might change while we iterate through a loop. For now, we give up if
1428       // the PHI is not invariant.
1429       if (isa<PHINode>(Usr)) {
1430         // Note the order here, the Usr access might change the map, CurPtr is
1431         // already in it though.
1432         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1433         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1434         // Check if the PHI is invariant (so far).
1435         if (UsrOI == PtrOI)
1436           return true;
1437 
1438         // Check if the PHI operand has already an unknown offset as we can't
1439         // improve on that anymore.
1440         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1441           UsrOI = PtrOI;
1442           Follow = true;
1443           return true;
1444         }
1445 
1446         // Check if the PHI operand is not dependent on the PHI itself.
1447         // TODO: This is not great as we look at the pointer type. However, it
1448         // is unclear where the Offset size comes from with typeless pointers.
1449         APInt Offset(
1450             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1451             0);
1452         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1453                                     DL, Offset, /* AllowNonInbounds */ true)) {
1454           if (Offset != PtrOI.Offset) {
1455             LLVM_DEBUG(dbgs()
1456                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1457                        << *CurPtr << " in " << *Usr << "\n");
1458             return false;
1459           }
1460           return HandlePassthroughUser(Usr, PtrOI, Follow);
1461         }
1462 
1463         // TODO: Approximate in case we know the direction of the recurrence.
1464         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1465                           << *CurPtr << " in " << *Usr << "\n");
1466         UsrOI = PtrOI;
1467         UsrOI.Offset = OffsetAndSize::Unknown;
1468         Follow = true;
1469         return true;
1470       }
1471 
1472       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1473         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1474                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1475                             Changed, LoadI->getType());
1476       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1477         if (StoreI->getValueOperand() == CurPtr) {
1478           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1479                             << *StoreI << "\n");
1480           return false;
1481         }
1482         bool UsedAssumedInformation = false;
1483         Optional<Value *> Content = A.getAssumedSimplified(
1484             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1485         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1486                             OffsetInfoMap[CurPtr].Offset, Changed,
1487                             StoreI->getValueOperand()->getType());
1488       }
1489       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1490         if (CB->isLifetimeStartOrEnd())
1491           return true;
1492         if (TLI && isFreeCall(CB, TLI))
1493           return true;
1494         if (CB->isArgOperand(&U)) {
1495           unsigned ArgNo = CB->getArgOperandNo(&U);
1496           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1497               *this, IRPosition::callsite_argument(*CB, ArgNo),
1498               DepClassTy::REQUIRED);
1499           Changed = translateAndAddState(A, CSArgPI,
1500                                          OffsetInfoMap[CurPtr].Offset, *CB) |
1501                     Changed;
1502           return true;
1503         }
1504         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1505                           << "\n");
1506         // TODO: Allow some call uses
1507         return false;
1508       }
1509 
1510       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1511       return false;
1512     };
1513     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1514       if (OffsetInfoMap.count(NewU))
1515         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1516       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1517       return true;
1518     };
1519     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1520                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1521                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
1522       return indicatePessimisticFixpoint();
1523 
1524     LLVM_DEBUG({
1525       dbgs() << "Accesses by bin after update:\n";
1526       for (auto &It : AccessBins) {
1527         dbgs() << "[" << It.first.getOffset() << "-"
1528                << It.first.getOffset() + It.first.getSize()
1529                << "] : " << It.getSecond()->size() << "\n";
1530         for (auto &Acc : *It.getSecond()) {
1531           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1532                  << "\n";
1533           if (Acc.getLocalInst() != Acc.getRemoteInst())
1534             dbgs() << "     -->                         "
1535                    << *Acc.getRemoteInst() << "\n";
1536           if (!Acc.isWrittenValueYetUndetermined()) {
1537             if (Acc.getWrittenValue())
1538               dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
1539             else
1540               dbgs() << "       - c: <unknown>\n";
1541           }
1542         }
1543       }
1544     });
1545 
1546     return Changed;
1547   }
1548 
1549   /// See AbstractAttribute::trackStatistics()
1550   void trackStatistics() const override {
1551     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1552   }
1553 };
1554 
1555 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1556   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1557       : AAPointerInfoImpl(IRP, A) {}
1558 
1559   /// See AbstractAttribute::updateImpl(...).
1560   ChangeStatus updateImpl(Attributor &A) override {
1561     return indicatePessimisticFixpoint();
1562   }
1563 
1564   /// See AbstractAttribute::trackStatistics()
1565   void trackStatistics() const override {
1566     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1567   }
1568 };
1569 
1570 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1571   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1572       : AAPointerInfoFloating(IRP, A) {}
1573 
1574   /// See AbstractAttribute::initialize(...).
1575   void initialize(Attributor &A) override {
1576     AAPointerInfoFloating::initialize(A);
1577     if (getAnchorScope()->isDeclaration())
1578       indicatePessimisticFixpoint();
1579   }
1580 
1581   /// See AbstractAttribute::trackStatistics()
1582   void trackStatistics() const override {
1583     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1584   }
1585 };
1586 
1587 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1588   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1589       : AAPointerInfoFloating(IRP, A) {}
1590 
1591   /// See AbstractAttribute::updateImpl(...).
1592   ChangeStatus updateImpl(Attributor &A) override {
1593     using namespace AA::PointerInfo;
1594     // We handle memory intrinsics explicitly, at least the first (=
1595     // destination) and second (=source) arguments as we know how they are
1596     // accessed.
1597     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1598       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1599       int64_t LengthVal = OffsetAndSize::Unknown;
1600       if (Length)
1601         LengthVal = Length->getSExtValue();
1602       Value &Ptr = getAssociatedValue();
1603       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1604       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1605       if (ArgNo == 0) {
1606         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1607                      nullptr, LengthVal);
1608       } else if (ArgNo == 1) {
1609         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1610                      nullptr, LengthVal);
1611       } else {
1612         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1613                           << *MI << "\n");
1614         return indicatePessimisticFixpoint();
1615       }
1616       return Changed;
1617     }
1618 
1619     // TODO: Once we have call site specific value information we can provide
1620     //       call site specific liveness information and then it makes
1621     //       sense to specialize attributes for call sites arguments instead of
1622     //       redirecting requests to the callee argument.
1623     Argument *Arg = getAssociatedArgument();
1624     if (!Arg)
1625       return indicatePessimisticFixpoint();
1626     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1627     auto &ArgAA =
1628         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1629     return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1630                                 /* FromCallee */ true);
1631   }
1632 
1633   /// See AbstractAttribute::trackStatistics()
1634   void trackStatistics() const override {
1635     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1636   }
1637 };
1638 
1639 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1640   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1641       : AAPointerInfoFloating(IRP, A) {}
1642 
1643   /// See AbstractAttribute::trackStatistics()
1644   void trackStatistics() const override {
1645     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1646   }
1647 };
1648 } // namespace
1649 
1650 /// -----------------------NoUnwind Function Attribute--------------------------
1651 
1652 namespace {
1653 struct AANoUnwindImpl : AANoUnwind {
1654   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1655 
1656   const std::string getAsStr() const override {
1657     return getAssumed() ? "nounwind" : "may-unwind";
1658   }
1659 
1660   /// See AbstractAttribute::updateImpl(...).
1661   ChangeStatus updateImpl(Attributor &A) override {
1662     auto Opcodes = {
1663         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1664         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1665         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1666 
1667     auto CheckForNoUnwind = [&](Instruction &I) {
1668       if (!I.mayThrow())
1669         return true;
1670 
1671       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1672         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1673             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1674         return NoUnwindAA.isAssumedNoUnwind();
1675       }
1676       return false;
1677     };
1678 
1679     bool UsedAssumedInformation = false;
1680     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1681                                    UsedAssumedInformation))
1682       return indicatePessimisticFixpoint();
1683 
1684     return ChangeStatus::UNCHANGED;
1685   }
1686 };
1687 
1688 struct AANoUnwindFunction final : public AANoUnwindImpl {
1689   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1690       : AANoUnwindImpl(IRP, A) {}
1691 
1692   /// See AbstractAttribute::trackStatistics()
1693   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1694 };
1695 
1696 /// NoUnwind attribute deduction for a call sites.
1697 struct AANoUnwindCallSite final : AANoUnwindImpl {
1698   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1699       : AANoUnwindImpl(IRP, A) {}
1700 
1701   /// See AbstractAttribute::initialize(...).
1702   void initialize(Attributor &A) override {
1703     AANoUnwindImpl::initialize(A);
1704     Function *F = getAssociatedFunction();
1705     if (!F || F->isDeclaration())
1706       indicatePessimisticFixpoint();
1707   }
1708 
1709   /// See AbstractAttribute::updateImpl(...).
1710   ChangeStatus updateImpl(Attributor &A) override {
1711     // TODO: Once we have call site specific value information we can provide
1712     //       call site specific liveness information and then it makes
1713     //       sense to specialize attributes for call sites arguments instead of
1714     //       redirecting requests to the callee argument.
1715     Function *F = getAssociatedFunction();
1716     const IRPosition &FnPos = IRPosition::function(*F);
1717     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1718     return clampStateAndIndicateChange(getState(), FnAA.getState());
1719   }
1720 
1721   /// See AbstractAttribute::trackStatistics()
1722   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1723 };
1724 } // namespace
1725 
1726 /// --------------------- Function Return Values -------------------------------
1727 
1728 namespace {
1729 /// "Attribute" that collects all potential returned values and the return
1730 /// instructions that they arise from.
1731 ///
1732 /// If there is a unique returned value R, the manifest method will:
1733 ///   - mark R with the "returned" attribute, if R is an argument.
1734 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1735 
1736   /// Mapping of values potentially returned by the associated function to the
1737   /// return instructions that might return them.
1738   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1739 
1740   /// State flags
1741   ///
1742   ///{
1743   bool IsFixed = false;
1744   bool IsValidState = true;
1745   ///}
1746 
1747 public:
1748   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1749       : AAReturnedValues(IRP, A) {}
1750 
1751   /// See AbstractAttribute::initialize(...).
1752   void initialize(Attributor &A) override {
1753     // Reset the state.
1754     IsFixed = false;
1755     IsValidState = true;
1756     ReturnedValues.clear();
1757 
1758     Function *F = getAssociatedFunction();
1759     if (!F || F->isDeclaration()) {
1760       indicatePessimisticFixpoint();
1761       return;
1762     }
1763     assert(!F->getReturnType()->isVoidTy() &&
1764            "Did not expect a void return type!");
1765 
1766     // The map from instruction opcodes to those instructions in the function.
1767     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1768 
1769     // Look through all arguments, if one is marked as returned we are done.
1770     for (Argument &Arg : F->args()) {
1771       if (Arg.hasReturnedAttr()) {
1772         auto &ReturnInstSet = ReturnedValues[&Arg];
1773         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1774           for (Instruction *RI : *Insts)
1775             ReturnInstSet.insert(cast<ReturnInst>(RI));
1776 
1777         indicateOptimisticFixpoint();
1778         return;
1779       }
1780     }
1781 
1782     if (!A.isFunctionIPOAmendable(*F))
1783       indicatePessimisticFixpoint();
1784   }
1785 
1786   /// See AbstractAttribute::manifest(...).
1787   ChangeStatus manifest(Attributor &A) override;
1788 
1789   /// See AbstractAttribute::getState(...).
1790   AbstractState &getState() override { return *this; }
1791 
1792   /// See AbstractAttribute::getState(...).
1793   const AbstractState &getState() const override { return *this; }
1794 
1795   /// See AbstractAttribute::updateImpl(Attributor &A).
1796   ChangeStatus updateImpl(Attributor &A) override;
1797 
1798   llvm::iterator_range<iterator> returned_values() override {
1799     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1800   }
1801 
1802   llvm::iterator_range<const_iterator> returned_values() const override {
1803     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1804   }
1805 
1806   /// Return the number of potential return values, -1 if unknown.
1807   size_t getNumReturnValues() const override {
1808     return isValidState() ? ReturnedValues.size() : -1;
1809   }
1810 
1811   /// Return an assumed unique return value if a single candidate is found. If
1812   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1813   /// Optional::NoneType.
1814   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1815 
1816   /// See AbstractState::checkForAllReturnedValues(...).
1817   bool checkForAllReturnedValuesAndReturnInsts(
1818       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1819       const override;
1820 
1821   /// Pretty print the attribute similar to the IR representation.
1822   const std::string getAsStr() const override;
1823 
1824   /// See AbstractState::isAtFixpoint().
1825   bool isAtFixpoint() const override { return IsFixed; }
1826 
1827   /// See AbstractState::isValidState().
1828   bool isValidState() const override { return IsValidState; }
1829 
1830   /// See AbstractState::indicateOptimisticFixpoint(...).
1831   ChangeStatus indicateOptimisticFixpoint() override {
1832     IsFixed = true;
1833     return ChangeStatus::UNCHANGED;
1834   }
1835 
1836   ChangeStatus indicatePessimisticFixpoint() override {
1837     IsFixed = true;
1838     IsValidState = false;
1839     return ChangeStatus::CHANGED;
1840   }
1841 };
1842 
1843 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1844   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1845 
1846   // Bookkeeping.
1847   assert(isValidState());
1848   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1849                   "Number of function with known return values");
1850 
1851   // Check if we have an assumed unique return value that we could manifest.
1852   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1853 
1854   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1855     return Changed;
1856 
1857   // Bookkeeping.
1858   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1859                   "Number of function with unique return");
1860   // If the assumed unique return value is an argument, annotate it.
1861   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1862     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1863             getAssociatedFunction()->getReturnType())) {
1864       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1865       Changed = IRAttribute::manifest(A);
1866     }
1867   }
1868   return Changed;
1869 }
1870 
1871 const std::string AAReturnedValuesImpl::getAsStr() const {
1872   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1873          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1874 }
1875 
1876 Optional<Value *>
1877 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1878   // If checkForAllReturnedValues provides a unique value, ignoring potential
1879   // undef values that can also be present, it is assumed to be the actual
1880   // return value and forwarded to the caller of this method. If there are
1881   // multiple, a nullptr is returned indicating there cannot be a unique
1882   // returned value.
1883   Optional<Value *> UniqueRV;
1884   Type *Ty = getAssociatedFunction()->getReturnType();
1885 
1886   auto Pred = [&](Value &RV) -> bool {
1887     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1888     return UniqueRV != Optional<Value *>(nullptr);
1889   };
1890 
1891   if (!A.checkForAllReturnedValues(Pred, *this))
1892     UniqueRV = nullptr;
1893 
1894   return UniqueRV;
1895 }
1896 
1897 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1898     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1899     const {
1900   if (!isValidState())
1901     return false;
1902 
1903   // Check all returned values but ignore call sites as long as we have not
1904   // encountered an overdefined one during an update.
1905   for (auto &It : ReturnedValues) {
1906     Value *RV = It.first;
1907     if (!Pred(*RV, It.second))
1908       return false;
1909   }
1910 
1911   return true;
1912 }
1913 
1914 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1915   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1916 
1917   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1918                            bool) -> bool {
1919     assert(AA::isValidInScope(V, Ret.getFunction()) &&
1920            "Assumed returned value should be valid in function scope!");
1921     if (ReturnedValues[&V].insert(&Ret))
1922       Changed = ChangeStatus::CHANGED;
1923     return true;
1924   };
1925 
1926   bool UsedAssumedInformation = false;
1927   auto ReturnInstCB = [&](Instruction &I) {
1928     ReturnInst &Ret = cast<ReturnInst>(I);
1929     return genericValueTraversal<ReturnInst>(
1930         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1931         &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1932         /* MaxValues */ 16,
1933         /* StripCB */ nullptr, AA::Intraprocedural);
1934   };
1935 
1936   // Discover returned values from all live returned instructions in the
1937   // associated function.
1938   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1939                                  UsedAssumedInformation))
1940     return indicatePessimisticFixpoint();
1941   return Changed;
1942 }
1943 
1944 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1945   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1946       : AAReturnedValuesImpl(IRP, A) {}
1947 
1948   /// See AbstractAttribute::trackStatistics()
1949   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1950 };
1951 
1952 /// Returned values information for a call sites.
1953 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1954   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1955       : AAReturnedValuesImpl(IRP, A) {}
1956 
1957   /// See AbstractAttribute::initialize(...).
1958   void initialize(Attributor &A) override {
1959     // TODO: Once we have call site specific value information we can provide
1960     //       call site specific liveness information and then it makes
1961     //       sense to specialize attributes for call sites instead of
1962     //       redirecting requests to the callee.
1963     llvm_unreachable("Abstract attributes for returned values are not "
1964                      "supported for call sites yet!");
1965   }
1966 
1967   /// See AbstractAttribute::updateImpl(...).
1968   ChangeStatus updateImpl(Attributor &A) override {
1969     return indicatePessimisticFixpoint();
1970   }
1971 
1972   /// See AbstractAttribute::trackStatistics()
1973   void trackStatistics() const override {}
1974 };
1975 } // namespace
1976 
1977 /// ------------------------ NoSync Function Attribute -------------------------
1978 
1979 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1980   if (!I->isAtomic())
1981     return false;
1982 
1983   if (auto *FI = dyn_cast<FenceInst>(I))
1984     // All legal orderings for fence are stronger than monotonic.
1985     return FI->getSyncScopeID() != SyncScope::SingleThread;
1986   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1987     // Unordered is not a legal ordering for cmpxchg.
1988     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1989             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1990   }
1991 
1992   AtomicOrdering Ordering;
1993   switch (I->getOpcode()) {
1994   case Instruction::AtomicRMW:
1995     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1996     break;
1997   case Instruction::Store:
1998     Ordering = cast<StoreInst>(I)->getOrdering();
1999     break;
2000   case Instruction::Load:
2001     Ordering = cast<LoadInst>(I)->getOrdering();
2002     break;
2003   default:
2004     llvm_unreachable(
2005         "New atomic operations need to be known in the attributor.");
2006   }
2007 
2008   return (Ordering != AtomicOrdering::Unordered &&
2009           Ordering != AtomicOrdering::Monotonic);
2010 }
2011 
2012 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2013 /// which would be nosync except that they have a volatile flag.  All other
2014 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2015 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2016   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2017     return !MI->isVolatile();
2018   return false;
2019 }
2020 
2021 namespace {
2022 struct AANoSyncImpl : AANoSync {
2023   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
2024 
2025   const std::string getAsStr() const override {
2026     return getAssumed() ? "nosync" : "may-sync";
2027   }
2028 
2029   /// See AbstractAttribute::updateImpl(...).
2030   ChangeStatus updateImpl(Attributor &A) override;
2031 };
2032 
2033 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2034 
2035   auto CheckRWInstForNoSync = [&](Instruction &I) {
2036     return AA::isNoSyncInst(A, I, *this);
2037   };
2038 
2039   auto CheckForNoSync = [&](Instruction &I) {
2040     // At this point we handled all read/write effects and they are all
2041     // nosync, so they can be skipped.
2042     if (I.mayReadOrWriteMemory())
2043       return true;
2044 
2045     // non-convergent and readnone imply nosync.
2046     return !cast<CallBase>(I).isConvergent();
2047   };
2048 
2049   bool UsedAssumedInformation = false;
2050   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2051                                           UsedAssumedInformation) ||
2052       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2053                                          UsedAssumedInformation))
2054     return indicatePessimisticFixpoint();
2055 
2056   return ChangeStatus::UNCHANGED;
2057 }
2058 
2059 struct AANoSyncFunction final : public AANoSyncImpl {
2060   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2061       : AANoSyncImpl(IRP, A) {}
2062 
2063   /// See AbstractAttribute::trackStatistics()
2064   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2065 };
2066 
2067 /// NoSync attribute deduction for a call sites.
2068 struct AANoSyncCallSite final : AANoSyncImpl {
2069   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2070       : AANoSyncImpl(IRP, A) {}
2071 
2072   /// See AbstractAttribute::initialize(...).
2073   void initialize(Attributor &A) override {
2074     AANoSyncImpl::initialize(A);
2075     Function *F = getAssociatedFunction();
2076     if (!F || F->isDeclaration())
2077       indicatePessimisticFixpoint();
2078   }
2079 
2080   /// See AbstractAttribute::updateImpl(...).
2081   ChangeStatus updateImpl(Attributor &A) override {
2082     // TODO: Once we have call site specific value information we can provide
2083     //       call site specific liveness information and then it makes
2084     //       sense to specialize attributes for call sites arguments instead of
2085     //       redirecting requests to the callee argument.
2086     Function *F = getAssociatedFunction();
2087     const IRPosition &FnPos = IRPosition::function(*F);
2088     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2089     return clampStateAndIndicateChange(getState(), FnAA.getState());
2090   }
2091 
2092   /// See AbstractAttribute::trackStatistics()
2093   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2094 };
2095 } // namespace
2096 
2097 /// ------------------------ No-Free Attributes ----------------------------
2098 
2099 namespace {
2100 struct AANoFreeImpl : public AANoFree {
2101   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2102 
2103   /// See AbstractAttribute::updateImpl(...).
2104   ChangeStatus updateImpl(Attributor &A) override {
2105     auto CheckForNoFree = [&](Instruction &I) {
2106       const auto &CB = cast<CallBase>(I);
2107       if (CB.hasFnAttr(Attribute::NoFree))
2108         return true;
2109 
2110       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2111           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2112       return NoFreeAA.isAssumedNoFree();
2113     };
2114 
2115     bool UsedAssumedInformation = false;
2116     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2117                                            UsedAssumedInformation))
2118       return indicatePessimisticFixpoint();
2119     return ChangeStatus::UNCHANGED;
2120   }
2121 
2122   /// See AbstractAttribute::getAsStr().
2123   const std::string getAsStr() const override {
2124     return getAssumed() ? "nofree" : "may-free";
2125   }
2126 };
2127 
2128 struct AANoFreeFunction final : public AANoFreeImpl {
2129   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2130       : AANoFreeImpl(IRP, A) {}
2131 
2132   /// See AbstractAttribute::trackStatistics()
2133   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2134 };
2135 
2136 /// NoFree attribute deduction for a call sites.
2137 struct AANoFreeCallSite final : AANoFreeImpl {
2138   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2139       : AANoFreeImpl(IRP, A) {}
2140 
2141   /// See AbstractAttribute::initialize(...).
2142   void initialize(Attributor &A) override {
2143     AANoFreeImpl::initialize(A);
2144     Function *F = getAssociatedFunction();
2145     if (!F || F->isDeclaration())
2146       indicatePessimisticFixpoint();
2147   }
2148 
2149   /// See AbstractAttribute::updateImpl(...).
2150   ChangeStatus updateImpl(Attributor &A) override {
2151     // TODO: Once we have call site specific value information we can provide
2152     //       call site specific liveness information and then it makes
2153     //       sense to specialize attributes for call sites arguments instead of
2154     //       redirecting requests to the callee argument.
2155     Function *F = getAssociatedFunction();
2156     const IRPosition &FnPos = IRPosition::function(*F);
2157     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2158     return clampStateAndIndicateChange(getState(), FnAA.getState());
2159   }
2160 
2161   /// See AbstractAttribute::trackStatistics()
2162   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2163 };
2164 
2165 /// NoFree attribute for floating values.
2166 struct AANoFreeFloating : AANoFreeImpl {
2167   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2168       : AANoFreeImpl(IRP, A) {}
2169 
2170   /// See AbstractAttribute::trackStatistics()
2171   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2172 
2173   /// See Abstract Attribute::updateImpl(...).
2174   ChangeStatus updateImpl(Attributor &A) override {
2175     const IRPosition &IRP = getIRPosition();
2176 
2177     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2178         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2179     if (NoFreeAA.isAssumedNoFree())
2180       return ChangeStatus::UNCHANGED;
2181 
2182     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2183     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2184       Instruction *UserI = cast<Instruction>(U.getUser());
2185       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2186         if (CB->isBundleOperand(&U))
2187           return false;
2188         if (!CB->isArgOperand(&U))
2189           return true;
2190         unsigned ArgNo = CB->getArgOperandNo(&U);
2191 
2192         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2193             *this, IRPosition::callsite_argument(*CB, ArgNo),
2194             DepClassTy::REQUIRED);
2195         return NoFreeArg.isAssumedNoFree();
2196       }
2197 
2198       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2199           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2200         Follow = true;
2201         return true;
2202       }
2203       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2204           isa<ReturnInst>(UserI))
2205         return true;
2206 
2207       // Unknown user.
2208       return false;
2209     };
2210     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2211       return indicatePessimisticFixpoint();
2212 
2213     return ChangeStatus::UNCHANGED;
2214   }
2215 };
2216 
2217 /// NoFree attribute for a call site argument.
2218 struct AANoFreeArgument final : AANoFreeFloating {
2219   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2220       : AANoFreeFloating(IRP, A) {}
2221 
2222   /// See AbstractAttribute::trackStatistics()
2223   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2224 };
2225 
2226 /// NoFree attribute for call site arguments.
2227 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2228   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2229       : AANoFreeFloating(IRP, A) {}
2230 
2231   /// See AbstractAttribute::updateImpl(...).
2232   ChangeStatus updateImpl(Attributor &A) override {
2233     // TODO: Once we have call site specific value information we can provide
2234     //       call site specific liveness information and then it makes
2235     //       sense to specialize attributes for call sites arguments instead of
2236     //       redirecting requests to the callee argument.
2237     Argument *Arg = getAssociatedArgument();
2238     if (!Arg)
2239       return indicatePessimisticFixpoint();
2240     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2241     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2242     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2243   }
2244 
2245   /// See AbstractAttribute::trackStatistics()
2246   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2247 };
2248 
2249 /// NoFree attribute for function return value.
2250 struct AANoFreeReturned final : AANoFreeFloating {
2251   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2252       : AANoFreeFloating(IRP, A) {
2253     llvm_unreachable("NoFree is not applicable to function returns!");
2254   }
2255 
2256   /// See AbstractAttribute::initialize(...).
2257   void initialize(Attributor &A) override {
2258     llvm_unreachable("NoFree is not applicable to function returns!");
2259   }
2260 
2261   /// See AbstractAttribute::updateImpl(...).
2262   ChangeStatus updateImpl(Attributor &A) override {
2263     llvm_unreachable("NoFree is not applicable to function returns!");
2264   }
2265 
2266   /// See AbstractAttribute::trackStatistics()
2267   void trackStatistics() const override {}
2268 };
2269 
2270 /// NoFree attribute deduction for a call site return value.
2271 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2272   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2273       : AANoFreeFloating(IRP, A) {}
2274 
2275   ChangeStatus manifest(Attributor &A) override {
2276     return ChangeStatus::UNCHANGED;
2277   }
2278   /// See AbstractAttribute::trackStatistics()
2279   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2280 };
2281 } // namespace
2282 
2283 /// ------------------------ NonNull Argument Attribute ------------------------
2284 namespace {
2285 static int64_t getKnownNonNullAndDerefBytesForUse(
2286     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2287     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2288   TrackUse = false;
2289 
2290   const Value *UseV = U->get();
2291   if (!UseV->getType()->isPointerTy())
2292     return 0;
2293 
2294   // We need to follow common pointer manipulation uses to the accesses they
2295   // feed into. We can try to be smart to avoid looking through things we do not
2296   // like for now, e.g., non-inbounds GEPs.
2297   if (isa<CastInst>(I)) {
2298     TrackUse = true;
2299     return 0;
2300   }
2301 
2302   if (isa<GetElementPtrInst>(I)) {
2303     TrackUse = true;
2304     return 0;
2305   }
2306 
2307   Type *PtrTy = UseV->getType();
2308   const Function *F = I->getFunction();
2309   bool NullPointerIsDefined =
2310       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2311   const DataLayout &DL = A.getInfoCache().getDL();
2312   if (const auto *CB = dyn_cast<CallBase>(I)) {
2313     if (CB->isBundleOperand(U)) {
2314       if (RetainedKnowledge RK = getKnowledgeFromUse(
2315               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2316         IsNonNull |=
2317             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2318         return RK.ArgValue;
2319       }
2320       return 0;
2321     }
2322 
2323     if (CB->isCallee(U)) {
2324       IsNonNull |= !NullPointerIsDefined;
2325       return 0;
2326     }
2327 
2328     unsigned ArgNo = CB->getArgOperandNo(U);
2329     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2330     // As long as we only use known information there is no need to track
2331     // dependences here.
2332     auto &DerefAA =
2333         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2334     IsNonNull |= DerefAA.isKnownNonNull();
2335     return DerefAA.getKnownDereferenceableBytes();
2336   }
2337 
2338   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2339   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2340     return 0;
2341 
2342   int64_t Offset;
2343   const Value *Base =
2344       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2345   if (Base && Base == &AssociatedValue) {
2346     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2347     IsNonNull |= !NullPointerIsDefined;
2348     return std::max(int64_t(0), DerefBytes);
2349   }
2350 
2351   /// Corner case when an offset is 0.
2352   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2353                                           /*AllowNonInbounds*/ true);
2354   if (Base && Base == &AssociatedValue && Offset == 0) {
2355     int64_t DerefBytes = Loc->Size.getValue();
2356     IsNonNull |= !NullPointerIsDefined;
2357     return std::max(int64_t(0), DerefBytes);
2358   }
2359 
2360   return 0;
2361 }
2362 
2363 struct AANonNullImpl : AANonNull {
2364   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2365       : AANonNull(IRP, A),
2366         NullIsDefined(NullPointerIsDefined(
2367             getAnchorScope(),
2368             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2369 
2370   /// See AbstractAttribute::initialize(...).
2371   void initialize(Attributor &A) override {
2372     Value &V = *getAssociatedValue().stripPointerCasts();
2373     if (!NullIsDefined &&
2374         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2375                 /* IgnoreSubsumingPositions */ false, &A)) {
2376       indicateOptimisticFixpoint();
2377       return;
2378     }
2379 
2380     if (isa<ConstantPointerNull>(V)) {
2381       indicatePessimisticFixpoint();
2382       return;
2383     }
2384 
2385     AANonNull::initialize(A);
2386 
2387     bool CanBeNull, CanBeFreed;
2388     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2389                                          CanBeFreed)) {
2390       if (!CanBeNull) {
2391         indicateOptimisticFixpoint();
2392         return;
2393       }
2394     }
2395 
2396     if (isa<GlobalValue>(V)) {
2397       indicatePessimisticFixpoint();
2398       return;
2399     }
2400 
2401     if (Instruction *CtxI = getCtxI())
2402       followUsesInMBEC(*this, A, getState(), *CtxI);
2403   }
2404 
2405   /// See followUsesInMBEC
2406   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2407                        AANonNull::StateType &State) {
2408     bool IsNonNull = false;
2409     bool TrackUse = false;
2410     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2411                                        IsNonNull, TrackUse);
2412     State.setKnown(IsNonNull);
2413     return TrackUse;
2414   }
2415 
2416   /// See AbstractAttribute::getAsStr().
2417   const std::string getAsStr() const override {
2418     return getAssumed() ? "nonnull" : "may-null";
2419   }
2420 
2421   /// Flag to determine if the underlying value can be null and still allow
2422   /// valid accesses.
2423   const bool NullIsDefined;
2424 };
2425 
2426 /// NonNull attribute for a floating value.
2427 struct AANonNullFloating : public AANonNullImpl {
2428   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2429       : AANonNullImpl(IRP, A) {}
2430 
2431   /// See AbstractAttribute::updateImpl(...).
2432   ChangeStatus updateImpl(Attributor &A) override {
2433     const DataLayout &DL = A.getDataLayout();
2434 
2435     DominatorTree *DT = nullptr;
2436     AssumptionCache *AC = nullptr;
2437     InformationCache &InfoCache = A.getInfoCache();
2438     if (const Function *Fn = getAnchorScope()) {
2439       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2440       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2441     }
2442 
2443     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2444                             AANonNull::StateType &T, bool Stripped) -> bool {
2445       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2446                                              DepClassTy::REQUIRED);
2447       if (!Stripped && this == &AA) {
2448         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2449           T.indicatePessimisticFixpoint();
2450       } else {
2451         // Use abstract attribute information.
2452         const AANonNull::StateType &NS = AA.getState();
2453         T ^= NS;
2454       }
2455       return T.isValidState();
2456     };
2457 
2458     StateType T;
2459     bool UsedAssumedInformation = false;
2460     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2461                                           VisitValueCB, getCtxI(),
2462                                           UsedAssumedInformation))
2463       return indicatePessimisticFixpoint();
2464 
2465     return clampStateAndIndicateChange(getState(), T);
2466   }
2467 
2468   /// See AbstractAttribute::trackStatistics()
2469   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2470 };
2471 
2472 /// NonNull attribute for function return value.
2473 struct AANonNullReturned final
2474     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2475   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2476       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2477 
2478   /// See AbstractAttribute::getAsStr().
2479   const std::string getAsStr() const override {
2480     return getAssumed() ? "nonnull" : "may-null";
2481   }
2482 
2483   /// See AbstractAttribute::trackStatistics()
2484   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2485 };
2486 
2487 /// NonNull attribute for function argument.
2488 struct AANonNullArgument final
2489     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2490   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2491       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2492 
2493   /// See AbstractAttribute::trackStatistics()
2494   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2495 };
2496 
2497 struct AANonNullCallSiteArgument final : AANonNullFloating {
2498   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2499       : AANonNullFloating(IRP, A) {}
2500 
2501   /// See AbstractAttribute::trackStatistics()
2502   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2503 };
2504 
2505 /// NonNull attribute for a call site return position.
2506 struct AANonNullCallSiteReturned final
2507     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2508   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2509       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2510 
2511   /// See AbstractAttribute::trackStatistics()
2512   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2513 };
2514 } // namespace
2515 
2516 /// ------------------------ No-Recurse Attributes ----------------------------
2517 
2518 namespace {
2519 struct AANoRecurseImpl : public AANoRecurse {
2520   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2521 
2522   /// See AbstractAttribute::getAsStr()
2523   const std::string getAsStr() const override {
2524     return getAssumed() ? "norecurse" : "may-recurse";
2525   }
2526 };
2527 
2528 struct AANoRecurseFunction final : AANoRecurseImpl {
2529   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2530       : AANoRecurseImpl(IRP, A) {}
2531 
2532   /// See AbstractAttribute::updateImpl(...).
2533   ChangeStatus updateImpl(Attributor &A) override {
2534 
2535     // If all live call sites are known to be no-recurse, we are as well.
2536     auto CallSitePred = [&](AbstractCallSite ACS) {
2537       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2538           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2539           DepClassTy::NONE);
2540       return NoRecurseAA.isKnownNoRecurse();
2541     };
2542     bool UsedAssumedInformation = false;
2543     if (A.checkForAllCallSites(CallSitePred, *this, true,
2544                                UsedAssumedInformation)) {
2545       // If we know all call sites and all are known no-recurse, we are done.
2546       // If all known call sites, which might not be all that exist, are known
2547       // to be no-recurse, we are not done but we can continue to assume
2548       // no-recurse. If one of the call sites we have not visited will become
2549       // live, another update is triggered.
2550       if (!UsedAssumedInformation)
2551         indicateOptimisticFixpoint();
2552       return ChangeStatus::UNCHANGED;
2553     }
2554 
2555     const AAFunctionReachability &EdgeReachability =
2556         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2557                                            DepClassTy::REQUIRED);
2558     if (EdgeReachability.canReach(A, *getAnchorScope()))
2559       return indicatePessimisticFixpoint();
2560     return ChangeStatus::UNCHANGED;
2561   }
2562 
2563   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2564 };
2565 
2566 /// NoRecurse attribute deduction for a call sites.
2567 struct AANoRecurseCallSite final : AANoRecurseImpl {
2568   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2569       : AANoRecurseImpl(IRP, A) {}
2570 
2571   /// See AbstractAttribute::initialize(...).
2572   void initialize(Attributor &A) override {
2573     AANoRecurseImpl::initialize(A);
2574     Function *F = getAssociatedFunction();
2575     if (!F || F->isDeclaration())
2576       indicatePessimisticFixpoint();
2577   }
2578 
2579   /// See AbstractAttribute::updateImpl(...).
2580   ChangeStatus updateImpl(Attributor &A) override {
2581     // TODO: Once we have call site specific value information we can provide
2582     //       call site specific liveness information and then it makes
2583     //       sense to specialize attributes for call sites arguments instead of
2584     //       redirecting requests to the callee argument.
2585     Function *F = getAssociatedFunction();
2586     const IRPosition &FnPos = IRPosition::function(*F);
2587     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2588     return clampStateAndIndicateChange(getState(), FnAA.getState());
2589   }
2590 
2591   /// See AbstractAttribute::trackStatistics()
2592   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2593 };
2594 } // namespace
2595 
2596 /// -------------------- Undefined-Behavior Attributes ------------------------
2597 
2598 namespace {
2599 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2600   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2601       : AAUndefinedBehavior(IRP, A) {}
2602 
2603   /// See AbstractAttribute::updateImpl(...).
2604   // through a pointer (i.e. also branches etc.)
2605   ChangeStatus updateImpl(Attributor &A) override {
2606     const size_t UBPrevSize = KnownUBInsts.size();
2607     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2608 
2609     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2610       // Lang ref now states volatile store is not UB, let's skip them.
2611       if (I.isVolatile() && I.mayWriteToMemory())
2612         return true;
2613 
2614       // Skip instructions that are already saved.
2615       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2616         return true;
2617 
2618       // If we reach here, we know we have an instruction
2619       // that accesses memory through a pointer operand,
2620       // for which getPointerOperand() should give it to us.
2621       Value *PtrOp =
2622           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2623       assert(PtrOp &&
2624              "Expected pointer operand of memory accessing instruction");
2625 
2626       // Either we stopped and the appropriate action was taken,
2627       // or we got back a simplified value to continue.
2628       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2629       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2630         return true;
2631       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2632 
2633       // A memory access through a pointer is considered UB
2634       // only if the pointer has constant null value.
2635       // TODO: Expand it to not only check constant values.
2636       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2637         AssumedNoUBInsts.insert(&I);
2638         return true;
2639       }
2640       const Type *PtrTy = PtrOpVal->getType();
2641 
2642       // Because we only consider instructions inside functions,
2643       // assume that a parent function exists.
2644       const Function *F = I.getFunction();
2645 
2646       // A memory access using constant null pointer is only considered UB
2647       // if null pointer is _not_ defined for the target platform.
2648       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2649         AssumedNoUBInsts.insert(&I);
2650       else
2651         KnownUBInsts.insert(&I);
2652       return true;
2653     };
2654 
2655     auto InspectBrInstForUB = [&](Instruction &I) {
2656       // A conditional branch instruction is considered UB if it has `undef`
2657       // condition.
2658 
2659       // Skip instructions that are already saved.
2660       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2661         return true;
2662 
2663       // We know we have a branch instruction.
2664       auto *BrInst = cast<BranchInst>(&I);
2665 
2666       // Unconditional branches are never considered UB.
2667       if (BrInst->isUnconditional())
2668         return true;
2669 
2670       // Either we stopped and the appropriate action was taken,
2671       // or we got back a simplified value to continue.
2672       Optional<Value *> SimplifiedCond =
2673           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2674       if (!SimplifiedCond || !*SimplifiedCond)
2675         return true;
2676       AssumedNoUBInsts.insert(&I);
2677       return true;
2678     };
2679 
2680     auto InspectCallSiteForUB = [&](Instruction &I) {
2681       // Check whether a callsite always cause UB or not
2682 
2683       // Skip instructions that are already saved.
2684       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2685         return true;
2686 
2687       // Check nonnull and noundef argument attribute violation for each
2688       // callsite.
2689       CallBase &CB = cast<CallBase>(I);
2690       Function *Callee = CB.getCalledFunction();
2691       if (!Callee)
2692         return true;
2693       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2694         // If current argument is known to be simplified to null pointer and the
2695         // corresponding argument position is known to have nonnull attribute,
2696         // the argument is poison. Furthermore, if the argument is poison and
2697         // the position is known to have noundef attriubte, this callsite is
2698         // considered UB.
2699         if (idx >= Callee->arg_size())
2700           break;
2701         Value *ArgVal = CB.getArgOperand(idx);
2702         if (!ArgVal)
2703           continue;
2704         // Here, we handle three cases.
2705         //   (1) Not having a value means it is dead. (we can replace the value
2706         //       with undef)
2707         //   (2) Simplified to undef. The argument violate noundef attriubte.
2708         //   (3) Simplified to null pointer where known to be nonnull.
2709         //       The argument is a poison value and violate noundef attribute.
2710         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2711         auto &NoUndefAA =
2712             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2713         if (!NoUndefAA.isKnownNoUndef())
2714           continue;
2715         bool UsedAssumedInformation = false;
2716         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2717             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2718         if (UsedAssumedInformation)
2719           continue;
2720         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2721           return true;
2722         if (!SimplifiedVal.hasValue() ||
2723             isa<UndefValue>(*SimplifiedVal.getValue())) {
2724           KnownUBInsts.insert(&I);
2725           continue;
2726         }
2727         if (!ArgVal->getType()->isPointerTy() ||
2728             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2729           continue;
2730         auto &NonNullAA =
2731             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2732         if (NonNullAA.isKnownNonNull())
2733           KnownUBInsts.insert(&I);
2734       }
2735       return true;
2736     };
2737 
2738     auto InspectReturnInstForUB = [&](Instruction &I) {
2739       auto &RI = cast<ReturnInst>(I);
2740       // Either we stopped and the appropriate action was taken,
2741       // or we got back a simplified return value to continue.
2742       Optional<Value *> SimplifiedRetValue =
2743           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2744       if (!SimplifiedRetValue || !*SimplifiedRetValue)
2745         return true;
2746 
2747       // Check if a return instruction always cause UB or not
2748       // Note: It is guaranteed that the returned position of the anchor
2749       //       scope has noundef attribute when this is called.
2750       //       We also ensure the return position is not "assumed dead"
2751       //       because the returned value was then potentially simplified to
2752       //       `undef` in AAReturnedValues without removing the `noundef`
2753       //       attribute yet.
2754 
2755       // When the returned position has noundef attriubte, UB occurs in the
2756       // following cases.
2757       //   (1) Returned value is known to be undef.
2758       //   (2) The value is known to be a null pointer and the returned
2759       //       position has nonnull attribute (because the returned value is
2760       //       poison).
2761       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2762         auto &NonNullAA = A.getAAFor<AANonNull>(
2763             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2764         if (NonNullAA.isKnownNonNull())
2765           KnownUBInsts.insert(&I);
2766       }
2767 
2768       return true;
2769     };
2770 
2771     bool UsedAssumedInformation = false;
2772     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2773                               {Instruction::Load, Instruction::Store,
2774                                Instruction::AtomicCmpXchg,
2775                                Instruction::AtomicRMW},
2776                               UsedAssumedInformation,
2777                               /* CheckBBLivenessOnly */ true);
2778     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2779                               UsedAssumedInformation,
2780                               /* CheckBBLivenessOnly */ true);
2781     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2782                                       UsedAssumedInformation);
2783 
2784     // If the returned position of the anchor scope has noundef attriubte, check
2785     // all returned instructions.
2786     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2787       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2788       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2789         auto &RetPosNoUndefAA =
2790             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2791         if (RetPosNoUndefAA.isKnownNoUndef())
2792           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2793                                     {Instruction::Ret}, UsedAssumedInformation,
2794                                     /* CheckBBLivenessOnly */ true);
2795       }
2796     }
2797 
2798     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2799         UBPrevSize != KnownUBInsts.size())
2800       return ChangeStatus::CHANGED;
2801     return ChangeStatus::UNCHANGED;
2802   }
2803 
2804   bool isKnownToCauseUB(Instruction *I) const override {
2805     return KnownUBInsts.count(I);
2806   }
2807 
2808   bool isAssumedToCauseUB(Instruction *I) const override {
2809     // In simple words, if an instruction is not in the assumed to _not_
2810     // cause UB, then it is assumed UB (that includes those
2811     // in the KnownUBInsts set). The rest is boilerplate
2812     // is to ensure that it is one of the instructions we test
2813     // for UB.
2814 
2815     switch (I->getOpcode()) {
2816     case Instruction::Load:
2817     case Instruction::Store:
2818     case Instruction::AtomicCmpXchg:
2819     case Instruction::AtomicRMW:
2820       return !AssumedNoUBInsts.count(I);
2821     case Instruction::Br: {
2822       auto *BrInst = cast<BranchInst>(I);
2823       if (BrInst->isUnconditional())
2824         return false;
2825       return !AssumedNoUBInsts.count(I);
2826     } break;
2827     default:
2828       return false;
2829     }
2830     return false;
2831   }
2832 
2833   ChangeStatus manifest(Attributor &A) override {
2834     if (KnownUBInsts.empty())
2835       return ChangeStatus::UNCHANGED;
2836     for (Instruction *I : KnownUBInsts)
2837       A.changeToUnreachableAfterManifest(I);
2838     return ChangeStatus::CHANGED;
2839   }
2840 
2841   /// See AbstractAttribute::getAsStr()
2842   const std::string getAsStr() const override {
2843     return getAssumed() ? "undefined-behavior" : "no-ub";
2844   }
2845 
2846   /// Note: The correctness of this analysis depends on the fact that the
2847   /// following 2 sets will stop changing after some point.
2848   /// "Change" here means that their size changes.
2849   /// The size of each set is monotonically increasing
2850   /// (we only add items to them) and it is upper bounded by the number of
2851   /// instructions in the processed function (we can never save more
2852   /// elements in either set than this number). Hence, at some point,
2853   /// they will stop increasing.
2854   /// Consequently, at some point, both sets will have stopped
2855   /// changing, effectively making the analysis reach a fixpoint.
2856 
2857   /// Note: These 2 sets are disjoint and an instruction can be considered
2858   /// one of 3 things:
2859   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2860   ///    the KnownUBInsts set.
2861   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2862   ///    has a reason to assume it).
2863   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2864   ///    could not find a reason to assume or prove that it can cause UB,
2865   ///    hence it assumes it doesn't. We have a set for these instructions
2866   ///    so that we don't reprocess them in every update.
2867   ///    Note however that instructions in this set may cause UB.
2868 
2869 protected:
2870   /// A set of all live instructions _known_ to cause UB.
2871   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2872 
2873 private:
2874   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2875   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2876 
2877   // Should be called on updates in which if we're processing an instruction
2878   // \p I that depends on a value \p V, one of the following has to happen:
2879   // - If the value is assumed, then stop.
2880   // - If the value is known but undef, then consider it UB.
2881   // - Otherwise, do specific processing with the simplified value.
2882   // We return None in the first 2 cases to signify that an appropriate
2883   // action was taken and the caller should stop.
2884   // Otherwise, we return the simplified value that the caller should
2885   // use for specific processing.
2886   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2887                                          Instruction *I) {
2888     bool UsedAssumedInformation = false;
2889     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2890         IRPosition::value(*V), *this, UsedAssumedInformation);
2891     if (!UsedAssumedInformation) {
2892       // Don't depend on assumed values.
2893       if (!SimplifiedV) {
2894         // If it is known (which we tested above) but it doesn't have a value,
2895         // then we can assume `undef` and hence the instruction is UB.
2896         KnownUBInsts.insert(I);
2897         return llvm::None;
2898       }
2899       if (!*SimplifiedV)
2900         return nullptr;
2901       V = *SimplifiedV;
2902     }
2903     if (isa<UndefValue>(V)) {
2904       KnownUBInsts.insert(I);
2905       return llvm::None;
2906     }
2907     return V;
2908   }
2909 };
2910 
2911 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2912   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2913       : AAUndefinedBehaviorImpl(IRP, A) {}
2914 
2915   /// See AbstractAttribute::trackStatistics()
2916   void trackStatistics() const override {
2917     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2918                "Number of instructions known to have UB");
2919     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2920         KnownUBInsts.size();
2921   }
2922 };
2923 } // namespace
2924 
2925 /// ------------------------ Will-Return Attributes ----------------------------
2926 
2927 namespace {
2928 // Helper function that checks whether a function has any cycle which we don't
2929 // know if it is bounded or not.
2930 // Loops with maximum trip count are considered bounded, any other cycle not.
2931 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2932   ScalarEvolution *SE =
2933       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2934   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2935   // If either SCEV or LoopInfo is not available for the function then we assume
2936   // any cycle to be unbounded cycle.
2937   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2938   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2939   if (!SE || !LI) {
2940     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2941       if (SCCI.hasCycle())
2942         return true;
2943     return false;
2944   }
2945 
2946   // If there's irreducible control, the function may contain non-loop cycles.
2947   if (mayContainIrreducibleControl(F, LI))
2948     return true;
2949 
2950   // Any loop that does not have a max trip count is considered unbounded cycle.
2951   for (auto *L : LI->getLoopsInPreorder()) {
2952     if (!SE->getSmallConstantMaxTripCount(L))
2953       return true;
2954   }
2955   return false;
2956 }
2957 
2958 struct AAWillReturnImpl : public AAWillReturn {
2959   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2960       : AAWillReturn(IRP, A) {}
2961 
2962   /// See AbstractAttribute::initialize(...).
2963   void initialize(Attributor &A) override {
2964     AAWillReturn::initialize(A);
2965 
2966     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2967       indicateOptimisticFixpoint();
2968       return;
2969     }
2970   }
2971 
2972   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2973   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2974     // Check for `mustprogress` in the scope and the associated function which
2975     // might be different if this is a call site.
2976     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2977         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2978       return false;
2979 
2980     bool IsKnown;
2981     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2982       return IsKnown || !KnownOnly;
2983     return false;
2984   }
2985 
2986   /// See AbstractAttribute::updateImpl(...).
2987   ChangeStatus updateImpl(Attributor &A) override {
2988     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2989       return ChangeStatus::UNCHANGED;
2990 
2991     auto CheckForWillReturn = [&](Instruction &I) {
2992       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2993       const auto &WillReturnAA =
2994           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2995       if (WillReturnAA.isKnownWillReturn())
2996         return true;
2997       if (!WillReturnAA.isAssumedWillReturn())
2998         return false;
2999       const auto &NoRecurseAA =
3000           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
3001       return NoRecurseAA.isAssumedNoRecurse();
3002     };
3003 
3004     bool UsedAssumedInformation = false;
3005     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
3006                                            UsedAssumedInformation))
3007       return indicatePessimisticFixpoint();
3008 
3009     return ChangeStatus::UNCHANGED;
3010   }
3011 
3012   /// See AbstractAttribute::getAsStr()
3013   const std::string getAsStr() const override {
3014     return getAssumed() ? "willreturn" : "may-noreturn";
3015   }
3016 };
3017 
3018 struct AAWillReturnFunction final : AAWillReturnImpl {
3019   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
3020       : AAWillReturnImpl(IRP, A) {}
3021 
3022   /// See AbstractAttribute::initialize(...).
3023   void initialize(Attributor &A) override {
3024     AAWillReturnImpl::initialize(A);
3025 
3026     Function *F = getAnchorScope();
3027     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
3028       indicatePessimisticFixpoint();
3029   }
3030 
3031   /// See AbstractAttribute::trackStatistics()
3032   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3033 };
3034 
3035 /// WillReturn attribute deduction for a call sites.
3036 struct AAWillReturnCallSite final : AAWillReturnImpl {
3037   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3038       : AAWillReturnImpl(IRP, A) {}
3039 
3040   /// See AbstractAttribute::initialize(...).
3041   void initialize(Attributor &A) override {
3042     AAWillReturnImpl::initialize(A);
3043     Function *F = getAssociatedFunction();
3044     if (!F || !A.isFunctionIPOAmendable(*F))
3045       indicatePessimisticFixpoint();
3046   }
3047 
3048   /// See AbstractAttribute::updateImpl(...).
3049   ChangeStatus updateImpl(Attributor &A) override {
3050     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3051       return ChangeStatus::UNCHANGED;
3052 
3053     // TODO: Once we have call site specific value information we can provide
3054     //       call site specific liveness information and then it makes
3055     //       sense to specialize attributes for call sites arguments instead of
3056     //       redirecting requests to the callee argument.
3057     Function *F = getAssociatedFunction();
3058     const IRPosition &FnPos = IRPosition::function(*F);
3059     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3060     return clampStateAndIndicateChange(getState(), FnAA.getState());
3061   }
3062 
3063   /// See AbstractAttribute::trackStatistics()
3064   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3065 };
3066 } // namespace
3067 
3068 /// -------------------AAReachability Attribute--------------------------
3069 
3070 namespace {
3071 struct AAReachabilityImpl : AAReachability {
3072   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3073       : AAReachability(IRP, A) {}
3074 
3075   const std::string getAsStr() const override {
3076     // TODO: Return the number of reachable queries.
3077     return "reachable";
3078   }
3079 
3080   /// See AbstractAttribute::updateImpl(...).
3081   ChangeStatus updateImpl(Attributor &A) override {
3082     return ChangeStatus::UNCHANGED;
3083   }
3084 };
3085 
3086 struct AAReachabilityFunction final : public AAReachabilityImpl {
3087   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3088       : AAReachabilityImpl(IRP, A) {}
3089 
3090   /// See AbstractAttribute::trackStatistics()
3091   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3092 };
3093 } // namespace
3094 
3095 /// ------------------------ NoAlias Argument Attribute ------------------------
3096 
3097 namespace {
3098 struct AANoAliasImpl : AANoAlias {
3099   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3100     assert(getAssociatedType()->isPointerTy() &&
3101            "Noalias is a pointer attribute");
3102   }
3103 
3104   const std::string getAsStr() const override {
3105     return getAssumed() ? "noalias" : "may-alias";
3106   }
3107 };
3108 
3109 /// NoAlias attribute for a floating value.
3110 struct AANoAliasFloating final : AANoAliasImpl {
3111   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3112       : AANoAliasImpl(IRP, A) {}
3113 
3114   /// See AbstractAttribute::initialize(...).
3115   void initialize(Attributor &A) override {
3116     AANoAliasImpl::initialize(A);
3117     Value *Val = &getAssociatedValue();
3118     do {
3119       CastInst *CI = dyn_cast<CastInst>(Val);
3120       if (!CI)
3121         break;
3122       Value *Base = CI->getOperand(0);
3123       if (!Base->hasOneUse())
3124         break;
3125       Val = Base;
3126     } while (true);
3127 
3128     if (!Val->getType()->isPointerTy()) {
3129       indicatePessimisticFixpoint();
3130       return;
3131     }
3132 
3133     if (isa<AllocaInst>(Val))
3134       indicateOptimisticFixpoint();
3135     else if (isa<ConstantPointerNull>(Val) &&
3136              !NullPointerIsDefined(getAnchorScope(),
3137                                    Val->getType()->getPointerAddressSpace()))
3138       indicateOptimisticFixpoint();
3139     else if (Val != &getAssociatedValue()) {
3140       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3141           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3142       if (ValNoAliasAA.isKnownNoAlias())
3143         indicateOptimisticFixpoint();
3144     }
3145   }
3146 
3147   /// See AbstractAttribute::updateImpl(...).
3148   ChangeStatus updateImpl(Attributor &A) override {
3149     // TODO: Implement this.
3150     return indicatePessimisticFixpoint();
3151   }
3152 
3153   /// See AbstractAttribute::trackStatistics()
3154   void trackStatistics() const override {
3155     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3156   }
3157 };
3158 
3159 /// NoAlias attribute for an argument.
3160 struct AANoAliasArgument final
3161     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3162   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3163   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3164 
3165   /// See AbstractAttribute::initialize(...).
3166   void initialize(Attributor &A) override {
3167     Base::initialize(A);
3168     // See callsite argument attribute and callee argument attribute.
3169     if (hasAttr({Attribute::ByVal}))
3170       indicateOptimisticFixpoint();
3171   }
3172 
3173   /// See AbstractAttribute::update(...).
3174   ChangeStatus updateImpl(Attributor &A) override {
3175     // We have to make sure no-alias on the argument does not break
3176     // synchronization when this is a callback argument, see also [1] below.
3177     // If synchronization cannot be affected, we delegate to the base updateImpl
3178     // function, otherwise we give up for now.
3179 
3180     // If the function is no-sync, no-alias cannot break synchronization.
3181     const auto &NoSyncAA =
3182         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3183                              DepClassTy::OPTIONAL);
3184     if (NoSyncAA.isAssumedNoSync())
3185       return Base::updateImpl(A);
3186 
3187     // If the argument is read-only, no-alias cannot break synchronization.
3188     bool IsKnown;
3189     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3190       return Base::updateImpl(A);
3191 
3192     // If the argument is never passed through callbacks, no-alias cannot break
3193     // synchronization.
3194     bool UsedAssumedInformation = false;
3195     if (A.checkForAllCallSites(
3196             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3197             true, UsedAssumedInformation))
3198       return Base::updateImpl(A);
3199 
3200     // TODO: add no-alias but make sure it doesn't break synchronization by
3201     // introducing fake uses. See:
3202     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3203     //     International Workshop on OpenMP 2018,
3204     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3205 
3206     return indicatePessimisticFixpoint();
3207   }
3208 
3209   /// See AbstractAttribute::trackStatistics()
3210   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3211 };
3212 
3213 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3214   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3215       : AANoAliasImpl(IRP, A) {}
3216 
3217   /// See AbstractAttribute::initialize(...).
3218   void initialize(Attributor &A) override {
3219     // See callsite argument attribute and callee argument attribute.
3220     const auto &CB = cast<CallBase>(getAnchorValue());
3221     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3222       indicateOptimisticFixpoint();
3223     Value &Val = getAssociatedValue();
3224     if (isa<ConstantPointerNull>(Val) &&
3225         !NullPointerIsDefined(getAnchorScope(),
3226                               Val.getType()->getPointerAddressSpace()))
3227       indicateOptimisticFixpoint();
3228   }
3229 
3230   /// Determine if the underlying value may alias with the call site argument
3231   /// \p OtherArgNo of \p ICS (= the underlying call site).
3232   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3233                             const AAMemoryBehavior &MemBehaviorAA,
3234                             const CallBase &CB, unsigned OtherArgNo) {
3235     // We do not need to worry about aliasing with the underlying IRP.
3236     if (this->getCalleeArgNo() == (int)OtherArgNo)
3237       return false;
3238 
3239     // If it is not a pointer or pointer vector we do not alias.
3240     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3241     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3242       return false;
3243 
3244     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3245         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3246 
3247     // If the argument is readnone, there is no read-write aliasing.
3248     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3249       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3250       return false;
3251     }
3252 
3253     // If the argument is readonly and the underlying value is readonly, there
3254     // is no read-write aliasing.
3255     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3256     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3257       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3258       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3259       return false;
3260     }
3261 
3262     // We have to utilize actual alias analysis queries so we need the object.
3263     if (!AAR)
3264       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3265 
3266     // Try to rule it out at the call site.
3267     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3268     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3269                          "callsite arguments: "
3270                       << getAssociatedValue() << " " << *ArgOp << " => "
3271                       << (IsAliasing ? "" : "no-") << "alias \n");
3272 
3273     return IsAliasing;
3274   }
3275 
3276   bool
3277   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3278                                          const AAMemoryBehavior &MemBehaviorAA,
3279                                          const AANoAlias &NoAliasAA) {
3280     // We can deduce "noalias" if the following conditions hold.
3281     // (i)   Associated value is assumed to be noalias in the definition.
3282     // (ii)  Associated value is assumed to be no-capture in all the uses
3283     //       possibly executed before this callsite.
3284     // (iii) There is no other pointer argument which could alias with the
3285     //       value.
3286 
3287     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3288     if (!AssociatedValueIsNoAliasAtDef) {
3289       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3290                         << " is not no-alias at the definition\n");
3291       return false;
3292     }
3293 
3294     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3295       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3296           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3297       return DerefAA.getAssumedDereferenceableBytes();
3298     };
3299 
3300     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3301 
3302     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3303     const Function *ScopeFn = VIRP.getAnchorScope();
3304     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3305     // Check whether the value is captured in the scope using AANoCapture.
3306     // Look at CFG and check only uses possibly executed before this
3307     // callsite.
3308     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3309       Instruction *UserI = cast<Instruction>(U.getUser());
3310 
3311       // If UserI is the curr instruction and there is a single potential use of
3312       // the value in UserI we allow the use.
3313       // TODO: We should inspect the operands and allow those that cannot alias
3314       //       with the value.
3315       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3316         return true;
3317 
3318       if (ScopeFn) {
3319         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3320           if (CB->isArgOperand(&U)) {
3321 
3322             unsigned ArgNo = CB->getArgOperandNo(&U);
3323 
3324             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3325                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3326                 DepClassTy::OPTIONAL);
3327 
3328             if (NoCaptureAA.isAssumedNoCapture())
3329               return true;
3330           }
3331         }
3332 
3333         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3334           return true;
3335       }
3336 
3337       // TODO: We should track the capturing uses in AANoCapture but the problem
3338       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3339       //       a value in the module slice.
3340       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3341       case UseCaptureKind::NO_CAPTURE:
3342         return true;
3343       case UseCaptureKind::MAY_CAPTURE:
3344         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3345                           << "\n");
3346         return false;
3347       case UseCaptureKind::PASSTHROUGH:
3348         Follow = true;
3349         return true;
3350       }
3351       llvm_unreachable("unknown UseCaptureKind");
3352     };
3353 
3354     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3355       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3356         LLVM_DEBUG(
3357             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3358                    << " cannot be noalias as it is potentially captured\n");
3359         return false;
3360       }
3361     }
3362     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3363 
3364     // Check there is no other pointer argument which could alias with the
3365     // value passed at this call site.
3366     // TODO: AbstractCallSite
3367     const auto &CB = cast<CallBase>(getAnchorValue());
3368     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3369       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3370         return false;
3371 
3372     return true;
3373   }
3374 
3375   /// See AbstractAttribute::updateImpl(...).
3376   ChangeStatus updateImpl(Attributor &A) override {
3377     // If the argument is readnone we are done as there are no accesses via the
3378     // argument.
3379     auto &MemBehaviorAA =
3380         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3381     if (MemBehaviorAA.isAssumedReadNone()) {
3382       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3383       return ChangeStatus::UNCHANGED;
3384     }
3385 
3386     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3387     const auto &NoAliasAA =
3388         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3389 
3390     AAResults *AAR = nullptr;
3391     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3392                                                NoAliasAA)) {
3393       LLVM_DEBUG(
3394           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3395       return ChangeStatus::UNCHANGED;
3396     }
3397 
3398     return indicatePessimisticFixpoint();
3399   }
3400 
3401   /// See AbstractAttribute::trackStatistics()
3402   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3403 };
3404 
3405 /// NoAlias attribute for function return value.
3406 struct AANoAliasReturned final : AANoAliasImpl {
3407   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3408       : AANoAliasImpl(IRP, A) {}
3409 
3410   /// See AbstractAttribute::initialize(...).
3411   void initialize(Attributor &A) override {
3412     AANoAliasImpl::initialize(A);
3413     Function *F = getAssociatedFunction();
3414     if (!F || F->isDeclaration())
3415       indicatePessimisticFixpoint();
3416   }
3417 
3418   /// See AbstractAttribute::updateImpl(...).
3419   virtual ChangeStatus updateImpl(Attributor &A) override {
3420 
3421     auto CheckReturnValue = [&](Value &RV) -> bool {
3422       if (Constant *C = dyn_cast<Constant>(&RV))
3423         if (C->isNullValue() || isa<UndefValue>(C))
3424           return true;
3425 
3426       /// For now, we can only deduce noalias if we have call sites.
3427       /// FIXME: add more support.
3428       if (!isa<CallBase>(&RV))
3429         return false;
3430 
3431       const IRPosition &RVPos = IRPosition::value(RV);
3432       const auto &NoAliasAA =
3433           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3434       if (!NoAliasAA.isAssumedNoAlias())
3435         return false;
3436 
3437       const auto &NoCaptureAA =
3438           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3439       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3440     };
3441 
3442     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3443       return indicatePessimisticFixpoint();
3444 
3445     return ChangeStatus::UNCHANGED;
3446   }
3447 
3448   /// See AbstractAttribute::trackStatistics()
3449   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3450 };
3451 
3452 /// NoAlias attribute deduction for a call site return value.
3453 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3454   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3455       : AANoAliasImpl(IRP, A) {}
3456 
3457   /// See AbstractAttribute::initialize(...).
3458   void initialize(Attributor &A) override {
3459     AANoAliasImpl::initialize(A);
3460     Function *F = getAssociatedFunction();
3461     if (!F || F->isDeclaration())
3462       indicatePessimisticFixpoint();
3463   }
3464 
3465   /// See AbstractAttribute::updateImpl(...).
3466   ChangeStatus updateImpl(Attributor &A) override {
3467     // TODO: Once we have call site specific value information we can provide
3468     //       call site specific liveness information and then it makes
3469     //       sense to specialize attributes for call sites arguments instead of
3470     //       redirecting requests to the callee argument.
3471     Function *F = getAssociatedFunction();
3472     const IRPosition &FnPos = IRPosition::returned(*F);
3473     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3474     return clampStateAndIndicateChange(getState(), FnAA.getState());
3475   }
3476 
3477   /// See AbstractAttribute::trackStatistics()
3478   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3479 };
3480 } // namespace
3481 
3482 /// -------------------AAIsDead Function Attribute-----------------------
3483 
3484 namespace {
3485 struct AAIsDeadValueImpl : public AAIsDead {
3486   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3487 
3488   /// See AbstractAttribute::initialize(...).
3489   void initialize(Attributor &A) override {
3490     if (auto *Scope = getAnchorScope())
3491       if (!A.isRunOn(*Scope))
3492         indicatePessimisticFixpoint();
3493   }
3494 
3495   /// See AAIsDead::isAssumedDead().
3496   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3497 
3498   /// See AAIsDead::isKnownDead().
3499   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3500 
3501   /// See AAIsDead::isAssumedDead(BasicBlock *).
3502   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3503 
3504   /// See AAIsDead::isKnownDead(BasicBlock *).
3505   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3506 
3507   /// See AAIsDead::isAssumedDead(Instruction *I).
3508   bool isAssumedDead(const Instruction *I) const override {
3509     return I == getCtxI() && isAssumedDead();
3510   }
3511 
3512   /// See AAIsDead::isKnownDead(Instruction *I).
3513   bool isKnownDead(const Instruction *I) const override {
3514     return isAssumedDead(I) && isKnownDead();
3515   }
3516 
3517   /// See AbstractAttribute::getAsStr().
3518   virtual const std::string getAsStr() const override {
3519     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3520   }
3521 
3522   /// Check if all uses are assumed dead.
3523   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3524     // Callers might not check the type, void has no uses.
3525     if (V.getType()->isVoidTy() || V.use_empty())
3526       return true;
3527 
3528     // If we replace a value with a constant there are no uses left afterwards.
3529     if (!isa<Constant>(V)) {
3530       if (auto *I = dyn_cast<Instruction>(&V))
3531         if (!A.isRunOn(*I->getFunction()))
3532           return false;
3533       bool UsedAssumedInformation = false;
3534       Optional<Constant *> C =
3535           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3536       if (!C || *C)
3537         return true;
3538     }
3539 
3540     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3541     // Explicitly set the dependence class to required because we want a long
3542     // chain of N dependent instructions to be considered live as soon as one is
3543     // without going through N update cycles. This is not required for
3544     // correctness.
3545     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3546                              DepClassTy::REQUIRED,
3547                              /* IgnoreDroppableUses */ false);
3548   }
3549 
3550   /// Determine if \p I is assumed to be side-effect free.
3551   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3552     if (!I || wouldInstructionBeTriviallyDead(I))
3553       return true;
3554 
3555     auto *CB = dyn_cast<CallBase>(I);
3556     if (!CB || isa<IntrinsicInst>(CB))
3557       return false;
3558 
3559     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3560     const auto &NoUnwindAA =
3561         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3562     if (!NoUnwindAA.isAssumedNoUnwind())
3563       return false;
3564     if (!NoUnwindAA.isKnownNoUnwind())
3565       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3566 
3567     bool IsKnown;
3568     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3569   }
3570 };
3571 
3572 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3573   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3574       : AAIsDeadValueImpl(IRP, A) {}
3575 
3576   /// See AbstractAttribute::initialize(...).
3577   void initialize(Attributor &A) override {
3578     AAIsDeadValueImpl::initialize(A);
3579 
3580     if (isa<UndefValue>(getAssociatedValue())) {
3581       indicatePessimisticFixpoint();
3582       return;
3583     }
3584 
3585     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3586     if (!isAssumedSideEffectFree(A, I)) {
3587       if (!isa_and_nonnull<StoreInst>(I))
3588         indicatePessimisticFixpoint();
3589       else
3590         removeAssumedBits(HAS_NO_EFFECT);
3591     }
3592   }
3593 
3594   bool isDeadStore(Attributor &A, StoreInst &SI) {
3595     // Lang ref now states volatile store is not UB/dead, let's skip them.
3596     if (SI.isVolatile())
3597       return false;
3598 
3599     bool UsedAssumedInformation = false;
3600     SmallSetVector<Value *, 4> PotentialCopies;
3601     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3602                                              UsedAssumedInformation))
3603       return false;
3604     return llvm::all_of(PotentialCopies, [&](Value *V) {
3605       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3606                              UsedAssumedInformation);
3607     });
3608   }
3609 
3610   /// See AbstractAttribute::getAsStr().
3611   const std::string getAsStr() const override {
3612     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3613     if (isa_and_nonnull<StoreInst>(I))
3614       if (isValidState())
3615         return "assumed-dead-store";
3616     return AAIsDeadValueImpl::getAsStr();
3617   }
3618 
3619   /// See AbstractAttribute::updateImpl(...).
3620   ChangeStatus updateImpl(Attributor &A) override {
3621     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3622     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3623       if (!isDeadStore(A, *SI))
3624         return indicatePessimisticFixpoint();
3625     } else {
3626       if (!isAssumedSideEffectFree(A, I))
3627         return indicatePessimisticFixpoint();
3628       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3629         return indicatePessimisticFixpoint();
3630     }
3631     return ChangeStatus::UNCHANGED;
3632   }
3633 
3634   bool isRemovableStore() const override {
3635     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3636   }
3637 
3638   /// See AbstractAttribute::manifest(...).
3639   ChangeStatus manifest(Attributor &A) override {
3640     Value &V = getAssociatedValue();
3641     if (auto *I = dyn_cast<Instruction>(&V)) {
3642       // If we get here we basically know the users are all dead. We check if
3643       // isAssumedSideEffectFree returns true here again because it might not be
3644       // the case and only the users are dead but the instruction (=call) is
3645       // still needed.
3646       if (isa<StoreInst>(I) ||
3647           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3648         A.deleteAfterManifest(*I);
3649         return ChangeStatus::CHANGED;
3650       }
3651     }
3652     return ChangeStatus::UNCHANGED;
3653   }
3654 
3655   /// See AbstractAttribute::trackStatistics()
3656   void trackStatistics() const override {
3657     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3658   }
3659 };
3660 
3661 struct AAIsDeadArgument : public AAIsDeadFloating {
3662   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3663       : AAIsDeadFloating(IRP, A) {}
3664 
3665   /// See AbstractAttribute::initialize(...).
3666   void initialize(Attributor &A) override {
3667     AAIsDeadFloating::initialize(A);
3668     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3669       indicatePessimisticFixpoint();
3670   }
3671 
3672   /// See AbstractAttribute::manifest(...).
3673   ChangeStatus manifest(Attributor &A) override {
3674     Argument &Arg = *getAssociatedArgument();
3675     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3676       if (A.registerFunctionSignatureRewrite(
3677               Arg, /* ReplacementTypes */ {},
3678               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3679               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3680         return ChangeStatus::CHANGED;
3681       }
3682     return ChangeStatus::UNCHANGED;
3683   }
3684 
3685   /// See AbstractAttribute::trackStatistics()
3686   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3687 };
3688 
3689 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3690   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3691       : AAIsDeadValueImpl(IRP, A) {}
3692 
3693   /// See AbstractAttribute::initialize(...).
3694   void initialize(Attributor &A) override {
3695     AAIsDeadValueImpl::initialize(A);
3696     if (isa<UndefValue>(getAssociatedValue()))
3697       indicatePessimisticFixpoint();
3698   }
3699 
3700   /// See AbstractAttribute::updateImpl(...).
3701   ChangeStatus updateImpl(Attributor &A) override {
3702     // TODO: Once we have call site specific value information we can provide
3703     //       call site specific liveness information and then it makes
3704     //       sense to specialize attributes for call sites arguments instead of
3705     //       redirecting requests to the callee argument.
3706     Argument *Arg = getAssociatedArgument();
3707     if (!Arg)
3708       return indicatePessimisticFixpoint();
3709     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3710     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3711     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3712   }
3713 
3714   /// See AbstractAttribute::manifest(...).
3715   ChangeStatus manifest(Attributor &A) override {
3716     CallBase &CB = cast<CallBase>(getAnchorValue());
3717     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3718     assert(!isa<UndefValue>(U.get()) &&
3719            "Expected undef values to be filtered out!");
3720     UndefValue &UV = *UndefValue::get(U->getType());
3721     if (A.changeUseAfterManifest(U, UV))
3722       return ChangeStatus::CHANGED;
3723     return ChangeStatus::UNCHANGED;
3724   }
3725 
3726   /// See AbstractAttribute::trackStatistics()
3727   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3728 };
3729 
3730 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3731   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3732       : AAIsDeadFloating(IRP, A) {}
3733 
3734   /// See AAIsDead::isAssumedDead().
3735   bool isAssumedDead() const override {
3736     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3737   }
3738 
3739   /// See AbstractAttribute::initialize(...).
3740   void initialize(Attributor &A) override {
3741     AAIsDeadFloating::initialize(A);
3742     if (isa<UndefValue>(getAssociatedValue())) {
3743       indicatePessimisticFixpoint();
3744       return;
3745     }
3746 
3747     // We track this separately as a secondary state.
3748     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3749   }
3750 
3751   /// See AbstractAttribute::updateImpl(...).
3752   ChangeStatus updateImpl(Attributor &A) override {
3753     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3754     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3755       IsAssumedSideEffectFree = false;
3756       Changed = ChangeStatus::CHANGED;
3757     }
3758     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3759       return indicatePessimisticFixpoint();
3760     return Changed;
3761   }
3762 
3763   /// See AbstractAttribute::trackStatistics()
3764   void trackStatistics() const override {
3765     if (IsAssumedSideEffectFree)
3766       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3767     else
3768       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3769   }
3770 
3771   /// See AbstractAttribute::getAsStr().
3772   const std::string getAsStr() const override {
3773     return isAssumedDead()
3774                ? "assumed-dead"
3775                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3776   }
3777 
3778 private:
3779   bool IsAssumedSideEffectFree = true;
3780 };
3781 
3782 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3783   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3784       : AAIsDeadValueImpl(IRP, A) {}
3785 
3786   /// See AbstractAttribute::updateImpl(...).
3787   ChangeStatus updateImpl(Attributor &A) override {
3788 
3789     bool UsedAssumedInformation = false;
3790     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3791                               {Instruction::Ret}, UsedAssumedInformation);
3792 
3793     auto PredForCallSite = [&](AbstractCallSite ACS) {
3794       if (ACS.isCallbackCall() || !ACS.getInstruction())
3795         return false;
3796       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3797     };
3798 
3799     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3800                                 UsedAssumedInformation))
3801       return indicatePessimisticFixpoint();
3802 
3803     return ChangeStatus::UNCHANGED;
3804   }
3805 
3806   /// See AbstractAttribute::manifest(...).
3807   ChangeStatus manifest(Attributor &A) override {
3808     // TODO: Rewrite the signature to return void?
3809     bool AnyChange = false;
3810     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3811     auto RetInstPred = [&](Instruction &I) {
3812       ReturnInst &RI = cast<ReturnInst>(I);
3813       if (!isa<UndefValue>(RI.getReturnValue()))
3814         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3815       return true;
3816     };
3817     bool UsedAssumedInformation = false;
3818     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3819                               UsedAssumedInformation);
3820     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3821   }
3822 
3823   /// See AbstractAttribute::trackStatistics()
3824   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3825 };
3826 
3827 struct AAIsDeadFunction : public AAIsDead {
3828   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3829 
3830   /// See AbstractAttribute::initialize(...).
3831   void initialize(Attributor &A) override {
3832     Function *F = getAnchorScope();
3833     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3834       indicatePessimisticFixpoint();
3835       return;
3836     }
3837     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3838     assumeLive(A, F->getEntryBlock());
3839   }
3840 
3841   /// See AbstractAttribute::getAsStr().
3842   const std::string getAsStr() const override {
3843     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3844            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3845            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3846            std::to_string(KnownDeadEnds.size()) + "]";
3847   }
3848 
3849   /// See AbstractAttribute::manifest(...).
3850   ChangeStatus manifest(Attributor &A) override {
3851     assert(getState().isValidState() &&
3852            "Attempted to manifest an invalid state!");
3853 
3854     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3855     Function &F = *getAnchorScope();
3856 
3857     if (AssumedLiveBlocks.empty()) {
3858       A.deleteAfterManifest(F);
3859       return ChangeStatus::CHANGED;
3860     }
3861 
3862     // Flag to determine if we can change an invoke to a call assuming the
3863     // callee is nounwind. This is not possible if the personality of the
3864     // function allows to catch asynchronous exceptions.
3865     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3866 
3867     KnownDeadEnds.set_union(ToBeExploredFrom);
3868     for (const Instruction *DeadEndI : KnownDeadEnds) {
3869       auto *CB = dyn_cast<CallBase>(DeadEndI);
3870       if (!CB)
3871         continue;
3872       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3873           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3874       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3875       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3876         continue;
3877 
3878       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3879         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3880       else
3881         A.changeToUnreachableAfterManifest(
3882             const_cast<Instruction *>(DeadEndI->getNextNode()));
3883       HasChanged = ChangeStatus::CHANGED;
3884     }
3885 
3886     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3887     for (BasicBlock &BB : F)
3888       if (!AssumedLiveBlocks.count(&BB)) {
3889         A.deleteAfterManifest(BB);
3890         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3891         HasChanged = ChangeStatus::CHANGED;
3892       }
3893 
3894     return HasChanged;
3895   }
3896 
3897   /// See AbstractAttribute::updateImpl(...).
3898   ChangeStatus updateImpl(Attributor &A) override;
3899 
3900   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3901     assert(From->getParent() == getAnchorScope() &&
3902            To->getParent() == getAnchorScope() &&
3903            "Used AAIsDead of the wrong function");
3904     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3905   }
3906 
3907   /// See AbstractAttribute::trackStatistics()
3908   void trackStatistics() const override {}
3909 
3910   /// Returns true if the function is assumed dead.
3911   bool isAssumedDead() const override { return false; }
3912 
3913   /// See AAIsDead::isKnownDead().
3914   bool isKnownDead() const override { return false; }
3915 
3916   /// See AAIsDead::isAssumedDead(BasicBlock *).
3917   bool isAssumedDead(const BasicBlock *BB) const override {
3918     assert(BB->getParent() == getAnchorScope() &&
3919            "BB must be in the same anchor scope function.");
3920 
3921     if (!getAssumed())
3922       return false;
3923     return !AssumedLiveBlocks.count(BB);
3924   }
3925 
3926   /// See AAIsDead::isKnownDead(BasicBlock *).
3927   bool isKnownDead(const BasicBlock *BB) const override {
3928     return getKnown() && isAssumedDead(BB);
3929   }
3930 
3931   /// See AAIsDead::isAssumed(Instruction *I).
3932   bool isAssumedDead(const Instruction *I) const override {
3933     assert(I->getParent()->getParent() == getAnchorScope() &&
3934            "Instruction must be in the same anchor scope function.");
3935 
3936     if (!getAssumed())
3937       return false;
3938 
3939     // If it is not in AssumedLiveBlocks then it for sure dead.
3940     // Otherwise, it can still be after noreturn call in a live block.
3941     if (!AssumedLiveBlocks.count(I->getParent()))
3942       return true;
3943 
3944     // If it is not after a liveness barrier it is live.
3945     const Instruction *PrevI = I->getPrevNode();
3946     while (PrevI) {
3947       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3948         return true;
3949       PrevI = PrevI->getPrevNode();
3950     }
3951     return false;
3952   }
3953 
3954   /// See AAIsDead::isKnownDead(Instruction *I).
3955   bool isKnownDead(const Instruction *I) const override {
3956     return getKnown() && isAssumedDead(I);
3957   }
3958 
3959   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3960   /// that internal function called from \p BB should now be looked at.
3961   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3962     if (!AssumedLiveBlocks.insert(&BB).second)
3963       return false;
3964 
3965     // We assume that all of BB is (probably) live now and if there are calls to
3966     // internal functions we will assume that those are now live as well. This
3967     // is a performance optimization for blocks with calls to a lot of internal
3968     // functions. It can however cause dead functions to be treated as live.
3969     for (const Instruction &I : BB)
3970       if (const auto *CB = dyn_cast<CallBase>(&I))
3971         if (const Function *F = CB->getCalledFunction())
3972           if (F->hasLocalLinkage())
3973             A.markLiveInternalFunction(*F);
3974     return true;
3975   }
3976 
3977   /// Collection of instructions that need to be explored again, e.g., we
3978   /// did assume they do not transfer control to (one of their) successors.
3979   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3980 
3981   /// Collection of instructions that are known to not transfer control.
3982   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3983 
3984   /// Collection of all assumed live edges
3985   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3986 
3987   /// Collection of all assumed live BasicBlocks.
3988   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3989 };
3990 
3991 static bool
3992 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3993                         AbstractAttribute &AA,
3994                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3995   const IRPosition &IPos = IRPosition::callsite_function(CB);
3996 
3997   const auto &NoReturnAA =
3998       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3999   if (NoReturnAA.isAssumedNoReturn())
4000     return !NoReturnAA.isKnownNoReturn();
4001   if (CB.isTerminator())
4002     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
4003   else
4004     AliveSuccessors.push_back(CB.getNextNode());
4005   return false;
4006 }
4007 
4008 static bool
4009 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
4010                         AbstractAttribute &AA,
4011                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4012   bool UsedAssumedInformation =
4013       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
4014 
4015   // First, determine if we can change an invoke to a call assuming the
4016   // callee is nounwind. This is not possible if the personality of the
4017   // function allows to catch asynchronous exceptions.
4018   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
4019     AliveSuccessors.push_back(&II.getUnwindDest()->front());
4020   } else {
4021     const IRPosition &IPos = IRPosition::callsite_function(II);
4022     const auto &AANoUnw =
4023         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
4024     if (AANoUnw.isAssumedNoUnwind()) {
4025       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
4026     } else {
4027       AliveSuccessors.push_back(&II.getUnwindDest()->front());
4028     }
4029   }
4030   return UsedAssumedInformation;
4031 }
4032 
4033 static bool
4034 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
4035                         AbstractAttribute &AA,
4036                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4037   bool UsedAssumedInformation = false;
4038   if (BI.getNumSuccessors() == 1) {
4039     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4040   } else {
4041     Optional<Constant *> C =
4042         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
4043     if (!C || isa_and_nonnull<UndefValue>(*C)) {
4044       // No value yet, assume both edges are dead.
4045     } else if (isa_and_nonnull<ConstantInt>(*C)) {
4046       const BasicBlock *SuccBB =
4047           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
4048       AliveSuccessors.push_back(&SuccBB->front());
4049     } else {
4050       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4051       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4052       UsedAssumedInformation = false;
4053     }
4054   }
4055   return UsedAssumedInformation;
4056 }
4057 
4058 static bool
4059 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4060                         AbstractAttribute &AA,
4061                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4062   bool UsedAssumedInformation = false;
4063   Optional<Constant *> C =
4064       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4065   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4066     // No value yet, assume all edges are dead.
4067   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
4068     for (auto &CaseIt : SI.cases()) {
4069       if (CaseIt.getCaseValue() == C.getValue()) {
4070         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4071         return UsedAssumedInformation;
4072       }
4073     }
4074     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4075     return UsedAssumedInformation;
4076   } else {
4077     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4078       AliveSuccessors.push_back(&SuccBB->front());
4079   }
4080   return UsedAssumedInformation;
4081 }
4082 
4083 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4084   ChangeStatus Change = ChangeStatus::UNCHANGED;
4085 
4086   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4087                     << getAnchorScope()->size() << "] BBs and "
4088                     << ToBeExploredFrom.size() << " exploration points and "
4089                     << KnownDeadEnds.size() << " known dead ends\n");
4090 
4091   // Copy and clear the list of instructions we need to explore from. It is
4092   // refilled with instructions the next update has to look at.
4093   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4094                                                ToBeExploredFrom.end());
4095   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4096 
4097   SmallVector<const Instruction *, 8> AliveSuccessors;
4098   while (!Worklist.empty()) {
4099     const Instruction *I = Worklist.pop_back_val();
4100     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4101 
4102     // Fast forward for uninteresting instructions. We could look for UB here
4103     // though.
4104     while (!I->isTerminator() && !isa<CallBase>(I))
4105       I = I->getNextNode();
4106 
4107     AliveSuccessors.clear();
4108 
4109     bool UsedAssumedInformation = false;
4110     switch (I->getOpcode()) {
4111     // TODO: look for (assumed) UB to backwards propagate "deadness".
4112     default:
4113       assert(I->isTerminator() &&
4114              "Expected non-terminators to be handled already!");
4115       for (const BasicBlock *SuccBB : successors(I->getParent()))
4116         AliveSuccessors.push_back(&SuccBB->front());
4117       break;
4118     case Instruction::Call:
4119       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4120                                                        *this, AliveSuccessors);
4121       break;
4122     case Instruction::Invoke:
4123       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4124                                                        *this, AliveSuccessors);
4125       break;
4126     case Instruction::Br:
4127       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4128                                                        *this, AliveSuccessors);
4129       break;
4130     case Instruction::Switch:
4131       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4132                                                        *this, AliveSuccessors);
4133       break;
4134     }
4135 
4136     if (UsedAssumedInformation) {
4137       NewToBeExploredFrom.insert(I);
4138     } else if (AliveSuccessors.empty() ||
4139                (I->isTerminator() &&
4140                 AliveSuccessors.size() < I->getNumSuccessors())) {
4141       if (KnownDeadEnds.insert(I))
4142         Change = ChangeStatus::CHANGED;
4143     }
4144 
4145     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4146                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4147                       << UsedAssumedInformation << "\n");
4148 
4149     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4150       if (!I->isTerminator()) {
4151         assert(AliveSuccessors.size() == 1 &&
4152                "Non-terminator expected to have a single successor!");
4153         Worklist.push_back(AliveSuccessor);
4154       } else {
4155         // record the assumed live edge
4156         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4157         if (AssumedLiveEdges.insert(Edge).second)
4158           Change = ChangeStatus::CHANGED;
4159         if (assumeLive(A, *AliveSuccessor->getParent()))
4160           Worklist.push_back(AliveSuccessor);
4161       }
4162     }
4163   }
4164 
4165   // Check if the content of ToBeExploredFrom changed, ignore the order.
4166   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4167       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4168         return !ToBeExploredFrom.count(I);
4169       })) {
4170     Change = ChangeStatus::CHANGED;
4171     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4172   }
4173 
4174   // If we know everything is live there is no need to query for liveness.
4175   // Instead, indicating a pessimistic fixpoint will cause the state to be
4176   // "invalid" and all queries to be answered conservatively without lookups.
4177   // To be in this state we have to (1) finished the exploration and (3) not
4178   // discovered any non-trivial dead end and (2) not ruled unreachable code
4179   // dead.
4180   if (ToBeExploredFrom.empty() &&
4181       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4182       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4183         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4184       }))
4185     return indicatePessimisticFixpoint();
4186   return Change;
4187 }
4188 
4189 /// Liveness information for a call sites.
4190 struct AAIsDeadCallSite final : AAIsDeadFunction {
4191   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4192       : AAIsDeadFunction(IRP, A) {}
4193 
4194   /// See AbstractAttribute::initialize(...).
4195   void initialize(Attributor &A) override {
4196     // TODO: Once we have call site specific value information we can provide
4197     //       call site specific liveness information and then it makes
4198     //       sense to specialize attributes for call sites instead of
4199     //       redirecting requests to the callee.
4200     llvm_unreachable("Abstract attributes for liveness are not "
4201                      "supported for call sites yet!");
4202   }
4203 
4204   /// See AbstractAttribute::updateImpl(...).
4205   ChangeStatus updateImpl(Attributor &A) override {
4206     return indicatePessimisticFixpoint();
4207   }
4208 
4209   /// See AbstractAttribute::trackStatistics()
4210   void trackStatistics() const override {}
4211 };
4212 } // namespace
4213 
4214 /// -------------------- Dereferenceable Argument Attribute --------------------
4215 
4216 namespace {
4217 struct AADereferenceableImpl : AADereferenceable {
4218   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4219       : AADereferenceable(IRP, A) {}
4220   using StateType = DerefState;
4221 
4222   /// See AbstractAttribute::initialize(...).
4223   void initialize(Attributor &A) override {
4224     Value &V = *getAssociatedValue().stripPointerCasts();
4225     SmallVector<Attribute, 4> Attrs;
4226     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4227              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4228     for (const Attribute &Attr : Attrs)
4229       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4230 
4231     const IRPosition &IRP = this->getIRPosition();
4232     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4233 
4234     bool CanBeNull, CanBeFreed;
4235     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4236         A.getDataLayout(), CanBeNull, CanBeFreed));
4237 
4238     bool IsFnInterface = IRP.isFnInterfaceKind();
4239     Function *FnScope = IRP.getAnchorScope();
4240     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4241       indicatePessimisticFixpoint();
4242       return;
4243     }
4244 
4245     if (Instruction *CtxI = getCtxI())
4246       followUsesInMBEC(*this, A, getState(), *CtxI);
4247   }
4248 
4249   /// See AbstractAttribute::getState()
4250   /// {
4251   StateType &getState() override { return *this; }
4252   const StateType &getState() const override { return *this; }
4253   /// }
4254 
4255   /// Helper function for collecting accessed bytes in must-be-executed-context
4256   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4257                               DerefState &State) {
4258     const Value *UseV = U->get();
4259     if (!UseV->getType()->isPointerTy())
4260       return;
4261 
4262     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4263     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4264       return;
4265 
4266     int64_t Offset;
4267     const Value *Base = GetPointerBaseWithConstantOffset(
4268         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4269     if (Base && Base == &getAssociatedValue())
4270       State.addAccessedBytes(Offset, Loc->Size.getValue());
4271   }
4272 
4273   /// See followUsesInMBEC
4274   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4275                        AADereferenceable::StateType &State) {
4276     bool IsNonNull = false;
4277     bool TrackUse = false;
4278     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4279         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4280     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4281                       << " for instruction " << *I << "\n");
4282 
4283     addAccessedBytesForUse(A, U, I, State);
4284     State.takeKnownDerefBytesMaximum(DerefBytes);
4285     return TrackUse;
4286   }
4287 
4288   /// See AbstractAttribute::manifest(...).
4289   ChangeStatus manifest(Attributor &A) override {
4290     ChangeStatus Change = AADereferenceable::manifest(A);
4291     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4292       removeAttrs({Attribute::DereferenceableOrNull});
4293       return ChangeStatus::CHANGED;
4294     }
4295     return Change;
4296   }
4297 
4298   void getDeducedAttributes(LLVMContext &Ctx,
4299                             SmallVectorImpl<Attribute> &Attrs) const override {
4300     // TODO: Add *_globally support
4301     if (isAssumedNonNull())
4302       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4303           Ctx, getAssumedDereferenceableBytes()));
4304     else
4305       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4306           Ctx, getAssumedDereferenceableBytes()));
4307   }
4308 
4309   /// See AbstractAttribute::getAsStr().
4310   const std::string getAsStr() const override {
4311     if (!getAssumedDereferenceableBytes())
4312       return "unknown-dereferenceable";
4313     return std::string("dereferenceable") +
4314            (isAssumedNonNull() ? "" : "_or_null") +
4315            (isAssumedGlobal() ? "_globally" : "") + "<" +
4316            std::to_string(getKnownDereferenceableBytes()) + "-" +
4317            std::to_string(getAssumedDereferenceableBytes()) + ">";
4318   }
4319 };
4320 
4321 /// Dereferenceable attribute for a floating value.
4322 struct AADereferenceableFloating : AADereferenceableImpl {
4323   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4324       : AADereferenceableImpl(IRP, A) {}
4325 
4326   /// See AbstractAttribute::updateImpl(...).
4327   ChangeStatus updateImpl(Attributor &A) override {
4328     const DataLayout &DL = A.getDataLayout();
4329 
4330     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4331                             bool Stripped) -> bool {
4332       unsigned IdxWidth =
4333           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4334       APInt Offset(IdxWidth, 0);
4335       const Value *Base = stripAndAccumulateOffsets(
4336           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4337           /* AllowNonInbounds */ true);
4338 
4339       const auto &AA = A.getAAFor<AADereferenceable>(
4340           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4341       int64_t DerefBytes = 0;
4342       if (!Stripped && this == &AA) {
4343         // Use IR information if we did not strip anything.
4344         // TODO: track globally.
4345         bool CanBeNull, CanBeFreed;
4346         DerefBytes =
4347             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4348         T.GlobalState.indicatePessimisticFixpoint();
4349       } else {
4350         const DerefState &DS = AA.getState();
4351         DerefBytes = DS.DerefBytesState.getAssumed();
4352         T.GlobalState &= DS.GlobalState;
4353       }
4354 
4355       // For now we do not try to "increase" dereferenceability due to negative
4356       // indices as we first have to come up with code to deal with loops and
4357       // for overflows of the dereferenceable bytes.
4358       int64_t OffsetSExt = Offset.getSExtValue();
4359       if (OffsetSExt < 0)
4360         OffsetSExt = 0;
4361 
4362       T.takeAssumedDerefBytesMinimum(
4363           std::max(int64_t(0), DerefBytes - OffsetSExt));
4364 
4365       if (this == &AA) {
4366         if (!Stripped) {
4367           // If nothing was stripped IR information is all we got.
4368           T.takeKnownDerefBytesMaximum(
4369               std::max(int64_t(0), DerefBytes - OffsetSExt));
4370           T.indicatePessimisticFixpoint();
4371         } else if (OffsetSExt > 0) {
4372           // If something was stripped but there is circular reasoning we look
4373           // for the offset. If it is positive we basically decrease the
4374           // dereferenceable bytes in a circluar loop now, which will simply
4375           // drive them down to the known value in a very slow way which we
4376           // can accelerate.
4377           T.indicatePessimisticFixpoint();
4378         }
4379       }
4380 
4381       return T.isValidState();
4382     };
4383 
4384     DerefState T;
4385     bool UsedAssumedInformation = false;
4386     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4387                                            VisitValueCB, getCtxI(),
4388                                            UsedAssumedInformation))
4389       return indicatePessimisticFixpoint();
4390 
4391     return clampStateAndIndicateChange(getState(), T);
4392   }
4393 
4394   /// See AbstractAttribute::trackStatistics()
4395   void trackStatistics() const override {
4396     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4397   }
4398 };
4399 
4400 /// Dereferenceable attribute for a return value.
4401 struct AADereferenceableReturned final
4402     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4403   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4404       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4405             IRP, A) {}
4406 
4407   /// See AbstractAttribute::trackStatistics()
4408   void trackStatistics() const override {
4409     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4410   }
4411 };
4412 
4413 /// Dereferenceable attribute for an argument
4414 struct AADereferenceableArgument final
4415     : AAArgumentFromCallSiteArguments<AADereferenceable,
4416                                       AADereferenceableImpl> {
4417   using Base =
4418       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4419   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4420       : Base(IRP, A) {}
4421 
4422   /// See AbstractAttribute::trackStatistics()
4423   void trackStatistics() const override {
4424     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4425   }
4426 };
4427 
4428 /// Dereferenceable attribute for a call site argument.
4429 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4430   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4431       : AADereferenceableFloating(IRP, A) {}
4432 
4433   /// See AbstractAttribute::trackStatistics()
4434   void trackStatistics() const override {
4435     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4436   }
4437 };
4438 
4439 /// Dereferenceable attribute deduction for a call site return value.
4440 struct AADereferenceableCallSiteReturned final
4441     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4442   using Base =
4443       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4444   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4445       : Base(IRP, A) {}
4446 
4447   /// See AbstractAttribute::trackStatistics()
4448   void trackStatistics() const override {
4449     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4450   }
4451 };
4452 } // namespace
4453 
4454 // ------------------------ Align Argument Attribute ------------------------
4455 
4456 namespace {
4457 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4458                                     Value &AssociatedValue, const Use *U,
4459                                     const Instruction *I, bool &TrackUse) {
4460   // We need to follow common pointer manipulation uses to the accesses they
4461   // feed into.
4462   if (isa<CastInst>(I)) {
4463     // Follow all but ptr2int casts.
4464     TrackUse = !isa<PtrToIntInst>(I);
4465     return 0;
4466   }
4467   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4468     if (GEP->hasAllConstantIndices())
4469       TrackUse = true;
4470     return 0;
4471   }
4472 
4473   MaybeAlign MA;
4474   if (const auto *CB = dyn_cast<CallBase>(I)) {
4475     if (CB->isBundleOperand(U) || CB->isCallee(U))
4476       return 0;
4477 
4478     unsigned ArgNo = CB->getArgOperandNo(U);
4479     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4480     // As long as we only use known information there is no need to track
4481     // dependences here.
4482     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4483     MA = MaybeAlign(AlignAA.getKnownAlign());
4484   }
4485 
4486   const DataLayout &DL = A.getDataLayout();
4487   const Value *UseV = U->get();
4488   if (auto *SI = dyn_cast<StoreInst>(I)) {
4489     if (SI->getPointerOperand() == UseV)
4490       MA = SI->getAlign();
4491   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4492     if (LI->getPointerOperand() == UseV)
4493       MA = LI->getAlign();
4494   }
4495 
4496   if (!MA || *MA <= QueryingAA.getKnownAlign())
4497     return 0;
4498 
4499   unsigned Alignment = MA->value();
4500   int64_t Offset;
4501 
4502   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4503     if (Base == &AssociatedValue) {
4504       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4505       // So we can say that the maximum power of two which is a divisor of
4506       // gcd(Offset, Alignment) is an alignment.
4507 
4508       uint32_t gcd =
4509           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4510       Alignment = llvm::PowerOf2Floor(gcd);
4511     }
4512   }
4513 
4514   return Alignment;
4515 }
4516 
4517 struct AAAlignImpl : AAAlign {
4518   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4519 
4520   /// See AbstractAttribute::initialize(...).
4521   void initialize(Attributor &A) override {
4522     SmallVector<Attribute, 4> Attrs;
4523     getAttrs({Attribute::Alignment}, Attrs);
4524     for (const Attribute &Attr : Attrs)
4525       takeKnownMaximum(Attr.getValueAsInt());
4526 
4527     Value &V = *getAssociatedValue().stripPointerCasts();
4528     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4529 
4530     if (getIRPosition().isFnInterfaceKind() &&
4531         (!getAnchorScope() ||
4532          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4533       indicatePessimisticFixpoint();
4534       return;
4535     }
4536 
4537     if (Instruction *CtxI = getCtxI())
4538       followUsesInMBEC(*this, A, getState(), *CtxI);
4539   }
4540 
4541   /// See AbstractAttribute::manifest(...).
4542   ChangeStatus manifest(Attributor &A) override {
4543     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4544 
4545     // Check for users that allow alignment annotations.
4546     Value &AssociatedValue = getAssociatedValue();
4547     for (const Use &U : AssociatedValue.uses()) {
4548       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4549         if (SI->getPointerOperand() == &AssociatedValue)
4550           if (SI->getAlign() < getAssumedAlign()) {
4551             STATS_DECLTRACK(AAAlign, Store,
4552                             "Number of times alignment added to a store");
4553             SI->setAlignment(getAssumedAlign());
4554             LoadStoreChanged = ChangeStatus::CHANGED;
4555           }
4556       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4557         if (LI->getPointerOperand() == &AssociatedValue)
4558           if (LI->getAlign() < getAssumedAlign()) {
4559             LI->setAlignment(getAssumedAlign());
4560             STATS_DECLTRACK(AAAlign, Load,
4561                             "Number of times alignment added to a load");
4562             LoadStoreChanged = ChangeStatus::CHANGED;
4563           }
4564       }
4565     }
4566 
4567     ChangeStatus Changed = AAAlign::manifest(A);
4568 
4569     Align InheritAlign =
4570         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4571     if (InheritAlign >= getAssumedAlign())
4572       return LoadStoreChanged;
4573     return Changed | LoadStoreChanged;
4574   }
4575 
4576   // TODO: Provide a helper to determine the implied ABI alignment and check in
4577   //       the existing manifest method and a new one for AAAlignImpl that value
4578   //       to avoid making the alignment explicit if it did not improve.
4579 
4580   /// See AbstractAttribute::getDeducedAttributes
4581   virtual void
4582   getDeducedAttributes(LLVMContext &Ctx,
4583                        SmallVectorImpl<Attribute> &Attrs) const override {
4584     if (getAssumedAlign() > 1)
4585       Attrs.emplace_back(
4586           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4587   }
4588 
4589   /// See followUsesInMBEC
4590   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4591                        AAAlign::StateType &State) {
4592     bool TrackUse = false;
4593 
4594     unsigned int KnownAlign =
4595         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4596     State.takeKnownMaximum(KnownAlign);
4597 
4598     return TrackUse;
4599   }
4600 
4601   /// See AbstractAttribute::getAsStr().
4602   const std::string getAsStr() const override {
4603     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4604            std::to_string(getAssumedAlign().value()) + ">";
4605   }
4606 };
4607 
4608 /// Align attribute for a floating value.
4609 struct AAAlignFloating : AAAlignImpl {
4610   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4611 
4612   /// See AbstractAttribute::updateImpl(...).
4613   ChangeStatus updateImpl(Attributor &A) override {
4614     const DataLayout &DL = A.getDataLayout();
4615 
4616     auto VisitValueCB = [&](Value &V, const Instruction *,
4617                             AAAlign::StateType &T, bool Stripped) -> bool {
4618       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4619         return true;
4620       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4621                                            DepClassTy::REQUIRED);
4622       if (!Stripped && this == &AA) {
4623         int64_t Offset;
4624         unsigned Alignment = 1;
4625         if (const Value *Base =
4626                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4627           // TODO: Use AAAlign for the base too.
4628           Align PA = Base->getPointerAlignment(DL);
4629           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4630           // So we can say that the maximum power of two which is a divisor of
4631           // gcd(Offset, Alignment) is an alignment.
4632 
4633           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4634                                                uint32_t(PA.value()));
4635           Alignment = llvm::PowerOf2Floor(gcd);
4636         } else {
4637           Alignment = V.getPointerAlignment(DL).value();
4638         }
4639         // Use only IR information if we did not strip anything.
4640         T.takeKnownMaximum(Alignment);
4641         T.indicatePessimisticFixpoint();
4642       } else {
4643         // Use abstract attribute information.
4644         const AAAlign::StateType &DS = AA.getState();
4645         T ^= DS;
4646       }
4647       return T.isValidState();
4648     };
4649 
4650     StateType T;
4651     bool UsedAssumedInformation = false;
4652     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4653                                           VisitValueCB, getCtxI(),
4654                                           UsedAssumedInformation))
4655       return indicatePessimisticFixpoint();
4656 
4657     // TODO: If we know we visited all incoming values, thus no are assumed
4658     // dead, we can take the known information from the state T.
4659     return clampStateAndIndicateChange(getState(), T);
4660   }
4661 
4662   /// See AbstractAttribute::trackStatistics()
4663   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4664 };
4665 
4666 /// Align attribute for function return value.
4667 struct AAAlignReturned final
4668     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4669   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4670   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4671 
4672   /// See AbstractAttribute::initialize(...).
4673   void initialize(Attributor &A) override {
4674     Base::initialize(A);
4675     Function *F = getAssociatedFunction();
4676     if (!F || F->isDeclaration())
4677       indicatePessimisticFixpoint();
4678   }
4679 
4680   /// See AbstractAttribute::trackStatistics()
4681   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4682 };
4683 
4684 /// Align attribute for function argument.
4685 struct AAAlignArgument final
4686     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4687   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4688   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4689 
4690   /// See AbstractAttribute::manifest(...).
4691   ChangeStatus manifest(Attributor &A) override {
4692     // If the associated argument is involved in a must-tail call we give up
4693     // because we would need to keep the argument alignments of caller and
4694     // callee in-sync. Just does not seem worth the trouble right now.
4695     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4696       return ChangeStatus::UNCHANGED;
4697     return Base::manifest(A);
4698   }
4699 
4700   /// See AbstractAttribute::trackStatistics()
4701   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4702 };
4703 
4704 struct AAAlignCallSiteArgument final : AAAlignFloating {
4705   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4706       : AAAlignFloating(IRP, A) {}
4707 
4708   /// See AbstractAttribute::manifest(...).
4709   ChangeStatus manifest(Attributor &A) override {
4710     // If the associated argument is involved in a must-tail call we give up
4711     // because we would need to keep the argument alignments of caller and
4712     // callee in-sync. Just does not seem worth the trouble right now.
4713     if (Argument *Arg = getAssociatedArgument())
4714       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4715         return ChangeStatus::UNCHANGED;
4716     ChangeStatus Changed = AAAlignImpl::manifest(A);
4717     Align InheritAlign =
4718         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4719     if (InheritAlign >= getAssumedAlign())
4720       Changed = ChangeStatus::UNCHANGED;
4721     return Changed;
4722   }
4723 
4724   /// See AbstractAttribute::updateImpl(Attributor &A).
4725   ChangeStatus updateImpl(Attributor &A) override {
4726     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4727     if (Argument *Arg = getAssociatedArgument()) {
4728       // We only take known information from the argument
4729       // so we do not need to track a dependence.
4730       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4731           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4732       takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4733     }
4734     return Changed;
4735   }
4736 
4737   /// See AbstractAttribute::trackStatistics()
4738   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4739 };
4740 
4741 /// Align attribute deduction for a call site return value.
4742 struct AAAlignCallSiteReturned final
4743     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4744   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4745   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4746       : Base(IRP, A) {}
4747 
4748   /// See AbstractAttribute::initialize(...).
4749   void initialize(Attributor &A) override {
4750     Base::initialize(A);
4751     Function *F = getAssociatedFunction();
4752     if (!F || F->isDeclaration())
4753       indicatePessimisticFixpoint();
4754   }
4755 
4756   /// See AbstractAttribute::trackStatistics()
4757   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4758 };
4759 } // namespace
4760 
4761 /// ------------------ Function No-Return Attribute ----------------------------
4762 namespace {
4763 struct AANoReturnImpl : public AANoReturn {
4764   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4765 
4766   /// See AbstractAttribute::initialize(...).
4767   void initialize(Attributor &A) override {
4768     AANoReturn::initialize(A);
4769     Function *F = getAssociatedFunction();
4770     if (!F || F->isDeclaration())
4771       indicatePessimisticFixpoint();
4772   }
4773 
4774   /// See AbstractAttribute::getAsStr().
4775   const std::string getAsStr() const override {
4776     return getAssumed() ? "noreturn" : "may-return";
4777   }
4778 
4779   /// See AbstractAttribute::updateImpl(Attributor &A).
4780   virtual ChangeStatus updateImpl(Attributor &A) override {
4781     auto CheckForNoReturn = [](Instruction &) { return false; };
4782     bool UsedAssumedInformation = false;
4783     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4784                                    {(unsigned)Instruction::Ret},
4785                                    UsedAssumedInformation))
4786       return indicatePessimisticFixpoint();
4787     return ChangeStatus::UNCHANGED;
4788   }
4789 };
4790 
4791 struct AANoReturnFunction final : AANoReturnImpl {
4792   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4793       : AANoReturnImpl(IRP, A) {}
4794 
4795   /// See AbstractAttribute::trackStatistics()
4796   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4797 };
4798 
4799 /// NoReturn attribute deduction for a call sites.
4800 struct AANoReturnCallSite final : AANoReturnImpl {
4801   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4802       : AANoReturnImpl(IRP, A) {}
4803 
4804   /// See AbstractAttribute::initialize(...).
4805   void initialize(Attributor &A) override {
4806     AANoReturnImpl::initialize(A);
4807     if (Function *F = getAssociatedFunction()) {
4808       const IRPosition &FnPos = IRPosition::function(*F);
4809       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4810       if (!FnAA.isAssumedNoReturn())
4811         indicatePessimisticFixpoint();
4812     }
4813   }
4814 
4815   /// See AbstractAttribute::updateImpl(...).
4816   ChangeStatus updateImpl(Attributor &A) override {
4817     // TODO: Once we have call site specific value information we can provide
4818     //       call site specific liveness information and then it makes
4819     //       sense to specialize attributes for call sites arguments instead of
4820     //       redirecting requests to the callee argument.
4821     Function *F = getAssociatedFunction();
4822     const IRPosition &FnPos = IRPosition::function(*F);
4823     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4824     return clampStateAndIndicateChange(getState(), FnAA.getState());
4825   }
4826 
4827   /// See AbstractAttribute::trackStatistics()
4828   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4829 };
4830 } // namespace
4831 
4832 /// ----------------------- Instance Info ---------------------------------
4833 
4834 namespace {
4835 /// A class to hold the state of for no-capture attributes.
4836 struct AAInstanceInfoImpl : public AAInstanceInfo {
4837   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4838       : AAInstanceInfo(IRP, A) {}
4839 
4840   /// See AbstractAttribute::initialize(...).
4841   void initialize(Attributor &A) override {
4842     Value &V = getAssociatedValue();
4843     if (auto *C = dyn_cast<Constant>(&V)) {
4844       if (C->isThreadDependent())
4845         indicatePessimisticFixpoint();
4846       else
4847         indicateOptimisticFixpoint();
4848       return;
4849     }
4850     if (auto *CB = dyn_cast<CallBase>(&V))
4851       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4852           !CB->mayReadFromMemory()) {
4853         indicateOptimisticFixpoint();
4854         return;
4855       }
4856   }
4857 
4858   /// See AbstractAttribute::updateImpl(...).
4859   ChangeStatus updateImpl(Attributor &A) override {
4860     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4861 
4862     Value &V = getAssociatedValue();
4863     const Function *Scope = nullptr;
4864     if (auto *I = dyn_cast<Instruction>(&V))
4865       Scope = I->getFunction();
4866     if (auto *A = dyn_cast<Argument>(&V)) {
4867       Scope = A->getParent();
4868       if (!Scope->hasLocalLinkage())
4869         return Changed;
4870     }
4871     if (!Scope)
4872       return indicateOptimisticFixpoint();
4873 
4874     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4875         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4876     if (NoRecurseAA.isAssumedNoRecurse())
4877       return Changed;
4878 
4879     auto UsePred = [&](const Use &U, bool &Follow) {
4880       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4881       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4882           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4883         Follow = true;
4884         return true;
4885       }
4886       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4887           (isa<StoreInst>(UserI) &&
4888            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4889         return true;
4890       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4891         // This check is not guaranteeing uniqueness but for now that we cannot
4892         // end up with two versions of \p U thinking it was one.
4893         if (!CB->getCalledFunction() ||
4894             !CB->getCalledFunction()->hasLocalLinkage())
4895           return true;
4896         if (!CB->isArgOperand(&U))
4897           return false;
4898         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4899             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4900             DepClassTy::OPTIONAL);
4901         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4902           return false;
4903         // If this call base might reach the scope again we might forward the
4904         // argument back here. This is very conservative.
4905         if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4906           return false;
4907         return true;
4908       }
4909       return false;
4910     };
4911 
4912     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4913       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4914         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4915         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4916           return true;
4917         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4918             *SI->getFunction());
4919         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4920           return true;
4921       }
4922       return false;
4923     };
4924 
4925     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4926                            DepClassTy::OPTIONAL,
4927                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4928       return indicatePessimisticFixpoint();
4929 
4930     return Changed;
4931   }
4932 
4933   /// See AbstractState::getAsStr().
4934   const std::string getAsStr() const override {
4935     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4936   }
4937 
4938   /// See AbstractAttribute::trackStatistics()
4939   void trackStatistics() const override {}
4940 };
4941 
4942 /// InstanceInfo attribute for floating values.
4943 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4944   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4945       : AAInstanceInfoImpl(IRP, A) {}
4946 };
4947 
4948 /// NoCapture attribute for function arguments.
4949 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4950   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4951       : AAInstanceInfoFloating(IRP, A) {}
4952 };
4953 
4954 /// InstanceInfo attribute for call site arguments.
4955 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4956   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4957       : AAInstanceInfoImpl(IRP, A) {}
4958 
4959   /// See AbstractAttribute::updateImpl(...).
4960   ChangeStatus updateImpl(Attributor &A) override {
4961     // TODO: Once we have call site specific value information we can provide
4962     //       call site specific liveness information and then it makes
4963     //       sense to specialize attributes for call sites arguments instead of
4964     //       redirecting requests to the callee argument.
4965     Argument *Arg = getAssociatedArgument();
4966     if (!Arg)
4967       return indicatePessimisticFixpoint();
4968     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4969     auto &ArgAA =
4970         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4971     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4972   }
4973 };
4974 
4975 /// InstanceInfo attribute for function return value.
4976 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4977   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4978       : AAInstanceInfoImpl(IRP, A) {
4979     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4980   }
4981 
4982   /// See AbstractAttribute::initialize(...).
4983   void initialize(Attributor &A) override {
4984     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4985   }
4986 
4987   /// See AbstractAttribute::updateImpl(...).
4988   ChangeStatus updateImpl(Attributor &A) override {
4989     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4990   }
4991 };
4992 
4993 /// InstanceInfo attribute deduction for a call site return value.
4994 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4995   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4996       : AAInstanceInfoFloating(IRP, A) {}
4997 };
4998 } // namespace
4999 
5000 /// ----------------------- Variable Capturing ---------------------------------
5001 
5002 namespace {
5003 /// A class to hold the state of for no-capture attributes.
5004 struct AANoCaptureImpl : public AANoCapture {
5005   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
5006 
5007   /// See AbstractAttribute::initialize(...).
5008   void initialize(Attributor &A) override {
5009     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
5010       indicateOptimisticFixpoint();
5011       return;
5012     }
5013     Function *AnchorScope = getAnchorScope();
5014     if (isFnInterfaceKind() &&
5015         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
5016       indicatePessimisticFixpoint();
5017       return;
5018     }
5019 
5020     // You cannot "capture" null in the default address space.
5021     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
5022         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
5023       indicateOptimisticFixpoint();
5024       return;
5025     }
5026 
5027     const Function *F =
5028         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
5029 
5030     // Check what state the associated function can actually capture.
5031     if (F)
5032       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5033     else
5034       indicatePessimisticFixpoint();
5035   }
5036 
5037   /// See AbstractAttribute::updateImpl(...).
5038   ChangeStatus updateImpl(Attributor &A) override;
5039 
5040   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
5041   virtual void
5042   getDeducedAttributes(LLVMContext &Ctx,
5043                        SmallVectorImpl<Attribute> &Attrs) const override {
5044     if (!isAssumedNoCaptureMaybeReturned())
5045       return;
5046 
5047     if (isArgumentPosition()) {
5048       if (isAssumedNoCapture())
5049         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
5050       else if (ManifestInternal)
5051         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
5052     }
5053   }
5054 
5055   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
5056   /// depending on the ability of the function associated with \p IRP to capture
5057   /// state in memory and through "returning/throwing", respectively.
5058   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
5059                                                    const Function &F,
5060                                                    BitIntegerState &State) {
5061     // TODO: Once we have memory behavior attributes we should use them here.
5062 
5063     // If we know we cannot communicate or write to memory, we do not care about
5064     // ptr2int anymore.
5065     if (F.onlyReadsMemory() && F.doesNotThrow() &&
5066         F.getReturnType()->isVoidTy()) {
5067       State.addKnownBits(NO_CAPTURE);
5068       return;
5069     }
5070 
5071     // A function cannot capture state in memory if it only reads memory, it can
5072     // however return/throw state and the state might be influenced by the
5073     // pointer value, e.g., loading from a returned pointer might reveal a bit.
5074     if (F.onlyReadsMemory())
5075       State.addKnownBits(NOT_CAPTURED_IN_MEM);
5076 
5077     // A function cannot communicate state back if it does not through
5078     // exceptions and doesn not return values.
5079     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
5080       State.addKnownBits(NOT_CAPTURED_IN_RET);
5081 
5082     // Check existing "returned" attributes.
5083     int ArgNo = IRP.getCalleeArgNo();
5084     if (F.doesNotThrow() && ArgNo >= 0) {
5085       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
5086         if (F.hasParamAttribute(u, Attribute::Returned)) {
5087           if (u == unsigned(ArgNo))
5088             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5089           else if (F.onlyReadsMemory())
5090             State.addKnownBits(NO_CAPTURE);
5091           else
5092             State.addKnownBits(NOT_CAPTURED_IN_RET);
5093           break;
5094         }
5095     }
5096   }
5097 
5098   /// See AbstractState::getAsStr().
5099   const std::string getAsStr() const override {
5100     if (isKnownNoCapture())
5101       return "known not-captured";
5102     if (isAssumedNoCapture())
5103       return "assumed not-captured";
5104     if (isKnownNoCaptureMaybeReturned())
5105       return "known not-captured-maybe-returned";
5106     if (isAssumedNoCaptureMaybeReturned())
5107       return "assumed not-captured-maybe-returned";
5108     return "assumed-captured";
5109   }
5110 
5111   /// Check the use \p U and update \p State accordingly. Return true if we
5112   /// should continue to update the state.
5113   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5114                 bool &Follow) {
5115     Instruction *UInst = cast<Instruction>(U.getUser());
5116     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5117                       << *UInst << "\n");
5118 
5119     // Deal with ptr2int by following uses.
5120     if (isa<PtrToIntInst>(UInst)) {
5121       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5122       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5123                           /* Return */ true);
5124     }
5125 
5126     // For stores we already checked if we can follow them, if they make it
5127     // here we give up.
5128     if (isa<StoreInst>(UInst))
5129       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5130                           /* Return */ false);
5131 
5132     // Explicitly catch return instructions.
5133     if (isa<ReturnInst>(UInst)) {
5134       if (UInst->getFunction() == getAnchorScope())
5135         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5136                             /* Return */ true);
5137       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5138                           /* Return */ true);
5139     }
5140 
5141     // For now we only use special logic for call sites. However, the tracker
5142     // itself knows about a lot of other non-capturing cases already.
5143     auto *CB = dyn_cast<CallBase>(UInst);
5144     if (!CB || !CB->isArgOperand(&U))
5145       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5146                           /* Return */ true);
5147 
5148     unsigned ArgNo = CB->getArgOperandNo(&U);
5149     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5150     // If we have a abstract no-capture attribute for the argument we can use
5151     // it to justify a non-capture attribute here. This allows recursion!
5152     auto &ArgNoCaptureAA =
5153         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5154     if (ArgNoCaptureAA.isAssumedNoCapture())
5155       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5156                           /* Return */ false);
5157     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5158       Follow = true;
5159       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5160                           /* Return */ false);
5161     }
5162 
5163     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5164     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5165                         /* Return */ true);
5166   }
5167 
5168   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5169   /// \p CapturedInRet, then return true if we should continue updating the
5170   /// state.
5171   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5172                            bool CapturedInInt, bool CapturedInRet) {
5173     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5174                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5175     if (CapturedInMem)
5176       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5177     if (CapturedInInt)
5178       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5179     if (CapturedInRet)
5180       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5181     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5182   }
5183 };
5184 
5185 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5186   const IRPosition &IRP = getIRPosition();
5187   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5188                                   : &IRP.getAssociatedValue();
5189   if (!V)
5190     return indicatePessimisticFixpoint();
5191 
5192   const Function *F =
5193       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5194   assert(F && "Expected a function!");
5195   const IRPosition &FnPos = IRPosition::function(*F);
5196 
5197   AANoCapture::StateType T;
5198 
5199   // Readonly means we cannot capture through memory.
5200   bool IsKnown;
5201   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5202     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5203     if (IsKnown)
5204       addKnownBits(NOT_CAPTURED_IN_MEM);
5205   }
5206 
5207   // Make sure all returned values are different than the underlying value.
5208   // TODO: we could do this in a more sophisticated way inside
5209   //       AAReturnedValues, e.g., track all values that escape through returns
5210   //       directly somehow.
5211   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5212     if (!RVAA.getState().isValidState())
5213       return false;
5214     bool SeenConstant = false;
5215     for (auto &It : RVAA.returned_values()) {
5216       if (isa<Constant>(It.first)) {
5217         if (SeenConstant)
5218           return false;
5219         SeenConstant = true;
5220       } else if (!isa<Argument>(It.first) ||
5221                  It.first == getAssociatedArgument())
5222         return false;
5223     }
5224     return true;
5225   };
5226 
5227   const auto &NoUnwindAA =
5228       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5229   if (NoUnwindAA.isAssumedNoUnwind()) {
5230     bool IsVoidTy = F->getReturnType()->isVoidTy();
5231     const AAReturnedValues *RVAA =
5232         IsVoidTy ? nullptr
5233                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5234 
5235                                                  DepClassTy::OPTIONAL);
5236     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5237       T.addKnownBits(NOT_CAPTURED_IN_RET);
5238       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5239         return ChangeStatus::UNCHANGED;
5240       if (NoUnwindAA.isKnownNoUnwind() &&
5241           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5242         addKnownBits(NOT_CAPTURED_IN_RET);
5243         if (isKnown(NOT_CAPTURED_IN_MEM))
5244           return indicateOptimisticFixpoint();
5245       }
5246     }
5247   }
5248 
5249   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5250     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5251         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5252     return DerefAA.getAssumedDereferenceableBytes();
5253   };
5254 
5255   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5256     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5257     case UseCaptureKind::NO_CAPTURE:
5258       return true;
5259     case UseCaptureKind::MAY_CAPTURE:
5260       return checkUse(A, T, U, Follow);
5261     case UseCaptureKind::PASSTHROUGH:
5262       Follow = true;
5263       return true;
5264     }
5265     llvm_unreachable("Unexpected use capture kind!");
5266   };
5267 
5268   if (!A.checkForAllUses(UseCheck, *this, *V))
5269     return indicatePessimisticFixpoint();
5270 
5271   AANoCapture::StateType &S = getState();
5272   auto Assumed = S.getAssumed();
5273   S.intersectAssumedBits(T.getAssumed());
5274   if (!isAssumedNoCaptureMaybeReturned())
5275     return indicatePessimisticFixpoint();
5276   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5277                                    : ChangeStatus::CHANGED;
5278 }
5279 
5280 /// NoCapture attribute for function arguments.
5281 struct AANoCaptureArgument final : AANoCaptureImpl {
5282   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5283       : AANoCaptureImpl(IRP, A) {}
5284 
5285   /// See AbstractAttribute::trackStatistics()
5286   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5287 };
5288 
5289 /// NoCapture attribute for call site arguments.
5290 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5291   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5292       : AANoCaptureImpl(IRP, A) {}
5293 
5294   /// See AbstractAttribute::initialize(...).
5295   void initialize(Attributor &A) override {
5296     if (Argument *Arg = getAssociatedArgument())
5297       if (Arg->hasByValAttr())
5298         indicateOptimisticFixpoint();
5299     AANoCaptureImpl::initialize(A);
5300   }
5301 
5302   /// See AbstractAttribute::updateImpl(...).
5303   ChangeStatus updateImpl(Attributor &A) override {
5304     // TODO: Once we have call site specific value information we can provide
5305     //       call site specific liveness information and then it makes
5306     //       sense to specialize attributes for call sites arguments instead of
5307     //       redirecting requests to the callee argument.
5308     Argument *Arg = getAssociatedArgument();
5309     if (!Arg)
5310       return indicatePessimisticFixpoint();
5311     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5312     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5313     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5314   }
5315 
5316   /// See AbstractAttribute::trackStatistics()
5317   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5318 };
5319 
5320 /// NoCapture attribute for floating values.
5321 struct AANoCaptureFloating final : AANoCaptureImpl {
5322   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5323       : AANoCaptureImpl(IRP, A) {}
5324 
5325   /// See AbstractAttribute::trackStatistics()
5326   void trackStatistics() const override {
5327     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5328   }
5329 };
5330 
5331 /// NoCapture attribute for function return value.
5332 struct AANoCaptureReturned final : AANoCaptureImpl {
5333   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5334       : AANoCaptureImpl(IRP, A) {
5335     llvm_unreachable("NoCapture is not applicable to function returns!");
5336   }
5337 
5338   /// See AbstractAttribute::initialize(...).
5339   void initialize(Attributor &A) override {
5340     llvm_unreachable("NoCapture is not applicable to function returns!");
5341   }
5342 
5343   /// See AbstractAttribute::updateImpl(...).
5344   ChangeStatus updateImpl(Attributor &A) override {
5345     llvm_unreachable("NoCapture is not applicable to function returns!");
5346   }
5347 
5348   /// See AbstractAttribute::trackStatistics()
5349   void trackStatistics() const override {}
5350 };
5351 
5352 /// NoCapture attribute deduction for a call site return value.
5353 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5354   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5355       : AANoCaptureImpl(IRP, A) {}
5356 
5357   /// See AbstractAttribute::initialize(...).
5358   void initialize(Attributor &A) override {
5359     const Function *F = getAnchorScope();
5360     // Check what state the associated function can actually capture.
5361     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5362   }
5363 
5364   /// See AbstractAttribute::trackStatistics()
5365   void trackStatistics() const override {
5366     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5367   }
5368 };
5369 } // namespace
5370 
5371 /// ------------------ Value Simplify Attribute ----------------------------
5372 
5373 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5374   // FIXME: Add a typecast support.
5375   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5376       SimplifiedAssociatedValue, Other, Ty);
5377   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5378     return false;
5379 
5380   LLVM_DEBUG({
5381     if (SimplifiedAssociatedValue)
5382       dbgs() << "[ValueSimplify] is assumed to be "
5383              << **SimplifiedAssociatedValue << "\n";
5384     else
5385       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5386   });
5387   return true;
5388 }
5389 
5390 namespace {
5391 struct AAValueSimplifyImpl : AAValueSimplify {
5392   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5393       : AAValueSimplify(IRP, A) {}
5394 
5395   /// See AbstractAttribute::initialize(...).
5396   void initialize(Attributor &A) override {
5397     if (getAssociatedValue().getType()->isVoidTy())
5398       indicatePessimisticFixpoint();
5399     if (A.hasSimplificationCallback(getIRPosition()))
5400       indicatePessimisticFixpoint();
5401   }
5402 
5403   /// See AbstractAttribute::getAsStr().
5404   const std::string getAsStr() const override {
5405     LLVM_DEBUG({
5406       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5407       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5408         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5409     });
5410     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5411                           : "not-simple";
5412   }
5413 
5414   /// See AbstractAttribute::trackStatistics()
5415   void trackStatistics() const override {}
5416 
5417   /// See AAValueSimplify::getAssumedSimplifiedValue()
5418   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5419     return SimplifiedAssociatedValue;
5420   }
5421 
5422   /// Ensure the return value is \p V with type \p Ty, if not possible return
5423   /// nullptr. If \p Check is true we will only verify such an operation would
5424   /// suceed and return a non-nullptr value if that is the case. No IR is
5425   /// generated or modified.
5426   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5427                            bool Check) {
5428     if (auto *TypedV = AA::getWithType(V, Ty))
5429       return TypedV;
5430     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5431       return Check ? &V
5432                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5433                                                                       "", CtxI);
5434     return nullptr;
5435   }
5436 
5437   /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble.
5438   /// If \p Check is true we will only verify such an operation would suceed and
5439   /// return a non-nullptr value if that is the case. No IR is generated or
5440   /// modified.
5441   static Value *reproduceInst(Attributor &A,
5442                               const AbstractAttribute &QueryingAA,
5443                               Instruction &I, Type &Ty, Instruction *CtxI,
5444                               bool Check, ValueToValueMapTy &VMap) {
5445     assert(CtxI && "Cannot reproduce an instruction without context!");
5446     if (Check && (I.mayReadFromMemory() ||
5447                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5448                                                 /* TLI */ nullptr)))
5449       return nullptr;
5450     for (Value *Op : I.operands()) {
5451       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5452       if (!NewOp) {
5453         assert(Check && "Manifest of new value unexpectedly failed!");
5454         return nullptr;
5455       }
5456       if (!Check)
5457         VMap[Op] = NewOp;
5458     }
5459     if (Check)
5460       return &I;
5461 
5462     Instruction *CloneI = I.clone();
5463     // TODO: Try to salvage debug information here.
5464     CloneI->setDebugLoc(DebugLoc());
5465     VMap[&I] = CloneI;
5466     CloneI->insertBefore(CtxI);
5467     RemapInstruction(CloneI, VMap);
5468     return CloneI;
5469   }
5470 
5471   /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble.
5472   /// If \p Check is true we will only verify such an operation would suceed and
5473   /// return a non-nullptr value if that is the case. No IR is generated or
5474   /// modified.
5475   static Value *reproduceValue(Attributor &A,
5476                                const AbstractAttribute &QueryingAA, Value &V,
5477                                Type &Ty, Instruction *CtxI, bool Check,
5478                                ValueToValueMapTy &VMap) {
5479     if (const auto &NewV = VMap.lookup(&V))
5480       return NewV;
5481     bool UsedAssumedInformation = false;
5482     Optional<Value *> SimpleV =
5483         A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation);
5484     if (!SimpleV.hasValue())
5485       return PoisonValue::get(&Ty);
5486     Value *EffectiveV = &V;
5487     if (SimpleV.getValue())
5488       EffectiveV = SimpleV.getValue();
5489     if (auto *C = dyn_cast<Constant>(EffectiveV))
5490       if (!C->canTrap())
5491         return C;
5492     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5493                                       A.getInfoCache()))
5494       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5495     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5496       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5497         return ensureType(A, *NewV, Ty, CtxI, Check);
5498     return nullptr;
5499   }
5500 
5501   /// Return a value we can use as replacement for the associated one, or
5502   /// nullptr if we don't have one that makes sense.
5503   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5504     Value *NewV = SimplifiedAssociatedValue.hasValue()
5505                       ? SimplifiedAssociatedValue.getValue()
5506                       : UndefValue::get(getAssociatedType());
5507     if (NewV && NewV != &getAssociatedValue()) {
5508       ValueToValueMapTy VMap;
5509       // First verify we can reprduce the value with the required type at the
5510       // context location before we actually start modifying the IR.
5511       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5512                          /* CheckOnly */ true, VMap))
5513         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5514                               /* CheckOnly */ false, VMap);
5515     }
5516     return nullptr;
5517   }
5518 
5519   /// Helper function for querying AAValueSimplify and updating candicate.
5520   /// \param IRP The value position we are trying to unify with SimplifiedValue
5521   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5522                       const IRPosition &IRP, bool Simplify = true) {
5523     bool UsedAssumedInformation = false;
5524     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5525     if (Simplify)
5526       QueryingValueSimplified =
5527           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5528     return unionAssumed(QueryingValueSimplified);
5529   }
5530 
5531   /// Returns a candidate is found or not
5532   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5533     if (!getAssociatedValue().getType()->isIntegerTy())
5534       return false;
5535 
5536     // This will also pass the call base context.
5537     const auto &AA =
5538         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5539 
5540     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5541 
5542     if (!COpt) {
5543       SimplifiedAssociatedValue = llvm::None;
5544       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5545       return true;
5546     }
5547     if (auto *C = *COpt) {
5548       SimplifiedAssociatedValue = C;
5549       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5550       return true;
5551     }
5552     return false;
5553   }
5554 
5555   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5556     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5557       return true;
5558     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5559       return true;
5560     return false;
5561   }
5562 
5563   /// See AbstractAttribute::manifest(...).
5564   ChangeStatus manifest(Attributor &A) override {
5565     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5566     for (auto &U : getAssociatedValue().uses()) {
5567       // Check if we need to adjust the insertion point to make sure the IR is
5568       // valid.
5569       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5570       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5571         IP = PHI->getIncomingBlock(U)->getTerminator();
5572       if (auto *NewV = manifestReplacementValue(A, IP)) {
5573         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5574                           << " -> " << *NewV << " :: " << *this << "\n");
5575         if (A.changeUseAfterManifest(U, *NewV))
5576           Changed = ChangeStatus::CHANGED;
5577       }
5578     }
5579 
5580     return Changed | AAValueSimplify::manifest(A);
5581   }
5582 
5583   /// See AbstractState::indicatePessimisticFixpoint(...).
5584   ChangeStatus indicatePessimisticFixpoint() override {
5585     SimplifiedAssociatedValue = &getAssociatedValue();
5586     return AAValueSimplify::indicatePessimisticFixpoint();
5587   }
5588 };
5589 
5590 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5591   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5592       : AAValueSimplifyImpl(IRP, A) {}
5593 
5594   void initialize(Attributor &A) override {
5595     AAValueSimplifyImpl::initialize(A);
5596     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5597       indicatePessimisticFixpoint();
5598     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5599                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5600                 /* IgnoreSubsumingPositions */ true))
5601       indicatePessimisticFixpoint();
5602   }
5603 
5604   /// See AbstractAttribute::updateImpl(...).
5605   ChangeStatus updateImpl(Attributor &A) override {
5606     // Byval is only replacable if it is readonly otherwise we would write into
5607     // the replaced value and not the copy that byval creates implicitly.
5608     Argument *Arg = getAssociatedArgument();
5609     if (Arg->hasByValAttr()) {
5610       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5611       //       there is no race by not copying a constant byval.
5612       bool IsKnown;
5613       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5614         return indicatePessimisticFixpoint();
5615     }
5616 
5617     auto Before = SimplifiedAssociatedValue;
5618 
5619     auto PredForCallSite = [&](AbstractCallSite ACS) {
5620       const IRPosition &ACSArgPos =
5621           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5622       // Check if a coresponding argument was found or if it is on not
5623       // associated (which can happen for callback calls).
5624       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5625         return false;
5626 
5627       // Simplify the argument operand explicitly and check if the result is
5628       // valid in the current scope. This avoids refering to simplified values
5629       // in other functions, e.g., we don't want to say a an argument in a
5630       // static function is actually an argument in a different function.
5631       bool UsedAssumedInformation = false;
5632       Optional<Constant *> SimpleArgOp =
5633           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5634       if (!SimpleArgOp.hasValue())
5635         return true;
5636       if (!SimpleArgOp.getValue())
5637         return false;
5638       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5639         return false;
5640       return unionAssumed(*SimpleArgOp);
5641     };
5642 
5643     // Generate a answer specific to a call site context.
5644     bool Success;
5645     bool UsedAssumedInformation = false;
5646     if (hasCallBaseContext() &&
5647         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5648       Success = PredForCallSite(
5649           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5650     else
5651       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5652                                        UsedAssumedInformation);
5653 
5654     if (!Success)
5655       if (!askSimplifiedValueForOtherAAs(A))
5656         return indicatePessimisticFixpoint();
5657 
5658     // If a candicate was found in this update, return CHANGED.
5659     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5660                                                : ChangeStatus ::CHANGED;
5661   }
5662 
5663   /// See AbstractAttribute::trackStatistics()
5664   void trackStatistics() const override {
5665     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5666   }
5667 };
5668 
5669 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5670   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5671       : AAValueSimplifyImpl(IRP, A) {}
5672 
5673   /// See AAValueSimplify::getAssumedSimplifiedValue()
5674   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5675     if (!isValidState())
5676       return nullptr;
5677     return SimplifiedAssociatedValue;
5678   }
5679 
5680   /// See AbstractAttribute::updateImpl(...).
5681   ChangeStatus updateImpl(Attributor &A) override {
5682     auto Before = SimplifiedAssociatedValue;
5683 
5684     auto ReturnInstCB = [&](Instruction &I) {
5685       auto &RI = cast<ReturnInst>(I);
5686       return checkAndUpdate(
5687           A, *this,
5688           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5689     };
5690 
5691     bool UsedAssumedInformation = false;
5692     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5693                                    UsedAssumedInformation))
5694       if (!askSimplifiedValueForOtherAAs(A))
5695         return indicatePessimisticFixpoint();
5696 
5697     // If a candicate was found in this update, return CHANGED.
5698     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5699                                                : ChangeStatus ::CHANGED;
5700   }
5701 
5702   ChangeStatus manifest(Attributor &A) override {
5703     // We queried AAValueSimplify for the returned values so they will be
5704     // replaced if a simplified form was found. Nothing to do here.
5705     return ChangeStatus::UNCHANGED;
5706   }
5707 
5708   /// See AbstractAttribute::trackStatistics()
5709   void trackStatistics() const override {
5710     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5711   }
5712 };
5713 
5714 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5715   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5716       : AAValueSimplifyImpl(IRP, A) {}
5717 
5718   /// See AbstractAttribute::initialize(...).
5719   void initialize(Attributor &A) override {
5720     AAValueSimplifyImpl::initialize(A);
5721     Value &V = getAnchorValue();
5722 
5723     // TODO: add other stuffs
5724     if (isa<Constant>(V))
5725       indicatePessimisticFixpoint();
5726   }
5727 
5728   /// Check if \p Cmp is a comparison we can simplify.
5729   ///
5730   /// We handle multiple cases, one in which at least one operand is an
5731   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5732   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5733   /// will be updated.
5734   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5735     auto Union = [&](Value &V) {
5736       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5737           SimplifiedAssociatedValue, &V, V.getType());
5738       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5739     };
5740 
5741     Value *LHS = Cmp.getOperand(0);
5742     Value *RHS = Cmp.getOperand(1);
5743 
5744     // Simplify the operands first.
5745     bool UsedAssumedInformation = false;
5746     const auto &SimplifiedLHS =
5747         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5748                                *this, UsedAssumedInformation);
5749     if (!SimplifiedLHS.hasValue())
5750       return true;
5751     if (!SimplifiedLHS.getValue())
5752       return false;
5753     LHS = *SimplifiedLHS;
5754 
5755     const auto &SimplifiedRHS =
5756         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5757                                *this, UsedAssumedInformation);
5758     if (!SimplifiedRHS.hasValue())
5759       return true;
5760     if (!SimplifiedRHS.getValue())
5761       return false;
5762     RHS = *SimplifiedRHS;
5763 
5764     LLVMContext &Ctx = Cmp.getContext();
5765     // Handle the trivial case first in which we don't even need to think about
5766     // null or non-null.
5767     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5768       Constant *NewVal =
5769           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5770       if (!Union(*NewVal))
5771         return false;
5772       if (!UsedAssumedInformation)
5773         indicateOptimisticFixpoint();
5774       return true;
5775     }
5776 
5777     // From now on we only handle equalities (==, !=).
5778     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5779     if (!ICmp || !ICmp->isEquality())
5780       return false;
5781 
5782     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5783     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5784     if (!LHSIsNull && !RHSIsNull)
5785       return false;
5786 
5787     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5788     // non-nullptr operand and if we assume it's non-null we can conclude the
5789     // result of the comparison.
5790     assert((LHSIsNull || RHSIsNull) &&
5791            "Expected nullptr versus non-nullptr comparison at this point");
5792 
5793     // The index is the operand that we assume is not null.
5794     unsigned PtrIdx = LHSIsNull;
5795     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5796         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5797         DepClassTy::REQUIRED);
5798     if (!PtrNonNullAA.isAssumedNonNull())
5799       return false;
5800     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5801 
5802     // The new value depends on the predicate, true for != and false for ==.
5803     Constant *NewVal = ConstantInt::get(
5804         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5805     if (!Union(*NewVal))
5806       return false;
5807 
5808     if (!UsedAssumedInformation)
5809       indicateOptimisticFixpoint();
5810 
5811     return true;
5812   }
5813 
5814   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5815   /// simplify any operand of the instruction \p I. Return true if successful,
5816   /// in that case SimplifiedAssociatedValue will be updated.
5817   bool handleGenericInst(Attributor &A, Instruction &I) {
5818     bool SomeSimplified = false;
5819     bool UsedAssumedInformation = false;
5820 
5821     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5822     int Idx = 0;
5823     for (Value *Op : I.operands()) {
5824       const auto &SimplifiedOp =
5825           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5826                                  *this, UsedAssumedInformation);
5827       // If we are not sure about any operand we are not sure about the entire
5828       // instruction, we'll wait.
5829       if (!SimplifiedOp.hasValue())
5830         return true;
5831 
5832       if (SimplifiedOp.getValue())
5833         NewOps[Idx] = SimplifiedOp.getValue();
5834       else
5835         NewOps[Idx] = Op;
5836 
5837       SomeSimplified |= (NewOps[Idx] != Op);
5838       ++Idx;
5839     }
5840 
5841     // We won't bother with the InstSimplify interface if we didn't simplify any
5842     // operand ourselves.
5843     if (!SomeSimplified)
5844       return false;
5845 
5846     InformationCache &InfoCache = A.getInfoCache();
5847     Function *F = I.getFunction();
5848     const auto *DT =
5849         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5850     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5851     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5852     OptimizationRemarkEmitter *ORE = nullptr;
5853 
5854     const DataLayout &DL = I.getModule()->getDataLayout();
5855     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5856     if (Value *SimplifiedI =
5857             simplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5858       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5859           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5860       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5861     }
5862     return false;
5863   }
5864 
5865   /// See AbstractAttribute::updateImpl(...).
5866   ChangeStatus updateImpl(Attributor &A) override {
5867     auto Before = SimplifiedAssociatedValue;
5868 
5869     // Do not simplify loads that are only used in llvm.assume if we cannot also
5870     // remove all stores that may feed into the load. The reason is that the
5871     // assume is probably worth something as long as the stores are around.
5872     if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
5873       InformationCache &InfoCache = A.getInfoCache();
5874       if (InfoCache.isOnlyUsedByAssume(*LI)) {
5875         SmallSetVector<Value *, 4> PotentialCopies;
5876         SmallSetVector<Instruction *, 4> PotentialValueOrigins;
5877         bool UsedAssumedInformation = false;
5878         if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
5879                                            PotentialValueOrigins, *this,
5880                                            UsedAssumedInformation,
5881                                            /* OnlyExact */ true)) {
5882           if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
5883                 if (!I)
5884                   return true;
5885                 if (auto *SI = dyn_cast<StoreInst>(I))
5886                   return A.isAssumedDead(SI->getOperandUse(0), this,
5887                                          /* LivenessAA */ nullptr,
5888                                          UsedAssumedInformation,
5889                                          /* CheckBBLivenessOnly */ false);
5890                 return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
5891                                        UsedAssumedInformation,
5892                                        /* CheckBBLivenessOnly */ false);
5893               }))
5894             return indicatePessimisticFixpoint();
5895         }
5896       }
5897     }
5898 
5899     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5900                             bool Stripped) -> bool {
5901       auto &AA = A.getAAFor<AAValueSimplify>(
5902           *this, IRPosition::value(V, getCallBaseContext()),
5903           DepClassTy::REQUIRED);
5904       if (!Stripped && this == &AA) {
5905 
5906         if (auto *I = dyn_cast<Instruction>(&V)) {
5907           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5908             if (handleCmp(A, *Cmp))
5909               return true;
5910           if (handleGenericInst(A, *I))
5911             return true;
5912         }
5913         // TODO: Look the instruction and check recursively.
5914 
5915         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5916                           << "\n");
5917         return false;
5918       }
5919       return checkAndUpdate(A, *this,
5920                             IRPosition::value(V, getCallBaseContext()));
5921     };
5922 
5923     bool Dummy = false;
5924     bool UsedAssumedInformation = false;
5925     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5926                                      VisitValueCB, getCtxI(),
5927                                      UsedAssumedInformation,
5928                                      /* UseValueSimplify */ false))
5929       if (!askSimplifiedValueForOtherAAs(A))
5930         return indicatePessimisticFixpoint();
5931 
5932     // If a candicate was found in this update, return CHANGED.
5933     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5934                                                : ChangeStatus ::CHANGED;
5935   }
5936 
5937   /// See AbstractAttribute::trackStatistics()
5938   void trackStatistics() const override {
5939     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5940   }
5941 };
5942 
5943 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5944   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5945       : AAValueSimplifyImpl(IRP, A) {}
5946 
5947   /// See AbstractAttribute::initialize(...).
5948   void initialize(Attributor &A) override {
5949     SimplifiedAssociatedValue = nullptr;
5950     indicateOptimisticFixpoint();
5951   }
5952   /// See AbstractAttribute::initialize(...).
5953   ChangeStatus updateImpl(Attributor &A) override {
5954     llvm_unreachable(
5955         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5956   }
5957   /// See AbstractAttribute::trackStatistics()
5958   void trackStatistics() const override {
5959     STATS_DECLTRACK_FN_ATTR(value_simplify)
5960   }
5961 };
5962 
5963 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5964   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5965       : AAValueSimplifyFunction(IRP, A) {}
5966   /// See AbstractAttribute::trackStatistics()
5967   void trackStatistics() const override {
5968     STATS_DECLTRACK_CS_ATTR(value_simplify)
5969   }
5970 };
5971 
5972 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5973   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5974       : AAValueSimplifyImpl(IRP, A) {}
5975 
5976   void initialize(Attributor &A) override {
5977     AAValueSimplifyImpl::initialize(A);
5978     Function *Fn = getAssociatedFunction();
5979     if (!Fn) {
5980       indicatePessimisticFixpoint();
5981       return;
5982     }
5983     for (Argument &Arg : Fn->args()) {
5984       if (Arg.hasReturnedAttr()) {
5985         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5986                                                  Arg.getArgNo());
5987         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5988             checkAndUpdate(A, *this, IRP))
5989           indicateOptimisticFixpoint();
5990         else
5991           indicatePessimisticFixpoint();
5992         return;
5993       }
5994     }
5995   }
5996 
5997   /// See AbstractAttribute::updateImpl(...).
5998   ChangeStatus updateImpl(Attributor &A) override {
5999     auto Before = SimplifiedAssociatedValue;
6000     auto &RetAA = A.getAAFor<AAReturnedValues>(
6001         *this, IRPosition::function(*getAssociatedFunction()),
6002         DepClassTy::REQUIRED);
6003     auto PredForReturned =
6004         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
6005           bool UsedAssumedInformation = false;
6006           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
6007               &RetVal, *cast<CallBase>(getCtxI()), *this,
6008               UsedAssumedInformation);
6009           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
6010               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
6011           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
6012         };
6013     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
6014       if (!askSimplifiedValueForOtherAAs(A))
6015         return indicatePessimisticFixpoint();
6016     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
6017                                                : ChangeStatus ::CHANGED;
6018   }
6019 
6020   void trackStatistics() const override {
6021     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
6022   }
6023 };
6024 
6025 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
6026   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
6027       : AAValueSimplifyFloating(IRP, A) {}
6028 
6029   /// See AbstractAttribute::manifest(...).
6030   ChangeStatus manifest(Attributor &A) override {
6031     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6032     // TODO: We should avoid simplification duplication to begin with.
6033     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
6034         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
6035     if (FloatAA && FloatAA->getState().isValidState())
6036       return Changed;
6037 
6038     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
6039       Use &U = cast<CallBase>(&getAnchorValue())
6040                    ->getArgOperandUse(getCallSiteArgNo());
6041       if (A.changeUseAfterManifest(U, *NewV))
6042         Changed = ChangeStatus::CHANGED;
6043     }
6044 
6045     return Changed | AAValueSimplify::manifest(A);
6046   }
6047 
6048   void trackStatistics() const override {
6049     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
6050   }
6051 };
6052 } // namespace
6053 
6054 /// ----------------------- Heap-To-Stack Conversion ---------------------------
6055 namespace {
6056 struct AAHeapToStackFunction final : public AAHeapToStack {
6057 
6058   struct AllocationInfo {
6059     /// The call that allocates the memory.
6060     CallBase *const CB;
6061 
6062     /// The library function id for the allocation.
6063     LibFunc LibraryFunctionId = NotLibFunc;
6064 
6065     /// The status wrt. a rewrite.
6066     enum {
6067       STACK_DUE_TO_USE,
6068       STACK_DUE_TO_FREE,
6069       INVALID,
6070     } Status = STACK_DUE_TO_USE;
6071 
6072     /// Flag to indicate if we encountered a use that might free this allocation
6073     /// but which is not in the deallocation infos.
6074     bool HasPotentiallyFreeingUnknownUses = false;
6075 
6076     /// The set of free calls that use this allocation.
6077     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
6078   };
6079 
6080   struct DeallocationInfo {
6081     /// The call that deallocates the memory.
6082     CallBase *const CB;
6083 
6084     /// Flag to indicate if we don't know all objects this deallocation might
6085     /// free.
6086     bool MightFreeUnknownObjects = false;
6087 
6088     /// The set of allocation calls that are potentially freed.
6089     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
6090   };
6091 
6092   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
6093       : AAHeapToStack(IRP, A) {}
6094 
6095   ~AAHeapToStackFunction() {
6096     // Ensure we call the destructor so we release any memory allocated in the
6097     // sets.
6098     for (auto &It : AllocationInfos)
6099       It.second->~AllocationInfo();
6100     for (auto &It : DeallocationInfos)
6101       It.second->~DeallocationInfo();
6102   }
6103 
6104   void initialize(Attributor &A) override {
6105     AAHeapToStack::initialize(A);
6106 
6107     const Function *F = getAnchorScope();
6108     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6109 
6110     auto AllocationIdentifierCB = [&](Instruction &I) {
6111       CallBase *CB = dyn_cast<CallBase>(&I);
6112       if (!CB)
6113         return true;
6114       if (isFreeCall(CB, TLI)) {
6115         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
6116         return true;
6117       }
6118       // To do heap to stack, we need to know that the allocation itself is
6119       // removable once uses are rewritten, and that we can initialize the
6120       // alloca to the same pattern as the original allocation result.
6121       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
6122         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
6123         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
6124           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
6125           AllocationInfos[CB] = AI;
6126           if (TLI)
6127             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
6128         }
6129       }
6130       return true;
6131     };
6132 
6133     bool UsedAssumedInformation = false;
6134     bool Success = A.checkForAllCallLikeInstructions(
6135         AllocationIdentifierCB, *this, UsedAssumedInformation,
6136         /* CheckBBLivenessOnly */ false,
6137         /* CheckPotentiallyDead */ true);
6138     (void)Success;
6139     assert(Success && "Did not expect the call base visit callback to fail!");
6140 
6141     Attributor::SimplifictionCallbackTy SCB =
6142         [](const IRPosition &, const AbstractAttribute *,
6143            bool &) -> Optional<Value *> { return nullptr; };
6144     for (const auto &It : AllocationInfos)
6145       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6146                                        SCB);
6147     for (const auto &It : DeallocationInfos)
6148       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
6149                                        SCB);
6150   }
6151 
6152   const std::string getAsStr() const override {
6153     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
6154     for (const auto &It : AllocationInfos) {
6155       if (It.second->Status == AllocationInfo::INVALID)
6156         ++NumInvalidMallocs;
6157       else
6158         ++NumH2SMallocs;
6159     }
6160     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
6161            std::to_string(NumInvalidMallocs);
6162   }
6163 
6164   /// See AbstractAttribute::trackStatistics().
6165   void trackStatistics() const override {
6166     STATS_DECL(
6167         MallocCalls, Function,
6168         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
6169     for (auto &It : AllocationInfos)
6170       if (It.second->Status != AllocationInfo::INVALID)
6171         ++BUILD_STAT_NAME(MallocCalls, Function);
6172   }
6173 
6174   bool isAssumedHeapToStack(const CallBase &CB) const override {
6175     if (isValidState())
6176       if (AllocationInfo *AI =
6177               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
6178         return AI->Status != AllocationInfo::INVALID;
6179     return false;
6180   }
6181 
6182   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6183     if (!isValidState())
6184       return false;
6185 
6186     for (auto &It : AllocationInfos) {
6187       AllocationInfo &AI = *It.second;
6188       if (AI.Status == AllocationInfo::INVALID)
6189         continue;
6190 
6191       if (AI.PotentialFreeCalls.count(&CB))
6192         return true;
6193     }
6194 
6195     return false;
6196   }
6197 
6198   ChangeStatus manifest(Attributor &A) override {
6199     assert(getState().isValidState() &&
6200            "Attempted to manifest an invalid state!");
6201 
6202     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6203     Function *F = getAnchorScope();
6204     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6205 
6206     for (auto &It : AllocationInfos) {
6207       AllocationInfo &AI = *It.second;
6208       if (AI.Status == AllocationInfo::INVALID)
6209         continue;
6210 
6211       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6212         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6213         A.deleteAfterManifest(*FreeCall);
6214         HasChanged = ChangeStatus::CHANGED;
6215       }
6216 
6217       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6218                         << "\n");
6219 
6220       auto Remark = [&](OptimizationRemark OR) {
6221         LibFunc IsAllocShared;
6222         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6223           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6224             return OR << "Moving globalized variable to the stack.";
6225         return OR << "Moving memory allocation from the heap to the stack.";
6226       };
6227       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6228         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6229       else
6230         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6231 
6232       const DataLayout &DL = A.getInfoCache().getDL();
6233       Value *Size;
6234       Optional<APInt> SizeAPI = getSize(A, *this, AI);
6235       if (SizeAPI) {
6236         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6237       } else {
6238         LLVMContext &Ctx = AI.CB->getContext();
6239         ObjectSizeOpts Opts;
6240         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6241         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6242         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6243                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6244         Size = SizeOffsetPair.first;
6245       }
6246 
6247       Align Alignment(1);
6248       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6249         Alignment = std::max(Alignment, *RetAlign);
6250       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6251         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6252         assert(AlignmentAPI.hasValue() &&
6253                AlignmentAPI.getValue().getZExtValue() > 0 &&
6254                "Expected an alignment during manifest!");
6255         Alignment = std::max(
6256             Alignment, assumeAligned(AlignmentAPI.getValue().getZExtValue()));
6257       }
6258 
6259       // TODO: Hoist the alloca towards the function entry.
6260       unsigned AS = DL.getAllocaAddrSpace();
6261       Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
6262                                            Size, Alignment, "", AI.CB);
6263 
6264       if (Alloca->getType() != AI.CB->getType())
6265         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6266             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6267 
6268       auto *I8Ty = Type::getInt8Ty(F->getContext());
6269       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6270       assert(InitVal &&
6271              "Must be able to materialize initial memory state of allocation");
6272 
6273       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
6274 
6275       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6276         auto *NBB = II->getNormalDest();
6277         BranchInst::Create(NBB, AI.CB->getParent());
6278         A.deleteAfterManifest(*AI.CB);
6279       } else {
6280         A.deleteAfterManifest(*AI.CB);
6281       }
6282 
6283       // Initialize the alloca with the same value as used by the allocation
6284       // function.  We can skip undef as the initial value of an alloc is
6285       // undef, and the memset would simply end up being DSEd.
6286       if (!isa<UndefValue>(InitVal)) {
6287         IRBuilder<> Builder(Alloca->getNextNode());
6288         // TODO: Use alignment above if align!=1
6289         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6290       }
6291       HasChanged = ChangeStatus::CHANGED;
6292     }
6293 
6294     return HasChanged;
6295   }
6296 
6297   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6298                            Value &V) {
6299     bool UsedAssumedInformation = false;
6300     Optional<Constant *> SimpleV =
6301         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6302     if (!SimpleV.hasValue())
6303       return APInt(64, 0);
6304     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6305       return CI->getValue();
6306     return llvm::None;
6307   }
6308 
6309   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6310                           AllocationInfo &AI) {
6311     auto Mapper = [&](const Value *V) -> const Value * {
6312       bool UsedAssumedInformation = false;
6313       if (Optional<Constant *> SimpleV =
6314               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6315         if (*SimpleV)
6316           return *SimpleV;
6317       return V;
6318     };
6319 
6320     const Function *F = getAnchorScope();
6321     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6322     return getAllocSize(AI.CB, TLI, Mapper);
6323   }
6324 
6325   /// Collection of all malloc-like calls in a function with associated
6326   /// information.
6327   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6328 
6329   /// Collection of all free-like calls in a function with associated
6330   /// information.
6331   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6332 
6333   ChangeStatus updateImpl(Attributor &A) override;
6334 };
6335 
6336 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6337   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6338   const Function *F = getAnchorScope();
6339   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6340 
6341   const auto &LivenessAA =
6342       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6343 
6344   MustBeExecutedContextExplorer &Explorer =
6345       A.getInfoCache().getMustBeExecutedContextExplorer();
6346 
6347   bool StackIsAccessibleByOtherThreads =
6348       A.getInfoCache().stackIsAccessibleByOtherThreads();
6349 
6350   // Flag to ensure we update our deallocation information at most once per
6351   // updateImpl call and only if we use the free check reasoning.
6352   bool HasUpdatedFrees = false;
6353 
6354   auto UpdateFrees = [&]() {
6355     HasUpdatedFrees = true;
6356 
6357     for (auto &It : DeallocationInfos) {
6358       DeallocationInfo &DI = *It.second;
6359       // For now we cannot use deallocations that have unknown inputs, skip
6360       // them.
6361       if (DI.MightFreeUnknownObjects)
6362         continue;
6363 
6364       // No need to analyze dead calls, ignore them instead.
6365       bool UsedAssumedInformation = false;
6366       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6367                           /* CheckBBLivenessOnly */ true))
6368         continue;
6369 
6370       // Use the optimistic version to get the freed objects, ignoring dead
6371       // branches etc.
6372       SmallVector<Value *, 8> Objects;
6373       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6374                                            *this, DI.CB,
6375                                            UsedAssumedInformation)) {
6376         LLVM_DEBUG(
6377             dbgs()
6378             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6379         DI.MightFreeUnknownObjects = true;
6380         continue;
6381       }
6382 
6383       // Check each object explicitly.
6384       for (auto *Obj : Objects) {
6385         // Free of null and undef can be ignored as no-ops (or UB in the latter
6386         // case).
6387         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6388           continue;
6389 
6390         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6391         if (!ObjCB) {
6392           LLVM_DEBUG(dbgs()
6393                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6394           DI.MightFreeUnknownObjects = true;
6395           continue;
6396         }
6397 
6398         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6399         if (!AI) {
6400           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6401                             << "\n");
6402           DI.MightFreeUnknownObjects = true;
6403           continue;
6404         }
6405 
6406         DI.PotentialAllocationCalls.insert(ObjCB);
6407       }
6408     }
6409   };
6410 
6411   auto FreeCheck = [&](AllocationInfo &AI) {
6412     // If the stack is not accessible by other threads, the "must-free" logic
6413     // doesn't apply as the pointer could be shared and needs to be places in
6414     // "shareable" memory.
6415     if (!StackIsAccessibleByOtherThreads) {
6416       auto &NoSyncAA =
6417           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6418       if (!NoSyncAA.isAssumedNoSync()) {
6419         LLVM_DEBUG(
6420             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6421                       "other threads and function is not nosync:\n");
6422         return false;
6423       }
6424     }
6425     if (!HasUpdatedFrees)
6426       UpdateFrees();
6427 
6428     // TODO: Allow multi exit functions that have different free calls.
6429     if (AI.PotentialFreeCalls.size() != 1) {
6430       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6431                         << AI.PotentialFreeCalls.size() << "\n");
6432       return false;
6433     }
6434     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6435     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6436     if (!DI) {
6437       LLVM_DEBUG(
6438           dbgs() << "[H2S] unique free call was not known as deallocation call "
6439                  << *UniqueFree << "\n");
6440       return false;
6441     }
6442     if (DI->MightFreeUnknownObjects) {
6443       LLVM_DEBUG(
6444           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6445       return false;
6446     }
6447     if (DI->PotentialAllocationCalls.empty())
6448       return true;
6449     if (DI->PotentialAllocationCalls.size() > 1) {
6450       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6451                         << DI->PotentialAllocationCalls.size()
6452                         << " different allocations\n");
6453       return false;
6454     }
6455     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6456       LLVM_DEBUG(
6457           dbgs()
6458           << "[H2S] unique free call not known to free this allocation but "
6459           << **DI->PotentialAllocationCalls.begin() << "\n");
6460       return false;
6461     }
6462     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6463     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6464       LLVM_DEBUG(
6465           dbgs()
6466           << "[H2S] unique free call might not be executed with the allocation "
6467           << *UniqueFree << "\n");
6468       return false;
6469     }
6470     return true;
6471   };
6472 
6473   auto UsesCheck = [&](AllocationInfo &AI) {
6474     bool ValidUsesOnly = true;
6475 
6476     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6477       Instruction *UserI = cast<Instruction>(U.getUser());
6478       if (isa<LoadInst>(UserI))
6479         return true;
6480       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6481         if (SI->getValueOperand() == U.get()) {
6482           LLVM_DEBUG(dbgs()
6483                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6484           ValidUsesOnly = false;
6485         } else {
6486           // A store into the malloc'ed memory is fine.
6487         }
6488         return true;
6489       }
6490       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6491         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6492           return true;
6493         if (DeallocationInfos.count(CB)) {
6494           AI.PotentialFreeCalls.insert(CB);
6495           return true;
6496         }
6497 
6498         unsigned ArgNo = CB->getArgOperandNo(&U);
6499 
6500         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6501             *this, IRPosition::callsite_argument(*CB, ArgNo),
6502             DepClassTy::OPTIONAL);
6503 
6504         // If a call site argument use is nofree, we are fine.
6505         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6506             *this, IRPosition::callsite_argument(*CB, ArgNo),
6507             DepClassTy::OPTIONAL);
6508 
6509         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6510         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6511         if (MaybeCaptured ||
6512             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6513              MaybeFreed)) {
6514           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6515 
6516           // Emit a missed remark if this is missed OpenMP globalization.
6517           auto Remark = [&](OptimizationRemarkMissed ORM) {
6518             return ORM
6519                    << "Could not move globalized variable to the stack. "
6520                       "Variable is potentially captured in call. Mark "
6521                       "parameter as `__attribute__((noescape))` to override.";
6522           };
6523 
6524           if (ValidUsesOnly &&
6525               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6526             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6527 
6528           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6529           ValidUsesOnly = false;
6530         }
6531         return true;
6532       }
6533 
6534       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6535           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6536         Follow = true;
6537         return true;
6538       }
6539       // Unknown user for which we can not track uses further (in a way that
6540       // makes sense).
6541       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6542       ValidUsesOnly = false;
6543       return true;
6544     };
6545     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6546       return false;
6547     return ValidUsesOnly;
6548   };
6549 
6550   // The actual update starts here. We look at all allocations and depending on
6551   // their status perform the appropriate check(s).
6552   for (auto &It : AllocationInfos) {
6553     AllocationInfo &AI = *It.second;
6554     if (AI.Status == AllocationInfo::INVALID)
6555       continue;
6556 
6557     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6558       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6559       if (!APAlign) {
6560         // Can't generate an alloca which respects the required alignment
6561         // on the allocation.
6562         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6563                           << "\n");
6564         AI.Status = AllocationInfo::INVALID;
6565         Changed = ChangeStatus::CHANGED;
6566         continue;
6567       } else {
6568         if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6569             !APAlign->isPowerOf2()) {
6570           LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6571                             << "\n");
6572           AI.Status = AllocationInfo::INVALID;
6573           Changed = ChangeStatus::CHANGED;
6574           continue;
6575         }
6576       }
6577     }
6578 
6579     if (MaxHeapToStackSize != -1) {
6580       Optional<APInt> Size = getSize(A, *this, AI);
6581       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6582         LLVM_DEBUG({
6583           if (!Size.hasValue())
6584             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6585           else
6586             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6587                    << MaxHeapToStackSize << "\n";
6588         });
6589 
6590         AI.Status = AllocationInfo::INVALID;
6591         Changed = ChangeStatus::CHANGED;
6592         continue;
6593       }
6594     }
6595 
6596     switch (AI.Status) {
6597     case AllocationInfo::STACK_DUE_TO_USE:
6598       if (UsesCheck(AI))
6599         continue;
6600       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6601       LLVM_FALLTHROUGH;
6602     case AllocationInfo::STACK_DUE_TO_FREE:
6603       if (FreeCheck(AI))
6604         continue;
6605       AI.Status = AllocationInfo::INVALID;
6606       Changed = ChangeStatus::CHANGED;
6607       continue;
6608     case AllocationInfo::INVALID:
6609       llvm_unreachable("Invalid allocations should never reach this point!");
6610     };
6611   }
6612 
6613   return Changed;
6614 }
6615 } // namespace
6616 
6617 /// ----------------------- Privatizable Pointers ------------------------------
6618 namespace {
6619 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6620   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6621       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6622 
6623   ChangeStatus indicatePessimisticFixpoint() override {
6624     AAPrivatizablePtr::indicatePessimisticFixpoint();
6625     PrivatizableType = nullptr;
6626     return ChangeStatus::CHANGED;
6627   }
6628 
6629   /// Identify the type we can chose for a private copy of the underlying
6630   /// argument. None means it is not clear yet, nullptr means there is none.
6631   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6632 
6633   /// Return a privatizable type that encloses both T0 and T1.
6634   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6635   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6636     if (!T0.hasValue())
6637       return T1;
6638     if (!T1.hasValue())
6639       return T0;
6640     if (T0 == T1)
6641       return T0;
6642     return nullptr;
6643   }
6644 
6645   Optional<Type *> getPrivatizableType() const override {
6646     return PrivatizableType;
6647   }
6648 
6649   const std::string getAsStr() const override {
6650     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6651   }
6652 
6653 protected:
6654   Optional<Type *> PrivatizableType;
6655 };
6656 
6657 // TODO: Do this for call site arguments (probably also other values) as well.
6658 
6659 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6660   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6661       : AAPrivatizablePtrImpl(IRP, A) {}
6662 
6663   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6664   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6665     // If this is a byval argument and we know all the call sites (so we can
6666     // rewrite them), there is no need to check them explicitly.
6667     bool UsedAssumedInformation = false;
6668     SmallVector<Attribute, 1> Attrs;
6669     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6670     if (!Attrs.empty() &&
6671         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6672                                true, UsedAssumedInformation))
6673       return Attrs[0].getValueAsType();
6674 
6675     Optional<Type *> Ty;
6676     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6677 
6678     // Make sure the associated call site argument has the same type at all call
6679     // sites and it is an allocation we know is safe to privatize, for now that
6680     // means we only allow alloca instructions.
6681     // TODO: We can additionally analyze the accesses in the callee to  create
6682     //       the type from that information instead. That is a little more
6683     //       involved and will be done in a follow up patch.
6684     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6685       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6686       // Check if a coresponding argument was found or if it is one not
6687       // associated (which can happen for callback calls).
6688       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6689         return false;
6690 
6691       // Check that all call sites agree on a type.
6692       auto &PrivCSArgAA =
6693           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6694       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6695 
6696       LLVM_DEBUG({
6697         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6698         if (CSTy.hasValue() && CSTy.getValue())
6699           CSTy.getValue()->print(dbgs());
6700         else if (CSTy.hasValue())
6701           dbgs() << "<nullptr>";
6702         else
6703           dbgs() << "<none>";
6704       });
6705 
6706       Ty = combineTypes(Ty, CSTy);
6707 
6708       LLVM_DEBUG({
6709         dbgs() << " : New Type: ";
6710         if (Ty.hasValue() && Ty.getValue())
6711           Ty.getValue()->print(dbgs());
6712         else if (Ty.hasValue())
6713           dbgs() << "<nullptr>";
6714         else
6715           dbgs() << "<none>";
6716         dbgs() << "\n";
6717       });
6718 
6719       return !Ty.hasValue() || Ty.getValue();
6720     };
6721 
6722     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6723                                 UsedAssumedInformation))
6724       return nullptr;
6725     return Ty;
6726   }
6727 
6728   /// See AbstractAttribute::updateImpl(...).
6729   ChangeStatus updateImpl(Attributor &A) override {
6730     PrivatizableType = identifyPrivatizableType(A);
6731     if (!PrivatizableType.hasValue())
6732       return ChangeStatus::UNCHANGED;
6733     if (!PrivatizableType.getValue())
6734       return indicatePessimisticFixpoint();
6735 
6736     // The dependence is optional so we don't give up once we give up on the
6737     // alignment.
6738     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6739                         DepClassTy::OPTIONAL);
6740 
6741     // Avoid arguments with padding for now.
6742     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6743         !ArgumentPromotionPass::isDenselyPacked(*PrivatizableType,
6744                                                 A.getInfoCache().getDL())) {
6745       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6746       return indicatePessimisticFixpoint();
6747     }
6748 
6749     // Collect the types that will replace the privatizable type in the function
6750     // signature.
6751     SmallVector<Type *, 16> ReplacementTypes;
6752     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6753 
6754     // Verify callee and caller agree on how the promoted argument would be
6755     // passed.
6756     Function &Fn = *getIRPosition().getAnchorScope();
6757     const auto *TTI =
6758         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6759     if (!TTI) {
6760       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6761                         << Fn.getName() << "\n");
6762       return indicatePessimisticFixpoint();
6763     }
6764 
6765     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6766       CallBase *CB = ACS.getInstruction();
6767       return TTI->areTypesABICompatible(
6768           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6769     };
6770     bool UsedAssumedInformation = false;
6771     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6772                                 UsedAssumedInformation)) {
6773       LLVM_DEBUG(
6774           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6775                  << Fn.getName() << "\n");
6776       return indicatePessimisticFixpoint();
6777     }
6778 
6779     // Register a rewrite of the argument.
6780     Argument *Arg = getAssociatedArgument();
6781     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6782       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6783       return indicatePessimisticFixpoint();
6784     }
6785 
6786     unsigned ArgNo = Arg->getArgNo();
6787 
6788     // Helper to check if for the given call site the associated argument is
6789     // passed to a callback where the privatization would be different.
6790     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6791       SmallVector<const Use *, 4> CallbackUses;
6792       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6793       for (const Use *U : CallbackUses) {
6794         AbstractCallSite CBACS(U);
6795         assert(CBACS && CBACS.isCallbackCall());
6796         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6797           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6798 
6799           LLVM_DEBUG({
6800             dbgs()
6801                 << "[AAPrivatizablePtr] Argument " << *Arg
6802                 << "check if can be privatized in the context of its parent ("
6803                 << Arg->getParent()->getName()
6804                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6805                    "callback ("
6806                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6807                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6808                 << CBACS.getCallArgOperand(CBArg) << " vs "
6809                 << CB.getArgOperand(ArgNo) << "\n"
6810                 << "[AAPrivatizablePtr] " << CBArg << " : "
6811                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6812           });
6813 
6814           if (CBArgNo != int(ArgNo))
6815             continue;
6816           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6817               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6818           if (CBArgPrivAA.isValidState()) {
6819             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6820             if (!CBArgPrivTy.hasValue())
6821               continue;
6822             if (CBArgPrivTy.getValue() == PrivatizableType)
6823               continue;
6824           }
6825 
6826           LLVM_DEBUG({
6827             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6828                    << " cannot be privatized in the context of its parent ("
6829                    << Arg->getParent()->getName()
6830                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6831                       "callback ("
6832                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6833                    << ").\n[AAPrivatizablePtr] for which the argument "
6834                       "privatization is not compatible.\n";
6835           });
6836           return false;
6837         }
6838       }
6839       return true;
6840     };
6841 
6842     // Helper to check if for the given call site the associated argument is
6843     // passed to a direct call where the privatization would be different.
6844     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6845       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6846       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6847       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6848              "Expected a direct call operand for callback call operand");
6849 
6850       LLVM_DEBUG({
6851         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6852                << " check if be privatized in the context of its parent ("
6853                << Arg->getParent()->getName()
6854                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6855                   "direct call of ("
6856                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6857                << ").\n";
6858       });
6859 
6860       Function *DCCallee = DC->getCalledFunction();
6861       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6862         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6863             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6864             DepClassTy::REQUIRED);
6865         if (DCArgPrivAA.isValidState()) {
6866           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6867           if (!DCArgPrivTy.hasValue())
6868             return true;
6869           if (DCArgPrivTy.getValue() == PrivatizableType)
6870             return true;
6871         }
6872       }
6873 
6874       LLVM_DEBUG({
6875         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6876                << " cannot be privatized in the context of its parent ("
6877                << Arg->getParent()->getName()
6878                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6879                   "direct call of ("
6880                << ACS.getInstruction()->getCalledFunction()->getName()
6881                << ").\n[AAPrivatizablePtr] for which the argument "
6882                   "privatization is not compatible.\n";
6883       });
6884       return false;
6885     };
6886 
6887     // Helper to check if the associated argument is used at the given abstract
6888     // call site in a way that is incompatible with the privatization assumed
6889     // here.
6890     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6891       if (ACS.isDirectCall())
6892         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6893       if (ACS.isCallbackCall())
6894         return IsCompatiblePrivArgOfDirectCS(ACS);
6895       return false;
6896     };
6897 
6898     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6899                                 UsedAssumedInformation))
6900       return indicatePessimisticFixpoint();
6901 
6902     return ChangeStatus::UNCHANGED;
6903   }
6904 
6905   /// Given a type to private \p PrivType, collect the constituates (which are
6906   /// used) in \p ReplacementTypes.
6907   static void
6908   identifyReplacementTypes(Type *PrivType,
6909                            SmallVectorImpl<Type *> &ReplacementTypes) {
6910     // TODO: For now we expand the privatization type to the fullest which can
6911     //       lead to dead arguments that need to be removed later.
6912     assert(PrivType && "Expected privatizable type!");
6913 
6914     // Traverse the type, extract constituate types on the outermost level.
6915     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6916       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6917         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6918     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6919       ReplacementTypes.append(PrivArrayType->getNumElements(),
6920                               PrivArrayType->getElementType());
6921     } else {
6922       ReplacementTypes.push_back(PrivType);
6923     }
6924   }
6925 
6926   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6927   /// The values needed are taken from the arguments of \p F starting at
6928   /// position \p ArgNo.
6929   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6930                                    unsigned ArgNo, Instruction &IP) {
6931     assert(PrivType && "Expected privatizable type!");
6932 
6933     IRBuilder<NoFolder> IRB(&IP);
6934     const DataLayout &DL = F.getParent()->getDataLayout();
6935 
6936     // Traverse the type, build GEPs and stores.
6937     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6938       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6939       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6940         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6941         Value *Ptr =
6942             constructPointer(PointeeTy, PrivType, &Base,
6943                              PrivStructLayout->getElementOffset(u), IRB, DL);
6944         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6945       }
6946     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6947       Type *PointeeTy = PrivArrayType->getElementType();
6948       Type *PointeePtrTy = PointeeTy->getPointerTo();
6949       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6950       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6951         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6952                                       u * PointeeTySize, IRB, DL);
6953         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6954       }
6955     } else {
6956       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6957     }
6958   }
6959 
6960   /// Extract values from \p Base according to the type \p PrivType at the
6961   /// call position \p ACS. The values are appended to \p ReplacementValues.
6962   void createReplacementValues(Align Alignment, Type *PrivType,
6963                                AbstractCallSite ACS, Value *Base,
6964                                SmallVectorImpl<Value *> &ReplacementValues) {
6965     assert(Base && "Expected base value!");
6966     assert(PrivType && "Expected privatizable type!");
6967     Instruction *IP = ACS.getInstruction();
6968 
6969     IRBuilder<NoFolder> IRB(IP);
6970     const DataLayout &DL = IP->getModule()->getDataLayout();
6971 
6972     Type *PrivPtrType = PrivType->getPointerTo();
6973     if (Base->getType() != PrivPtrType)
6974       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6975           Base, PrivPtrType, "", ACS.getInstruction());
6976 
6977     // Traverse the type, build GEPs and loads.
6978     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6979       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6980       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6981         Type *PointeeTy = PrivStructType->getElementType(u);
6982         Value *Ptr =
6983             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6984                              PrivStructLayout->getElementOffset(u), IRB, DL);
6985         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6986         L->setAlignment(Alignment);
6987         ReplacementValues.push_back(L);
6988       }
6989     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6990       Type *PointeeTy = PrivArrayType->getElementType();
6991       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6992       Type *PointeePtrTy = PointeeTy->getPointerTo();
6993       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6994         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6995                                       u * PointeeTySize, IRB, DL);
6996         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6997         L->setAlignment(Alignment);
6998         ReplacementValues.push_back(L);
6999       }
7000     } else {
7001       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
7002       L->setAlignment(Alignment);
7003       ReplacementValues.push_back(L);
7004     }
7005   }
7006 
7007   /// See AbstractAttribute::manifest(...)
7008   ChangeStatus manifest(Attributor &A) override {
7009     if (!PrivatizableType.hasValue())
7010       return ChangeStatus::UNCHANGED;
7011     assert(PrivatizableType.getValue() && "Expected privatizable type!");
7012 
7013     // Collect all tail calls in the function as we cannot allow new allocas to
7014     // escape into tail recursion.
7015     // TODO: Be smarter about new allocas escaping into tail calls.
7016     SmallVector<CallInst *, 16> TailCalls;
7017     bool UsedAssumedInformation = false;
7018     if (!A.checkForAllInstructions(
7019             [&](Instruction &I) {
7020               CallInst &CI = cast<CallInst>(I);
7021               if (CI.isTailCall())
7022                 TailCalls.push_back(&CI);
7023               return true;
7024             },
7025             *this, {Instruction::Call}, UsedAssumedInformation))
7026       return ChangeStatus::UNCHANGED;
7027 
7028     Argument *Arg = getAssociatedArgument();
7029     // Query AAAlign attribute for alignment of associated argument to
7030     // determine the best alignment of loads.
7031     const auto &AlignAA =
7032         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
7033 
7034     // Callback to repair the associated function. A new alloca is placed at the
7035     // beginning and initialized with the values passed through arguments. The
7036     // new alloca replaces the use of the old pointer argument.
7037     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
7038         [=](const Attributor::ArgumentReplacementInfo &ARI,
7039             Function &ReplacementFn, Function::arg_iterator ArgIt) {
7040           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
7041           Instruction *IP = &*EntryBB.getFirstInsertionPt();
7042           const DataLayout &DL = IP->getModule()->getDataLayout();
7043           unsigned AS = DL.getAllocaAddrSpace();
7044           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
7045                                            Arg->getName() + ".priv", IP);
7046           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
7047                                ArgIt->getArgNo(), *IP);
7048 
7049           if (AI->getType() != Arg->getType())
7050             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
7051                 AI, Arg->getType(), "", IP);
7052           Arg->replaceAllUsesWith(AI);
7053 
7054           for (CallInst *CI : TailCalls)
7055             CI->setTailCall(false);
7056         };
7057 
7058     // Callback to repair a call site of the associated function. The elements
7059     // of the privatizable type are loaded prior to the call and passed to the
7060     // new function version.
7061     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
7062         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
7063                       AbstractCallSite ACS,
7064                       SmallVectorImpl<Value *> &NewArgOperands) {
7065           // When no alignment is specified for the load instruction,
7066           // natural alignment is assumed.
7067           createReplacementValues(
7068               AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
7069               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
7070               NewArgOperands);
7071         };
7072 
7073     // Collect the types that will replace the privatizable type in the function
7074     // signature.
7075     SmallVector<Type *, 16> ReplacementTypes;
7076     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
7077 
7078     // Register a rewrite of the argument.
7079     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
7080                                            std::move(FnRepairCB),
7081                                            std::move(ACSRepairCB)))
7082       return ChangeStatus::CHANGED;
7083     return ChangeStatus::UNCHANGED;
7084   }
7085 
7086   /// See AbstractAttribute::trackStatistics()
7087   void trackStatistics() const override {
7088     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
7089   }
7090 };
7091 
7092 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
7093   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
7094       : AAPrivatizablePtrImpl(IRP, A) {}
7095 
7096   /// See AbstractAttribute::initialize(...).
7097   virtual void initialize(Attributor &A) override {
7098     // TODO: We can privatize more than arguments.
7099     indicatePessimisticFixpoint();
7100   }
7101 
7102   ChangeStatus updateImpl(Attributor &A) override {
7103     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
7104                      "updateImpl will not be called");
7105   }
7106 
7107   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
7108   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
7109     Value *Obj = getUnderlyingObject(&getAssociatedValue());
7110     if (!Obj) {
7111       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
7112       return nullptr;
7113     }
7114 
7115     if (auto *AI = dyn_cast<AllocaInst>(Obj))
7116       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
7117         if (CI->isOne())
7118           return AI->getAllocatedType();
7119     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7120       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
7121           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
7122       if (PrivArgAA.isAssumedPrivatizablePtr())
7123         return PrivArgAA.getPrivatizableType();
7124     }
7125 
7126     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
7127                          "alloca nor privatizable argument: "
7128                       << *Obj << "!\n");
7129     return nullptr;
7130   }
7131 
7132   /// See AbstractAttribute::trackStatistics()
7133   void trackStatistics() const override {
7134     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
7135   }
7136 };
7137 
7138 struct AAPrivatizablePtrCallSiteArgument final
7139     : public AAPrivatizablePtrFloating {
7140   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
7141       : AAPrivatizablePtrFloating(IRP, A) {}
7142 
7143   /// See AbstractAttribute::initialize(...).
7144   void initialize(Attributor &A) override {
7145     if (getIRPosition().hasAttr(Attribute::ByVal))
7146       indicateOptimisticFixpoint();
7147   }
7148 
7149   /// See AbstractAttribute::updateImpl(...).
7150   ChangeStatus updateImpl(Attributor &A) override {
7151     PrivatizableType = identifyPrivatizableType(A);
7152     if (!PrivatizableType.hasValue())
7153       return ChangeStatus::UNCHANGED;
7154     if (!PrivatizableType.getValue())
7155       return indicatePessimisticFixpoint();
7156 
7157     const IRPosition &IRP = getIRPosition();
7158     auto &NoCaptureAA =
7159         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
7160     if (!NoCaptureAA.isAssumedNoCapture()) {
7161       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
7162       return indicatePessimisticFixpoint();
7163     }
7164 
7165     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
7166     if (!NoAliasAA.isAssumedNoAlias()) {
7167       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
7168       return indicatePessimisticFixpoint();
7169     }
7170 
7171     bool IsKnown;
7172     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
7173       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
7174       return indicatePessimisticFixpoint();
7175     }
7176 
7177     return ChangeStatus::UNCHANGED;
7178   }
7179 
7180   /// See AbstractAttribute::trackStatistics()
7181   void trackStatistics() const override {
7182     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
7183   }
7184 };
7185 
7186 struct AAPrivatizablePtrCallSiteReturned final
7187     : public AAPrivatizablePtrFloating {
7188   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
7189       : AAPrivatizablePtrFloating(IRP, A) {}
7190 
7191   /// See AbstractAttribute::initialize(...).
7192   void initialize(Attributor &A) override {
7193     // TODO: We can privatize more than arguments.
7194     indicatePessimisticFixpoint();
7195   }
7196 
7197   /// See AbstractAttribute::trackStatistics()
7198   void trackStatistics() const override {
7199     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7200   }
7201 };
7202 
7203 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7204   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7205       : AAPrivatizablePtrFloating(IRP, A) {}
7206 
7207   /// See AbstractAttribute::initialize(...).
7208   void initialize(Attributor &A) override {
7209     // TODO: We can privatize more than arguments.
7210     indicatePessimisticFixpoint();
7211   }
7212 
7213   /// See AbstractAttribute::trackStatistics()
7214   void trackStatistics() const override {
7215     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7216   }
7217 };
7218 } // namespace
7219 
7220 /// -------------------- Memory Behavior Attributes ----------------------------
7221 /// Includes read-none, read-only, and write-only.
7222 /// ----------------------------------------------------------------------------
7223 namespace {
7224 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7225   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7226       : AAMemoryBehavior(IRP, A) {}
7227 
7228   /// See AbstractAttribute::initialize(...).
7229   void initialize(Attributor &A) override {
7230     intersectAssumedBits(BEST_STATE);
7231     getKnownStateFromValue(getIRPosition(), getState());
7232     AAMemoryBehavior::initialize(A);
7233   }
7234 
7235   /// Return the memory behavior information encoded in the IR for \p IRP.
7236   static void getKnownStateFromValue(const IRPosition &IRP,
7237                                      BitIntegerState &State,
7238                                      bool IgnoreSubsumingPositions = false) {
7239     SmallVector<Attribute, 2> Attrs;
7240     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7241     for (const Attribute &Attr : Attrs) {
7242       switch (Attr.getKindAsEnum()) {
7243       case Attribute::ReadNone:
7244         State.addKnownBits(NO_ACCESSES);
7245         break;
7246       case Attribute::ReadOnly:
7247         State.addKnownBits(NO_WRITES);
7248         break;
7249       case Attribute::WriteOnly:
7250         State.addKnownBits(NO_READS);
7251         break;
7252       default:
7253         llvm_unreachable("Unexpected attribute!");
7254       }
7255     }
7256 
7257     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7258       if (!I->mayReadFromMemory())
7259         State.addKnownBits(NO_READS);
7260       if (!I->mayWriteToMemory())
7261         State.addKnownBits(NO_WRITES);
7262     }
7263   }
7264 
7265   /// See AbstractAttribute::getDeducedAttributes(...).
7266   void getDeducedAttributes(LLVMContext &Ctx,
7267                             SmallVectorImpl<Attribute> &Attrs) const override {
7268     assert(Attrs.size() == 0);
7269     if (isAssumedReadNone())
7270       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7271     else if (isAssumedReadOnly())
7272       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7273     else if (isAssumedWriteOnly())
7274       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7275     assert(Attrs.size() <= 1);
7276   }
7277 
7278   /// See AbstractAttribute::manifest(...).
7279   ChangeStatus manifest(Attributor &A) override {
7280     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7281       return ChangeStatus::UNCHANGED;
7282 
7283     const IRPosition &IRP = getIRPosition();
7284 
7285     // Check if we would improve the existing attributes first.
7286     SmallVector<Attribute, 4> DeducedAttrs;
7287     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7288     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7289           return IRP.hasAttr(Attr.getKindAsEnum(),
7290                              /* IgnoreSubsumingPositions */ true);
7291         }))
7292       return ChangeStatus::UNCHANGED;
7293 
7294     // Clear existing attributes.
7295     IRP.removeAttrs(AttrKinds);
7296 
7297     // Use the generic manifest method.
7298     return IRAttribute::manifest(A);
7299   }
7300 
7301   /// See AbstractState::getAsStr().
7302   const std::string getAsStr() const override {
7303     if (isAssumedReadNone())
7304       return "readnone";
7305     if (isAssumedReadOnly())
7306       return "readonly";
7307     if (isAssumedWriteOnly())
7308       return "writeonly";
7309     return "may-read/write";
7310   }
7311 
7312   /// The set of IR attributes AAMemoryBehavior deals with.
7313   static const Attribute::AttrKind AttrKinds[3];
7314 };
7315 
7316 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7317     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7318 
7319 /// Memory behavior attribute for a floating value.
7320 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7321   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7322       : AAMemoryBehaviorImpl(IRP, A) {}
7323 
7324   /// See AbstractAttribute::updateImpl(...).
7325   ChangeStatus updateImpl(Attributor &A) override;
7326 
7327   /// See AbstractAttribute::trackStatistics()
7328   void trackStatistics() const override {
7329     if (isAssumedReadNone())
7330       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7331     else if (isAssumedReadOnly())
7332       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7333     else if (isAssumedWriteOnly())
7334       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7335   }
7336 
7337 private:
7338   /// Return true if users of \p UserI might access the underlying
7339   /// variable/location described by \p U and should therefore be analyzed.
7340   bool followUsersOfUseIn(Attributor &A, const Use &U,
7341                           const Instruction *UserI);
7342 
7343   /// Update the state according to the effect of use \p U in \p UserI.
7344   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7345 };
7346 
7347 /// Memory behavior attribute for function argument.
7348 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7349   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7350       : AAMemoryBehaviorFloating(IRP, A) {}
7351 
7352   /// See AbstractAttribute::initialize(...).
7353   void initialize(Attributor &A) override {
7354     intersectAssumedBits(BEST_STATE);
7355     const IRPosition &IRP = getIRPosition();
7356     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7357     // can query it when we use has/getAttr. That would allow us to reuse the
7358     // initialize of the base class here.
7359     bool HasByVal =
7360         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7361     getKnownStateFromValue(IRP, getState(),
7362                            /* IgnoreSubsumingPositions */ HasByVal);
7363 
7364     // Initialize the use vector with all direct uses of the associated value.
7365     Argument *Arg = getAssociatedArgument();
7366     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7367       indicatePessimisticFixpoint();
7368   }
7369 
7370   ChangeStatus manifest(Attributor &A) override {
7371     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7372     if (!getAssociatedValue().getType()->isPointerTy())
7373       return ChangeStatus::UNCHANGED;
7374 
7375     // TODO: From readattrs.ll: "inalloca parameters are always
7376     //                           considered written"
7377     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7378       removeKnownBits(NO_WRITES);
7379       removeAssumedBits(NO_WRITES);
7380     }
7381     return AAMemoryBehaviorFloating::manifest(A);
7382   }
7383 
7384   /// See AbstractAttribute::trackStatistics()
7385   void trackStatistics() const override {
7386     if (isAssumedReadNone())
7387       STATS_DECLTRACK_ARG_ATTR(readnone)
7388     else if (isAssumedReadOnly())
7389       STATS_DECLTRACK_ARG_ATTR(readonly)
7390     else if (isAssumedWriteOnly())
7391       STATS_DECLTRACK_ARG_ATTR(writeonly)
7392   }
7393 };
7394 
7395 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7396   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7397       : AAMemoryBehaviorArgument(IRP, A) {}
7398 
7399   /// See AbstractAttribute::initialize(...).
7400   void initialize(Attributor &A) override {
7401     // If we don't have an associated attribute this is either a variadic call
7402     // or an indirect call, either way, nothing to do here.
7403     Argument *Arg = getAssociatedArgument();
7404     if (!Arg) {
7405       indicatePessimisticFixpoint();
7406       return;
7407     }
7408     if (Arg->hasByValAttr()) {
7409       addKnownBits(NO_WRITES);
7410       removeKnownBits(NO_READS);
7411       removeAssumedBits(NO_READS);
7412     }
7413     AAMemoryBehaviorArgument::initialize(A);
7414     if (getAssociatedFunction()->isDeclaration())
7415       indicatePessimisticFixpoint();
7416   }
7417 
7418   /// See AbstractAttribute::updateImpl(...).
7419   ChangeStatus updateImpl(Attributor &A) override {
7420     // TODO: Once we have call site specific value information we can provide
7421     //       call site specific liveness liveness information and then it makes
7422     //       sense to specialize attributes for call sites arguments instead of
7423     //       redirecting requests to the callee argument.
7424     Argument *Arg = getAssociatedArgument();
7425     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7426     auto &ArgAA =
7427         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7428     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7429   }
7430 
7431   /// See AbstractAttribute::trackStatistics()
7432   void trackStatistics() const override {
7433     if (isAssumedReadNone())
7434       STATS_DECLTRACK_CSARG_ATTR(readnone)
7435     else if (isAssumedReadOnly())
7436       STATS_DECLTRACK_CSARG_ATTR(readonly)
7437     else if (isAssumedWriteOnly())
7438       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7439   }
7440 };
7441 
7442 /// Memory behavior attribute for a call site return position.
7443 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7444   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7445       : AAMemoryBehaviorFloating(IRP, A) {}
7446 
7447   /// See AbstractAttribute::initialize(...).
7448   void initialize(Attributor &A) override {
7449     AAMemoryBehaviorImpl::initialize(A);
7450     Function *F = getAssociatedFunction();
7451     if (!F || F->isDeclaration())
7452       indicatePessimisticFixpoint();
7453   }
7454 
7455   /// See AbstractAttribute::manifest(...).
7456   ChangeStatus manifest(Attributor &A) override {
7457     // We do not annotate returned values.
7458     return ChangeStatus::UNCHANGED;
7459   }
7460 
7461   /// See AbstractAttribute::trackStatistics()
7462   void trackStatistics() const override {}
7463 };
7464 
7465 /// An AA to represent the memory behavior function attributes.
7466 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7467   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7468       : AAMemoryBehaviorImpl(IRP, A) {}
7469 
7470   /// See AbstractAttribute::updateImpl(Attributor &A).
7471   virtual ChangeStatus updateImpl(Attributor &A) override;
7472 
7473   /// See AbstractAttribute::manifest(...).
7474   ChangeStatus manifest(Attributor &A) override {
7475     Function &F = cast<Function>(getAnchorValue());
7476     if (isAssumedReadNone()) {
7477       F.removeFnAttr(Attribute::ArgMemOnly);
7478       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7479       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7480     }
7481     return AAMemoryBehaviorImpl::manifest(A);
7482   }
7483 
7484   /// See AbstractAttribute::trackStatistics()
7485   void trackStatistics() const override {
7486     if (isAssumedReadNone())
7487       STATS_DECLTRACK_FN_ATTR(readnone)
7488     else if (isAssumedReadOnly())
7489       STATS_DECLTRACK_FN_ATTR(readonly)
7490     else if (isAssumedWriteOnly())
7491       STATS_DECLTRACK_FN_ATTR(writeonly)
7492   }
7493 };
7494 
7495 /// AAMemoryBehavior attribute for call sites.
7496 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7497   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7498       : AAMemoryBehaviorImpl(IRP, A) {}
7499 
7500   /// See AbstractAttribute::initialize(...).
7501   void initialize(Attributor &A) override {
7502     AAMemoryBehaviorImpl::initialize(A);
7503     Function *F = getAssociatedFunction();
7504     if (!F || F->isDeclaration())
7505       indicatePessimisticFixpoint();
7506   }
7507 
7508   /// See AbstractAttribute::updateImpl(...).
7509   ChangeStatus updateImpl(Attributor &A) override {
7510     // TODO: Once we have call site specific value information we can provide
7511     //       call site specific liveness liveness information and then it makes
7512     //       sense to specialize attributes for call sites arguments instead of
7513     //       redirecting requests to the callee argument.
7514     Function *F = getAssociatedFunction();
7515     const IRPosition &FnPos = IRPosition::function(*F);
7516     auto &FnAA =
7517         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7518     return clampStateAndIndicateChange(getState(), FnAA.getState());
7519   }
7520 
7521   /// See AbstractAttribute::trackStatistics()
7522   void trackStatistics() const override {
7523     if (isAssumedReadNone())
7524       STATS_DECLTRACK_CS_ATTR(readnone)
7525     else if (isAssumedReadOnly())
7526       STATS_DECLTRACK_CS_ATTR(readonly)
7527     else if (isAssumedWriteOnly())
7528       STATS_DECLTRACK_CS_ATTR(writeonly)
7529   }
7530 };
7531 
7532 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7533 
7534   // The current assumed state used to determine a change.
7535   auto AssumedState = getAssumed();
7536 
7537   auto CheckRWInst = [&](Instruction &I) {
7538     // If the instruction has an own memory behavior state, use it to restrict
7539     // the local state. No further analysis is required as the other memory
7540     // state is as optimistic as it gets.
7541     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7542       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7543           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7544       intersectAssumedBits(MemBehaviorAA.getAssumed());
7545       return !isAtFixpoint();
7546     }
7547 
7548     // Remove access kind modifiers if necessary.
7549     if (I.mayReadFromMemory())
7550       removeAssumedBits(NO_READS);
7551     if (I.mayWriteToMemory())
7552       removeAssumedBits(NO_WRITES);
7553     return !isAtFixpoint();
7554   };
7555 
7556   bool UsedAssumedInformation = false;
7557   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7558                                           UsedAssumedInformation))
7559     return indicatePessimisticFixpoint();
7560 
7561   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7562                                         : ChangeStatus::UNCHANGED;
7563 }
7564 
7565 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7566 
7567   const IRPosition &IRP = getIRPosition();
7568   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7569   AAMemoryBehavior::StateType &S = getState();
7570 
7571   // First, check the function scope. We take the known information and we avoid
7572   // work if the assumed information implies the current assumed information for
7573   // this attribute. This is a valid for all but byval arguments.
7574   Argument *Arg = IRP.getAssociatedArgument();
7575   AAMemoryBehavior::base_t FnMemAssumedState =
7576       AAMemoryBehavior::StateType::getWorstState();
7577   if (!Arg || !Arg->hasByValAttr()) {
7578     const auto &FnMemAA =
7579         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7580     FnMemAssumedState = FnMemAA.getAssumed();
7581     S.addKnownBits(FnMemAA.getKnown());
7582     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7583       return ChangeStatus::UNCHANGED;
7584   }
7585 
7586   // The current assumed state used to determine a change.
7587   auto AssumedState = S.getAssumed();
7588 
7589   // Make sure the value is not captured (except through "return"), if
7590   // it is, any information derived would be irrelevant anyway as we cannot
7591   // check the potential aliases introduced by the capture. However, no need
7592   // to fall back to anythign less optimistic than the function state.
7593   const auto &ArgNoCaptureAA =
7594       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7595   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7596     S.intersectAssumedBits(FnMemAssumedState);
7597     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7598                                           : ChangeStatus::UNCHANGED;
7599   }
7600 
7601   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7602   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7603     Instruction *UserI = cast<Instruction>(U.getUser());
7604     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7605                       << " \n");
7606 
7607     // Droppable users, e.g., llvm::assume does not actually perform any action.
7608     if (UserI->isDroppable())
7609       return true;
7610 
7611     // Check if the users of UserI should also be visited.
7612     Follow = followUsersOfUseIn(A, U, UserI);
7613 
7614     // If UserI might touch memory we analyze the use in detail.
7615     if (UserI->mayReadOrWriteMemory())
7616       analyzeUseIn(A, U, UserI);
7617 
7618     return !isAtFixpoint();
7619   };
7620 
7621   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7622     return indicatePessimisticFixpoint();
7623 
7624   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7625                                         : ChangeStatus::UNCHANGED;
7626 }
7627 
7628 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7629                                                   const Instruction *UserI) {
7630   // The loaded value is unrelated to the pointer argument, no need to
7631   // follow the users of the load.
7632   if (isa<LoadInst>(UserI))
7633     return false;
7634 
7635   // By default we follow all uses assuming UserI might leak information on U,
7636   // we have special handling for call sites operands though.
7637   const auto *CB = dyn_cast<CallBase>(UserI);
7638   if (!CB || !CB->isArgOperand(&U))
7639     return true;
7640 
7641   // If the use is a call argument known not to be captured, the users of
7642   // the call do not need to be visited because they have to be unrelated to
7643   // the input. Note that this check is not trivial even though we disallow
7644   // general capturing of the underlying argument. The reason is that the
7645   // call might the argument "through return", which we allow and for which we
7646   // need to check call users.
7647   if (U.get()->getType()->isPointerTy()) {
7648     unsigned ArgNo = CB->getArgOperandNo(&U);
7649     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7650         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7651     return !ArgNoCaptureAA.isAssumedNoCapture();
7652   }
7653 
7654   return true;
7655 }
7656 
7657 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7658                                             const Instruction *UserI) {
7659   assert(UserI->mayReadOrWriteMemory());
7660 
7661   switch (UserI->getOpcode()) {
7662   default:
7663     // TODO: Handle all atomics and other side-effect operations we know of.
7664     break;
7665   case Instruction::Load:
7666     // Loads cause the NO_READS property to disappear.
7667     removeAssumedBits(NO_READS);
7668     return;
7669 
7670   case Instruction::Store:
7671     // Stores cause the NO_WRITES property to disappear if the use is the
7672     // pointer operand. Note that while capturing was taken care of somewhere
7673     // else we need to deal with stores of the value that is not looked through.
7674     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7675       removeAssumedBits(NO_WRITES);
7676     else
7677       indicatePessimisticFixpoint();
7678     return;
7679 
7680   case Instruction::Call:
7681   case Instruction::CallBr:
7682   case Instruction::Invoke: {
7683     // For call sites we look at the argument memory behavior attribute (this
7684     // could be recursive!) in order to restrict our own state.
7685     const auto *CB = cast<CallBase>(UserI);
7686 
7687     // Give up on operand bundles.
7688     if (CB->isBundleOperand(&U)) {
7689       indicatePessimisticFixpoint();
7690       return;
7691     }
7692 
7693     // Calling a function does read the function pointer, maybe write it if the
7694     // function is self-modifying.
7695     if (CB->isCallee(&U)) {
7696       removeAssumedBits(NO_READS);
7697       break;
7698     }
7699 
7700     // Adjust the possible access behavior based on the information on the
7701     // argument.
7702     IRPosition Pos;
7703     if (U.get()->getType()->isPointerTy())
7704       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7705     else
7706       Pos = IRPosition::callsite_function(*CB);
7707     const auto &MemBehaviorAA =
7708         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7709     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7710     // and at least "known".
7711     intersectAssumedBits(MemBehaviorAA.getAssumed());
7712     return;
7713   }
7714   };
7715 
7716   // Generally, look at the "may-properties" and adjust the assumed state if we
7717   // did not trigger special handling before.
7718   if (UserI->mayReadFromMemory())
7719     removeAssumedBits(NO_READS);
7720   if (UserI->mayWriteToMemory())
7721     removeAssumedBits(NO_WRITES);
7722 }
7723 } // namespace
7724 
7725 /// -------------------- Memory Locations Attributes ---------------------------
7726 /// Includes read-none, argmemonly, inaccessiblememonly,
7727 /// inaccessiblememorargmemonly
7728 /// ----------------------------------------------------------------------------
7729 
7730 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7731     AAMemoryLocation::MemoryLocationsKind MLK) {
7732   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7733     return "all memory";
7734   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7735     return "no memory";
7736   std::string S = "memory:";
7737   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7738     S += "stack,";
7739   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7740     S += "constant,";
7741   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7742     S += "internal global,";
7743   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7744     S += "external global,";
7745   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7746     S += "argument,";
7747   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7748     S += "inaccessible,";
7749   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7750     S += "malloced,";
7751   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7752     S += "unknown,";
7753   S.pop_back();
7754   return S;
7755 }
7756 
7757 namespace {
7758 struct AAMemoryLocationImpl : public AAMemoryLocation {
7759 
7760   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7761       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7762     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7763       AccessKind2Accesses[u] = nullptr;
7764   }
7765 
7766   ~AAMemoryLocationImpl() {
7767     // The AccessSets are allocated via a BumpPtrAllocator, we call
7768     // the destructor manually.
7769     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7770       if (AccessKind2Accesses[u])
7771         AccessKind2Accesses[u]->~AccessSet();
7772   }
7773 
7774   /// See AbstractAttribute::initialize(...).
7775   void initialize(Attributor &A) override {
7776     intersectAssumedBits(BEST_STATE);
7777     getKnownStateFromValue(A, getIRPosition(), getState());
7778     AAMemoryLocation::initialize(A);
7779   }
7780 
7781   /// Return the memory behavior information encoded in the IR for \p IRP.
7782   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7783                                      BitIntegerState &State,
7784                                      bool IgnoreSubsumingPositions = false) {
7785     // For internal functions we ignore `argmemonly` and
7786     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7787     // constant propagation. It is unclear if this is the best way but it is
7788     // unlikely this will cause real performance problems. If we are deriving
7789     // attributes for the anchor function we even remove the attribute in
7790     // addition to ignoring it.
7791     bool UseArgMemOnly = true;
7792     Function *AnchorFn = IRP.getAnchorScope();
7793     if (AnchorFn && A.isRunOn(*AnchorFn))
7794       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7795 
7796     SmallVector<Attribute, 2> Attrs;
7797     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7798     for (const Attribute &Attr : Attrs) {
7799       switch (Attr.getKindAsEnum()) {
7800       case Attribute::ReadNone:
7801         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7802         break;
7803       case Attribute::InaccessibleMemOnly:
7804         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7805         break;
7806       case Attribute::ArgMemOnly:
7807         if (UseArgMemOnly)
7808           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7809         else
7810           IRP.removeAttrs({Attribute::ArgMemOnly});
7811         break;
7812       case Attribute::InaccessibleMemOrArgMemOnly:
7813         if (UseArgMemOnly)
7814           State.addKnownBits(inverseLocation(
7815               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7816         else
7817           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7818         break;
7819       default:
7820         llvm_unreachable("Unexpected attribute!");
7821       }
7822     }
7823   }
7824 
7825   /// See AbstractAttribute::getDeducedAttributes(...).
7826   void getDeducedAttributes(LLVMContext &Ctx,
7827                             SmallVectorImpl<Attribute> &Attrs) const override {
7828     assert(Attrs.size() == 0);
7829     if (isAssumedReadNone()) {
7830       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7831     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7832       if (isAssumedInaccessibleMemOnly())
7833         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7834       else if (isAssumedArgMemOnly())
7835         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7836       else if (isAssumedInaccessibleOrArgMemOnly())
7837         Attrs.push_back(
7838             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7839     }
7840     assert(Attrs.size() <= 1);
7841   }
7842 
7843   /// See AbstractAttribute::manifest(...).
7844   ChangeStatus manifest(Attributor &A) override {
7845     const IRPosition &IRP = getIRPosition();
7846 
7847     // Check if we would improve the existing attributes first.
7848     SmallVector<Attribute, 4> DeducedAttrs;
7849     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7850     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7851           return IRP.hasAttr(Attr.getKindAsEnum(),
7852                              /* IgnoreSubsumingPositions */ true);
7853         }))
7854       return ChangeStatus::UNCHANGED;
7855 
7856     // Clear existing attributes.
7857     IRP.removeAttrs(AttrKinds);
7858     if (isAssumedReadNone())
7859       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7860 
7861     // Use the generic manifest method.
7862     return IRAttribute::manifest(A);
7863   }
7864 
7865   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7866   bool checkForAllAccessesToMemoryKind(
7867       function_ref<bool(const Instruction *, const Value *, AccessKind,
7868                         MemoryLocationsKind)>
7869           Pred,
7870       MemoryLocationsKind RequestedMLK) const override {
7871     if (!isValidState())
7872       return false;
7873 
7874     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7875     if (AssumedMLK == NO_LOCATIONS)
7876       return true;
7877 
7878     unsigned Idx = 0;
7879     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7880          CurMLK *= 2, ++Idx) {
7881       if (CurMLK & RequestedMLK)
7882         continue;
7883 
7884       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7885         for (const AccessInfo &AI : *Accesses)
7886           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7887             return false;
7888     }
7889 
7890     return true;
7891   }
7892 
7893   ChangeStatus indicatePessimisticFixpoint() override {
7894     // If we give up and indicate a pessimistic fixpoint this instruction will
7895     // become an access for all potential access kinds:
7896     // TODO: Add pointers for argmemonly and globals to improve the results of
7897     //       checkForAllAccessesToMemoryKind.
7898     bool Changed = false;
7899     MemoryLocationsKind KnownMLK = getKnown();
7900     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7901     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7902       if (!(CurMLK & KnownMLK))
7903         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7904                                   getAccessKindFromInst(I));
7905     return AAMemoryLocation::indicatePessimisticFixpoint();
7906   }
7907 
7908 protected:
7909   /// Helper struct to tie together an instruction that has a read or write
7910   /// effect with the pointer it accesses (if any).
7911   struct AccessInfo {
7912 
7913     /// The instruction that caused the access.
7914     const Instruction *I;
7915 
7916     /// The base pointer that is accessed, or null if unknown.
7917     const Value *Ptr;
7918 
7919     /// The kind of access (read/write/read+write).
7920     AccessKind Kind;
7921 
7922     bool operator==(const AccessInfo &RHS) const {
7923       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7924     }
7925     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7926       if (LHS.I != RHS.I)
7927         return LHS.I < RHS.I;
7928       if (LHS.Ptr != RHS.Ptr)
7929         return LHS.Ptr < RHS.Ptr;
7930       if (LHS.Kind != RHS.Kind)
7931         return LHS.Kind < RHS.Kind;
7932       return false;
7933     }
7934   };
7935 
7936   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7937   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7938   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7939   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7940 
7941   /// Categorize the pointer arguments of CB that might access memory in
7942   /// AccessedLoc and update the state and access map accordingly.
7943   void
7944   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7945                                      AAMemoryLocation::StateType &AccessedLocs,
7946                                      bool &Changed);
7947 
7948   /// Return the kind(s) of location that may be accessed by \p V.
7949   AAMemoryLocation::MemoryLocationsKind
7950   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7951 
7952   /// Return the access kind as determined by \p I.
7953   AccessKind getAccessKindFromInst(const Instruction *I) {
7954     AccessKind AK = READ_WRITE;
7955     if (I) {
7956       AK = I->mayReadFromMemory() ? READ : NONE;
7957       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7958     }
7959     return AK;
7960   }
7961 
7962   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7963   /// an access of kind \p AK to a \p MLK memory location with the access
7964   /// pointer \p Ptr.
7965   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7966                                  MemoryLocationsKind MLK, const Instruction *I,
7967                                  const Value *Ptr, bool &Changed,
7968                                  AccessKind AK = READ_WRITE) {
7969 
7970     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7971     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7972     if (!Accesses)
7973       Accesses = new (Allocator) AccessSet();
7974     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7975     State.removeAssumedBits(MLK);
7976   }
7977 
7978   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7979   /// arguments, and update the state and access map accordingly.
7980   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7981                           AAMemoryLocation::StateType &State, bool &Changed);
7982 
7983   /// Used to allocate access sets.
7984   BumpPtrAllocator &Allocator;
7985 
7986   /// The set of IR attributes AAMemoryLocation deals with.
7987   static const Attribute::AttrKind AttrKinds[4];
7988 };
7989 
7990 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7991     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7992     Attribute::InaccessibleMemOrArgMemOnly};
7993 
7994 void AAMemoryLocationImpl::categorizePtrValue(
7995     Attributor &A, const Instruction &I, const Value &Ptr,
7996     AAMemoryLocation::StateType &State, bool &Changed) {
7997   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7998                     << Ptr << " ["
7999                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
8000 
8001   SmallVector<Value *, 8> Objects;
8002   bool UsedAssumedInformation = false;
8003   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
8004                                        UsedAssumedInformation,
8005                                        AA::Intraprocedural)) {
8006     LLVM_DEBUG(
8007         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
8008     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
8009                               getAccessKindFromInst(&I));
8010     return;
8011   }
8012 
8013   for (Value *Obj : Objects) {
8014     // TODO: recognize the TBAA used for constant accesses.
8015     MemoryLocationsKind MLK = NO_LOCATIONS;
8016     if (isa<UndefValue>(Obj))
8017       continue;
8018     if (isa<Argument>(Obj)) {
8019       // TODO: For now we do not treat byval arguments as local copies performed
8020       // on the call edge, though, we should. To make that happen we need to
8021       // teach various passes, e.g., DSE, about the copy effect of a byval. That
8022       // would also allow us to mark functions only accessing byval arguments as
8023       // readnone again, atguably their acceses have no effect outside of the
8024       // function, like accesses to allocas.
8025       MLK = NO_ARGUMENT_MEM;
8026     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
8027       // Reading constant memory is not treated as a read "effect" by the
8028       // function attr pass so we won't neither. Constants defined by TBAA are
8029       // similar. (We know we do not write it because it is constant.)
8030       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
8031         if (GVar->isConstant())
8032           continue;
8033 
8034       if (GV->hasLocalLinkage())
8035         MLK = NO_GLOBAL_INTERNAL_MEM;
8036       else
8037         MLK = NO_GLOBAL_EXTERNAL_MEM;
8038     } else if (isa<ConstantPointerNull>(Obj) &&
8039                !NullPointerIsDefined(getAssociatedFunction(),
8040                                      Ptr.getType()->getPointerAddressSpace())) {
8041       continue;
8042     } else if (isa<AllocaInst>(Obj)) {
8043       MLK = NO_LOCAL_MEM;
8044     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
8045       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
8046           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
8047       if (NoAliasAA.isAssumedNoAlias())
8048         MLK = NO_MALLOCED_MEM;
8049       else
8050         MLK = NO_UNKOWN_MEM;
8051     } else {
8052       MLK = NO_UNKOWN_MEM;
8053     }
8054 
8055     assert(MLK != NO_LOCATIONS && "No location specified!");
8056     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
8057                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
8058                       << "\n");
8059     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
8060                               getAccessKindFromInst(&I));
8061   }
8062 
8063   LLVM_DEBUG(
8064       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
8065              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
8066 }
8067 
8068 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
8069     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
8070     bool &Changed) {
8071   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
8072 
8073     // Skip non-pointer arguments.
8074     const Value *ArgOp = CB.getArgOperand(ArgNo);
8075     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
8076       continue;
8077 
8078     // Skip readnone arguments.
8079     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
8080     const auto &ArgOpMemLocationAA =
8081         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
8082 
8083     if (ArgOpMemLocationAA.isAssumedReadNone())
8084       continue;
8085 
8086     // Categorize potentially accessed pointer arguments as if there was an
8087     // access instruction with them as pointer.
8088     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
8089   }
8090 }
8091 
8092 AAMemoryLocation::MemoryLocationsKind
8093 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
8094                                                   bool &Changed) {
8095   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
8096                     << I << "\n");
8097 
8098   AAMemoryLocation::StateType AccessedLocs;
8099   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
8100 
8101   if (auto *CB = dyn_cast<CallBase>(&I)) {
8102 
8103     // First check if we assume any memory is access is visible.
8104     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
8105         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
8106     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
8107                       << " [" << CBMemLocationAA << "]\n");
8108 
8109     if (CBMemLocationAA.isAssumedReadNone())
8110       return NO_LOCATIONS;
8111 
8112     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
8113       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
8114                                 Changed, getAccessKindFromInst(&I));
8115       return AccessedLocs.getAssumed();
8116     }
8117 
8118     uint32_t CBAssumedNotAccessedLocs =
8119         CBMemLocationAA.getAssumedNotAccessedLocation();
8120 
8121     // Set the argmemonly and global bit as we handle them separately below.
8122     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8123         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8124 
8125     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8126       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8127         continue;
8128       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8129                                 getAccessKindFromInst(&I));
8130     }
8131 
8132     // Now handle global memory if it might be accessed. This is slightly tricky
8133     // as NO_GLOBAL_MEM has multiple bits set.
8134     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8135     if (HasGlobalAccesses) {
8136       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8137                             AccessKind Kind, MemoryLocationsKind MLK) {
8138         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8139                                   getAccessKindFromInst(&I));
8140         return true;
8141       };
8142       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8143               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8144         return AccessedLocs.getWorstState();
8145     }
8146 
8147     LLVM_DEBUG(
8148         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8149                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8150 
8151     // Now handle argument memory if it might be accessed.
8152     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8153     if (HasArgAccesses)
8154       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8155 
8156     LLVM_DEBUG(
8157         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8158                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8159 
8160     return AccessedLocs.getAssumed();
8161   }
8162 
8163   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8164     LLVM_DEBUG(
8165         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8166                << I << " [" << *Ptr << "]\n");
8167     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8168     return AccessedLocs.getAssumed();
8169   }
8170 
8171   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8172                     << I << "\n");
8173   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8174                             getAccessKindFromInst(&I));
8175   return AccessedLocs.getAssumed();
8176 }
8177 
8178 /// An AA to represent the memory behavior function attributes.
8179 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8180   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8181       : AAMemoryLocationImpl(IRP, A) {}
8182 
8183   /// See AbstractAttribute::updateImpl(Attributor &A).
8184   virtual ChangeStatus updateImpl(Attributor &A) override {
8185 
8186     const auto &MemBehaviorAA =
8187         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8188     if (MemBehaviorAA.isAssumedReadNone()) {
8189       if (MemBehaviorAA.isKnownReadNone())
8190         return indicateOptimisticFixpoint();
8191       assert(isAssumedReadNone() &&
8192              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8193       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8194       return ChangeStatus::UNCHANGED;
8195     }
8196 
8197     // The current assumed state used to determine a change.
8198     auto AssumedState = getAssumed();
8199     bool Changed = false;
8200 
8201     auto CheckRWInst = [&](Instruction &I) {
8202       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8203       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8204                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8205       removeAssumedBits(inverseLocation(MLK, false, false));
8206       // Stop once only the valid bit set in the *not assumed location*, thus
8207       // once we don't actually exclude any memory locations in the state.
8208       return getAssumedNotAccessedLocation() != VALID_STATE;
8209     };
8210 
8211     bool UsedAssumedInformation = false;
8212     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8213                                             UsedAssumedInformation))
8214       return indicatePessimisticFixpoint();
8215 
8216     Changed |= AssumedState != getAssumed();
8217     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8218   }
8219 
8220   /// See AbstractAttribute::trackStatistics()
8221   void trackStatistics() const override {
8222     if (isAssumedReadNone())
8223       STATS_DECLTRACK_FN_ATTR(readnone)
8224     else if (isAssumedArgMemOnly())
8225       STATS_DECLTRACK_FN_ATTR(argmemonly)
8226     else if (isAssumedInaccessibleMemOnly())
8227       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8228     else if (isAssumedInaccessibleOrArgMemOnly())
8229       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8230   }
8231 };
8232 
8233 /// AAMemoryLocation attribute for call sites.
8234 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8235   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8236       : AAMemoryLocationImpl(IRP, A) {}
8237 
8238   /// See AbstractAttribute::initialize(...).
8239   void initialize(Attributor &A) override {
8240     AAMemoryLocationImpl::initialize(A);
8241     Function *F = getAssociatedFunction();
8242     if (!F || F->isDeclaration())
8243       indicatePessimisticFixpoint();
8244   }
8245 
8246   /// See AbstractAttribute::updateImpl(...).
8247   ChangeStatus updateImpl(Attributor &A) override {
8248     // TODO: Once we have call site specific value information we can provide
8249     //       call site specific liveness liveness information and then it makes
8250     //       sense to specialize attributes for call sites arguments instead of
8251     //       redirecting requests to the callee argument.
8252     Function *F = getAssociatedFunction();
8253     const IRPosition &FnPos = IRPosition::function(*F);
8254     auto &FnAA =
8255         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8256     bool Changed = false;
8257     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8258                           AccessKind Kind, MemoryLocationsKind MLK) {
8259       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8260                                 getAccessKindFromInst(I));
8261       return true;
8262     };
8263     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8264       return indicatePessimisticFixpoint();
8265     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8266   }
8267 
8268   /// See AbstractAttribute::trackStatistics()
8269   void trackStatistics() const override {
8270     if (isAssumedReadNone())
8271       STATS_DECLTRACK_CS_ATTR(readnone)
8272   }
8273 };
8274 } // namespace
8275 
8276 /// ------------------ Value Constant Range Attribute -------------------------
8277 
8278 namespace {
8279 struct AAValueConstantRangeImpl : AAValueConstantRange {
8280   using StateType = IntegerRangeState;
8281   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8282       : AAValueConstantRange(IRP, A) {}
8283 
8284   /// See AbstractAttribute::initialize(..).
8285   void initialize(Attributor &A) override {
8286     if (A.hasSimplificationCallback(getIRPosition())) {
8287       indicatePessimisticFixpoint();
8288       return;
8289     }
8290 
8291     // Intersect a range given by SCEV.
8292     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8293 
8294     // Intersect a range given by LVI.
8295     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8296   }
8297 
8298   /// See AbstractAttribute::getAsStr().
8299   const std::string getAsStr() const override {
8300     std::string Str;
8301     llvm::raw_string_ostream OS(Str);
8302     OS << "range(" << getBitWidth() << ")<";
8303     getKnown().print(OS);
8304     OS << " / ";
8305     getAssumed().print(OS);
8306     OS << ">";
8307     return OS.str();
8308   }
8309 
8310   /// Helper function to get a SCEV expr for the associated value at program
8311   /// point \p I.
8312   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8313     if (!getAnchorScope())
8314       return nullptr;
8315 
8316     ScalarEvolution *SE =
8317         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8318             *getAnchorScope());
8319 
8320     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8321         *getAnchorScope());
8322 
8323     if (!SE || !LI)
8324       return nullptr;
8325 
8326     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8327     if (!I)
8328       return S;
8329 
8330     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8331   }
8332 
8333   /// Helper function to get a range from SCEV for the associated value at
8334   /// program point \p I.
8335   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8336                                          const Instruction *I = nullptr) const {
8337     if (!getAnchorScope())
8338       return getWorstState(getBitWidth());
8339 
8340     ScalarEvolution *SE =
8341         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8342             *getAnchorScope());
8343 
8344     const SCEV *S = getSCEV(A, I);
8345     if (!SE || !S)
8346       return getWorstState(getBitWidth());
8347 
8348     return SE->getUnsignedRange(S);
8349   }
8350 
8351   /// Helper function to get a range from LVI for the associated value at
8352   /// program point \p I.
8353   ConstantRange
8354   getConstantRangeFromLVI(Attributor &A,
8355                           const Instruction *CtxI = nullptr) const {
8356     if (!getAnchorScope())
8357       return getWorstState(getBitWidth());
8358 
8359     LazyValueInfo *LVI =
8360         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8361             *getAnchorScope());
8362 
8363     if (!LVI || !CtxI)
8364       return getWorstState(getBitWidth());
8365     return LVI->getConstantRange(&getAssociatedValue(),
8366                                  const_cast<Instruction *>(CtxI));
8367   }
8368 
8369   /// Return true if \p CtxI is valid for querying outside analyses.
8370   /// This basically makes sure we do not ask intra-procedural analysis
8371   /// about a context in the wrong function or a context that violates
8372   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8373   /// if the original context of this AA is OK or should be considered invalid.
8374   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8375                                                const Instruction *CtxI,
8376                                                bool AllowAACtxI) const {
8377     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8378       return false;
8379 
8380     // Our context might be in a different function, neither intra-procedural
8381     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8382     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8383       return false;
8384 
8385     // If the context is not dominated by the value there are paths to the
8386     // context that do not define the value. This cannot be handled by
8387     // LazyValueInfo so we need to bail.
8388     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8389       InformationCache &InfoCache = A.getInfoCache();
8390       const DominatorTree *DT =
8391           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8392               *I->getFunction());
8393       return DT && DT->dominates(I, CtxI);
8394     }
8395 
8396     return true;
8397   }
8398 
8399   /// See AAValueConstantRange::getKnownConstantRange(..).
8400   ConstantRange
8401   getKnownConstantRange(Attributor &A,
8402                         const Instruction *CtxI = nullptr) const override {
8403     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8404                                                  /* AllowAACtxI */ false))
8405       return getKnown();
8406 
8407     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8408     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8409     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8410   }
8411 
8412   /// See AAValueConstantRange::getAssumedConstantRange(..).
8413   ConstantRange
8414   getAssumedConstantRange(Attributor &A,
8415                           const Instruction *CtxI = nullptr) const override {
8416     // TODO: Make SCEV use Attributor assumption.
8417     //       We may be able to bound a variable range via assumptions in
8418     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8419     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8420     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8421                                                  /* AllowAACtxI */ false))
8422       return getAssumed();
8423 
8424     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8425     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8426     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8427   }
8428 
8429   /// Helper function to create MDNode for range metadata.
8430   static MDNode *
8431   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8432                             const ConstantRange &AssumedConstantRange) {
8433     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8434                                   Ty, AssumedConstantRange.getLower())),
8435                               ConstantAsMetadata::get(ConstantInt::get(
8436                                   Ty, AssumedConstantRange.getUpper()))};
8437     return MDNode::get(Ctx, LowAndHigh);
8438   }
8439 
8440   /// Return true if \p Assumed is included in \p KnownRanges.
8441   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8442 
8443     if (Assumed.isFullSet())
8444       return false;
8445 
8446     if (!KnownRanges)
8447       return true;
8448 
8449     // If multiple ranges are annotated in IR, we give up to annotate assumed
8450     // range for now.
8451 
8452     // TODO:  If there exists a known range which containts assumed range, we
8453     // can say assumed range is better.
8454     if (KnownRanges->getNumOperands() > 2)
8455       return false;
8456 
8457     ConstantInt *Lower =
8458         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8459     ConstantInt *Upper =
8460         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8461 
8462     ConstantRange Known(Lower->getValue(), Upper->getValue());
8463     return Known.contains(Assumed) && Known != Assumed;
8464   }
8465 
8466   /// Helper function to set range metadata.
8467   static bool
8468   setRangeMetadataIfisBetterRange(Instruction *I,
8469                                   const ConstantRange &AssumedConstantRange) {
8470     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8471     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8472       if (!AssumedConstantRange.isEmptySet()) {
8473         I->setMetadata(LLVMContext::MD_range,
8474                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8475                                                  AssumedConstantRange));
8476         return true;
8477       }
8478     }
8479     return false;
8480   }
8481 
8482   /// See AbstractAttribute::manifest()
8483   ChangeStatus manifest(Attributor &A) override {
8484     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8485     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8486     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8487 
8488     auto &V = getAssociatedValue();
8489     if (!AssumedConstantRange.isEmptySet() &&
8490         !AssumedConstantRange.isSingleElement()) {
8491       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8492         assert(I == getCtxI() && "Should not annotate an instruction which is "
8493                                  "not the context instruction");
8494         if (isa<CallInst>(I) || isa<LoadInst>(I))
8495           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8496             Changed = ChangeStatus::CHANGED;
8497       }
8498     }
8499 
8500     return Changed;
8501   }
8502 };
8503 
8504 struct AAValueConstantRangeArgument final
8505     : AAArgumentFromCallSiteArguments<
8506           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8507           true /* BridgeCallBaseContext */> {
8508   using Base = AAArgumentFromCallSiteArguments<
8509       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8510       true /* BridgeCallBaseContext */>;
8511   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8512       : Base(IRP, A) {}
8513 
8514   /// See AbstractAttribute::initialize(..).
8515   void initialize(Attributor &A) override {
8516     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8517       indicatePessimisticFixpoint();
8518     } else {
8519       Base::initialize(A);
8520     }
8521   }
8522 
8523   /// See AbstractAttribute::trackStatistics()
8524   void trackStatistics() const override {
8525     STATS_DECLTRACK_ARG_ATTR(value_range)
8526   }
8527 };
8528 
8529 struct AAValueConstantRangeReturned
8530     : AAReturnedFromReturnedValues<AAValueConstantRange,
8531                                    AAValueConstantRangeImpl,
8532                                    AAValueConstantRangeImpl::StateType,
8533                                    /* PropogateCallBaseContext */ true> {
8534   using Base =
8535       AAReturnedFromReturnedValues<AAValueConstantRange,
8536                                    AAValueConstantRangeImpl,
8537                                    AAValueConstantRangeImpl::StateType,
8538                                    /* PropogateCallBaseContext */ true>;
8539   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8540       : Base(IRP, A) {}
8541 
8542   /// See AbstractAttribute::initialize(...).
8543   void initialize(Attributor &A) override {}
8544 
8545   /// See AbstractAttribute::trackStatistics()
8546   void trackStatistics() const override {
8547     STATS_DECLTRACK_FNRET_ATTR(value_range)
8548   }
8549 };
8550 
8551 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8552   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8553       : AAValueConstantRangeImpl(IRP, A) {}
8554 
8555   /// See AbstractAttribute::initialize(...).
8556   void initialize(Attributor &A) override {
8557     AAValueConstantRangeImpl::initialize(A);
8558     if (isAtFixpoint())
8559       return;
8560 
8561     Value &V = getAssociatedValue();
8562 
8563     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8564       unionAssumed(ConstantRange(C->getValue()));
8565       indicateOptimisticFixpoint();
8566       return;
8567     }
8568 
8569     if (isa<UndefValue>(&V)) {
8570       // Collapse the undef state to 0.
8571       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8572       indicateOptimisticFixpoint();
8573       return;
8574     }
8575 
8576     if (isa<CallBase>(&V))
8577       return;
8578 
8579     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8580       return;
8581 
8582     // If it is a load instruction with range metadata, use it.
8583     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8584       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8585         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8586         return;
8587       }
8588 
8589     // We can work with PHI and select instruction as we traverse their operands
8590     // during update.
8591     if (isa<SelectInst>(V) || isa<PHINode>(V))
8592       return;
8593 
8594     // Otherwise we give up.
8595     indicatePessimisticFixpoint();
8596 
8597     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8598                       << getAssociatedValue() << "\n");
8599   }
8600 
8601   bool calculateBinaryOperator(
8602       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8603       const Instruction *CtxI,
8604       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8605     Value *LHS = BinOp->getOperand(0);
8606     Value *RHS = BinOp->getOperand(1);
8607 
8608     // Simplify the operands first.
8609     bool UsedAssumedInformation = false;
8610     const auto &SimplifiedLHS =
8611         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8612                                *this, UsedAssumedInformation);
8613     if (!SimplifiedLHS.hasValue())
8614       return true;
8615     if (!SimplifiedLHS.getValue())
8616       return false;
8617     LHS = *SimplifiedLHS;
8618 
8619     const auto &SimplifiedRHS =
8620         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8621                                *this, UsedAssumedInformation);
8622     if (!SimplifiedRHS.hasValue())
8623       return true;
8624     if (!SimplifiedRHS.getValue())
8625       return false;
8626     RHS = *SimplifiedRHS;
8627 
8628     // TODO: Allow non integers as well.
8629     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8630       return false;
8631 
8632     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8633         *this, IRPosition::value(*LHS, getCallBaseContext()),
8634         DepClassTy::REQUIRED);
8635     QuerriedAAs.push_back(&LHSAA);
8636     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8637 
8638     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8639         *this, IRPosition::value(*RHS, getCallBaseContext()),
8640         DepClassTy::REQUIRED);
8641     QuerriedAAs.push_back(&RHSAA);
8642     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8643 
8644     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8645 
8646     T.unionAssumed(AssumedRange);
8647 
8648     // TODO: Track a known state too.
8649 
8650     return T.isValidState();
8651   }
8652 
8653   bool calculateCastInst(
8654       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8655       const Instruction *CtxI,
8656       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8657     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8658     // TODO: Allow non integers as well.
8659     Value *OpV = CastI->getOperand(0);
8660 
8661     // Simplify the operand first.
8662     bool UsedAssumedInformation = false;
8663     const auto &SimplifiedOpV =
8664         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8665                                *this, UsedAssumedInformation);
8666     if (!SimplifiedOpV.hasValue())
8667       return true;
8668     if (!SimplifiedOpV.getValue())
8669       return false;
8670     OpV = *SimplifiedOpV;
8671 
8672     if (!OpV->getType()->isIntegerTy())
8673       return false;
8674 
8675     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8676         *this, IRPosition::value(*OpV, getCallBaseContext()),
8677         DepClassTy::REQUIRED);
8678     QuerriedAAs.push_back(&OpAA);
8679     T.unionAssumed(
8680         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8681     return T.isValidState();
8682   }
8683 
8684   bool
8685   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8686                    const Instruction *CtxI,
8687                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8688     Value *LHS = CmpI->getOperand(0);
8689     Value *RHS = CmpI->getOperand(1);
8690 
8691     // Simplify the operands first.
8692     bool UsedAssumedInformation = false;
8693     const auto &SimplifiedLHS =
8694         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8695                                *this, UsedAssumedInformation);
8696     if (!SimplifiedLHS.hasValue())
8697       return true;
8698     if (!SimplifiedLHS.getValue())
8699       return false;
8700     LHS = *SimplifiedLHS;
8701 
8702     const auto &SimplifiedRHS =
8703         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8704                                *this, UsedAssumedInformation);
8705     if (!SimplifiedRHS.hasValue())
8706       return true;
8707     if (!SimplifiedRHS.getValue())
8708       return false;
8709     RHS = *SimplifiedRHS;
8710 
8711     // TODO: Allow non integers as well.
8712     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8713       return false;
8714 
8715     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8716         *this, IRPosition::value(*LHS, getCallBaseContext()),
8717         DepClassTy::REQUIRED);
8718     QuerriedAAs.push_back(&LHSAA);
8719     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8720         *this, IRPosition::value(*RHS, getCallBaseContext()),
8721         DepClassTy::REQUIRED);
8722     QuerriedAAs.push_back(&RHSAA);
8723     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8724     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8725 
8726     // If one of them is empty set, we can't decide.
8727     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8728       return true;
8729 
8730     bool MustTrue = false, MustFalse = false;
8731 
8732     auto AllowedRegion =
8733         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8734 
8735     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8736       MustFalse = true;
8737 
8738     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8739       MustTrue = true;
8740 
8741     assert((!MustTrue || !MustFalse) &&
8742            "Either MustTrue or MustFalse should be false!");
8743 
8744     if (MustTrue)
8745       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8746     else if (MustFalse)
8747       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8748     else
8749       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8750 
8751     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8752                       << " " << RHSAA << "\n");
8753 
8754     // TODO: Track a known state too.
8755     return T.isValidState();
8756   }
8757 
8758   /// See AbstractAttribute::updateImpl(...).
8759   ChangeStatus updateImpl(Attributor &A) override {
8760     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8761                             IntegerRangeState &T, bool Stripped) -> bool {
8762       Instruction *I = dyn_cast<Instruction>(&V);
8763       if (!I || isa<CallBase>(I)) {
8764 
8765         // Simplify the operand first.
8766         bool UsedAssumedInformation = false;
8767         const auto &SimplifiedOpV =
8768             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8769                                    *this, UsedAssumedInformation);
8770         if (!SimplifiedOpV.hasValue())
8771           return true;
8772         if (!SimplifiedOpV.getValue())
8773           return false;
8774         Value *VPtr = *SimplifiedOpV;
8775 
8776         // If the value is not instruction, we query AA to Attributor.
8777         const auto &AA = A.getAAFor<AAValueConstantRange>(
8778             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8779             DepClassTy::REQUIRED);
8780 
8781         // Clamp operator is not used to utilize a program point CtxI.
8782         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8783 
8784         return T.isValidState();
8785       }
8786 
8787       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8788       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8789         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8790           return false;
8791       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8792         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8793           return false;
8794       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8795         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8796           return false;
8797       } else {
8798         // Give up with other instructions.
8799         // TODO: Add other instructions
8800 
8801         T.indicatePessimisticFixpoint();
8802         return false;
8803       }
8804 
8805       // Catch circular reasoning in a pessimistic way for now.
8806       // TODO: Check how the range evolves and if we stripped anything, see also
8807       //       AADereferenceable or AAAlign for similar situations.
8808       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8809         if (QueriedAA != this)
8810           continue;
8811         // If we are in a stady state we do not need to worry.
8812         if (T.getAssumed() == getState().getAssumed())
8813           continue;
8814         T.indicatePessimisticFixpoint();
8815       }
8816 
8817       return T.isValidState();
8818     };
8819 
8820     IntegerRangeState T(getBitWidth());
8821 
8822     bool UsedAssumedInformation = false;
8823     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8824                                                   VisitValueCB, getCtxI(),
8825                                                   UsedAssumedInformation,
8826                                                   /* UseValueSimplify */ false))
8827       return indicatePessimisticFixpoint();
8828 
8829     // Ensure that long def-use chains can't cause circular reasoning either by
8830     // introducing a cutoff below.
8831     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8832       return ChangeStatus::UNCHANGED;
8833     if (++NumChanges > MaxNumChanges) {
8834       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8835                         << " but only " << MaxNumChanges
8836                         << " are allowed to avoid cyclic reasoning.");
8837       return indicatePessimisticFixpoint();
8838     }
8839     return ChangeStatus::CHANGED;
8840   }
8841 
8842   /// See AbstractAttribute::trackStatistics()
8843   void trackStatistics() const override {
8844     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8845   }
8846 
8847   /// Tracker to bail after too many widening steps of the constant range.
8848   int NumChanges = 0;
8849 
8850   /// Upper bound for the number of allowed changes (=widening steps) for the
8851   /// constant range before we give up.
8852   static constexpr int MaxNumChanges = 5;
8853 };
8854 
8855 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8856   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8857       : AAValueConstantRangeImpl(IRP, A) {}
8858 
8859   /// See AbstractAttribute::initialize(...).
8860   ChangeStatus updateImpl(Attributor &A) override {
8861     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8862                      "not be called");
8863   }
8864 
8865   /// See AbstractAttribute::trackStatistics()
8866   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8867 };
8868 
8869 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8870   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8871       : AAValueConstantRangeFunction(IRP, A) {}
8872 
8873   /// See AbstractAttribute::trackStatistics()
8874   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8875 };
8876 
8877 struct AAValueConstantRangeCallSiteReturned
8878     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8879                                      AAValueConstantRangeImpl,
8880                                      AAValueConstantRangeImpl::StateType,
8881                                      /* IntroduceCallBaseContext */ true> {
8882   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8883       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8884                                        AAValueConstantRangeImpl,
8885                                        AAValueConstantRangeImpl::StateType,
8886                                        /* IntroduceCallBaseContext */ true>(IRP,
8887                                                                             A) {
8888   }
8889 
8890   /// See AbstractAttribute::initialize(...).
8891   void initialize(Attributor &A) override {
8892     // If it is a load instruction with range metadata, use the metadata.
8893     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8894       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8895         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8896 
8897     AAValueConstantRangeImpl::initialize(A);
8898   }
8899 
8900   /// See AbstractAttribute::trackStatistics()
8901   void trackStatistics() const override {
8902     STATS_DECLTRACK_CSRET_ATTR(value_range)
8903   }
8904 };
8905 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8906   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8907       : AAValueConstantRangeFloating(IRP, A) {}
8908 
8909   /// See AbstractAttribute::manifest()
8910   ChangeStatus manifest(Attributor &A) override {
8911     return ChangeStatus::UNCHANGED;
8912   }
8913 
8914   /// See AbstractAttribute::trackStatistics()
8915   void trackStatistics() const override {
8916     STATS_DECLTRACK_CSARG_ATTR(value_range)
8917   }
8918 };
8919 } // namespace
8920 
8921 /// ------------------ Potential Values Attribute -------------------------
8922 
8923 namespace {
8924 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8925   using StateType = PotentialConstantIntValuesState;
8926 
8927   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8928       : AAPotentialConstantValues(IRP, A) {}
8929 
8930   /// See AbstractAttribute::initialize(..).
8931   void initialize(Attributor &A) override {
8932     if (A.hasSimplificationCallback(getIRPosition()))
8933       indicatePessimisticFixpoint();
8934     else
8935       AAPotentialConstantValues::initialize(A);
8936   }
8937 
8938   /// See AbstractAttribute::getAsStr().
8939   const std::string getAsStr() const override {
8940     std::string Str;
8941     llvm::raw_string_ostream OS(Str);
8942     OS << getState();
8943     return OS.str();
8944   }
8945 
8946   /// See AbstractAttribute::updateImpl(...).
8947   ChangeStatus updateImpl(Attributor &A) override {
8948     return indicatePessimisticFixpoint();
8949   }
8950 };
8951 
8952 struct AAPotentialConstantValuesArgument final
8953     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8954                                       AAPotentialConstantValuesImpl,
8955                                       PotentialConstantIntValuesState> {
8956   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8957                                                AAPotentialConstantValuesImpl,
8958                                                PotentialConstantIntValuesState>;
8959   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
8960       : Base(IRP, A) {}
8961 
8962   /// See AbstractAttribute::initialize(..).
8963   void initialize(Attributor &A) override {
8964     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8965       indicatePessimisticFixpoint();
8966     } else {
8967       Base::initialize(A);
8968     }
8969   }
8970 
8971   /// See AbstractAttribute::trackStatistics()
8972   void trackStatistics() const override {
8973     STATS_DECLTRACK_ARG_ATTR(potential_values)
8974   }
8975 };
8976 
8977 struct AAPotentialConstantValuesReturned
8978     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
8979                                    AAPotentialConstantValuesImpl> {
8980   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
8981                                             AAPotentialConstantValuesImpl>;
8982   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
8983       : Base(IRP, A) {}
8984 
8985   /// See AbstractAttribute::trackStatistics()
8986   void trackStatistics() const override {
8987     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8988   }
8989 };
8990 
8991 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
8992   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
8993       : AAPotentialConstantValuesImpl(IRP, A) {}
8994 
8995   /// See AbstractAttribute::initialize(..).
8996   void initialize(Attributor &A) override {
8997     AAPotentialConstantValuesImpl::initialize(A);
8998     if (isAtFixpoint())
8999       return;
9000 
9001     Value &V = getAssociatedValue();
9002 
9003     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9004       unionAssumed(C->getValue());
9005       indicateOptimisticFixpoint();
9006       return;
9007     }
9008 
9009     if (isa<UndefValue>(&V)) {
9010       unionAssumedWithUndef();
9011       indicateOptimisticFixpoint();
9012       return;
9013     }
9014 
9015     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
9016       return;
9017 
9018     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
9019       return;
9020 
9021     indicatePessimisticFixpoint();
9022 
9023     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
9024                       << getAssociatedValue() << "\n");
9025   }
9026 
9027   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
9028                                 const APInt &RHS) {
9029     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
9030   }
9031 
9032   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
9033                                  uint32_t ResultBitWidth) {
9034     Instruction::CastOps CastOp = CI->getOpcode();
9035     switch (CastOp) {
9036     default:
9037       llvm_unreachable("unsupported or not integer cast");
9038     case Instruction::Trunc:
9039       return Src.trunc(ResultBitWidth);
9040     case Instruction::SExt:
9041       return Src.sext(ResultBitWidth);
9042     case Instruction::ZExt:
9043       return Src.zext(ResultBitWidth);
9044     case Instruction::BitCast:
9045       return Src;
9046     }
9047   }
9048 
9049   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
9050                                        const APInt &LHS, const APInt &RHS,
9051                                        bool &SkipOperation, bool &Unsupported) {
9052     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
9053     // Unsupported is set to true when the binary operator is not supported.
9054     // SkipOperation is set to true when UB occur with the given operand pair
9055     // (LHS, RHS).
9056     // TODO: we should look at nsw and nuw keywords to handle operations
9057     //       that create poison or undef value.
9058     switch (BinOpcode) {
9059     default:
9060       Unsupported = true;
9061       return LHS;
9062     case Instruction::Add:
9063       return LHS + RHS;
9064     case Instruction::Sub:
9065       return LHS - RHS;
9066     case Instruction::Mul:
9067       return LHS * RHS;
9068     case Instruction::UDiv:
9069       if (RHS.isZero()) {
9070         SkipOperation = true;
9071         return LHS;
9072       }
9073       return LHS.udiv(RHS);
9074     case Instruction::SDiv:
9075       if (RHS.isZero()) {
9076         SkipOperation = true;
9077         return LHS;
9078       }
9079       return LHS.sdiv(RHS);
9080     case Instruction::URem:
9081       if (RHS.isZero()) {
9082         SkipOperation = true;
9083         return LHS;
9084       }
9085       return LHS.urem(RHS);
9086     case Instruction::SRem:
9087       if (RHS.isZero()) {
9088         SkipOperation = true;
9089         return LHS;
9090       }
9091       return LHS.srem(RHS);
9092     case Instruction::Shl:
9093       return LHS.shl(RHS);
9094     case Instruction::LShr:
9095       return LHS.lshr(RHS);
9096     case Instruction::AShr:
9097       return LHS.ashr(RHS);
9098     case Instruction::And:
9099       return LHS & RHS;
9100     case Instruction::Or:
9101       return LHS | RHS;
9102     case Instruction::Xor:
9103       return LHS ^ RHS;
9104     }
9105   }
9106 
9107   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
9108                                            const APInt &LHS, const APInt &RHS) {
9109     bool SkipOperation = false;
9110     bool Unsupported = false;
9111     APInt Result =
9112         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
9113     if (Unsupported)
9114       return false;
9115     // If SkipOperation is true, we can ignore this operand pair (L, R).
9116     if (!SkipOperation)
9117       unionAssumed(Result);
9118     return isValidState();
9119   }
9120 
9121   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
9122     auto AssumedBefore = getAssumed();
9123     Value *LHS = ICI->getOperand(0);
9124     Value *RHS = ICI->getOperand(1);
9125 
9126     // Simplify the operands first.
9127     bool UsedAssumedInformation = false;
9128     const auto &SimplifiedLHS =
9129         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9130                                *this, UsedAssumedInformation);
9131     if (!SimplifiedLHS.hasValue())
9132       return ChangeStatus::UNCHANGED;
9133     if (!SimplifiedLHS.getValue())
9134       return indicatePessimisticFixpoint();
9135     LHS = *SimplifiedLHS;
9136 
9137     const auto &SimplifiedRHS =
9138         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9139                                *this, UsedAssumedInformation);
9140     if (!SimplifiedRHS.hasValue())
9141       return ChangeStatus::UNCHANGED;
9142     if (!SimplifiedRHS.getValue())
9143       return indicatePessimisticFixpoint();
9144     RHS = *SimplifiedRHS;
9145 
9146     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9147       return indicatePessimisticFixpoint();
9148 
9149     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9150         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9151     if (!LHSAA.isValidState())
9152       return indicatePessimisticFixpoint();
9153 
9154     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9155         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9156     if (!RHSAA.isValidState())
9157       return indicatePessimisticFixpoint();
9158 
9159     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9160     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9161 
9162     // TODO: make use of undef flag to limit potential values aggressively.
9163     bool MaybeTrue = false, MaybeFalse = false;
9164     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
9165     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9166       // The result of any comparison between undefs can be soundly replaced
9167       // with undef.
9168       unionAssumedWithUndef();
9169     } else if (LHSAA.undefIsContained()) {
9170       for (const APInt &R : RHSAAPVS) {
9171         bool CmpResult = calculateICmpInst(ICI, Zero, R);
9172         MaybeTrue |= CmpResult;
9173         MaybeFalse |= !CmpResult;
9174         if (MaybeTrue & MaybeFalse)
9175           return indicatePessimisticFixpoint();
9176       }
9177     } else if (RHSAA.undefIsContained()) {
9178       for (const APInt &L : LHSAAPVS) {
9179         bool CmpResult = calculateICmpInst(ICI, L, Zero);
9180         MaybeTrue |= CmpResult;
9181         MaybeFalse |= !CmpResult;
9182         if (MaybeTrue & MaybeFalse)
9183           return indicatePessimisticFixpoint();
9184       }
9185     } else {
9186       for (const APInt &L : LHSAAPVS) {
9187         for (const APInt &R : RHSAAPVS) {
9188           bool CmpResult = calculateICmpInst(ICI, L, R);
9189           MaybeTrue |= CmpResult;
9190           MaybeFalse |= !CmpResult;
9191           if (MaybeTrue & MaybeFalse)
9192             return indicatePessimisticFixpoint();
9193         }
9194       }
9195     }
9196     if (MaybeTrue)
9197       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
9198     if (MaybeFalse)
9199       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
9200     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9201                                          : ChangeStatus::CHANGED;
9202   }
9203 
9204   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
9205     auto AssumedBefore = getAssumed();
9206     Value *LHS = SI->getTrueValue();
9207     Value *RHS = SI->getFalseValue();
9208 
9209     // Simplify the operands first.
9210     bool UsedAssumedInformation = false;
9211     const auto &SimplifiedLHS =
9212         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9213                                *this, UsedAssumedInformation);
9214     if (!SimplifiedLHS.hasValue())
9215       return ChangeStatus::UNCHANGED;
9216     if (!SimplifiedLHS.getValue())
9217       return indicatePessimisticFixpoint();
9218     LHS = *SimplifiedLHS;
9219 
9220     const auto &SimplifiedRHS =
9221         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9222                                *this, UsedAssumedInformation);
9223     if (!SimplifiedRHS.hasValue())
9224       return ChangeStatus::UNCHANGED;
9225     if (!SimplifiedRHS.getValue())
9226       return indicatePessimisticFixpoint();
9227     RHS = *SimplifiedRHS;
9228 
9229     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9230       return indicatePessimisticFixpoint();
9231 
9232     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9233                                                   UsedAssumedInformation);
9234 
9235     // Check if we only need one operand.
9236     bool OnlyLeft = false, OnlyRight = false;
9237     if (C.hasValue() && *C && (*C)->isOneValue())
9238       OnlyLeft = true;
9239     else if (C.hasValue() && *C && (*C)->isZeroValue())
9240       OnlyRight = true;
9241 
9242     const AAPotentialConstantValues *LHSAA = nullptr, *RHSAA = nullptr;
9243     if (!OnlyRight) {
9244       LHSAA = &A.getAAFor<AAPotentialConstantValues>(
9245           *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9246       if (!LHSAA->isValidState())
9247         return indicatePessimisticFixpoint();
9248     }
9249     if (!OnlyLeft) {
9250       RHSAA = &A.getAAFor<AAPotentialConstantValues>(
9251           *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9252       if (!RHSAA->isValidState())
9253         return indicatePessimisticFixpoint();
9254     }
9255 
9256     if (!LHSAA || !RHSAA) {
9257       // select (true/false), lhs, rhs
9258       auto *OpAA = LHSAA ? LHSAA : RHSAA;
9259 
9260       if (OpAA->undefIsContained())
9261         unionAssumedWithUndef();
9262       else
9263         unionAssumed(*OpAA);
9264 
9265     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9266       // select i1 *, undef , undef => undef
9267       unionAssumedWithUndef();
9268     } else {
9269       unionAssumed(*LHSAA);
9270       unionAssumed(*RHSAA);
9271     }
9272     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9273                                          : ChangeStatus::CHANGED;
9274   }
9275 
9276   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9277     auto AssumedBefore = getAssumed();
9278     if (!CI->isIntegerCast())
9279       return indicatePessimisticFixpoint();
9280     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9281     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9282     Value *Src = CI->getOperand(0);
9283 
9284     // Simplify the operand first.
9285     bool UsedAssumedInformation = false;
9286     const auto &SimplifiedSrc =
9287         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9288                                *this, UsedAssumedInformation);
9289     if (!SimplifiedSrc.hasValue())
9290       return ChangeStatus::UNCHANGED;
9291     if (!SimplifiedSrc.getValue())
9292       return indicatePessimisticFixpoint();
9293     Src = *SimplifiedSrc;
9294 
9295     auto &SrcAA = A.getAAFor<AAPotentialConstantValues>(
9296         *this, IRPosition::value(*Src), DepClassTy::REQUIRED);
9297     if (!SrcAA.isValidState())
9298       return indicatePessimisticFixpoint();
9299     const SetTy &SrcAAPVS = SrcAA.getAssumedSet();
9300     if (SrcAA.undefIsContained())
9301       unionAssumedWithUndef();
9302     else {
9303       for (const APInt &S : SrcAAPVS) {
9304         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9305         unionAssumed(T);
9306       }
9307     }
9308     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9309                                          : ChangeStatus::CHANGED;
9310   }
9311 
9312   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9313     auto AssumedBefore = getAssumed();
9314     Value *LHS = BinOp->getOperand(0);
9315     Value *RHS = BinOp->getOperand(1);
9316 
9317     // Simplify the operands first.
9318     bool UsedAssumedInformation = false;
9319     const auto &SimplifiedLHS =
9320         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9321                                *this, UsedAssumedInformation);
9322     if (!SimplifiedLHS.hasValue())
9323       return ChangeStatus::UNCHANGED;
9324     if (!SimplifiedLHS.getValue())
9325       return indicatePessimisticFixpoint();
9326     LHS = *SimplifiedLHS;
9327 
9328     const auto &SimplifiedRHS =
9329         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9330                                *this, UsedAssumedInformation);
9331     if (!SimplifiedRHS.hasValue())
9332       return ChangeStatus::UNCHANGED;
9333     if (!SimplifiedRHS.getValue())
9334       return indicatePessimisticFixpoint();
9335     RHS = *SimplifiedRHS;
9336 
9337     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9338       return indicatePessimisticFixpoint();
9339 
9340     auto &LHSAA = A.getAAFor<AAPotentialConstantValues>(
9341         *this, IRPosition::value(*LHS), DepClassTy::REQUIRED);
9342     if (!LHSAA.isValidState())
9343       return indicatePessimisticFixpoint();
9344 
9345     auto &RHSAA = A.getAAFor<AAPotentialConstantValues>(
9346         *this, IRPosition::value(*RHS), DepClassTy::REQUIRED);
9347     if (!RHSAA.isValidState())
9348       return indicatePessimisticFixpoint();
9349 
9350     const SetTy &LHSAAPVS = LHSAA.getAssumedSet();
9351     const SetTy &RHSAAPVS = RHSAA.getAssumedSet();
9352     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9353 
9354     // TODO: make use of undef flag to limit potential values aggressively.
9355     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9356       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9357         return indicatePessimisticFixpoint();
9358     } else if (LHSAA.undefIsContained()) {
9359       for (const APInt &R : RHSAAPVS) {
9360         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9361           return indicatePessimisticFixpoint();
9362       }
9363     } else if (RHSAA.undefIsContained()) {
9364       for (const APInt &L : LHSAAPVS) {
9365         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9366           return indicatePessimisticFixpoint();
9367       }
9368     } else {
9369       for (const APInt &L : LHSAAPVS) {
9370         for (const APInt &R : RHSAAPVS) {
9371           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9372             return indicatePessimisticFixpoint();
9373         }
9374       }
9375     }
9376     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9377                                          : ChangeStatus::CHANGED;
9378   }
9379 
9380   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9381     auto AssumedBefore = getAssumed();
9382     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9383       Value *IncomingValue = PHI->getIncomingValue(u);
9384 
9385       // Simplify the operand first.
9386       bool UsedAssumedInformation = false;
9387       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9388           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9389           UsedAssumedInformation);
9390       if (!SimplifiedIncomingValue.hasValue())
9391         continue;
9392       if (!SimplifiedIncomingValue.getValue())
9393         return indicatePessimisticFixpoint();
9394       IncomingValue = *SimplifiedIncomingValue;
9395 
9396       auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
9397           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9398       if (!PotentialValuesAA.isValidState())
9399         return indicatePessimisticFixpoint();
9400       if (PotentialValuesAA.undefIsContained())
9401         unionAssumedWithUndef();
9402       else
9403         unionAssumed(PotentialValuesAA.getAssumed());
9404     }
9405     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9406                                          : ChangeStatus::CHANGED;
9407   }
9408 
9409   /// See AbstractAttribute::updateImpl(...).
9410   ChangeStatus updateImpl(Attributor &A) override {
9411     Value &V = getAssociatedValue();
9412     Instruction *I = dyn_cast<Instruction>(&V);
9413 
9414     if (auto *ICI = dyn_cast<ICmpInst>(I))
9415       return updateWithICmpInst(A, ICI);
9416 
9417     if (auto *SI = dyn_cast<SelectInst>(I))
9418       return updateWithSelectInst(A, SI);
9419 
9420     if (auto *CI = dyn_cast<CastInst>(I))
9421       return updateWithCastInst(A, CI);
9422 
9423     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9424       return updateWithBinaryOperator(A, BinOp);
9425 
9426     if (auto *PHI = dyn_cast<PHINode>(I))
9427       return updateWithPHINode(A, PHI);
9428 
9429     return indicatePessimisticFixpoint();
9430   }
9431 
9432   /// See AbstractAttribute::trackStatistics()
9433   void trackStatistics() const override {
9434     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9435   }
9436 };
9437 
9438 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9439   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9440       : AAPotentialConstantValuesImpl(IRP, A) {}
9441 
9442   /// See AbstractAttribute::initialize(...).
9443   ChangeStatus updateImpl(Attributor &A) override {
9444     llvm_unreachable(
9445         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9446         "not be called");
9447   }
9448 
9449   /// See AbstractAttribute::trackStatistics()
9450   void trackStatistics() const override {
9451     STATS_DECLTRACK_FN_ATTR(potential_values)
9452   }
9453 };
9454 
9455 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9456   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9457       : AAPotentialConstantValuesFunction(IRP, A) {}
9458 
9459   /// See AbstractAttribute::trackStatistics()
9460   void trackStatistics() const override {
9461     STATS_DECLTRACK_CS_ATTR(potential_values)
9462   }
9463 };
9464 
9465 struct AAPotentialConstantValuesCallSiteReturned
9466     : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9467                                      AAPotentialConstantValuesImpl> {
9468   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9469                                             Attributor &A)
9470       : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9471                                        AAPotentialConstantValuesImpl>(IRP, A) {}
9472 
9473   /// See AbstractAttribute::trackStatistics()
9474   void trackStatistics() const override {
9475     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9476   }
9477 };
9478 
9479 struct AAPotentialConstantValuesCallSiteArgument
9480     : AAPotentialConstantValuesFloating {
9481   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9482                                             Attributor &A)
9483       : AAPotentialConstantValuesFloating(IRP, A) {}
9484 
9485   /// See AbstractAttribute::initialize(..).
9486   void initialize(Attributor &A) override {
9487     AAPotentialConstantValuesImpl::initialize(A);
9488     if (isAtFixpoint())
9489       return;
9490 
9491     Value &V = getAssociatedValue();
9492 
9493     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9494       unionAssumed(C->getValue());
9495       indicateOptimisticFixpoint();
9496       return;
9497     }
9498 
9499     if (isa<UndefValue>(&V)) {
9500       unionAssumedWithUndef();
9501       indicateOptimisticFixpoint();
9502       return;
9503     }
9504   }
9505 
9506   /// See AbstractAttribute::updateImpl(...).
9507   ChangeStatus updateImpl(Attributor &A) override {
9508     Value &V = getAssociatedValue();
9509     auto AssumedBefore = getAssumed();
9510     auto &AA = A.getAAFor<AAPotentialConstantValues>(
9511         *this, IRPosition::value(V), DepClassTy::REQUIRED);
9512     const auto &S = AA.getAssumed();
9513     unionAssumed(S);
9514     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9515                                          : ChangeStatus::CHANGED;
9516   }
9517 
9518   /// See AbstractAttribute::trackStatistics()
9519   void trackStatistics() const override {
9520     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9521   }
9522 };
9523 
9524 /// ------------------------ NoUndef Attribute ---------------------------------
9525 struct AANoUndefImpl : AANoUndef {
9526   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9527 
9528   /// See AbstractAttribute::initialize(...).
9529   void initialize(Attributor &A) override {
9530     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9531       indicateOptimisticFixpoint();
9532       return;
9533     }
9534     Value &V = getAssociatedValue();
9535     if (isa<UndefValue>(V))
9536       indicatePessimisticFixpoint();
9537     else if (isa<FreezeInst>(V))
9538       indicateOptimisticFixpoint();
9539     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9540              isGuaranteedNotToBeUndefOrPoison(&V))
9541       indicateOptimisticFixpoint();
9542     else
9543       AANoUndef::initialize(A);
9544   }
9545 
9546   /// See followUsesInMBEC
9547   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9548                        AANoUndef::StateType &State) {
9549     const Value *UseV = U->get();
9550     const DominatorTree *DT = nullptr;
9551     AssumptionCache *AC = nullptr;
9552     InformationCache &InfoCache = A.getInfoCache();
9553     if (Function *F = getAnchorScope()) {
9554       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9555       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9556     }
9557     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9558     bool TrackUse = false;
9559     // Track use for instructions which must produce undef or poison bits when
9560     // at least one operand contains such bits.
9561     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9562       TrackUse = true;
9563     return TrackUse;
9564   }
9565 
9566   /// See AbstractAttribute::getAsStr().
9567   const std::string getAsStr() const override {
9568     return getAssumed() ? "noundef" : "may-undef-or-poison";
9569   }
9570 
9571   ChangeStatus manifest(Attributor &A) override {
9572     // We don't manifest noundef attribute for dead positions because the
9573     // associated values with dead positions would be replaced with undef
9574     // values.
9575     bool UsedAssumedInformation = false;
9576     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9577                         UsedAssumedInformation))
9578       return ChangeStatus::UNCHANGED;
9579     // A position whose simplified value does not have any value is
9580     // considered to be dead. We don't manifest noundef in such positions for
9581     // the same reason above.
9582     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9583              .has_value())
9584       return ChangeStatus::UNCHANGED;
9585     return AANoUndef::manifest(A);
9586   }
9587 };
9588 
9589 struct AANoUndefFloating : public AANoUndefImpl {
9590   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9591       : AANoUndefImpl(IRP, A) {}
9592 
9593   /// See AbstractAttribute::initialize(...).
9594   void initialize(Attributor &A) override {
9595     AANoUndefImpl::initialize(A);
9596     if (!getState().isAtFixpoint())
9597       if (Instruction *CtxI = getCtxI())
9598         followUsesInMBEC(*this, A, getState(), *CtxI);
9599   }
9600 
9601   /// See AbstractAttribute::updateImpl(...).
9602   ChangeStatus updateImpl(Attributor &A) override {
9603     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9604                             AANoUndef::StateType &T, bool Stripped) -> bool {
9605       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9606                                              DepClassTy::REQUIRED);
9607       if (!Stripped && this == &AA) {
9608         T.indicatePessimisticFixpoint();
9609       } else {
9610         const AANoUndef::StateType &S =
9611             static_cast<const AANoUndef::StateType &>(AA.getState());
9612         T ^= S;
9613       }
9614       return T.isValidState();
9615     };
9616 
9617     StateType T;
9618     bool UsedAssumedInformation = false;
9619     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9620                                           VisitValueCB, getCtxI(),
9621                                           UsedAssumedInformation))
9622       return indicatePessimisticFixpoint();
9623 
9624     return clampStateAndIndicateChange(getState(), T);
9625   }
9626 
9627   /// See AbstractAttribute::trackStatistics()
9628   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9629 };
9630 
9631 struct AANoUndefReturned final
9632     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9633   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9634       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9635 
9636   /// See AbstractAttribute::trackStatistics()
9637   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9638 };
9639 
9640 struct AANoUndefArgument final
9641     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9642   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9643       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9644 
9645   /// See AbstractAttribute::trackStatistics()
9646   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9647 };
9648 
9649 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9650   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9651       : AANoUndefFloating(IRP, A) {}
9652 
9653   /// See AbstractAttribute::trackStatistics()
9654   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9655 };
9656 
9657 struct AANoUndefCallSiteReturned final
9658     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9659   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9660       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9661 
9662   /// See AbstractAttribute::trackStatistics()
9663   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9664 };
9665 
9666 struct AACallEdgesImpl : public AACallEdges {
9667   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9668 
9669   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9670     return CalledFunctions;
9671   }
9672 
9673   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9674 
9675   virtual bool hasNonAsmUnknownCallee() const override {
9676     return HasUnknownCalleeNonAsm;
9677   }
9678 
9679   const std::string getAsStr() const override {
9680     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9681            std::to_string(CalledFunctions.size()) + "]";
9682   }
9683 
9684   void trackStatistics() const override {}
9685 
9686 protected:
9687   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9688     if (CalledFunctions.insert(Fn)) {
9689       Change = ChangeStatus::CHANGED;
9690       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9691                         << "\n");
9692     }
9693   }
9694 
9695   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9696     if (!HasUnknownCallee)
9697       Change = ChangeStatus::CHANGED;
9698     if (NonAsm && !HasUnknownCalleeNonAsm)
9699       Change = ChangeStatus::CHANGED;
9700     HasUnknownCalleeNonAsm |= NonAsm;
9701     HasUnknownCallee = true;
9702   }
9703 
9704 private:
9705   /// Optimistic set of functions that might be called by this position.
9706   SetVector<Function *> CalledFunctions;
9707 
9708   /// Is there any call with a unknown callee.
9709   bool HasUnknownCallee = false;
9710 
9711   /// Is there any call with a unknown callee, excluding any inline asm.
9712   bool HasUnknownCalleeNonAsm = false;
9713 };
9714 
9715 struct AACallEdgesCallSite : public AACallEdgesImpl {
9716   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9717       : AACallEdgesImpl(IRP, A) {}
9718   /// See AbstractAttribute::updateImpl(...).
9719   ChangeStatus updateImpl(Attributor &A) override {
9720     ChangeStatus Change = ChangeStatus::UNCHANGED;
9721 
9722     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9723                           bool Stripped) -> bool {
9724       if (Function *Fn = dyn_cast<Function>(&V)) {
9725         addCalledFunction(Fn, Change);
9726       } else {
9727         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9728         setHasUnknownCallee(true, Change);
9729       }
9730 
9731       // Explore all values.
9732       return true;
9733     };
9734 
9735     // Process any value that we might call.
9736     auto ProcessCalledOperand = [&](Value *V) {
9737       bool DummyValue = false;
9738       bool UsedAssumedInformation = false;
9739       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9740                                        DummyValue, VisitValue, nullptr,
9741                                        UsedAssumedInformation, false)) {
9742         // If we haven't gone through all values, assume that there are unknown
9743         // callees.
9744         setHasUnknownCallee(true, Change);
9745       }
9746     };
9747 
9748     CallBase *CB = cast<CallBase>(getCtxI());
9749 
9750     if (CB->isInlineAsm()) {
9751       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9752           !hasAssumption(*CB, "ompx_no_call_asm"))
9753         setHasUnknownCallee(false, Change);
9754       return Change;
9755     }
9756 
9757     // Process callee metadata if available.
9758     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9759       for (auto &Op : MD->operands()) {
9760         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9761         if (Callee)
9762           addCalledFunction(Callee, Change);
9763       }
9764       return Change;
9765     }
9766 
9767     // The most simple case.
9768     ProcessCalledOperand(CB->getCalledOperand());
9769 
9770     // Process callback functions.
9771     SmallVector<const Use *, 4u> CallbackUses;
9772     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9773     for (const Use *U : CallbackUses)
9774       ProcessCalledOperand(U->get());
9775 
9776     return Change;
9777   }
9778 };
9779 
9780 struct AACallEdgesFunction : public AACallEdgesImpl {
9781   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9782       : AACallEdgesImpl(IRP, A) {}
9783 
9784   /// See AbstractAttribute::updateImpl(...).
9785   ChangeStatus updateImpl(Attributor &A) override {
9786     ChangeStatus Change = ChangeStatus::UNCHANGED;
9787 
9788     auto ProcessCallInst = [&](Instruction &Inst) {
9789       CallBase &CB = cast<CallBase>(Inst);
9790 
9791       auto &CBEdges = A.getAAFor<AACallEdges>(
9792           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9793       if (CBEdges.hasNonAsmUnknownCallee())
9794         setHasUnknownCallee(true, Change);
9795       if (CBEdges.hasUnknownCallee())
9796         setHasUnknownCallee(false, Change);
9797 
9798       for (Function *F : CBEdges.getOptimisticEdges())
9799         addCalledFunction(F, Change);
9800 
9801       return true;
9802     };
9803 
9804     // Visit all callable instructions.
9805     bool UsedAssumedInformation = false;
9806     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9807                                            UsedAssumedInformation,
9808                                            /* CheckBBLivenessOnly */ true)) {
9809       // If we haven't looked at all call like instructions, assume that there
9810       // are unknown callees.
9811       setHasUnknownCallee(true, Change);
9812     }
9813 
9814     return Change;
9815   }
9816 };
9817 
9818 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9819 private:
9820   struct QuerySet {
9821     void markReachable(const Function &Fn) {
9822       Reachable.insert(&Fn);
9823       Unreachable.erase(&Fn);
9824     }
9825 
9826     /// If there is no information about the function None is returned.
9827     Optional<bool> isCachedReachable(const Function &Fn) {
9828       // Assume that we can reach the function.
9829       // TODO: Be more specific with the unknown callee.
9830       if (CanReachUnknownCallee)
9831         return true;
9832 
9833       if (Reachable.count(&Fn))
9834         return true;
9835 
9836       if (Unreachable.count(&Fn))
9837         return false;
9838 
9839       return llvm::None;
9840     }
9841 
9842     /// Set of functions that we know for sure is reachable.
9843     DenseSet<const Function *> Reachable;
9844 
9845     /// Set of functions that are unreachable, but might become reachable.
9846     DenseSet<const Function *> Unreachable;
9847 
9848     /// If we can reach a function with a call to a unknown function we assume
9849     /// that we can reach any function.
9850     bool CanReachUnknownCallee = false;
9851   };
9852 
9853   struct QueryResolver : public QuerySet {
9854     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9855                         ArrayRef<const AACallEdges *> AAEdgesList) {
9856       ChangeStatus Change = ChangeStatus::UNCHANGED;
9857 
9858       for (auto *AAEdges : AAEdgesList) {
9859         if (AAEdges->hasUnknownCallee()) {
9860           if (!CanReachUnknownCallee)
9861             Change = ChangeStatus::CHANGED;
9862           CanReachUnknownCallee = true;
9863           return Change;
9864         }
9865       }
9866 
9867       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9868         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9869           Change = ChangeStatus::CHANGED;
9870           markReachable(*Fn);
9871         }
9872       }
9873       return Change;
9874     }
9875 
9876     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9877                      ArrayRef<const AACallEdges *> AAEdgesList,
9878                      const Function &Fn) {
9879       Optional<bool> Cached = isCachedReachable(Fn);
9880       if (Cached.hasValue())
9881         return Cached.getValue();
9882 
9883       // The query was not cached, thus it is new. We need to request an update
9884       // explicitly to make sure this the information is properly run to a
9885       // fixpoint.
9886       A.registerForUpdate(AA);
9887 
9888       // We need to assume that this function can't reach Fn to prevent
9889       // an infinite loop if this function is recursive.
9890       Unreachable.insert(&Fn);
9891 
9892       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9893       if (Result)
9894         markReachable(Fn);
9895       return Result;
9896     }
9897 
9898     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9899                           ArrayRef<const AACallEdges *> AAEdgesList,
9900                           const Function &Fn) const {
9901 
9902       // Handle the most trivial case first.
9903       for (auto *AAEdges : AAEdgesList) {
9904         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9905 
9906         if (Edges.count(const_cast<Function *>(&Fn)))
9907           return true;
9908       }
9909 
9910       SmallVector<const AAFunctionReachability *, 8> Deps;
9911       for (auto &AAEdges : AAEdgesList) {
9912         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9913 
9914         for (Function *Edge : Edges) {
9915           // Functions that do not call back into the module can be ignored.
9916           if (Edge->hasFnAttribute(Attribute::NoCallback))
9917             continue;
9918 
9919           // We don't need a dependency if the result is reachable.
9920           const AAFunctionReachability &EdgeReachability =
9921               A.getAAFor<AAFunctionReachability>(
9922                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9923           Deps.push_back(&EdgeReachability);
9924 
9925           if (EdgeReachability.canReach(A, Fn))
9926             return true;
9927         }
9928       }
9929 
9930       // The result is false for now, set dependencies and leave.
9931       for (auto *Dep : Deps)
9932         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9933 
9934       return false;
9935     }
9936   };
9937 
9938   /// Get call edges that can be reached by this instruction.
9939   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9940                              const Instruction &Inst,
9941                              SmallVector<const AACallEdges *> &Result) const {
9942     // Determine call like instructions that we can reach from the inst.
9943     auto CheckCallBase = [&](Instruction &CBInst) {
9944       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9945         return true;
9946 
9947       auto &CB = cast<CallBase>(CBInst);
9948       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9949           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9950 
9951       Result.push_back(&AAEdges);
9952       return true;
9953     };
9954 
9955     bool UsedAssumedInformation = false;
9956     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9957                                              UsedAssumedInformation,
9958                                              /* CheckBBLivenessOnly */ true);
9959   }
9960 
9961 public:
9962   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9963       : AAFunctionReachability(IRP, A) {}
9964 
9965   bool canReach(Attributor &A, const Function &Fn) const override {
9966     if (!isValidState())
9967       return true;
9968 
9969     const AACallEdges &AAEdges =
9970         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9971 
9972     // Attributor returns attributes as const, so this function has to be
9973     // const for users of this attribute to use it without having to do
9974     // a const_cast.
9975     // This is a hack for us to be able to cache queries.
9976     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9977     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9978                                                           {&AAEdges}, Fn);
9979 
9980     return Result;
9981   }
9982 
9983   /// Can \p CB reach \p Fn
9984   bool canReach(Attributor &A, CallBase &CB,
9985                 const Function &Fn) const override {
9986     if (!isValidState())
9987       return true;
9988 
9989     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9990         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9991 
9992     // Attributor returns attributes as const, so this function has to be
9993     // const for users of this attribute to use it without having to do
9994     // a const_cast.
9995     // This is a hack for us to be able to cache queries.
9996     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9997     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9998 
9999     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
10000 
10001     return Result;
10002   }
10003 
10004   bool instructionCanReach(Attributor &A, const Instruction &Inst,
10005                            const Function &Fn,
10006                            bool UseBackwards) const override {
10007     if (!isValidState())
10008       return true;
10009 
10010     if (UseBackwards)
10011       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
10012 
10013     const auto &Reachability = A.getAAFor<AAReachability>(
10014         *this, IRPosition::function(*getAssociatedFunction()),
10015         DepClassTy::REQUIRED);
10016 
10017     SmallVector<const AACallEdges *> CallEdges;
10018     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
10019     // Attributor returns attributes as const, so this function has to be
10020     // const for users of this attribute to use it without having to do
10021     // a const_cast.
10022     // This is a hack for us to be able to cache queries.
10023     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
10024     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
10025     if (!AllKnown)
10026       InstQSet.CanReachUnknownCallee = true;
10027 
10028     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
10029   }
10030 
10031   /// See AbstractAttribute::updateImpl(...).
10032   ChangeStatus updateImpl(Attributor &A) override {
10033     const AACallEdges &AAEdges =
10034         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
10035     ChangeStatus Change = ChangeStatus::UNCHANGED;
10036 
10037     Change |= WholeFunction.update(A, *this, {&AAEdges});
10038 
10039     for (auto &CBPair : CBQueries) {
10040       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
10041           *this, IRPosition::callsite_function(*CBPair.first),
10042           DepClassTy::REQUIRED);
10043 
10044       Change |= CBPair.second.update(A, *this, {&AAEdges});
10045     }
10046 
10047     // Update the Instruction queries.
10048     if (!InstQueries.empty()) {
10049       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
10050           *this, IRPosition::function(*getAssociatedFunction()),
10051           DepClassTy::REQUIRED);
10052 
10053       // Check for local callbases first.
10054       for (auto &InstPair : InstQueries) {
10055         SmallVector<const AACallEdges *> CallEdges;
10056         bool AllKnown =
10057             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
10058         // Update will return change if we this effects any queries.
10059         if (!AllKnown)
10060           InstPair.second.CanReachUnknownCallee = true;
10061         Change |= InstPair.second.update(A, *this, CallEdges);
10062       }
10063     }
10064 
10065     return Change;
10066   }
10067 
10068   const std::string getAsStr() const override {
10069     size_t QueryCount =
10070         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
10071 
10072     return "FunctionReachability [" +
10073            std::to_string(WholeFunction.Reachable.size()) + "," +
10074            std::to_string(QueryCount) + "]";
10075   }
10076 
10077   void trackStatistics() const override {}
10078 
10079 private:
10080   bool canReachUnknownCallee() const override {
10081     return WholeFunction.CanReachUnknownCallee;
10082   }
10083 
10084   /// Used to answer if a the whole function can reacha a specific function.
10085   QueryResolver WholeFunction;
10086 
10087   /// Used to answer if a call base inside this function can reach a specific
10088   /// function.
10089   MapVector<const CallBase *, QueryResolver> CBQueries;
10090 
10091   /// This is for instruction queries than scan "forward".
10092   MapVector<const Instruction *, QueryResolver> InstQueries;
10093 };
10094 } // namespace
10095 
10096 /// ---------------------- Assumption Propagation ------------------------------
10097 namespace {
10098 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10099   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10100                        const DenseSet<StringRef> &Known)
10101       : AAAssumptionInfo(IRP, A, Known) {}
10102 
10103   bool hasAssumption(const StringRef Assumption) const override {
10104     return isValidState() && setContains(Assumption);
10105   }
10106 
10107   /// See AbstractAttribute::getAsStr()
10108   const std::string getAsStr() const override {
10109     const SetContents &Known = getKnown();
10110     const SetContents &Assumed = getAssumed();
10111 
10112     const std::string KnownStr =
10113         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10114     const std::string AssumedStr =
10115         (Assumed.isUniversal())
10116             ? "Universal"
10117             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10118 
10119     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10120   }
10121 };
10122 
10123 /// Propagates assumption information from parent functions to all of their
10124 /// successors. An assumption can be propagated if the containing function
10125 /// dominates the called function.
10126 ///
10127 /// We start with a "known" set of assumptions already valid for the associated
10128 /// function and an "assumed" set that initially contains all possible
10129 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10130 /// contents as concrete values are known. The concrete values are seeded by the
10131 /// first nodes that are either entries into the call graph, or contains no
10132 /// assumptions. Each node is updated as the intersection of the assumed state
10133 /// with all of its predecessors.
10134 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10135   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10136       : AAAssumptionInfoImpl(IRP, A,
10137                              getAssumptions(*IRP.getAssociatedFunction())) {}
10138 
10139   /// See AbstractAttribute::manifest(...).
10140   ChangeStatus manifest(Attributor &A) override {
10141     const auto &Assumptions = getKnown();
10142 
10143     // Don't manifest a universal set if it somehow made it here.
10144     if (Assumptions.isUniversal())
10145       return ChangeStatus::UNCHANGED;
10146 
10147     Function *AssociatedFunction = getAssociatedFunction();
10148 
10149     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10150 
10151     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10152   }
10153 
10154   /// See AbstractAttribute::updateImpl(...).
10155   ChangeStatus updateImpl(Attributor &A) override {
10156     bool Changed = false;
10157 
10158     auto CallSitePred = [&](AbstractCallSite ACS) {
10159       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10160           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10161           DepClassTy::REQUIRED);
10162       // Get the set of assumptions shared by all of this function's callers.
10163       Changed |= getIntersection(AssumptionAA.getAssumed());
10164       return !getAssumed().empty() || !getKnown().empty();
10165     };
10166 
10167     bool UsedAssumedInformation = false;
10168     // Get the intersection of all assumptions held by this node's predecessors.
10169     // If we don't know all the call sites then this is either an entry into the
10170     // call graph or an empty node. This node is known to only contain its own
10171     // assumptions and can be propagated to its successors.
10172     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10173                                 UsedAssumedInformation))
10174       return indicatePessimisticFixpoint();
10175 
10176     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10177   }
10178 
10179   void trackStatistics() const override {}
10180 };
10181 
10182 /// Assumption Info defined for call sites.
10183 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10184 
10185   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10186       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10187 
10188   /// See AbstractAttribute::initialize(...).
10189   void initialize(Attributor &A) override {
10190     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10191     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10192   }
10193 
10194   /// See AbstractAttribute::manifest(...).
10195   ChangeStatus manifest(Attributor &A) override {
10196     // Don't manifest a universal set if it somehow made it here.
10197     if (getKnown().isUniversal())
10198       return ChangeStatus::UNCHANGED;
10199 
10200     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10201     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10202 
10203     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10204   }
10205 
10206   /// See AbstractAttribute::updateImpl(...).
10207   ChangeStatus updateImpl(Attributor &A) override {
10208     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10209     auto &AssumptionAA =
10210         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10211     bool Changed = getIntersection(AssumptionAA.getAssumed());
10212     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10213   }
10214 
10215   /// See AbstractAttribute::trackStatistics()
10216   void trackStatistics() const override {}
10217 
10218 private:
10219   /// Helper to initialized the known set as all the assumptions this call and
10220   /// the callee contain.
10221   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10222     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10223     auto Assumptions = getAssumptions(CB);
10224     if (Function *F = IRP.getAssociatedFunction())
10225       set_union(Assumptions, getAssumptions(*F));
10226     if (Function *F = IRP.getAssociatedFunction())
10227       set_union(Assumptions, getAssumptions(*F));
10228     return Assumptions;
10229   }
10230 };
10231 } // namespace
10232 
10233 AACallGraphNode *AACallEdgeIterator::operator*() const {
10234   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10235       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10236 }
10237 
10238 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10239 
10240 const char AAReturnedValues::ID = 0;
10241 const char AANoUnwind::ID = 0;
10242 const char AANoSync::ID = 0;
10243 const char AANoFree::ID = 0;
10244 const char AANonNull::ID = 0;
10245 const char AANoRecurse::ID = 0;
10246 const char AAWillReturn::ID = 0;
10247 const char AAUndefinedBehavior::ID = 0;
10248 const char AANoAlias::ID = 0;
10249 const char AAReachability::ID = 0;
10250 const char AANoReturn::ID = 0;
10251 const char AAIsDead::ID = 0;
10252 const char AADereferenceable::ID = 0;
10253 const char AAAlign::ID = 0;
10254 const char AAInstanceInfo::ID = 0;
10255 const char AANoCapture::ID = 0;
10256 const char AAValueSimplify::ID = 0;
10257 const char AAHeapToStack::ID = 0;
10258 const char AAPrivatizablePtr::ID = 0;
10259 const char AAMemoryBehavior::ID = 0;
10260 const char AAMemoryLocation::ID = 0;
10261 const char AAValueConstantRange::ID = 0;
10262 const char AAPotentialConstantValues::ID = 0;
10263 const char AANoUndef::ID = 0;
10264 const char AACallEdges::ID = 0;
10265 const char AAFunctionReachability::ID = 0;
10266 const char AAPointerInfo::ID = 0;
10267 const char AAAssumptionInfo::ID = 0;
10268 
10269 // Macro magic to create the static generator function for attributes that
10270 // follow the naming scheme.
10271 
10272 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10273   case IRPosition::PK:                                                         \
10274     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10275 
10276 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10277   case IRPosition::PK:                                                         \
10278     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10279     ++NumAAs;                                                                  \
10280     break;
10281 
10282 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10283   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10284     CLASS *AA = nullptr;                                                       \
10285     switch (IRP.getPositionKind()) {                                           \
10286       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10287       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10288       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10289       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10290       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10291       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10292       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10293       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10294     }                                                                          \
10295     return *AA;                                                                \
10296   }
10297 
10298 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10299   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10300     CLASS *AA = nullptr;                                                       \
10301     switch (IRP.getPositionKind()) {                                           \
10302       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10303       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10304       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10305       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10306       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10307       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10308       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10309       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10310     }                                                                          \
10311     return *AA;                                                                \
10312   }
10313 
10314 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10315   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10316     CLASS *AA = nullptr;                                                       \
10317     switch (IRP.getPositionKind()) {                                           \
10318       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10319       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10320       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10321       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10322       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10323       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10324       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10325       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10326     }                                                                          \
10327     return *AA;                                                                \
10328   }
10329 
10330 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10331   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10332     CLASS *AA = nullptr;                                                       \
10333     switch (IRP.getPositionKind()) {                                           \
10334       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10335       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10336       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10337       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10338       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10339       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10340       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10341       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10342     }                                                                          \
10343     return *AA;                                                                \
10344   }
10345 
10346 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10347   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10348     CLASS *AA = nullptr;                                                       \
10349     switch (IRP.getPositionKind()) {                                           \
10350       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10351       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10352       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10353       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10354       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10355       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10356       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10357       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10358     }                                                                          \
10359     return *AA;                                                                \
10360   }
10361 
10362 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10363 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10364 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10365 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10366 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10367 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10368 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10369 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10370 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10371 
10372 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10373 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10374 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10375 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10376 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10377 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10378 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10379 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10380 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10381 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10382 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10383 
10384 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10385 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10386 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10387 
10388 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10389 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10390 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10391 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10392 
10393 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10394 
10395 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10396 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10397 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10398 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10399 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10400 #undef SWITCH_PK_CREATE
10401 #undef SWITCH_PK_INV
10402