1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SCCIterator.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumeBundleQueries.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/LazyValueInfo.h"
25 #include "llvm/Analysis/MemoryBuiltins.h"
26 #include "llvm/Analysis/ScalarEvolution.h"
27 #include "llvm/Analysis/TargetTransformInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/IRBuilder.h"
31 #include "llvm/IR/Instruction.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/NoFolder.h"
35 #include "llvm/Support/Alignment.h"
36 #include "llvm/Support/Casting.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/FileSystem.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
42 #include "llvm/Transforms/Utils/Local.h"
43 #include <cassert>
44 
45 using namespace llvm;
46 
47 #define DEBUG_TYPE "attributor"
48 
49 static cl::opt<bool> ManifestInternal(
50     "attributor-manifest-internal", cl::Hidden,
51     cl::desc("Manifest Attributor internal string attributes."),
52     cl::init(false));
53 
54 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
55                                        cl::Hidden);
56 
57 template <>
58 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
59 
60 static cl::opt<unsigned, true> MaxPotentialValues(
61     "attributor-max-potential-values", cl::Hidden,
62     cl::desc("Maximum number of potential values to be "
63              "tracked for each position."),
64     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
65     cl::init(7));
66 
67 STATISTIC(NumAAs, "Number of abstract attributes created");
68 
69 // Some helper macros to deal with statistics tracking.
70 //
71 // Usage:
72 // For simple IR attribute tracking overload trackStatistics in the abstract
73 // attribute and choose the right STATS_DECLTRACK_********* macro,
74 // e.g.,:
75 //  void trackStatistics() const override {
76 //    STATS_DECLTRACK_ARG_ATTR(returned)
77 //  }
78 // If there is a single "increment" side one can use the macro
79 // STATS_DECLTRACK with a custom message. If there are multiple increment
80 // sides, STATS_DECL and STATS_TRACK can also be used separately.
81 //
82 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
83   ("Number of " #TYPE " marked '" #NAME "'")
84 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
85 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
86 #define STATS_DECL(NAME, TYPE, MSG)                                            \
87   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
88 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
89 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
90   {                                                                            \
91     STATS_DECL(NAME, TYPE, MSG)                                                \
92     STATS_TRACK(NAME, TYPE)                                                    \
93   }
94 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
95   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
96 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, CSArguments,                                           \
98                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
99 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
100   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
101 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
102   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
103 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
104   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
105                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
106 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
107   STATS_DECLTRACK(NAME, CSReturn,                                              \
108                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
109 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
110   STATS_DECLTRACK(NAME, Floating,                                              \
111                   ("Number of floating values known to be '" #NAME "'"))
112 
113 // Specialization of the operator<< for abstract attributes subclasses. This
114 // disambiguates situations where multiple operators are applicable.
115 namespace llvm {
116 #define PIPE_OPERATOR(CLASS)                                                   \
117   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
118     return OS << static_cast<const AbstractAttribute &>(AA);                   \
119   }
120 
121 PIPE_OPERATOR(AAIsDead)
122 PIPE_OPERATOR(AANoUnwind)
123 PIPE_OPERATOR(AANoSync)
124 PIPE_OPERATOR(AANoRecurse)
125 PIPE_OPERATOR(AAWillReturn)
126 PIPE_OPERATOR(AANoReturn)
127 PIPE_OPERATOR(AAReturnedValues)
128 PIPE_OPERATOR(AANonNull)
129 PIPE_OPERATOR(AANoAlias)
130 PIPE_OPERATOR(AADereferenceable)
131 PIPE_OPERATOR(AAAlign)
132 PIPE_OPERATOR(AANoCapture)
133 PIPE_OPERATOR(AAValueSimplify)
134 PIPE_OPERATOR(AANoFree)
135 PIPE_OPERATOR(AAHeapToStack)
136 PIPE_OPERATOR(AAReachability)
137 PIPE_OPERATOR(AAMemoryBehavior)
138 PIPE_OPERATOR(AAMemoryLocation)
139 PIPE_OPERATOR(AAValueConstantRange)
140 PIPE_OPERATOR(AAPrivatizablePtr)
141 PIPE_OPERATOR(AAUndefinedBehavior)
142 PIPE_OPERATOR(AAPotentialValues)
143 PIPE_OPERATOR(AANoUndef)
144 PIPE_OPERATOR(AACallEdges)
145 PIPE_OPERATOR(AAFunctionReachability)
146 PIPE_OPERATOR(AAPointerInfo)
147 
148 #undef PIPE_OPERATOR
149 
150 template <>
151 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
152                                                      const DerefState &R) {
153   ChangeStatus CS0 =
154       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
155   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
156   return CS0 | CS1;
157 }
158 
159 } // namespace llvm
160 
161 /// Get pointer operand of memory accessing instruction. If \p I is
162 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
163 /// is set to false and the instruction is volatile, return nullptr.
164 static const Value *getPointerOperand(const Instruction *I,
165                                       bool AllowVolatile) {
166   if (!AllowVolatile && I->isVolatile())
167     return nullptr;
168 
169   if (auto *LI = dyn_cast<LoadInst>(I)) {
170     return LI->getPointerOperand();
171   }
172 
173   if (auto *SI = dyn_cast<StoreInst>(I)) {
174     return SI->getPointerOperand();
175   }
176 
177   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
178     return CXI->getPointerOperand();
179   }
180 
181   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
182     return RMWI->getPointerOperand();
183   }
184 
185   return nullptr;
186 }
187 
188 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
189 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
190 /// getelement pointer instructions that traverse the natural type of \p Ptr if
191 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
192 /// through a cast to i8*.
193 ///
194 /// TODO: This could probably live somewhere more prominantly if it doesn't
195 ///       already exist.
196 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
197                                int64_t Offset, IRBuilder<NoFolder> &IRB,
198                                const DataLayout &DL) {
199   assert(Offset >= 0 && "Negative offset not supported yet!");
200   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
201                     << "-bytes as " << *ResTy << "\n");
202 
203   if (Offset) {
204     SmallVector<Value *, 4> Indices;
205     std::string GEPName = Ptr->getName().str() + ".0";
206 
207     // Add 0 index to look through the pointer.
208     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
209            "Offset out of bounds");
210     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
211 
212     Type *Ty = PtrElemTy;
213     do {
214       auto *STy = dyn_cast<StructType>(Ty);
215       if (!STy)
216         // Non-aggregate type, we cast and make byte-wise progress now.
217         break;
218 
219       const StructLayout *SL = DL.getStructLayout(STy);
220       if (int64_t(SL->getSizeInBytes()) < Offset)
221         break;
222 
223       uint64_t Idx = SL->getElementContainingOffset(Offset);
224       assert(Idx < STy->getNumElements() && "Offset calculation error!");
225       uint64_t Rem = Offset - SL->getElementOffset(Idx);
226       Ty = STy->getElementType(Idx);
227 
228       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
229                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
230 
231       GEPName += "." + std::to_string(Idx);
232       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
233       Offset = Rem;
234     } while (Offset);
235 
236     // Create a GEP for the indices collected above.
237     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
238 
239     // If an offset is left we use byte-wise adjustment.
240     if (Offset) {
241       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
242       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
243                           GEPName + ".b" + Twine(Offset));
244     }
245   }
246 
247   // Ensure the result has the requested type.
248   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
249 
250   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
251   return Ptr;
252 }
253 
254 /// Recursively visit all values that might become \p IRP at some point. This
255 /// will be done by looking through cast instructions, selects, phis, and calls
256 /// with the "returned" attribute. Once we cannot look through the value any
257 /// further, the callback \p VisitValueCB is invoked and passed the current
258 /// value, the \p State, and a flag to indicate if we stripped anything.
259 /// Stripped means that we unpacked the value associated with \p IRP at least
260 /// once. Note that the value used for the callback may still be the value
261 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
262 /// we will never visit more values than specified by \p MaxValues.
263 template <typename StateTy>
264 static bool genericValueTraversal(
265     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
266     StateTy &State,
267     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
268         VisitValueCB,
269     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
270     function_ref<Value *(Value *)> StripCB = nullptr) {
271 
272   const AAIsDead *LivenessAA = nullptr;
273   if (IRP.getAnchorScope())
274     LivenessAA = &A.getAAFor<AAIsDead>(
275         QueryingAA,
276         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
277         DepClassTy::NONE);
278   bool AnyDead = false;
279 
280   Value *InitialV = &IRP.getAssociatedValue();
281   using Item = std::pair<Value *, const Instruction *>;
282   SmallSet<Item, 16> Visited;
283   SmallVector<Item, 16> Worklist;
284   Worklist.push_back({InitialV, CtxI});
285 
286   int Iteration = 0;
287   do {
288     Item I = Worklist.pop_back_val();
289     Value *V = I.first;
290     CtxI = I.second;
291     if (StripCB)
292       V = StripCB(V);
293 
294     // Check if we should process the current value. To prevent endless
295     // recursion keep a record of the values we followed!
296     if (!Visited.insert(I).second)
297       continue;
298 
299     // Make sure we limit the compile time for complex expressions.
300     if (Iteration++ >= MaxValues)
301       return false;
302 
303     // Explicitly look through calls with a "returned" attribute if we do
304     // not have a pointer as stripPointerCasts only works on them.
305     Value *NewV = nullptr;
306     if (V->getType()->isPointerTy()) {
307       NewV = V->stripPointerCasts();
308     } else {
309       auto *CB = dyn_cast<CallBase>(V);
310       if (CB && CB->getCalledFunction()) {
311         for (Argument &Arg : CB->getCalledFunction()->args())
312           if (Arg.hasReturnedAttr()) {
313             NewV = CB->getArgOperand(Arg.getArgNo());
314             break;
315           }
316       }
317     }
318     if (NewV && NewV != V) {
319       Worklist.push_back({NewV, CtxI});
320       continue;
321     }
322 
323     // Look through select instructions, visit assumed potential values.
324     if (auto *SI = dyn_cast<SelectInst>(V)) {
325       bool UsedAssumedInformation = false;
326       Optional<Constant *> C = A.getAssumedConstant(
327           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
328       bool NoValueYet = !C.hasValue();
329       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
330         continue;
331       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
332         if (CI->isZero())
333           Worklist.push_back({SI->getFalseValue(), CtxI});
334         else
335           Worklist.push_back({SI->getTrueValue(), CtxI});
336         continue;
337       }
338       // We could not simplify the condition, assume both values.(
339       Worklist.push_back({SI->getTrueValue(), CtxI});
340       Worklist.push_back({SI->getFalseValue(), CtxI});
341       continue;
342     }
343 
344     // Look through phi nodes, visit all live operands.
345     if (auto *PHI = dyn_cast<PHINode>(V)) {
346       assert(LivenessAA &&
347              "Expected liveness in the presence of instructions!");
348       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
349         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
350         bool UsedAssumedInformation = false;
351         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
352                             LivenessAA, UsedAssumedInformation,
353                             /* CheckBBLivenessOnly */ true)) {
354           AnyDead = true;
355           continue;
356         }
357         Worklist.push_back(
358             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
359       }
360       continue;
361     }
362 
363     if (UseValueSimplify && !isa<Constant>(V)) {
364       bool UsedAssumedInformation = false;
365       Optional<Value *> SimpleV =
366           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
367       if (!SimpleV.hasValue())
368         continue;
369       if (!SimpleV.getValue())
370         return false;
371       Value *NewV = SimpleV.getValue();
372       if (NewV != V) {
373         Worklist.push_back({NewV, CtxI});
374         continue;
375       }
376     }
377 
378     // Once a leaf is reached we inform the user through the callback.
379     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
380       return false;
381   } while (!Worklist.empty());
382 
383   // If we actually used liveness information so we have to record a dependence.
384   if (AnyDead)
385     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
386 
387   // All values have been visited.
388   return true;
389 }
390 
391 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
392                                      SmallVectorImpl<Value *> &Objects,
393                                      const AbstractAttribute &QueryingAA,
394                                      const Instruction *CtxI) {
395   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
396   SmallPtrSet<Value *, 8> SeenObjects;
397   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
398                                      SmallVectorImpl<Value *> &Objects,
399                                      bool) -> bool {
400     if (SeenObjects.insert(&Val).second)
401       Objects.push_back(&Val);
402     return true;
403   };
404   if (!genericValueTraversal<decltype(Objects)>(
405           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
406           true, 32, StripCB))
407     return false;
408   return true;
409 }
410 
411 const Value *stripAndAccumulateMinimalOffsets(
412     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
413     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
414     bool UseAssumed = false) {
415 
416   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
417     const IRPosition &Pos = IRPosition::value(V);
418     // Only track dependence if we are going to use the assumed info.
419     const AAValueConstantRange &ValueConstantRangeAA =
420         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
421                                          UseAssumed ? DepClassTy::OPTIONAL
422                                                     : DepClassTy::NONE);
423     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
424                                      : ValueConstantRangeAA.getKnown();
425     // We can only use the lower part of the range because the upper part can
426     // be higher than what the value can really be.
427     ROffset = Range.getSignedMin();
428     return true;
429   };
430 
431   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
432                                                 AttributorAnalysis);
433 }
434 
435 static const Value *getMinimalBaseOfAccsesPointerOperand(
436     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
437     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
438   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
439   if (!Ptr)
440     return nullptr;
441   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
442   const Value *Base = stripAndAccumulateMinimalOffsets(
443       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
444 
445   BytesOffset = OffsetAPInt.getSExtValue();
446   return Base;
447 }
448 
449 static const Value *
450 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
451                                      const DataLayout &DL,
452                                      bool AllowNonInbounds = false) {
453   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
454   if (!Ptr)
455     return nullptr;
456 
457   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
458                                           AllowNonInbounds);
459 }
460 
461 /// Clamp the information known for all returned values of a function
462 /// (identified by \p QueryingAA) into \p S.
463 template <typename AAType, typename StateType = typename AAType::StateType>
464 static void clampReturnedValueStates(
465     Attributor &A, const AAType &QueryingAA, StateType &S,
466     const IRPosition::CallBaseContext *CBContext = nullptr) {
467   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
468                     << QueryingAA << " into " << S << "\n");
469 
470   assert((QueryingAA.getIRPosition().getPositionKind() ==
471               IRPosition::IRP_RETURNED ||
472           QueryingAA.getIRPosition().getPositionKind() ==
473               IRPosition::IRP_CALL_SITE_RETURNED) &&
474          "Can only clamp returned value states for a function returned or call "
475          "site returned position!");
476 
477   // Use an optional state as there might not be any return values and we want
478   // to join (IntegerState::operator&) the state of all there are.
479   Optional<StateType> T;
480 
481   // Callback for each possibly returned value.
482   auto CheckReturnValue = [&](Value &RV) -> bool {
483     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
484     const AAType &AA =
485         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
486     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
487                       << " @ " << RVPos << "\n");
488     const StateType &AAS = AA.getState();
489     if (T.hasValue())
490       *T &= AAS;
491     else
492       T = AAS;
493     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
494                       << "\n");
495     return T->isValidState();
496   };
497 
498   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
499     S.indicatePessimisticFixpoint();
500   else if (T.hasValue())
501     S ^= *T;
502 }
503 
504 /// Helper class for generic deduction: return value -> returned position.
505 template <typename AAType, typename BaseType,
506           typename StateType = typename BaseType::StateType,
507           bool PropagateCallBaseContext = false>
508 struct AAReturnedFromReturnedValues : public BaseType {
509   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
510       : BaseType(IRP, A) {}
511 
512   /// See AbstractAttribute::updateImpl(...).
513   ChangeStatus updateImpl(Attributor &A) override {
514     StateType S(StateType::getBestState(this->getState()));
515     clampReturnedValueStates<AAType, StateType>(
516         A, *this, S,
517         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
518     // TODO: If we know we visited all returned values, thus no are assumed
519     // dead, we can take the known information from the state T.
520     return clampStateAndIndicateChange<StateType>(this->getState(), S);
521   }
522 };
523 
524 /// Clamp the information known at all call sites for a given argument
525 /// (identified by \p QueryingAA) into \p S.
526 template <typename AAType, typename StateType = typename AAType::StateType>
527 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
528                                         StateType &S) {
529   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
530                     << QueryingAA << " into " << S << "\n");
531 
532   assert(QueryingAA.getIRPosition().getPositionKind() ==
533              IRPosition::IRP_ARGUMENT &&
534          "Can only clamp call site argument states for an argument position!");
535 
536   // Use an optional state as there might not be any return values and we want
537   // to join (IntegerState::operator&) the state of all there are.
538   Optional<StateType> T;
539 
540   // The argument number which is also the call site argument number.
541   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
542 
543   auto CallSiteCheck = [&](AbstractCallSite ACS) {
544     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
545     // Check if a coresponding argument was found or if it is on not associated
546     // (which can happen for callback calls).
547     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
548       return false;
549 
550     const AAType &AA =
551         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
552     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
553                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
554     const StateType &AAS = AA.getState();
555     if (T.hasValue())
556       *T &= AAS;
557     else
558       T = AAS;
559     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
560                       << "\n");
561     return T->isValidState();
562   };
563 
564   bool AllCallSitesKnown;
565   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
566                               AllCallSitesKnown))
567     S.indicatePessimisticFixpoint();
568   else if (T.hasValue())
569     S ^= *T;
570 }
571 
572 /// This function is the bridge between argument position and the call base
573 /// context.
574 template <typename AAType, typename BaseType,
575           typename StateType = typename AAType::StateType>
576 bool getArgumentStateFromCallBaseContext(Attributor &A,
577                                          BaseType &QueryingAttribute,
578                                          IRPosition &Pos, StateType &State) {
579   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
580          "Expected an 'argument' position !");
581   const CallBase *CBContext = Pos.getCallBaseContext();
582   if (!CBContext)
583     return false;
584 
585   int ArgNo = Pos.getCallSiteArgNo();
586   assert(ArgNo >= 0 && "Invalid Arg No!");
587 
588   const auto &AA = A.getAAFor<AAType>(
589       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
590       DepClassTy::REQUIRED);
591   const StateType &CBArgumentState =
592       static_cast<const StateType &>(AA.getState());
593 
594   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
595                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
596                     << "\n");
597 
598   // NOTE: If we want to do call site grouping it should happen here.
599   State ^= CBArgumentState;
600   return true;
601 }
602 
603 /// Helper class for generic deduction: call site argument -> argument position.
604 template <typename AAType, typename BaseType,
605           typename StateType = typename AAType::StateType,
606           bool BridgeCallBaseContext = false>
607 struct AAArgumentFromCallSiteArguments : public BaseType {
608   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
609       : BaseType(IRP, A) {}
610 
611   /// See AbstractAttribute::updateImpl(...).
612   ChangeStatus updateImpl(Attributor &A) override {
613     StateType S = StateType::getBestState(this->getState());
614 
615     if (BridgeCallBaseContext) {
616       bool Success =
617           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
618               A, *this, this->getIRPosition(), S);
619       if (Success)
620         return clampStateAndIndicateChange<StateType>(this->getState(), S);
621     }
622     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
623 
624     // TODO: If we know we visited all incoming values, thus no are assumed
625     // dead, we can take the known information from the state T.
626     return clampStateAndIndicateChange<StateType>(this->getState(), S);
627   }
628 };
629 
630 /// Helper class for generic replication: function returned -> cs returned.
631 template <typename AAType, typename BaseType,
632           typename StateType = typename BaseType::StateType,
633           bool IntroduceCallBaseContext = false>
634 struct AACallSiteReturnedFromReturned : public BaseType {
635   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
636       : BaseType(IRP, A) {}
637 
638   /// See AbstractAttribute::updateImpl(...).
639   ChangeStatus updateImpl(Attributor &A) override {
640     assert(this->getIRPosition().getPositionKind() ==
641                IRPosition::IRP_CALL_SITE_RETURNED &&
642            "Can only wrap function returned positions for call site returned "
643            "positions!");
644     auto &S = this->getState();
645 
646     const Function *AssociatedFunction =
647         this->getIRPosition().getAssociatedFunction();
648     if (!AssociatedFunction)
649       return S.indicatePessimisticFixpoint();
650 
651     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
652     if (IntroduceCallBaseContext)
653       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
654                         << CBContext << "\n");
655 
656     IRPosition FnPos = IRPosition::returned(
657         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
658     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
659     return clampStateAndIndicateChange(S, AA.getState());
660   }
661 };
662 
663 /// Helper function to accumulate uses.
664 template <class AAType, typename StateType = typename AAType::StateType>
665 static void followUsesInContext(AAType &AA, Attributor &A,
666                                 MustBeExecutedContextExplorer &Explorer,
667                                 const Instruction *CtxI,
668                                 SetVector<const Use *> &Uses,
669                                 StateType &State) {
670   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
671   for (unsigned u = 0; u < Uses.size(); ++u) {
672     const Use *U = Uses[u];
673     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
674       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
675       if (Found && AA.followUseInMBEC(A, U, UserI, State))
676         for (const Use &Us : UserI->uses())
677           Uses.insert(&Us);
678     }
679   }
680 }
681 
682 /// Use the must-be-executed-context around \p I to add information into \p S.
683 /// The AAType class is required to have `followUseInMBEC` method with the
684 /// following signature and behaviour:
685 ///
686 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
687 /// U - Underlying use.
688 /// I - The user of the \p U.
689 /// Returns true if the value should be tracked transitively.
690 ///
691 template <class AAType, typename StateType = typename AAType::StateType>
692 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
693                              Instruction &CtxI) {
694 
695   // Container for (transitive) uses of the associated value.
696   SetVector<const Use *> Uses;
697   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
698     Uses.insert(&U);
699 
700   MustBeExecutedContextExplorer &Explorer =
701       A.getInfoCache().getMustBeExecutedContextExplorer();
702 
703   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
704 
705   if (S.isAtFixpoint())
706     return;
707 
708   SmallVector<const BranchInst *, 4> BrInsts;
709   auto Pred = [&](const Instruction *I) {
710     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
711       if (Br->isConditional())
712         BrInsts.push_back(Br);
713     return true;
714   };
715 
716   // Here, accumulate conditional branch instructions in the context. We
717   // explore the child paths and collect the known states. The disjunction of
718   // those states can be merged to its own state. Let ParentState_i be a state
719   // to indicate the known information for an i-th branch instruction in the
720   // context. ChildStates are created for its successors respectively.
721   //
722   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
723   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
724   //      ...
725   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
726   //
727   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
728   //
729   // FIXME: Currently, recursive branches are not handled. For example, we
730   // can't deduce that ptr must be dereferenced in below function.
731   //
732   // void f(int a, int c, int *ptr) {
733   //    if(a)
734   //      if (b) {
735   //        *ptr = 0;
736   //      } else {
737   //        *ptr = 1;
738   //      }
739   //    else {
740   //      if (b) {
741   //        *ptr = 0;
742   //      } else {
743   //        *ptr = 1;
744   //      }
745   //    }
746   // }
747 
748   Explorer.checkForAllContext(&CtxI, Pred);
749   for (const BranchInst *Br : BrInsts) {
750     StateType ParentState;
751 
752     // The known state of the parent state is a conjunction of children's
753     // known states so it is initialized with a best state.
754     ParentState.indicateOptimisticFixpoint();
755 
756     for (const BasicBlock *BB : Br->successors()) {
757       StateType ChildState;
758 
759       size_t BeforeSize = Uses.size();
760       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
761 
762       // Erase uses which only appear in the child.
763       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
764         It = Uses.erase(It);
765 
766       ParentState &= ChildState;
767     }
768 
769     // Use only known state.
770     S += ParentState;
771   }
772 }
773 
774 /// ------------------------ PointerInfo ---------------------------------------
775 
776 namespace llvm {
777 namespace AA {
778 namespace PointerInfo {
779 
780 /// An access kind description as used by AAPointerInfo.
781 struct OffsetAndSize;
782 
783 struct State;
784 
785 } // namespace PointerInfo
786 } // namespace AA
787 
788 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
789 template <>
790 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
791   using Access = AAPointerInfo::Access;
792   static inline Access getEmptyKey();
793   static inline Access getTombstoneKey();
794   static unsigned getHashValue(const Access &A);
795   static bool isEqual(const Access &LHS, const Access &RHS);
796 };
797 
798 /// Helper that allows OffsetAndSize as a key in a DenseMap.
799 template <>
800 struct DenseMapInfo<AA::PointerInfo ::OffsetAndSize>
801     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
802 
803 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
804 /// but the instruction
805 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
806   using Base = DenseMapInfo<Instruction *>;
807   using Access = AAPointerInfo::Access;
808   static inline Access getEmptyKey();
809   static inline Access getTombstoneKey();
810   static unsigned getHashValue(const Access &A);
811   static bool isEqual(const Access &LHS, const Access &RHS);
812 };
813 
814 } // namespace llvm
815 
816 /// Helper to represent an access offset and size, with logic to deal with
817 /// uncertainty and check for overlapping accesses.
818 struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
819   using BaseTy = std::pair<int64_t, int64_t>;
820   OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
821   OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
822   int64_t getOffset() const { return first; }
823   int64_t getSize() const { return second; }
824   static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
825 
826   /// Return true if this offset and size pair might describe an address that
827   /// overlaps with \p OAS.
828   bool mayOverlap(const OffsetAndSize &OAS) const {
829     // Any unknown value and we are giving up -> overlap.
830     if (OAS.getOffset() == OffsetAndSize::Unknown ||
831         OAS.getSize() == OffsetAndSize::Unknown ||
832         getOffset() == OffsetAndSize::Unknown ||
833         getSize() == OffsetAndSize::Unknown)
834       return true;
835 
836     // Check if one offset point is in the other interval [offset, offset+size].
837     return OAS.getOffset() + OAS.getSize() > getOffset() &&
838            OAS.getOffset() < getOffset() + getSize();
839   }
840 
841   /// Constant used to represent unknown offset or sizes.
842   static constexpr int64_t Unknown = 1 << 31;
843 };
844 
845 /// Implementation of the DenseMapInfo.
846 ///
847 ///{
848 inline llvm::AccessAsInstructionInfo::Access
849 llvm::AccessAsInstructionInfo::getEmptyKey() {
850   return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
851 }
852 inline llvm::AccessAsInstructionInfo::Access
853 llvm::AccessAsInstructionInfo::getTombstoneKey() {
854   return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
855                 nullptr);
856 }
857 unsigned llvm::AccessAsInstructionInfo::getHashValue(
858     const llvm::AccessAsInstructionInfo::Access &A) {
859   return Base::getHashValue(A.getRemoteInst());
860 }
861 bool llvm::AccessAsInstructionInfo::isEqual(
862     const llvm::AccessAsInstructionInfo::Access &LHS,
863     const llvm::AccessAsInstructionInfo::Access &RHS) {
864   return LHS.getRemoteInst() == RHS.getRemoteInst();
865 }
866 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
867 llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
868   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
869                                nullptr);
870 }
871 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
872 llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
873   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
874                                nullptr);
875 }
876 
877 unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
878     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
879   return detail::combineHashValue(
880              DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
881              (A.isWrittenValueYetUndetermined()
882                   ? ~0
883                   : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
884          A.getKind();
885 }
886 
887 bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
888     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
889     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
890   return LHS == RHS;
891 }
892 ///}
893 
894 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
895 struct AA::PointerInfo::State : public AbstractState {
896 
897   /// Return the best possible representable state.
898   static State getBestState(const State &SIS) { return State(); }
899 
900   /// Return the worst possible representable state.
901   static State getWorstState(const State &SIS) {
902     State R;
903     R.indicatePessimisticFixpoint();
904     return R;
905   }
906 
907   State() {}
908   State(const State &SIS) : AccessBins(SIS.AccessBins) {}
909   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
910 
911   const State &getAssumed() const { return *this; }
912 
913   /// See AbstractState::isValidState().
914   bool isValidState() const override { return BS.isValidState(); }
915 
916   /// See AbstractState::isAtFixpoint().
917   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
918 
919   /// See AbstractState::indicateOptimisticFixpoint().
920   ChangeStatus indicateOptimisticFixpoint() override {
921     BS.indicateOptimisticFixpoint();
922     return ChangeStatus::UNCHANGED;
923   }
924 
925   /// See AbstractState::indicatePessimisticFixpoint().
926   ChangeStatus indicatePessimisticFixpoint() override {
927     BS.indicatePessimisticFixpoint();
928     return ChangeStatus::CHANGED;
929   }
930 
931   State &operator=(const State &R) {
932     if (this == &R)
933       return *this;
934     BS = R.BS;
935     AccessBins = R.AccessBins;
936     return *this;
937   }
938 
939   State &operator=(State &&R) {
940     if (this == &R)
941       return *this;
942     std::swap(BS, R.BS);
943     std::swap(AccessBins, R.AccessBins);
944     return *this;
945   }
946 
947   bool operator==(const State &R) const {
948     if (BS != R.BS)
949       return false;
950     if (AccessBins.size() != R.AccessBins.size())
951       return false;
952     auto It = begin(), RIt = R.begin(), E = end();
953     while (It != E) {
954       if (It->getFirst() != RIt->getFirst())
955         return false;
956       auto &Accs = It->getSecond();
957       auto &RAccs = RIt->getSecond();
958       if (Accs.size() != RAccs.size())
959         return false;
960       auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
961       while (AccIt != AccE) {
962         if (*AccIt != *RAccIt)
963           return false;
964         ++AccIt;
965         ++RAccIt;
966       }
967       ++It;
968       ++RIt;
969     }
970     return true;
971   }
972   bool operator!=(const State &R) const { return !(*this == R); }
973 
974   /// We store accesses in a set with the instruction as key.
975   using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
976 
977   /// We store all accesses in bins denoted by their offset and size.
978   using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
979 
980   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
981   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
982 
983 protected:
984   /// The bins with all the accesses for the associated pointer.
985   DenseMap<OffsetAndSize, Accesses> AccessBins;
986 
987   /// Add a new access to the state at offset \p Offset and with size \p Size.
988   /// The access is associated with \p I, writes \p Content (if anything), and
989   /// is of kind \p Kind.
990   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
991   ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
992                          Optional<Value *> Content,
993                          AAPointerInfo::AccessKind Kind, Type *Ty,
994                          Instruction *RemoteI = nullptr,
995                          Accesses *BinPtr = nullptr) {
996     OffsetAndSize Key{Offset, Size};
997     Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
998     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
999     // Check if we have an access for this instruction in this bin, if not,
1000     // simply add it.
1001     auto It = Bin.find(Acc);
1002     if (It == Bin.end()) {
1003       Bin.insert(Acc);
1004       return ChangeStatus::CHANGED;
1005     }
1006     // If the existing access is the same as then new one, nothing changed.
1007     AAPointerInfo::Access Before = *It;
1008     // The new one will be combined with the existing one.
1009     *It &= Acc;
1010     return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1011   }
1012 
1013   /// See AAPointerInfo::forallInterferingAccesses.
1014   bool forallInterferingAccesses(
1015       Instruction &I,
1016       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1017     if (!isValidState())
1018       return false;
1019     // First find the offset and size of I.
1020     OffsetAndSize OAS(-1, -1);
1021     for (auto &It : AccessBins) {
1022       for (auto &Access : It.getSecond()) {
1023         if (Access.getRemoteInst() == &I) {
1024           OAS = It.getFirst();
1025           break;
1026         }
1027       }
1028       if (OAS.getSize() != -1)
1029         break;
1030     }
1031     if (OAS.getSize() == -1)
1032       return true;
1033 
1034     // Now that we have an offset and size, find all overlapping ones and use
1035     // the callback on the accesses.
1036     for (auto &It : AccessBins) {
1037       OffsetAndSize ItOAS = It.getFirst();
1038       if (!OAS.mayOverlap(ItOAS))
1039         continue;
1040       for (auto &Access : It.getSecond())
1041         if (!CB(Access, OAS == ItOAS))
1042           return false;
1043     }
1044     return true;
1045   }
1046 
1047 private:
1048   /// State to track fixpoint and validity.
1049   BooleanState BS;
1050 };
1051 
1052 struct AAPointerInfoImpl
1053     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1054   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1055   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1056 
1057   /// See AbstractAttribute::initialize(...).
1058   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1059 
1060   /// See AbstractAttribute::getAsStr().
1061   const std::string getAsStr() const override {
1062     return std::string("PointerInfo ") +
1063            (isValidState() ? (std::string("#") +
1064                               std::to_string(AccessBins.size()) + " bins")
1065                            : "<invalid>");
1066   }
1067 
1068   /// See AbstractAttribute::manifest(...).
1069   ChangeStatus manifest(Attributor &A) override {
1070     return AAPointerInfo::manifest(A);
1071   }
1072 
1073   bool forallInterferingAccesses(
1074       LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1075       const override {
1076     return State::forallInterferingAccesses(LI, CB);
1077   }
1078   bool forallInterferingAccesses(
1079       StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1080       const override {
1081     return State::forallInterferingAccesses(SI, CB);
1082   }
1083 
1084   ChangeStatus translateAndAddCalleeState(Attributor &A,
1085                                           const AAPointerInfo &CalleeAA,
1086                                           int64_t CallArgOffset, CallBase &CB) {
1087     using namespace AA::PointerInfo;
1088     if (!CalleeAA.getState().isValidState() || !isValidState())
1089       return indicatePessimisticFixpoint();
1090 
1091     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1092     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1093 
1094     // Combine the accesses bin by bin.
1095     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1096     for (auto &It : CalleeImplAA.getState()) {
1097       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1098       if (CallArgOffset != OffsetAndSize::Unknown)
1099         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1100                             It.first.getSize());
1101       Accesses &Bin = AccessBins[OAS];
1102       for (const AAPointerInfo::Access &RAcc : It.second) {
1103         if (IsByval && !RAcc.isRead())
1104           continue;
1105         bool UsedAssumedInformation = false;
1106         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1107             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1108         AccessKind AK =
1109             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1110                                                  : AccessKind::AK_READ_WRITE));
1111         Changed =
1112             Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
1113                                 RAcc.getType(), RAcc.getRemoteInst(), &Bin);
1114       }
1115     }
1116     return Changed;
1117   }
1118 
1119   /// Statistic tracking for all AAPointerInfo implementations.
1120   /// See AbstractAttribute::trackStatistics().
1121   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1122 };
1123 
1124 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1125   using AccessKind = AAPointerInfo::AccessKind;
1126   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1127       : AAPointerInfoImpl(IRP, A) {}
1128 
1129   /// See AbstractAttribute::initialize(...).
1130   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1131 
1132   /// Deal with an access and signal if it was handled successfully.
1133   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1134                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1135                     ChangeStatus &Changed, Type *Ty,
1136                     int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1137     using namespace AA::PointerInfo;
1138     // No need to find a size if one is given or the offset is unknown.
1139     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1140         Ty) {
1141       const DataLayout &DL = A.getDataLayout();
1142       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1143       if (!AccessSize.isScalable())
1144         Size = AccessSize.getFixedSize();
1145     }
1146     Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1147     return true;
1148   };
1149 
1150   /// See AbstractAttribute::updateImpl(...).
1151   ChangeStatus updateImpl(Attributor &A) override {
1152     using namespace AA::PointerInfo;
1153     State S = getState();
1154     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1155     Value &AssociatedValue = getAssociatedValue();
1156     struct OffsetInfo {
1157       int64_t Offset = 0;
1158     };
1159 
1160     const DataLayout &DL = A.getDataLayout();
1161     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1162     OffsetInfoMap[&AssociatedValue] = {};
1163 
1164     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1165                                      bool &Follow) {
1166       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1167       UsrOI = PtrOI;
1168       Follow = true;
1169       return true;
1170     };
1171 
1172     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1173       Value *CurPtr = U.get();
1174       User *Usr = U.getUser();
1175       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1176                         << *Usr << "\n");
1177 
1178       OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1179 
1180       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1181         if (CE->isCast())
1182           return HandlePassthroughUser(Usr, PtrOI, Follow);
1183         if (CE->isCompare())
1184           return true;
1185         if (!CE->isGEPWithNoNotionalOverIndexing()) {
1186           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1187                             << "\n");
1188           return false;
1189         }
1190       }
1191       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1192         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1193         UsrOI = PtrOI;
1194 
1195         // TODO: Use range information.
1196         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1197             !GEP->hasAllConstantIndices()) {
1198           UsrOI.Offset = OffsetAndSize::Unknown;
1199           Follow = true;
1200           return true;
1201         }
1202 
1203         SmallVector<Value *, 8> Indices;
1204         for (Use &Idx : llvm::make_range(GEP->idx_begin(), GEP->idx_end())) {
1205           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1206             Indices.push_back(CIdx);
1207             continue;
1208           }
1209 
1210           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1211                             << " : " << *Idx << "\n");
1212           return false;
1213         }
1214         UsrOI.Offset = PtrOI.Offset +
1215                        DL.getIndexedOffsetInType(
1216                            CurPtr->getType()->getPointerElementType(), Indices);
1217         Follow = true;
1218         return true;
1219       }
1220       if (isa<CastInst>(Usr) || isa<PHINode>(Usr) || isa<SelectInst>(Usr))
1221         return HandlePassthroughUser(Usr, PtrOI, Follow);
1222       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1223         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1224                             AccessKind::AK_READ, PtrOI.Offset, Changed,
1225                             LoadI->getType());
1226       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1227         if (StoreI->getValueOperand() == CurPtr) {
1228           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1229                             << *StoreI << "\n");
1230           return false;
1231         }
1232         bool UsedAssumedInformation = false;
1233         Optional<Value *> Content = A.getAssumedSimplified(
1234             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1235         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1236                             PtrOI.Offset, Changed,
1237                             StoreI->getValueOperand()->getType());
1238       }
1239       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1240         if (CB->isLifetimeStartOrEnd())
1241           return true;
1242         if (CB->isArgOperand(&U)) {
1243           unsigned ArgNo = CB->getArgOperandNo(&U);
1244           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1245               *this, IRPosition::callsite_argument(*CB, ArgNo),
1246               DepClassTy::REQUIRED);
1247           Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
1248                     Changed;
1249           return true;
1250         }
1251         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1252                           << "\n");
1253         // TODO: Allow some call uses
1254         return false;
1255       }
1256 
1257       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1258       return false;
1259     };
1260     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1261                            /* CheckBBLivenessOnly */ true))
1262       return indicatePessimisticFixpoint();
1263 
1264     LLVM_DEBUG({
1265       dbgs() << "Accesses by bin after update:\n";
1266       for (auto &It : AccessBins) {
1267         dbgs() << "[" << It.first.getOffset() << "-"
1268                << It.first.getOffset() + It.first.getSize()
1269                << "] : " << It.getSecond().size() << "\n";
1270         for (auto &Acc : It.getSecond()) {
1271           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1272                  << "\n";
1273           if (Acc.getLocalInst() != Acc.getRemoteInst())
1274             dbgs() << "     -->                         "
1275                    << *Acc.getRemoteInst() << "\n";
1276           if (!Acc.isWrittenValueYetUndetermined())
1277             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1278         }
1279       }
1280     });
1281 
1282     return Changed;
1283   }
1284 
1285   /// See AbstractAttribute::trackStatistics()
1286   void trackStatistics() const override {
1287     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1288   }
1289 };
1290 
1291 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1292   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1293       : AAPointerInfoImpl(IRP, A) {}
1294 
1295   /// See AbstractAttribute::updateImpl(...).
1296   ChangeStatus updateImpl(Attributor &A) override {
1297     return indicatePessimisticFixpoint();
1298   }
1299 
1300   /// See AbstractAttribute::trackStatistics()
1301   void trackStatistics() const override {
1302     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1303   }
1304 };
1305 
1306 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1307   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1308       : AAPointerInfoFloating(IRP, A) {}
1309 
1310   /// See AbstractAttribute::initialize(...).
1311   void initialize(Attributor &A) override {
1312     AAPointerInfoFloating::initialize(A);
1313     if (getAnchorScope()->isDeclaration())
1314       indicatePessimisticFixpoint();
1315   }
1316 
1317   /// See AbstractAttribute::trackStatistics()
1318   void trackStatistics() const override {
1319     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1320   }
1321 };
1322 
1323 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1324   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1325       : AAPointerInfoFloating(IRP, A) {}
1326 
1327   /// See AbstractAttribute::updateImpl(...).
1328   ChangeStatus updateImpl(Attributor &A) override {
1329     using namespace AA::PointerInfo;
1330     // We handle memory intrinsics explicitly, at least the first (=
1331     // destination) and second (=source) arguments as we know how they are
1332     // accessed.
1333     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1334       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1335       int64_t LengthVal = OffsetAndSize::Unknown;
1336       if (Length)
1337         LengthVal = Length->getSExtValue();
1338       Value &Ptr = getAssociatedValue();
1339       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1340       ChangeStatus Changed;
1341       if (ArgNo == 0) {
1342         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1343                      nullptr, LengthVal);
1344       } else if (ArgNo == 1) {
1345         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1346                      nullptr, LengthVal);
1347       } else {
1348         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1349                           << *MI << "\n");
1350         return indicatePessimisticFixpoint();
1351       }
1352       return Changed;
1353     }
1354 
1355     // TODO: Once we have call site specific value information we can provide
1356     //       call site specific liveness information and then it makes
1357     //       sense to specialize attributes for call sites arguments instead of
1358     //       redirecting requests to the callee argument.
1359     Argument *Arg = getAssociatedArgument();
1360     if (!Arg)
1361       return indicatePessimisticFixpoint();
1362     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1363     auto &ArgAA =
1364         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1365     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1366   }
1367 
1368   /// See AbstractAttribute::trackStatistics()
1369   void trackStatistics() const override {
1370     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1371   }
1372 };
1373 
1374 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1375   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1376       : AAPointerInfoFloating(IRP, A) {}
1377 
1378   /// See AbstractAttribute::trackStatistics()
1379   void trackStatistics() const override {
1380     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1381   }
1382 };
1383 
1384 /// -----------------------NoUnwind Function Attribute--------------------------
1385 
1386 struct AANoUnwindImpl : AANoUnwind {
1387   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1388 
1389   const std::string getAsStr() const override {
1390     return getAssumed() ? "nounwind" : "may-unwind";
1391   }
1392 
1393   /// See AbstractAttribute::updateImpl(...).
1394   ChangeStatus updateImpl(Attributor &A) override {
1395     auto Opcodes = {
1396         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1397         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1398         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1399 
1400     auto CheckForNoUnwind = [&](Instruction &I) {
1401       if (!I.mayThrow())
1402         return true;
1403 
1404       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1405         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1406             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1407         return NoUnwindAA.isAssumedNoUnwind();
1408       }
1409       return false;
1410     };
1411 
1412     bool UsedAssumedInformation = false;
1413     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1414                                    UsedAssumedInformation))
1415       return indicatePessimisticFixpoint();
1416 
1417     return ChangeStatus::UNCHANGED;
1418   }
1419 };
1420 
1421 struct AANoUnwindFunction final : public AANoUnwindImpl {
1422   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1423       : AANoUnwindImpl(IRP, A) {}
1424 
1425   /// See AbstractAttribute::trackStatistics()
1426   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1427 };
1428 
1429 /// NoUnwind attribute deduction for a call sites.
1430 struct AANoUnwindCallSite final : AANoUnwindImpl {
1431   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1432       : AANoUnwindImpl(IRP, A) {}
1433 
1434   /// See AbstractAttribute::initialize(...).
1435   void initialize(Attributor &A) override {
1436     AANoUnwindImpl::initialize(A);
1437     Function *F = getAssociatedFunction();
1438     if (!F || F->isDeclaration())
1439       indicatePessimisticFixpoint();
1440   }
1441 
1442   /// See AbstractAttribute::updateImpl(...).
1443   ChangeStatus updateImpl(Attributor &A) override {
1444     // TODO: Once we have call site specific value information we can provide
1445     //       call site specific liveness information and then it makes
1446     //       sense to specialize attributes for call sites arguments instead of
1447     //       redirecting requests to the callee argument.
1448     Function *F = getAssociatedFunction();
1449     const IRPosition &FnPos = IRPosition::function(*F);
1450     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1451     return clampStateAndIndicateChange(getState(), FnAA.getState());
1452   }
1453 
1454   /// See AbstractAttribute::trackStatistics()
1455   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1456 };
1457 
1458 /// --------------------- Function Return Values -------------------------------
1459 
1460 /// "Attribute" that collects all potential returned values and the return
1461 /// instructions that they arise from.
1462 ///
1463 /// If there is a unique returned value R, the manifest method will:
1464 ///   - mark R with the "returned" attribute, if R is an argument.
1465 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1466 
1467   /// Mapping of values potentially returned by the associated function to the
1468   /// return instructions that might return them.
1469   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1470 
1471   /// State flags
1472   ///
1473   ///{
1474   bool IsFixed = false;
1475   bool IsValidState = true;
1476   ///}
1477 
1478 public:
1479   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1480       : AAReturnedValues(IRP, A) {}
1481 
1482   /// See AbstractAttribute::initialize(...).
1483   void initialize(Attributor &A) override {
1484     // Reset the state.
1485     IsFixed = false;
1486     IsValidState = true;
1487     ReturnedValues.clear();
1488 
1489     Function *F = getAssociatedFunction();
1490     if (!F || F->isDeclaration()) {
1491       indicatePessimisticFixpoint();
1492       return;
1493     }
1494     assert(!F->getReturnType()->isVoidTy() &&
1495            "Did not expect a void return type!");
1496 
1497     // The map from instruction opcodes to those instructions in the function.
1498     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1499 
1500     // Look through all arguments, if one is marked as returned we are done.
1501     for (Argument &Arg : F->args()) {
1502       if (Arg.hasReturnedAttr()) {
1503         auto &ReturnInstSet = ReturnedValues[&Arg];
1504         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1505           for (Instruction *RI : *Insts)
1506             ReturnInstSet.insert(cast<ReturnInst>(RI));
1507 
1508         indicateOptimisticFixpoint();
1509         return;
1510       }
1511     }
1512 
1513     if (!A.isFunctionIPOAmendable(*F))
1514       indicatePessimisticFixpoint();
1515   }
1516 
1517   /// See AbstractAttribute::manifest(...).
1518   ChangeStatus manifest(Attributor &A) override;
1519 
1520   /// See AbstractAttribute::getState(...).
1521   AbstractState &getState() override { return *this; }
1522 
1523   /// See AbstractAttribute::getState(...).
1524   const AbstractState &getState() const override { return *this; }
1525 
1526   /// See AbstractAttribute::updateImpl(Attributor &A).
1527   ChangeStatus updateImpl(Attributor &A) override;
1528 
1529   llvm::iterator_range<iterator> returned_values() override {
1530     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1531   }
1532 
1533   llvm::iterator_range<const_iterator> returned_values() const override {
1534     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1535   }
1536 
1537   /// Return the number of potential return values, -1 if unknown.
1538   size_t getNumReturnValues() const override {
1539     return isValidState() ? ReturnedValues.size() : -1;
1540   }
1541 
1542   /// Return an assumed unique return value if a single candidate is found. If
1543   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1544   /// Optional::NoneType.
1545   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1546 
1547   /// See AbstractState::checkForAllReturnedValues(...).
1548   bool checkForAllReturnedValuesAndReturnInsts(
1549       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1550       const override;
1551 
1552   /// Pretty print the attribute similar to the IR representation.
1553   const std::string getAsStr() const override;
1554 
1555   /// See AbstractState::isAtFixpoint().
1556   bool isAtFixpoint() const override { return IsFixed; }
1557 
1558   /// See AbstractState::isValidState().
1559   bool isValidState() const override { return IsValidState; }
1560 
1561   /// See AbstractState::indicateOptimisticFixpoint(...).
1562   ChangeStatus indicateOptimisticFixpoint() override {
1563     IsFixed = true;
1564     return ChangeStatus::UNCHANGED;
1565   }
1566 
1567   ChangeStatus indicatePessimisticFixpoint() override {
1568     IsFixed = true;
1569     IsValidState = false;
1570     return ChangeStatus::CHANGED;
1571   }
1572 };
1573 
1574 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1575   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1576 
1577   // Bookkeeping.
1578   assert(isValidState());
1579   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1580                   "Number of function with known return values");
1581 
1582   // Check if we have an assumed unique return value that we could manifest.
1583   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1584 
1585   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1586     return Changed;
1587 
1588   // Bookkeeping.
1589   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1590                   "Number of function with unique return");
1591   // If the assumed unique return value is an argument, annotate it.
1592   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1593     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1594             getAssociatedFunction()->getReturnType())) {
1595       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1596       Changed = IRAttribute::manifest(A);
1597     }
1598   }
1599   return Changed;
1600 }
1601 
1602 const std::string AAReturnedValuesImpl::getAsStr() const {
1603   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1604          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1605 }
1606 
1607 Optional<Value *>
1608 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1609   // If checkForAllReturnedValues provides a unique value, ignoring potential
1610   // undef values that can also be present, it is assumed to be the actual
1611   // return value and forwarded to the caller of this method. If there are
1612   // multiple, a nullptr is returned indicating there cannot be a unique
1613   // returned value.
1614   Optional<Value *> UniqueRV;
1615   Type *Ty = getAssociatedFunction()->getReturnType();
1616 
1617   auto Pred = [&](Value &RV) -> bool {
1618     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1619     return UniqueRV != Optional<Value *>(nullptr);
1620   };
1621 
1622   if (!A.checkForAllReturnedValues(Pred, *this))
1623     UniqueRV = nullptr;
1624 
1625   return UniqueRV;
1626 }
1627 
1628 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1629     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1630     const {
1631   if (!isValidState())
1632     return false;
1633 
1634   // Check all returned values but ignore call sites as long as we have not
1635   // encountered an overdefined one during an update.
1636   for (auto &It : ReturnedValues) {
1637     Value *RV = It.first;
1638     if (!Pred(*RV, It.second))
1639       return false;
1640   }
1641 
1642   return true;
1643 }
1644 
1645 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1646   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1647 
1648   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1649                            bool) -> bool {
1650     bool UsedAssumedInformation = false;
1651     Optional<Value *> SimpleRetVal =
1652         A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1653     if (!SimpleRetVal.hasValue())
1654       return true;
1655     if (!SimpleRetVal.getValue())
1656       return false;
1657     Value *RetVal = *SimpleRetVal;
1658     assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1659            "Assumed returned value should be valid in function scope!");
1660     if (ReturnedValues[RetVal].insert(&Ret))
1661       Changed = ChangeStatus::CHANGED;
1662     return true;
1663   };
1664 
1665   auto ReturnInstCB = [&](Instruction &I) {
1666     ReturnInst &Ret = cast<ReturnInst>(I);
1667     return genericValueTraversal<ReturnInst>(
1668         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1669         &I);
1670   };
1671 
1672   // Discover returned values from all live returned instructions in the
1673   // associated function.
1674   bool UsedAssumedInformation = false;
1675   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1676                                  UsedAssumedInformation))
1677     return indicatePessimisticFixpoint();
1678   return Changed;
1679 }
1680 
1681 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1682   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1683       : AAReturnedValuesImpl(IRP, A) {}
1684 
1685   /// See AbstractAttribute::trackStatistics()
1686   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1687 };
1688 
1689 /// Returned values information for a call sites.
1690 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1691   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1692       : AAReturnedValuesImpl(IRP, A) {}
1693 
1694   /// See AbstractAttribute::initialize(...).
1695   void initialize(Attributor &A) override {
1696     // TODO: Once we have call site specific value information we can provide
1697     //       call site specific liveness information and then it makes
1698     //       sense to specialize attributes for call sites instead of
1699     //       redirecting requests to the callee.
1700     llvm_unreachable("Abstract attributes for returned values are not "
1701                      "supported for call sites yet!");
1702   }
1703 
1704   /// See AbstractAttribute::updateImpl(...).
1705   ChangeStatus updateImpl(Attributor &A) override {
1706     return indicatePessimisticFixpoint();
1707   }
1708 
1709   /// See AbstractAttribute::trackStatistics()
1710   void trackStatistics() const override {}
1711 };
1712 
1713 /// ------------------------ NoSync Function Attribute -------------------------
1714 
1715 struct AANoSyncImpl : AANoSync {
1716   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1717 
1718   const std::string getAsStr() const override {
1719     return getAssumed() ? "nosync" : "may-sync";
1720   }
1721 
1722   /// See AbstractAttribute::updateImpl(...).
1723   ChangeStatus updateImpl(Attributor &A) override;
1724 
1725   /// Helper function used to determine whether an instruction is non-relaxed
1726   /// atomic. In other words, if an atomic instruction does not have unordered
1727   /// or monotonic ordering
1728   static bool isNonRelaxedAtomic(Instruction *I);
1729 
1730   /// Helper function specific for intrinsics which are potentially volatile
1731   static bool isNoSyncIntrinsic(Instruction *I);
1732 };
1733 
1734 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1735   if (!I->isAtomic())
1736     return false;
1737 
1738   if (auto *FI = dyn_cast<FenceInst>(I))
1739     // All legal orderings for fence are stronger than monotonic.
1740     return FI->getSyncScopeID() != SyncScope::SingleThread;
1741   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1742     // Unordered is not a legal ordering for cmpxchg.
1743     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1744             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1745   }
1746 
1747   AtomicOrdering Ordering;
1748   switch (I->getOpcode()) {
1749   case Instruction::AtomicRMW:
1750     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1751     break;
1752   case Instruction::Store:
1753     Ordering = cast<StoreInst>(I)->getOrdering();
1754     break;
1755   case Instruction::Load:
1756     Ordering = cast<LoadInst>(I)->getOrdering();
1757     break;
1758   default:
1759     llvm_unreachable(
1760         "New atomic operations need to be known in the attributor.");
1761   }
1762 
1763   return (Ordering != AtomicOrdering::Unordered &&
1764           Ordering != AtomicOrdering::Monotonic);
1765 }
1766 
1767 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1768 /// which would be nosync except that they have a volatile flag.  All other
1769 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1770 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1771   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1772     return !MI->isVolatile();
1773   return false;
1774 }
1775 
1776 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1777 
1778   auto CheckRWInstForNoSync = [&](Instruction &I) {
1779     /// We are looking for volatile instructions or Non-Relaxed atomics.
1780 
1781     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1782       if (CB->hasFnAttr(Attribute::NoSync))
1783         return true;
1784 
1785       if (isNoSyncIntrinsic(&I))
1786         return true;
1787 
1788       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1789           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1790       return NoSyncAA.isAssumedNoSync();
1791     }
1792 
1793     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1794       return true;
1795 
1796     return false;
1797   };
1798 
1799   auto CheckForNoSync = [&](Instruction &I) {
1800     // At this point we handled all read/write effects and they are all
1801     // nosync, so they can be skipped.
1802     if (I.mayReadOrWriteMemory())
1803       return true;
1804 
1805     // non-convergent and readnone imply nosync.
1806     return !cast<CallBase>(I).isConvergent();
1807   };
1808 
1809   bool UsedAssumedInformation = false;
1810   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1811                                           UsedAssumedInformation) ||
1812       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1813                                          UsedAssumedInformation))
1814     return indicatePessimisticFixpoint();
1815 
1816   return ChangeStatus::UNCHANGED;
1817 }
1818 
1819 struct AANoSyncFunction final : public AANoSyncImpl {
1820   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1821       : AANoSyncImpl(IRP, A) {}
1822 
1823   /// See AbstractAttribute::trackStatistics()
1824   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1825 };
1826 
1827 /// NoSync attribute deduction for a call sites.
1828 struct AANoSyncCallSite final : AANoSyncImpl {
1829   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1830       : AANoSyncImpl(IRP, A) {}
1831 
1832   /// See AbstractAttribute::initialize(...).
1833   void initialize(Attributor &A) override {
1834     AANoSyncImpl::initialize(A);
1835     Function *F = getAssociatedFunction();
1836     if (!F || F->isDeclaration())
1837       indicatePessimisticFixpoint();
1838   }
1839 
1840   /// See AbstractAttribute::updateImpl(...).
1841   ChangeStatus updateImpl(Attributor &A) override {
1842     // TODO: Once we have call site specific value information we can provide
1843     //       call site specific liveness information and then it makes
1844     //       sense to specialize attributes for call sites arguments instead of
1845     //       redirecting requests to the callee argument.
1846     Function *F = getAssociatedFunction();
1847     const IRPosition &FnPos = IRPosition::function(*F);
1848     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1849     return clampStateAndIndicateChange(getState(), FnAA.getState());
1850   }
1851 
1852   /// See AbstractAttribute::trackStatistics()
1853   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1854 };
1855 
1856 /// ------------------------ No-Free Attributes ----------------------------
1857 
1858 struct AANoFreeImpl : public AANoFree {
1859   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1860 
1861   /// See AbstractAttribute::updateImpl(...).
1862   ChangeStatus updateImpl(Attributor &A) override {
1863     auto CheckForNoFree = [&](Instruction &I) {
1864       const auto &CB = cast<CallBase>(I);
1865       if (CB.hasFnAttr(Attribute::NoFree))
1866         return true;
1867 
1868       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1869           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1870       return NoFreeAA.isAssumedNoFree();
1871     };
1872 
1873     bool UsedAssumedInformation = false;
1874     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1875                                            UsedAssumedInformation))
1876       return indicatePessimisticFixpoint();
1877     return ChangeStatus::UNCHANGED;
1878   }
1879 
1880   /// See AbstractAttribute::getAsStr().
1881   const std::string getAsStr() const override {
1882     return getAssumed() ? "nofree" : "may-free";
1883   }
1884 };
1885 
1886 struct AANoFreeFunction final : public AANoFreeImpl {
1887   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1888       : AANoFreeImpl(IRP, A) {}
1889 
1890   /// See AbstractAttribute::trackStatistics()
1891   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1892 };
1893 
1894 /// NoFree attribute deduction for a call sites.
1895 struct AANoFreeCallSite final : AANoFreeImpl {
1896   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1897       : AANoFreeImpl(IRP, A) {}
1898 
1899   /// See AbstractAttribute::initialize(...).
1900   void initialize(Attributor &A) override {
1901     AANoFreeImpl::initialize(A);
1902     Function *F = getAssociatedFunction();
1903     if (!F || F->isDeclaration())
1904       indicatePessimisticFixpoint();
1905   }
1906 
1907   /// See AbstractAttribute::updateImpl(...).
1908   ChangeStatus updateImpl(Attributor &A) override {
1909     // TODO: Once we have call site specific value information we can provide
1910     //       call site specific liveness information and then it makes
1911     //       sense to specialize attributes for call sites arguments instead of
1912     //       redirecting requests to the callee argument.
1913     Function *F = getAssociatedFunction();
1914     const IRPosition &FnPos = IRPosition::function(*F);
1915     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1916     return clampStateAndIndicateChange(getState(), FnAA.getState());
1917   }
1918 
1919   /// See AbstractAttribute::trackStatistics()
1920   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1921 };
1922 
1923 /// NoFree attribute for floating values.
1924 struct AANoFreeFloating : AANoFreeImpl {
1925   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1926       : AANoFreeImpl(IRP, A) {}
1927 
1928   /// See AbstractAttribute::trackStatistics()
1929   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1930 
1931   /// See Abstract Attribute::updateImpl(...).
1932   ChangeStatus updateImpl(Attributor &A) override {
1933     const IRPosition &IRP = getIRPosition();
1934 
1935     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1936         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1937     if (NoFreeAA.isAssumedNoFree())
1938       return ChangeStatus::UNCHANGED;
1939 
1940     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1941     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1942       Instruction *UserI = cast<Instruction>(U.getUser());
1943       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1944         if (CB->isBundleOperand(&U))
1945           return false;
1946         if (!CB->isArgOperand(&U))
1947           return true;
1948         unsigned ArgNo = CB->getArgOperandNo(&U);
1949 
1950         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1951             *this, IRPosition::callsite_argument(*CB, ArgNo),
1952             DepClassTy::REQUIRED);
1953         return NoFreeArg.isAssumedNoFree();
1954       }
1955 
1956       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1957           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1958         Follow = true;
1959         return true;
1960       }
1961       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1962           isa<ReturnInst>(UserI))
1963         return true;
1964 
1965       // Unknown user.
1966       return false;
1967     };
1968     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1969       return indicatePessimisticFixpoint();
1970 
1971     return ChangeStatus::UNCHANGED;
1972   }
1973 };
1974 
1975 /// NoFree attribute for a call site argument.
1976 struct AANoFreeArgument final : AANoFreeFloating {
1977   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1978       : AANoFreeFloating(IRP, A) {}
1979 
1980   /// See AbstractAttribute::trackStatistics()
1981   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1982 };
1983 
1984 /// NoFree attribute for call site arguments.
1985 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1986   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1987       : AANoFreeFloating(IRP, A) {}
1988 
1989   /// See AbstractAttribute::updateImpl(...).
1990   ChangeStatus updateImpl(Attributor &A) override {
1991     // TODO: Once we have call site specific value information we can provide
1992     //       call site specific liveness information and then it makes
1993     //       sense to specialize attributes for call sites arguments instead of
1994     //       redirecting requests to the callee argument.
1995     Argument *Arg = getAssociatedArgument();
1996     if (!Arg)
1997       return indicatePessimisticFixpoint();
1998     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1999     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2000     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2001   }
2002 
2003   /// See AbstractAttribute::trackStatistics()
2004   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2005 };
2006 
2007 /// NoFree attribute for function return value.
2008 struct AANoFreeReturned final : AANoFreeFloating {
2009   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2010       : AANoFreeFloating(IRP, A) {
2011     llvm_unreachable("NoFree is not applicable to function returns!");
2012   }
2013 
2014   /// See AbstractAttribute::initialize(...).
2015   void initialize(Attributor &A) override {
2016     llvm_unreachable("NoFree is not applicable to function returns!");
2017   }
2018 
2019   /// See AbstractAttribute::updateImpl(...).
2020   ChangeStatus updateImpl(Attributor &A) override {
2021     llvm_unreachable("NoFree is not applicable to function returns!");
2022   }
2023 
2024   /// See AbstractAttribute::trackStatistics()
2025   void trackStatistics() const override {}
2026 };
2027 
2028 /// NoFree attribute deduction for a call site return value.
2029 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2030   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2031       : AANoFreeFloating(IRP, A) {}
2032 
2033   ChangeStatus manifest(Attributor &A) override {
2034     return ChangeStatus::UNCHANGED;
2035   }
2036   /// See AbstractAttribute::trackStatistics()
2037   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2038 };
2039 
2040 /// ------------------------ NonNull Argument Attribute ------------------------
2041 static int64_t getKnownNonNullAndDerefBytesForUse(
2042     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2043     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2044   TrackUse = false;
2045 
2046   const Value *UseV = U->get();
2047   if (!UseV->getType()->isPointerTy())
2048     return 0;
2049 
2050   // We need to follow common pointer manipulation uses to the accesses they
2051   // feed into. We can try to be smart to avoid looking through things we do not
2052   // like for now, e.g., non-inbounds GEPs.
2053   if (isa<CastInst>(I)) {
2054     TrackUse = true;
2055     return 0;
2056   }
2057 
2058   if (isa<GetElementPtrInst>(I)) {
2059     TrackUse = true;
2060     return 0;
2061   }
2062 
2063   Type *PtrTy = UseV->getType();
2064   const Function *F = I->getFunction();
2065   bool NullPointerIsDefined =
2066       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2067   const DataLayout &DL = A.getInfoCache().getDL();
2068   if (const auto *CB = dyn_cast<CallBase>(I)) {
2069     if (CB->isBundleOperand(U)) {
2070       if (RetainedKnowledge RK = getKnowledgeFromUse(
2071               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2072         IsNonNull |=
2073             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2074         return RK.ArgValue;
2075       }
2076       return 0;
2077     }
2078 
2079     if (CB->isCallee(U)) {
2080       IsNonNull |= !NullPointerIsDefined;
2081       return 0;
2082     }
2083 
2084     unsigned ArgNo = CB->getArgOperandNo(U);
2085     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2086     // As long as we only use known information there is no need to track
2087     // dependences here.
2088     auto &DerefAA =
2089         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2090     IsNonNull |= DerefAA.isKnownNonNull();
2091     return DerefAA.getKnownDereferenceableBytes();
2092   }
2093 
2094   int64_t Offset;
2095   const Value *Base =
2096       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2097   if (Base) {
2098     if (Base == &AssociatedValue &&
2099         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2100       int64_t DerefBytes =
2101           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2102 
2103       IsNonNull |= !NullPointerIsDefined;
2104       return std::max(int64_t(0), DerefBytes);
2105     }
2106   }
2107 
2108   /// Corner case when an offset is 0.
2109   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2110                                               /*AllowNonInbounds*/ true);
2111   if (Base) {
2112     if (Offset == 0 && Base == &AssociatedValue &&
2113         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2114       int64_t DerefBytes =
2115           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2116       IsNonNull |= !NullPointerIsDefined;
2117       return std::max(int64_t(0), DerefBytes);
2118     }
2119   }
2120 
2121   return 0;
2122 }
2123 
2124 struct AANonNullImpl : AANonNull {
2125   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2126       : AANonNull(IRP, A),
2127         NullIsDefined(NullPointerIsDefined(
2128             getAnchorScope(),
2129             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2130 
2131   /// See AbstractAttribute::initialize(...).
2132   void initialize(Attributor &A) override {
2133     Value &V = getAssociatedValue();
2134     if (!NullIsDefined &&
2135         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2136                 /* IgnoreSubsumingPositions */ false, &A)) {
2137       indicateOptimisticFixpoint();
2138       return;
2139     }
2140 
2141     if (isa<ConstantPointerNull>(V)) {
2142       indicatePessimisticFixpoint();
2143       return;
2144     }
2145 
2146     AANonNull::initialize(A);
2147 
2148     bool CanBeNull, CanBeFreed;
2149     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2150                                          CanBeFreed)) {
2151       if (!CanBeNull) {
2152         indicateOptimisticFixpoint();
2153         return;
2154       }
2155     }
2156 
2157     if (isa<GlobalValue>(&getAssociatedValue())) {
2158       indicatePessimisticFixpoint();
2159       return;
2160     }
2161 
2162     if (Instruction *CtxI = getCtxI())
2163       followUsesInMBEC(*this, A, getState(), *CtxI);
2164   }
2165 
2166   /// See followUsesInMBEC
2167   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2168                        AANonNull::StateType &State) {
2169     bool IsNonNull = false;
2170     bool TrackUse = false;
2171     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2172                                        IsNonNull, TrackUse);
2173     State.setKnown(IsNonNull);
2174     return TrackUse;
2175   }
2176 
2177   /// See AbstractAttribute::getAsStr().
2178   const std::string getAsStr() const override {
2179     return getAssumed() ? "nonnull" : "may-null";
2180   }
2181 
2182   /// Flag to determine if the underlying value can be null and still allow
2183   /// valid accesses.
2184   const bool NullIsDefined;
2185 };
2186 
2187 /// NonNull attribute for a floating value.
2188 struct AANonNullFloating : public AANonNullImpl {
2189   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2190       : AANonNullImpl(IRP, A) {}
2191 
2192   /// See AbstractAttribute::updateImpl(...).
2193   ChangeStatus updateImpl(Attributor &A) override {
2194     const DataLayout &DL = A.getDataLayout();
2195 
2196     DominatorTree *DT = nullptr;
2197     AssumptionCache *AC = nullptr;
2198     InformationCache &InfoCache = A.getInfoCache();
2199     if (const Function *Fn = getAnchorScope()) {
2200       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2201       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2202     }
2203 
2204     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2205                             AANonNull::StateType &T, bool Stripped) -> bool {
2206       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2207                                              DepClassTy::REQUIRED);
2208       if (!Stripped && this == &AA) {
2209         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2210           T.indicatePessimisticFixpoint();
2211       } else {
2212         // Use abstract attribute information.
2213         const AANonNull::StateType &NS = AA.getState();
2214         T ^= NS;
2215       }
2216       return T.isValidState();
2217     };
2218 
2219     StateType T;
2220     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2221                                           VisitValueCB, getCtxI()))
2222       return indicatePessimisticFixpoint();
2223 
2224     return clampStateAndIndicateChange(getState(), T);
2225   }
2226 
2227   /// See AbstractAttribute::trackStatistics()
2228   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2229 };
2230 
2231 /// NonNull attribute for function return value.
2232 struct AANonNullReturned final
2233     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2234   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2235       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2236 
2237   /// See AbstractAttribute::getAsStr().
2238   const std::string getAsStr() const override {
2239     return getAssumed() ? "nonnull" : "may-null";
2240   }
2241 
2242   /// See AbstractAttribute::trackStatistics()
2243   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2244 };
2245 
2246 /// NonNull attribute for function argument.
2247 struct AANonNullArgument final
2248     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2249   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2250       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2251 
2252   /// See AbstractAttribute::trackStatistics()
2253   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2254 };
2255 
2256 struct AANonNullCallSiteArgument final : AANonNullFloating {
2257   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2258       : AANonNullFloating(IRP, A) {}
2259 
2260   /// See AbstractAttribute::trackStatistics()
2261   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2262 };
2263 
2264 /// NonNull attribute for a call site return position.
2265 struct AANonNullCallSiteReturned final
2266     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2267   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2268       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2269 
2270   /// See AbstractAttribute::trackStatistics()
2271   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2272 };
2273 
2274 /// ------------------------ No-Recurse Attributes ----------------------------
2275 
2276 struct AANoRecurseImpl : public AANoRecurse {
2277   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2278 
2279   /// See AbstractAttribute::getAsStr()
2280   const std::string getAsStr() const override {
2281     return getAssumed() ? "norecurse" : "may-recurse";
2282   }
2283 };
2284 
2285 struct AANoRecurseFunction final : AANoRecurseImpl {
2286   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2287       : AANoRecurseImpl(IRP, A) {}
2288 
2289   /// See AbstractAttribute::initialize(...).
2290   void initialize(Attributor &A) override {
2291     AANoRecurseImpl::initialize(A);
2292     if (const Function *F = getAnchorScope())
2293       if (A.getInfoCache().getSccSize(*F) != 1)
2294         indicatePessimisticFixpoint();
2295   }
2296 
2297   /// See AbstractAttribute::updateImpl(...).
2298   ChangeStatus updateImpl(Attributor &A) override {
2299 
2300     // If all live call sites are known to be no-recurse, we are as well.
2301     auto CallSitePred = [&](AbstractCallSite ACS) {
2302       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2303           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2304           DepClassTy::NONE);
2305       return NoRecurseAA.isKnownNoRecurse();
2306     };
2307     bool AllCallSitesKnown;
2308     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2309       // If we know all call sites and all are known no-recurse, we are done.
2310       // If all known call sites, which might not be all that exist, are known
2311       // to be no-recurse, we are not done but we can continue to assume
2312       // no-recurse. If one of the call sites we have not visited will become
2313       // live, another update is triggered.
2314       if (AllCallSitesKnown)
2315         indicateOptimisticFixpoint();
2316       return ChangeStatus::UNCHANGED;
2317     }
2318 
2319     // If the above check does not hold anymore we look at the calls.
2320     auto CheckForNoRecurse = [&](Instruction &I) {
2321       const auto &CB = cast<CallBase>(I);
2322       if (CB.hasFnAttr(Attribute::NoRecurse))
2323         return true;
2324 
2325       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2326           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2327       if (!NoRecurseAA.isAssumedNoRecurse())
2328         return false;
2329 
2330       // Recursion to the same function
2331       if (CB.getCalledFunction() == getAnchorScope())
2332         return false;
2333 
2334       return true;
2335     };
2336 
2337     bool UsedAssumedInformation = false;
2338     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2339                                            UsedAssumedInformation))
2340       return indicatePessimisticFixpoint();
2341     return ChangeStatus::UNCHANGED;
2342   }
2343 
2344   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2345 };
2346 
2347 /// NoRecurse attribute deduction for a call sites.
2348 struct AANoRecurseCallSite final : AANoRecurseImpl {
2349   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2350       : AANoRecurseImpl(IRP, A) {}
2351 
2352   /// See AbstractAttribute::initialize(...).
2353   void initialize(Attributor &A) override {
2354     AANoRecurseImpl::initialize(A);
2355     Function *F = getAssociatedFunction();
2356     if (!F || F->isDeclaration())
2357       indicatePessimisticFixpoint();
2358   }
2359 
2360   /// See AbstractAttribute::updateImpl(...).
2361   ChangeStatus updateImpl(Attributor &A) override {
2362     // TODO: Once we have call site specific value information we can provide
2363     //       call site specific liveness information and then it makes
2364     //       sense to specialize attributes for call sites arguments instead of
2365     //       redirecting requests to the callee argument.
2366     Function *F = getAssociatedFunction();
2367     const IRPosition &FnPos = IRPosition::function(*F);
2368     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2369     return clampStateAndIndicateChange(getState(), FnAA.getState());
2370   }
2371 
2372   /// See AbstractAttribute::trackStatistics()
2373   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2374 };
2375 
2376 /// -------------------- Undefined-Behavior Attributes ------------------------
2377 
2378 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2379   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2380       : AAUndefinedBehavior(IRP, A) {}
2381 
2382   /// See AbstractAttribute::updateImpl(...).
2383   // through a pointer (i.e. also branches etc.)
2384   ChangeStatus updateImpl(Attributor &A) override {
2385     const size_t UBPrevSize = KnownUBInsts.size();
2386     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2387 
2388     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2389       // Skip instructions that are already saved.
2390       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2391         return true;
2392 
2393       // If we reach here, we know we have an instruction
2394       // that accesses memory through a pointer operand,
2395       // for which getPointerOperand() should give it to us.
2396       Value *PtrOp =
2397           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2398       assert(PtrOp &&
2399              "Expected pointer operand of memory accessing instruction");
2400 
2401       // Either we stopped and the appropriate action was taken,
2402       // or we got back a simplified value to continue.
2403       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2404       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2405         return true;
2406       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2407 
2408       // A memory access through a pointer is considered UB
2409       // only if the pointer has constant null value.
2410       // TODO: Expand it to not only check constant values.
2411       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2412         AssumedNoUBInsts.insert(&I);
2413         return true;
2414       }
2415       const Type *PtrTy = PtrOpVal->getType();
2416 
2417       // Because we only consider instructions inside functions,
2418       // assume that a parent function exists.
2419       const Function *F = I.getFunction();
2420 
2421       // A memory access using constant null pointer is only considered UB
2422       // if null pointer is _not_ defined for the target platform.
2423       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2424         AssumedNoUBInsts.insert(&I);
2425       else
2426         KnownUBInsts.insert(&I);
2427       return true;
2428     };
2429 
2430     auto InspectBrInstForUB = [&](Instruction &I) {
2431       // A conditional branch instruction is considered UB if it has `undef`
2432       // condition.
2433 
2434       // Skip instructions that are already saved.
2435       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2436         return true;
2437 
2438       // We know we have a branch instruction.
2439       auto *BrInst = cast<BranchInst>(&I);
2440 
2441       // Unconditional branches are never considered UB.
2442       if (BrInst->isUnconditional())
2443         return true;
2444 
2445       // Either we stopped and the appropriate action was taken,
2446       // or we got back a simplified value to continue.
2447       Optional<Value *> SimplifiedCond =
2448           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2449       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2450         return true;
2451       AssumedNoUBInsts.insert(&I);
2452       return true;
2453     };
2454 
2455     auto InspectCallSiteForUB = [&](Instruction &I) {
2456       // Check whether a callsite always cause UB or not
2457 
2458       // Skip instructions that are already saved.
2459       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2460         return true;
2461 
2462       // Check nonnull and noundef argument attribute violation for each
2463       // callsite.
2464       CallBase &CB = cast<CallBase>(I);
2465       Function *Callee = CB.getCalledFunction();
2466       if (!Callee)
2467         return true;
2468       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2469         // If current argument is known to be simplified to null pointer and the
2470         // corresponding argument position is known to have nonnull attribute,
2471         // the argument is poison. Furthermore, if the argument is poison and
2472         // the position is known to have noundef attriubte, this callsite is
2473         // considered UB.
2474         if (idx >= Callee->arg_size())
2475           break;
2476         Value *ArgVal = CB.getArgOperand(idx);
2477         if (!ArgVal)
2478           continue;
2479         // Here, we handle three cases.
2480         //   (1) Not having a value means it is dead. (we can replace the value
2481         //       with undef)
2482         //   (2) Simplified to undef. The argument violate noundef attriubte.
2483         //   (3) Simplified to null pointer where known to be nonnull.
2484         //       The argument is a poison value and violate noundef attribute.
2485         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2486         auto &NoUndefAA =
2487             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2488         if (!NoUndefAA.isKnownNoUndef())
2489           continue;
2490         bool UsedAssumedInformation = false;
2491         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2492             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2493         if (UsedAssumedInformation)
2494           continue;
2495         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2496           return true;
2497         if (!SimplifiedVal.hasValue() ||
2498             isa<UndefValue>(*SimplifiedVal.getValue())) {
2499           KnownUBInsts.insert(&I);
2500           continue;
2501         }
2502         if (!ArgVal->getType()->isPointerTy() ||
2503             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2504           continue;
2505         auto &NonNullAA =
2506             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2507         if (NonNullAA.isKnownNonNull())
2508           KnownUBInsts.insert(&I);
2509       }
2510       return true;
2511     };
2512 
2513     auto InspectReturnInstForUB =
2514         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2515           // Check if a return instruction always cause UB or not
2516           // Note: It is guaranteed that the returned position of the anchor
2517           //       scope has noundef attribute when this is called.
2518           //       We also ensure the return position is not "assumed dead"
2519           //       because the returned value was then potentially simplified to
2520           //       `undef` in AAReturnedValues without removing the `noundef`
2521           //       attribute yet.
2522 
2523           // When the returned position has noundef attriubte, UB occur in the
2524           // following cases.
2525           //   (1) Returned value is known to be undef.
2526           //   (2) The value is known to be a null pointer and the returned
2527           //       position has nonnull attribute (because the returned value is
2528           //       poison).
2529           bool FoundUB = false;
2530           if (isa<UndefValue>(V)) {
2531             FoundUB = true;
2532           } else {
2533             if (isa<ConstantPointerNull>(V)) {
2534               auto &NonNullAA = A.getAAFor<AANonNull>(
2535                   *this, IRPosition::returned(*getAnchorScope()),
2536                   DepClassTy::NONE);
2537               if (NonNullAA.isKnownNonNull())
2538                 FoundUB = true;
2539             }
2540           }
2541 
2542           if (FoundUB)
2543             for (ReturnInst *RI : RetInsts)
2544               KnownUBInsts.insert(RI);
2545           return true;
2546         };
2547 
2548     bool UsedAssumedInformation = false;
2549     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2550                               {Instruction::Load, Instruction::Store,
2551                                Instruction::AtomicCmpXchg,
2552                                Instruction::AtomicRMW},
2553                               UsedAssumedInformation,
2554                               /* CheckBBLivenessOnly */ true);
2555     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2556                               UsedAssumedInformation,
2557                               /* CheckBBLivenessOnly */ true);
2558     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2559                                       UsedAssumedInformation);
2560 
2561     // If the returned position of the anchor scope has noundef attriubte, check
2562     // all returned instructions.
2563     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2564       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2565       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2566         auto &RetPosNoUndefAA =
2567             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2568         if (RetPosNoUndefAA.isKnownNoUndef())
2569           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2570                                                     *this);
2571       }
2572     }
2573 
2574     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2575         UBPrevSize != KnownUBInsts.size())
2576       return ChangeStatus::CHANGED;
2577     return ChangeStatus::UNCHANGED;
2578   }
2579 
2580   bool isKnownToCauseUB(Instruction *I) const override {
2581     return KnownUBInsts.count(I);
2582   }
2583 
2584   bool isAssumedToCauseUB(Instruction *I) const override {
2585     // In simple words, if an instruction is not in the assumed to _not_
2586     // cause UB, then it is assumed UB (that includes those
2587     // in the KnownUBInsts set). The rest is boilerplate
2588     // is to ensure that it is one of the instructions we test
2589     // for UB.
2590 
2591     switch (I->getOpcode()) {
2592     case Instruction::Load:
2593     case Instruction::Store:
2594     case Instruction::AtomicCmpXchg:
2595     case Instruction::AtomicRMW:
2596       return !AssumedNoUBInsts.count(I);
2597     case Instruction::Br: {
2598       auto BrInst = cast<BranchInst>(I);
2599       if (BrInst->isUnconditional())
2600         return false;
2601       return !AssumedNoUBInsts.count(I);
2602     } break;
2603     default:
2604       return false;
2605     }
2606     return false;
2607   }
2608 
2609   ChangeStatus manifest(Attributor &A) override {
2610     if (KnownUBInsts.empty())
2611       return ChangeStatus::UNCHANGED;
2612     for (Instruction *I : KnownUBInsts)
2613       A.changeToUnreachableAfterManifest(I);
2614     return ChangeStatus::CHANGED;
2615   }
2616 
2617   /// See AbstractAttribute::getAsStr()
2618   const std::string getAsStr() const override {
2619     return getAssumed() ? "undefined-behavior" : "no-ub";
2620   }
2621 
2622   /// Note: The correctness of this analysis depends on the fact that the
2623   /// following 2 sets will stop changing after some point.
2624   /// "Change" here means that their size changes.
2625   /// The size of each set is monotonically increasing
2626   /// (we only add items to them) and it is upper bounded by the number of
2627   /// instructions in the processed function (we can never save more
2628   /// elements in either set than this number). Hence, at some point,
2629   /// they will stop increasing.
2630   /// Consequently, at some point, both sets will have stopped
2631   /// changing, effectively making the analysis reach a fixpoint.
2632 
2633   /// Note: These 2 sets are disjoint and an instruction can be considered
2634   /// one of 3 things:
2635   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2636   ///    the KnownUBInsts set.
2637   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2638   ///    has a reason to assume it).
2639   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2640   ///    could not find a reason to assume or prove that it can cause UB,
2641   ///    hence it assumes it doesn't. We have a set for these instructions
2642   ///    so that we don't reprocess them in every update.
2643   ///    Note however that instructions in this set may cause UB.
2644 
2645 protected:
2646   /// A set of all live instructions _known_ to cause UB.
2647   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2648 
2649 private:
2650   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2651   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2652 
2653   // Should be called on updates in which if we're processing an instruction
2654   // \p I that depends on a value \p V, one of the following has to happen:
2655   // - If the value is assumed, then stop.
2656   // - If the value is known but undef, then consider it UB.
2657   // - Otherwise, do specific processing with the simplified value.
2658   // We return None in the first 2 cases to signify that an appropriate
2659   // action was taken and the caller should stop.
2660   // Otherwise, we return the simplified value that the caller should
2661   // use for specific processing.
2662   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2663                                          Instruction *I) {
2664     bool UsedAssumedInformation = false;
2665     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2666         IRPosition::value(*V), *this, UsedAssumedInformation);
2667     if (!UsedAssumedInformation) {
2668       // Don't depend on assumed values.
2669       if (!SimplifiedV.hasValue()) {
2670         // If it is known (which we tested above) but it doesn't have a value,
2671         // then we can assume `undef` and hence the instruction is UB.
2672         KnownUBInsts.insert(I);
2673         return llvm::None;
2674       }
2675       if (!SimplifiedV.getValue())
2676         return nullptr;
2677       V = *SimplifiedV;
2678     }
2679     if (isa<UndefValue>(V)) {
2680       KnownUBInsts.insert(I);
2681       return llvm::None;
2682     }
2683     return V;
2684   }
2685 };
2686 
2687 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2688   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2689       : AAUndefinedBehaviorImpl(IRP, A) {}
2690 
2691   /// See AbstractAttribute::trackStatistics()
2692   void trackStatistics() const override {
2693     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2694                "Number of instructions known to have UB");
2695     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2696         KnownUBInsts.size();
2697   }
2698 };
2699 
2700 /// ------------------------ Will-Return Attributes ----------------------------
2701 
2702 // Helper function that checks whether a function has any cycle which we don't
2703 // know if it is bounded or not.
2704 // Loops with maximum trip count are considered bounded, any other cycle not.
2705 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2706   ScalarEvolution *SE =
2707       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2708   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2709   // If either SCEV or LoopInfo is not available for the function then we assume
2710   // any cycle to be unbounded cycle.
2711   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2712   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2713   if (!SE || !LI) {
2714     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2715       if (SCCI.hasCycle())
2716         return true;
2717     return false;
2718   }
2719 
2720   // If there's irreducible control, the function may contain non-loop cycles.
2721   if (mayContainIrreducibleControl(F, LI))
2722     return true;
2723 
2724   // Any loop that does not have a max trip count is considered unbounded cycle.
2725   for (auto *L : LI->getLoopsInPreorder()) {
2726     if (!SE->getSmallConstantMaxTripCount(L))
2727       return true;
2728   }
2729   return false;
2730 }
2731 
2732 struct AAWillReturnImpl : public AAWillReturn {
2733   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2734       : AAWillReturn(IRP, A) {}
2735 
2736   /// See AbstractAttribute::initialize(...).
2737   void initialize(Attributor &A) override {
2738     AAWillReturn::initialize(A);
2739 
2740     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2741       indicateOptimisticFixpoint();
2742       return;
2743     }
2744   }
2745 
2746   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2747   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2748     // Check for `mustprogress` in the scope and the associated function which
2749     // might be different if this is a call site.
2750     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2751         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2752       return false;
2753 
2754     const auto &MemAA =
2755         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2756     if (!MemAA.isAssumedReadOnly())
2757       return false;
2758     if (KnownOnly && !MemAA.isKnownReadOnly())
2759       return false;
2760     if (!MemAA.isKnownReadOnly())
2761       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2762 
2763     return true;
2764   }
2765 
2766   /// See AbstractAttribute::updateImpl(...).
2767   ChangeStatus updateImpl(Attributor &A) override {
2768     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2769       return ChangeStatus::UNCHANGED;
2770 
2771     auto CheckForWillReturn = [&](Instruction &I) {
2772       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2773       const auto &WillReturnAA =
2774           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2775       if (WillReturnAA.isKnownWillReturn())
2776         return true;
2777       if (!WillReturnAA.isAssumedWillReturn())
2778         return false;
2779       const auto &NoRecurseAA =
2780           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2781       return NoRecurseAA.isAssumedNoRecurse();
2782     };
2783 
2784     bool UsedAssumedInformation = false;
2785     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2786                                            UsedAssumedInformation))
2787       return indicatePessimisticFixpoint();
2788 
2789     return ChangeStatus::UNCHANGED;
2790   }
2791 
2792   /// See AbstractAttribute::getAsStr()
2793   const std::string getAsStr() const override {
2794     return getAssumed() ? "willreturn" : "may-noreturn";
2795   }
2796 };
2797 
2798 struct AAWillReturnFunction final : AAWillReturnImpl {
2799   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2800       : AAWillReturnImpl(IRP, A) {}
2801 
2802   /// See AbstractAttribute::initialize(...).
2803   void initialize(Attributor &A) override {
2804     AAWillReturnImpl::initialize(A);
2805 
2806     Function *F = getAnchorScope();
2807     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2808       indicatePessimisticFixpoint();
2809   }
2810 
2811   /// See AbstractAttribute::trackStatistics()
2812   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2813 };
2814 
2815 /// WillReturn attribute deduction for a call sites.
2816 struct AAWillReturnCallSite final : AAWillReturnImpl {
2817   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2818       : AAWillReturnImpl(IRP, A) {}
2819 
2820   /// See AbstractAttribute::initialize(...).
2821   void initialize(Attributor &A) override {
2822     AAWillReturnImpl::initialize(A);
2823     Function *F = getAssociatedFunction();
2824     if (!F || !A.isFunctionIPOAmendable(*F))
2825       indicatePessimisticFixpoint();
2826   }
2827 
2828   /// See AbstractAttribute::updateImpl(...).
2829   ChangeStatus updateImpl(Attributor &A) override {
2830     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2831       return ChangeStatus::UNCHANGED;
2832 
2833     // TODO: Once we have call site specific value information we can provide
2834     //       call site specific liveness information and then it makes
2835     //       sense to specialize attributes for call sites arguments instead of
2836     //       redirecting requests to the callee argument.
2837     Function *F = getAssociatedFunction();
2838     const IRPosition &FnPos = IRPosition::function(*F);
2839     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2840     return clampStateAndIndicateChange(getState(), FnAA.getState());
2841   }
2842 
2843   /// See AbstractAttribute::trackStatistics()
2844   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2845 };
2846 
2847 /// -------------------AAReachability Attribute--------------------------
2848 
2849 struct AAReachabilityImpl : AAReachability {
2850   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2851       : AAReachability(IRP, A) {}
2852 
2853   const std::string getAsStr() const override {
2854     // TODO: Return the number of reachable queries.
2855     return "reachable";
2856   }
2857 
2858   /// See AbstractAttribute::updateImpl(...).
2859   ChangeStatus updateImpl(Attributor &A) override {
2860     return ChangeStatus::UNCHANGED;
2861   }
2862 };
2863 
2864 struct AAReachabilityFunction final : public AAReachabilityImpl {
2865   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2866       : AAReachabilityImpl(IRP, A) {}
2867 
2868   /// See AbstractAttribute::trackStatistics()
2869   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2870 };
2871 
2872 /// ------------------------ NoAlias Argument Attribute ------------------------
2873 
2874 struct AANoAliasImpl : AANoAlias {
2875   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2876     assert(getAssociatedType()->isPointerTy() &&
2877            "Noalias is a pointer attribute");
2878   }
2879 
2880   const std::string getAsStr() const override {
2881     return getAssumed() ? "noalias" : "may-alias";
2882   }
2883 };
2884 
2885 /// NoAlias attribute for a floating value.
2886 struct AANoAliasFloating final : AANoAliasImpl {
2887   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2888       : AANoAliasImpl(IRP, A) {}
2889 
2890   /// See AbstractAttribute::initialize(...).
2891   void initialize(Attributor &A) override {
2892     AANoAliasImpl::initialize(A);
2893     Value *Val = &getAssociatedValue();
2894     do {
2895       CastInst *CI = dyn_cast<CastInst>(Val);
2896       if (!CI)
2897         break;
2898       Value *Base = CI->getOperand(0);
2899       if (!Base->hasOneUse())
2900         break;
2901       Val = Base;
2902     } while (true);
2903 
2904     if (!Val->getType()->isPointerTy()) {
2905       indicatePessimisticFixpoint();
2906       return;
2907     }
2908 
2909     if (isa<AllocaInst>(Val))
2910       indicateOptimisticFixpoint();
2911     else if (isa<ConstantPointerNull>(Val) &&
2912              !NullPointerIsDefined(getAnchorScope(),
2913                                    Val->getType()->getPointerAddressSpace()))
2914       indicateOptimisticFixpoint();
2915     else if (Val != &getAssociatedValue()) {
2916       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2917           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2918       if (ValNoAliasAA.isKnownNoAlias())
2919         indicateOptimisticFixpoint();
2920     }
2921   }
2922 
2923   /// See AbstractAttribute::updateImpl(...).
2924   ChangeStatus updateImpl(Attributor &A) override {
2925     // TODO: Implement this.
2926     return indicatePessimisticFixpoint();
2927   }
2928 
2929   /// See AbstractAttribute::trackStatistics()
2930   void trackStatistics() const override {
2931     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2932   }
2933 };
2934 
2935 /// NoAlias attribute for an argument.
2936 struct AANoAliasArgument final
2937     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2938   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2939   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2940 
2941   /// See AbstractAttribute::initialize(...).
2942   void initialize(Attributor &A) override {
2943     Base::initialize(A);
2944     // See callsite argument attribute and callee argument attribute.
2945     if (hasAttr({Attribute::ByVal}))
2946       indicateOptimisticFixpoint();
2947   }
2948 
2949   /// See AbstractAttribute::update(...).
2950   ChangeStatus updateImpl(Attributor &A) override {
2951     // We have to make sure no-alias on the argument does not break
2952     // synchronization when this is a callback argument, see also [1] below.
2953     // If synchronization cannot be affected, we delegate to the base updateImpl
2954     // function, otherwise we give up for now.
2955 
2956     // If the function is no-sync, no-alias cannot break synchronization.
2957     const auto &NoSyncAA =
2958         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2959                              DepClassTy::OPTIONAL);
2960     if (NoSyncAA.isAssumedNoSync())
2961       return Base::updateImpl(A);
2962 
2963     // If the argument is read-only, no-alias cannot break synchronization.
2964     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2965         *this, getIRPosition(), DepClassTy::OPTIONAL);
2966     if (MemBehaviorAA.isAssumedReadOnly())
2967       return Base::updateImpl(A);
2968 
2969     // If the argument is never passed through callbacks, no-alias cannot break
2970     // synchronization.
2971     bool AllCallSitesKnown;
2972     if (A.checkForAllCallSites(
2973             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2974             true, AllCallSitesKnown))
2975       return Base::updateImpl(A);
2976 
2977     // TODO: add no-alias but make sure it doesn't break synchronization by
2978     // introducing fake uses. See:
2979     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2980     //     International Workshop on OpenMP 2018,
2981     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2982 
2983     return indicatePessimisticFixpoint();
2984   }
2985 
2986   /// See AbstractAttribute::trackStatistics()
2987   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2988 };
2989 
2990 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2991   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2992       : AANoAliasImpl(IRP, A) {}
2993 
2994   /// See AbstractAttribute::initialize(...).
2995   void initialize(Attributor &A) override {
2996     // See callsite argument attribute and callee argument attribute.
2997     const auto &CB = cast<CallBase>(getAnchorValue());
2998     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2999       indicateOptimisticFixpoint();
3000     Value &Val = getAssociatedValue();
3001     if (isa<ConstantPointerNull>(Val) &&
3002         !NullPointerIsDefined(getAnchorScope(),
3003                               Val.getType()->getPointerAddressSpace()))
3004       indicateOptimisticFixpoint();
3005   }
3006 
3007   /// Determine if the underlying value may alias with the call site argument
3008   /// \p OtherArgNo of \p ICS (= the underlying call site).
3009   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3010                             const AAMemoryBehavior &MemBehaviorAA,
3011                             const CallBase &CB, unsigned OtherArgNo) {
3012     // We do not need to worry about aliasing with the underlying IRP.
3013     if (this->getCalleeArgNo() == (int)OtherArgNo)
3014       return false;
3015 
3016     // If it is not a pointer or pointer vector we do not alias.
3017     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3018     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3019       return false;
3020 
3021     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3022         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3023 
3024     // If the argument is readnone, there is no read-write aliasing.
3025     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3026       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3027       return false;
3028     }
3029 
3030     // If the argument is readonly and the underlying value is readonly, there
3031     // is no read-write aliasing.
3032     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3033     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3034       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3035       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3036       return false;
3037     }
3038 
3039     // We have to utilize actual alias analysis queries so we need the object.
3040     if (!AAR)
3041       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3042 
3043     // Try to rule it out at the call site.
3044     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3045     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3046                          "callsite arguments: "
3047                       << getAssociatedValue() << " " << *ArgOp << " => "
3048                       << (IsAliasing ? "" : "no-") << "alias \n");
3049 
3050     return IsAliasing;
3051   }
3052 
3053   bool
3054   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3055                                          const AAMemoryBehavior &MemBehaviorAA,
3056                                          const AANoAlias &NoAliasAA) {
3057     // We can deduce "noalias" if the following conditions hold.
3058     // (i)   Associated value is assumed to be noalias in the definition.
3059     // (ii)  Associated value is assumed to be no-capture in all the uses
3060     //       possibly executed before this callsite.
3061     // (iii) There is no other pointer argument which could alias with the
3062     //       value.
3063 
3064     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3065     if (!AssociatedValueIsNoAliasAtDef) {
3066       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3067                         << " is not no-alias at the definition\n");
3068       return false;
3069     }
3070 
3071     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3072 
3073     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3074     const Function *ScopeFn = VIRP.getAnchorScope();
3075     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3076     // Check whether the value is captured in the scope using AANoCapture.
3077     //      Look at CFG and check only uses possibly executed before this
3078     //      callsite.
3079     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3080       Instruction *UserI = cast<Instruction>(U.getUser());
3081 
3082       // If UserI is the curr instruction and there is a single potential use of
3083       // the value in UserI we allow the use.
3084       // TODO: We should inspect the operands and allow those that cannot alias
3085       //       with the value.
3086       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3087         return true;
3088 
3089       if (ScopeFn) {
3090         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3091             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3092 
3093         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3094           return true;
3095 
3096         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3097           if (CB->isArgOperand(&U)) {
3098 
3099             unsigned ArgNo = CB->getArgOperandNo(&U);
3100 
3101             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3102                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3103                 DepClassTy::OPTIONAL);
3104 
3105             if (NoCaptureAA.isAssumedNoCapture())
3106               return true;
3107           }
3108         }
3109       }
3110 
3111       // For cases which can potentially have more users
3112       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3113           isa<SelectInst>(U)) {
3114         Follow = true;
3115         return true;
3116       }
3117 
3118       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3119       return false;
3120     };
3121 
3122     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3123       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3124         LLVM_DEBUG(
3125             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3126                    << " cannot be noalias as it is potentially captured\n");
3127         return false;
3128       }
3129     }
3130     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3131 
3132     // Check there is no other pointer argument which could alias with the
3133     // value passed at this call site.
3134     // TODO: AbstractCallSite
3135     const auto &CB = cast<CallBase>(getAnchorValue());
3136     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
3137          OtherArgNo++)
3138       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3139         return false;
3140 
3141     return true;
3142   }
3143 
3144   /// See AbstractAttribute::updateImpl(...).
3145   ChangeStatus updateImpl(Attributor &A) override {
3146     // If the argument is readnone we are done as there are no accesses via the
3147     // argument.
3148     auto &MemBehaviorAA =
3149         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3150     if (MemBehaviorAA.isAssumedReadNone()) {
3151       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3152       return ChangeStatus::UNCHANGED;
3153     }
3154 
3155     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3156     const auto &NoAliasAA =
3157         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3158 
3159     AAResults *AAR = nullptr;
3160     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3161                                                NoAliasAA)) {
3162       LLVM_DEBUG(
3163           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3164       return ChangeStatus::UNCHANGED;
3165     }
3166 
3167     return indicatePessimisticFixpoint();
3168   }
3169 
3170   /// See AbstractAttribute::trackStatistics()
3171   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3172 };
3173 
3174 /// NoAlias attribute for function return value.
3175 struct AANoAliasReturned final : AANoAliasImpl {
3176   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3177       : AANoAliasImpl(IRP, A) {}
3178 
3179   /// See AbstractAttribute::initialize(...).
3180   void initialize(Attributor &A) override {
3181     AANoAliasImpl::initialize(A);
3182     Function *F = getAssociatedFunction();
3183     if (!F || F->isDeclaration())
3184       indicatePessimisticFixpoint();
3185   }
3186 
3187   /// See AbstractAttribute::updateImpl(...).
3188   virtual ChangeStatus updateImpl(Attributor &A) override {
3189 
3190     auto CheckReturnValue = [&](Value &RV) -> bool {
3191       if (Constant *C = dyn_cast<Constant>(&RV))
3192         if (C->isNullValue() || isa<UndefValue>(C))
3193           return true;
3194 
3195       /// For now, we can only deduce noalias if we have call sites.
3196       /// FIXME: add more support.
3197       if (!isa<CallBase>(&RV))
3198         return false;
3199 
3200       const IRPosition &RVPos = IRPosition::value(RV);
3201       const auto &NoAliasAA =
3202           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3203       if (!NoAliasAA.isAssumedNoAlias())
3204         return false;
3205 
3206       const auto &NoCaptureAA =
3207           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3208       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3209     };
3210 
3211     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3212       return indicatePessimisticFixpoint();
3213 
3214     return ChangeStatus::UNCHANGED;
3215   }
3216 
3217   /// See AbstractAttribute::trackStatistics()
3218   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3219 };
3220 
3221 /// NoAlias attribute deduction for a call site return value.
3222 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3223   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3224       : AANoAliasImpl(IRP, A) {}
3225 
3226   /// See AbstractAttribute::initialize(...).
3227   void initialize(Attributor &A) override {
3228     AANoAliasImpl::initialize(A);
3229     Function *F = getAssociatedFunction();
3230     if (!F || F->isDeclaration())
3231       indicatePessimisticFixpoint();
3232   }
3233 
3234   /// See AbstractAttribute::updateImpl(...).
3235   ChangeStatus updateImpl(Attributor &A) override {
3236     // TODO: Once we have call site specific value information we can provide
3237     //       call site specific liveness information and then it makes
3238     //       sense to specialize attributes for call sites arguments instead of
3239     //       redirecting requests to the callee argument.
3240     Function *F = getAssociatedFunction();
3241     const IRPosition &FnPos = IRPosition::returned(*F);
3242     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3243     return clampStateAndIndicateChange(getState(), FnAA.getState());
3244   }
3245 
3246   /// See AbstractAttribute::trackStatistics()
3247   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3248 };
3249 
3250 /// -------------------AAIsDead Function Attribute-----------------------
3251 
3252 struct AAIsDeadValueImpl : public AAIsDead {
3253   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3254 
3255   /// See AAIsDead::isAssumedDead().
3256   bool isAssumedDead() const override { return getAssumed(); }
3257 
3258   /// See AAIsDead::isKnownDead().
3259   bool isKnownDead() const override { return getKnown(); }
3260 
3261   /// See AAIsDead::isAssumedDead(BasicBlock *).
3262   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3263 
3264   /// See AAIsDead::isKnownDead(BasicBlock *).
3265   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3266 
3267   /// See AAIsDead::isAssumedDead(Instruction *I).
3268   bool isAssumedDead(const Instruction *I) const override {
3269     return I == getCtxI() && isAssumedDead();
3270   }
3271 
3272   /// See AAIsDead::isKnownDead(Instruction *I).
3273   bool isKnownDead(const Instruction *I) const override {
3274     return isAssumedDead(I) && getKnown();
3275   }
3276 
3277   /// See AbstractAttribute::getAsStr().
3278   const std::string getAsStr() const override {
3279     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3280   }
3281 
3282   /// Check if all uses are assumed dead.
3283   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3284     // Callers might not check the type, void has no uses.
3285     if (V.getType()->isVoidTy())
3286       return true;
3287 
3288     // If we replace a value with a constant there are no uses left afterwards.
3289     if (!isa<Constant>(V)) {
3290       bool UsedAssumedInformation = false;
3291       Optional<Constant *> C =
3292           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3293       if (!C.hasValue() || *C)
3294         return true;
3295     }
3296 
3297     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3298     // Explicitly set the dependence class to required because we want a long
3299     // chain of N dependent instructions to be considered live as soon as one is
3300     // without going through N update cycles. This is not required for
3301     // correctness.
3302     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3303                              DepClassTy::REQUIRED);
3304   }
3305 
3306   /// Determine if \p I is assumed to be side-effect free.
3307   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3308     if (!I || wouldInstructionBeTriviallyDead(I))
3309       return true;
3310 
3311     auto *CB = dyn_cast<CallBase>(I);
3312     if (!CB || isa<IntrinsicInst>(CB))
3313       return false;
3314 
3315     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3316     const auto &NoUnwindAA =
3317         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3318     if (!NoUnwindAA.isAssumedNoUnwind())
3319       return false;
3320     if (!NoUnwindAA.isKnownNoUnwind())
3321       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3322 
3323     const auto &MemBehaviorAA =
3324         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3325     if (MemBehaviorAA.isAssumedReadOnly()) {
3326       if (!MemBehaviorAA.isKnownReadOnly())
3327         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3328       return true;
3329     }
3330     return false;
3331   }
3332 };
3333 
3334 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3335   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3336       : AAIsDeadValueImpl(IRP, A) {}
3337 
3338   /// See AbstractAttribute::initialize(...).
3339   void initialize(Attributor &A) override {
3340     if (isa<UndefValue>(getAssociatedValue())) {
3341       indicatePessimisticFixpoint();
3342       return;
3343     }
3344 
3345     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3346     if (!isAssumedSideEffectFree(A, I))
3347       indicatePessimisticFixpoint();
3348   }
3349 
3350   /// See AbstractAttribute::updateImpl(...).
3351   ChangeStatus updateImpl(Attributor &A) override {
3352     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3353     if (!isAssumedSideEffectFree(A, I))
3354       return indicatePessimisticFixpoint();
3355     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3356       return indicatePessimisticFixpoint();
3357     return ChangeStatus::UNCHANGED;
3358   }
3359 
3360   /// See AbstractAttribute::manifest(...).
3361   ChangeStatus manifest(Attributor &A) override {
3362     Value &V = getAssociatedValue();
3363     if (auto *I = dyn_cast<Instruction>(&V)) {
3364       // If we get here we basically know the users are all dead. We check if
3365       // isAssumedSideEffectFree returns true here again because it might not be
3366       // the case and only the users are dead but the instruction (=call) is
3367       // still needed.
3368       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
3369         A.deleteAfterManifest(*I);
3370         return ChangeStatus::CHANGED;
3371       }
3372     }
3373     if (V.use_empty())
3374       return ChangeStatus::UNCHANGED;
3375 
3376     bool UsedAssumedInformation = false;
3377     Optional<Constant *> C =
3378         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3379     if (C.hasValue() && C.getValue())
3380       return ChangeStatus::UNCHANGED;
3381 
3382     // Replace the value with undef as it is dead but keep droppable uses around
3383     // as they provide information we don't want to give up on just yet.
3384     UndefValue &UV = *UndefValue::get(V.getType());
3385     bool AnyChange =
3386         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3387     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3388   }
3389 
3390   /// See AbstractAttribute::trackStatistics()
3391   void trackStatistics() const override {
3392     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3393   }
3394 };
3395 
3396 struct AAIsDeadArgument : public AAIsDeadFloating {
3397   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3398       : AAIsDeadFloating(IRP, A) {}
3399 
3400   /// See AbstractAttribute::initialize(...).
3401   void initialize(Attributor &A) override {
3402     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3403       indicatePessimisticFixpoint();
3404   }
3405 
3406   /// See AbstractAttribute::manifest(...).
3407   ChangeStatus manifest(Attributor &A) override {
3408     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3409     Argument &Arg = *getAssociatedArgument();
3410     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3411       if (A.registerFunctionSignatureRewrite(
3412               Arg, /* ReplacementTypes */ {},
3413               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3414               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3415         Arg.dropDroppableUses();
3416         return ChangeStatus::CHANGED;
3417       }
3418     return Changed;
3419   }
3420 
3421   /// See AbstractAttribute::trackStatistics()
3422   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3423 };
3424 
3425 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3426   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3427       : AAIsDeadValueImpl(IRP, A) {}
3428 
3429   /// See AbstractAttribute::initialize(...).
3430   void initialize(Attributor &A) override {
3431     if (isa<UndefValue>(getAssociatedValue()))
3432       indicatePessimisticFixpoint();
3433   }
3434 
3435   /// See AbstractAttribute::updateImpl(...).
3436   ChangeStatus updateImpl(Attributor &A) override {
3437     // TODO: Once we have call site specific value information we can provide
3438     //       call site specific liveness information and then it makes
3439     //       sense to specialize attributes for call sites arguments instead of
3440     //       redirecting requests to the callee argument.
3441     Argument *Arg = getAssociatedArgument();
3442     if (!Arg)
3443       return indicatePessimisticFixpoint();
3444     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3445     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3446     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3447   }
3448 
3449   /// See AbstractAttribute::manifest(...).
3450   ChangeStatus manifest(Attributor &A) override {
3451     CallBase &CB = cast<CallBase>(getAnchorValue());
3452     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3453     assert(!isa<UndefValue>(U.get()) &&
3454            "Expected undef values to be filtered out!");
3455     UndefValue &UV = *UndefValue::get(U->getType());
3456     if (A.changeUseAfterManifest(U, UV))
3457       return ChangeStatus::CHANGED;
3458     return ChangeStatus::UNCHANGED;
3459   }
3460 
3461   /// See AbstractAttribute::trackStatistics()
3462   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3463 };
3464 
3465 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3466   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3467       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3468 
3469   /// See AAIsDead::isAssumedDead().
3470   bool isAssumedDead() const override {
3471     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3472   }
3473 
3474   /// See AbstractAttribute::initialize(...).
3475   void initialize(Attributor &A) override {
3476     if (isa<UndefValue>(getAssociatedValue())) {
3477       indicatePessimisticFixpoint();
3478       return;
3479     }
3480 
3481     // We track this separately as a secondary state.
3482     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3483   }
3484 
3485   /// See AbstractAttribute::updateImpl(...).
3486   ChangeStatus updateImpl(Attributor &A) override {
3487     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3488     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3489       IsAssumedSideEffectFree = false;
3490       Changed = ChangeStatus::CHANGED;
3491     }
3492     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3493       return indicatePessimisticFixpoint();
3494     return Changed;
3495   }
3496 
3497   /// See AbstractAttribute::trackStatistics()
3498   void trackStatistics() const override {
3499     if (IsAssumedSideEffectFree)
3500       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3501     else
3502       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3503   }
3504 
3505   /// See AbstractAttribute::getAsStr().
3506   const std::string getAsStr() const override {
3507     return isAssumedDead()
3508                ? "assumed-dead"
3509                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3510   }
3511 
3512 private:
3513   bool IsAssumedSideEffectFree;
3514 };
3515 
3516 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3517   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3518       : AAIsDeadValueImpl(IRP, A) {}
3519 
3520   /// See AbstractAttribute::updateImpl(...).
3521   ChangeStatus updateImpl(Attributor &A) override {
3522 
3523     bool UsedAssumedInformation = false;
3524     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3525                               {Instruction::Ret}, UsedAssumedInformation);
3526 
3527     auto PredForCallSite = [&](AbstractCallSite ACS) {
3528       if (ACS.isCallbackCall() || !ACS.getInstruction())
3529         return false;
3530       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3531     };
3532 
3533     bool AllCallSitesKnown;
3534     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3535                                 AllCallSitesKnown))
3536       return indicatePessimisticFixpoint();
3537 
3538     return ChangeStatus::UNCHANGED;
3539   }
3540 
3541   /// See AbstractAttribute::manifest(...).
3542   ChangeStatus manifest(Attributor &A) override {
3543     // TODO: Rewrite the signature to return void?
3544     bool AnyChange = false;
3545     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3546     auto RetInstPred = [&](Instruction &I) {
3547       ReturnInst &RI = cast<ReturnInst>(I);
3548       if (!isa<UndefValue>(RI.getReturnValue()))
3549         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3550       return true;
3551     };
3552     bool UsedAssumedInformation = false;
3553     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3554                               UsedAssumedInformation);
3555     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3556   }
3557 
3558   /// See AbstractAttribute::trackStatistics()
3559   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3560 };
3561 
3562 struct AAIsDeadFunction : public AAIsDead {
3563   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3564 
3565   /// See AbstractAttribute::initialize(...).
3566   void initialize(Attributor &A) override {
3567     const Function *F = getAnchorScope();
3568     if (F && !F->isDeclaration()) {
3569       // We only want to compute liveness once. If the function is not part of
3570       // the SCC, skip it.
3571       if (A.isRunOn(*const_cast<Function *>(F))) {
3572         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3573         assumeLive(A, F->getEntryBlock());
3574       } else {
3575         indicatePessimisticFixpoint();
3576       }
3577     }
3578   }
3579 
3580   /// See AbstractAttribute::getAsStr().
3581   const std::string getAsStr() const override {
3582     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3583            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3584            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3585            std::to_string(KnownDeadEnds.size()) + "]";
3586   }
3587 
3588   /// See AbstractAttribute::manifest(...).
3589   ChangeStatus manifest(Attributor &A) override {
3590     assert(getState().isValidState() &&
3591            "Attempted to manifest an invalid state!");
3592 
3593     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3594     Function &F = *getAnchorScope();
3595 
3596     if (AssumedLiveBlocks.empty()) {
3597       A.deleteAfterManifest(F);
3598       return ChangeStatus::CHANGED;
3599     }
3600 
3601     // Flag to determine if we can change an invoke to a call assuming the
3602     // callee is nounwind. This is not possible if the personality of the
3603     // function allows to catch asynchronous exceptions.
3604     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3605 
3606     KnownDeadEnds.set_union(ToBeExploredFrom);
3607     for (const Instruction *DeadEndI : KnownDeadEnds) {
3608       auto *CB = dyn_cast<CallBase>(DeadEndI);
3609       if (!CB)
3610         continue;
3611       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3612           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3613       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3614       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3615         continue;
3616 
3617       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3618         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3619       else
3620         A.changeToUnreachableAfterManifest(
3621             const_cast<Instruction *>(DeadEndI->getNextNode()));
3622       HasChanged = ChangeStatus::CHANGED;
3623     }
3624 
3625     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3626     for (BasicBlock &BB : F)
3627       if (!AssumedLiveBlocks.count(&BB)) {
3628         A.deleteAfterManifest(BB);
3629         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3630       }
3631 
3632     return HasChanged;
3633   }
3634 
3635   /// See AbstractAttribute::updateImpl(...).
3636   ChangeStatus updateImpl(Attributor &A) override;
3637 
3638   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3639     return !AssumedLiveEdges.count(std::make_pair(From, To));
3640   }
3641 
3642   /// See AbstractAttribute::trackStatistics()
3643   void trackStatistics() const override {}
3644 
3645   /// Returns true if the function is assumed dead.
3646   bool isAssumedDead() const override { return false; }
3647 
3648   /// See AAIsDead::isKnownDead().
3649   bool isKnownDead() const override { return false; }
3650 
3651   /// See AAIsDead::isAssumedDead(BasicBlock *).
3652   bool isAssumedDead(const BasicBlock *BB) const override {
3653     assert(BB->getParent() == getAnchorScope() &&
3654            "BB must be in the same anchor scope function.");
3655 
3656     if (!getAssumed())
3657       return false;
3658     return !AssumedLiveBlocks.count(BB);
3659   }
3660 
3661   /// See AAIsDead::isKnownDead(BasicBlock *).
3662   bool isKnownDead(const BasicBlock *BB) const override {
3663     return getKnown() && isAssumedDead(BB);
3664   }
3665 
3666   /// See AAIsDead::isAssumed(Instruction *I).
3667   bool isAssumedDead(const Instruction *I) const override {
3668     assert(I->getParent()->getParent() == getAnchorScope() &&
3669            "Instruction must be in the same anchor scope function.");
3670 
3671     if (!getAssumed())
3672       return false;
3673 
3674     // If it is not in AssumedLiveBlocks then it for sure dead.
3675     // Otherwise, it can still be after noreturn call in a live block.
3676     if (!AssumedLiveBlocks.count(I->getParent()))
3677       return true;
3678 
3679     // If it is not after a liveness barrier it is live.
3680     const Instruction *PrevI = I->getPrevNode();
3681     while (PrevI) {
3682       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3683         return true;
3684       PrevI = PrevI->getPrevNode();
3685     }
3686     return false;
3687   }
3688 
3689   /// See AAIsDead::isKnownDead(Instruction *I).
3690   bool isKnownDead(const Instruction *I) const override {
3691     return getKnown() && isAssumedDead(I);
3692   }
3693 
3694   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3695   /// that internal function called from \p BB should now be looked at.
3696   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3697     if (!AssumedLiveBlocks.insert(&BB).second)
3698       return false;
3699 
3700     // We assume that all of BB is (probably) live now and if there are calls to
3701     // internal functions we will assume that those are now live as well. This
3702     // is a performance optimization for blocks with calls to a lot of internal
3703     // functions. It can however cause dead functions to be treated as live.
3704     for (const Instruction &I : BB)
3705       if (const auto *CB = dyn_cast<CallBase>(&I))
3706         if (const Function *F = CB->getCalledFunction())
3707           if (F->hasLocalLinkage())
3708             A.markLiveInternalFunction(*F);
3709     return true;
3710   }
3711 
3712   /// Collection of instructions that need to be explored again, e.g., we
3713   /// did assume they do not transfer control to (one of their) successors.
3714   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3715 
3716   /// Collection of instructions that are known to not transfer control.
3717   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3718 
3719   /// Collection of all assumed live edges
3720   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3721 
3722   /// Collection of all assumed live BasicBlocks.
3723   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3724 };
3725 
3726 static bool
3727 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3728                         AbstractAttribute &AA,
3729                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3730   const IRPosition &IPos = IRPosition::callsite_function(CB);
3731 
3732   const auto &NoReturnAA =
3733       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3734   if (NoReturnAA.isAssumedNoReturn())
3735     return !NoReturnAA.isKnownNoReturn();
3736   if (CB.isTerminator())
3737     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3738   else
3739     AliveSuccessors.push_back(CB.getNextNode());
3740   return false;
3741 }
3742 
3743 static bool
3744 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3745                         AbstractAttribute &AA,
3746                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3747   bool UsedAssumedInformation =
3748       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3749 
3750   // First, determine if we can change an invoke to a call assuming the
3751   // callee is nounwind. This is not possible if the personality of the
3752   // function allows to catch asynchronous exceptions.
3753   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3754     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3755   } else {
3756     const IRPosition &IPos = IRPosition::callsite_function(II);
3757     const auto &AANoUnw =
3758         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3759     if (AANoUnw.isAssumedNoUnwind()) {
3760       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3761     } else {
3762       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3763     }
3764   }
3765   return UsedAssumedInformation;
3766 }
3767 
3768 static bool
3769 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3770                         AbstractAttribute &AA,
3771                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3772   bool UsedAssumedInformation = false;
3773   if (BI.getNumSuccessors() == 1) {
3774     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3775   } else {
3776     Optional<Constant *> C =
3777         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3778     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3779       // No value yet, assume both edges are dead.
3780     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3781       const BasicBlock *SuccBB =
3782           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3783       AliveSuccessors.push_back(&SuccBB->front());
3784     } else {
3785       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3786       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3787       UsedAssumedInformation = false;
3788     }
3789   }
3790   return UsedAssumedInformation;
3791 }
3792 
3793 static bool
3794 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3795                         AbstractAttribute &AA,
3796                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3797   bool UsedAssumedInformation = false;
3798   Optional<Constant *> C =
3799       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3800   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3801     // No value yet, assume all edges are dead.
3802   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3803     for (auto &CaseIt : SI.cases()) {
3804       if (CaseIt.getCaseValue() == C.getValue()) {
3805         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3806         return UsedAssumedInformation;
3807       }
3808     }
3809     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3810     return UsedAssumedInformation;
3811   } else {
3812     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3813       AliveSuccessors.push_back(&SuccBB->front());
3814   }
3815   return UsedAssumedInformation;
3816 }
3817 
3818 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3819   ChangeStatus Change = ChangeStatus::UNCHANGED;
3820 
3821   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3822                     << getAnchorScope()->size() << "] BBs and "
3823                     << ToBeExploredFrom.size() << " exploration points and "
3824                     << KnownDeadEnds.size() << " known dead ends\n");
3825 
3826   // Copy and clear the list of instructions we need to explore from. It is
3827   // refilled with instructions the next update has to look at.
3828   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3829                                                ToBeExploredFrom.end());
3830   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3831 
3832   SmallVector<const Instruction *, 8> AliveSuccessors;
3833   while (!Worklist.empty()) {
3834     const Instruction *I = Worklist.pop_back_val();
3835     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3836 
3837     // Fast forward for uninteresting instructions. We could look for UB here
3838     // though.
3839     while (!I->isTerminator() && !isa<CallBase>(I)) {
3840       Change = ChangeStatus::CHANGED;
3841       I = I->getNextNode();
3842     }
3843 
3844     AliveSuccessors.clear();
3845 
3846     bool UsedAssumedInformation = false;
3847     switch (I->getOpcode()) {
3848     // TODO: look for (assumed) UB to backwards propagate "deadness".
3849     default:
3850       assert(I->isTerminator() &&
3851              "Expected non-terminators to be handled already!");
3852       for (const BasicBlock *SuccBB : successors(I->getParent()))
3853         AliveSuccessors.push_back(&SuccBB->front());
3854       break;
3855     case Instruction::Call:
3856       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3857                                                        *this, AliveSuccessors);
3858       break;
3859     case Instruction::Invoke:
3860       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3861                                                        *this, AliveSuccessors);
3862       break;
3863     case Instruction::Br:
3864       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3865                                                        *this, AliveSuccessors);
3866       break;
3867     case Instruction::Switch:
3868       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3869                                                        *this, AliveSuccessors);
3870       break;
3871     }
3872 
3873     if (UsedAssumedInformation) {
3874       NewToBeExploredFrom.insert(I);
3875     } else {
3876       Change = ChangeStatus::CHANGED;
3877       if (AliveSuccessors.empty() ||
3878           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3879         KnownDeadEnds.insert(I);
3880     }
3881 
3882     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3883                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3884                       << UsedAssumedInformation << "\n");
3885 
3886     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3887       if (!I->isTerminator()) {
3888         assert(AliveSuccessors.size() == 1 &&
3889                "Non-terminator expected to have a single successor!");
3890         Worklist.push_back(AliveSuccessor);
3891       } else {
3892         // record the assumed live edge
3893         AssumedLiveEdges.insert(
3894             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3895         if (assumeLive(A, *AliveSuccessor->getParent()))
3896           Worklist.push_back(AliveSuccessor);
3897       }
3898     }
3899   }
3900 
3901   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3902 
3903   // If we know everything is live there is no need to query for liveness.
3904   // Instead, indicating a pessimistic fixpoint will cause the state to be
3905   // "invalid" and all queries to be answered conservatively without lookups.
3906   // To be in this state we have to (1) finished the exploration and (3) not
3907   // discovered any non-trivial dead end and (2) not ruled unreachable code
3908   // dead.
3909   if (ToBeExploredFrom.empty() &&
3910       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3911       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3912         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3913       }))
3914     return indicatePessimisticFixpoint();
3915   return Change;
3916 }
3917 
3918 /// Liveness information for a call sites.
3919 struct AAIsDeadCallSite final : AAIsDeadFunction {
3920   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3921       : AAIsDeadFunction(IRP, A) {}
3922 
3923   /// See AbstractAttribute::initialize(...).
3924   void initialize(Attributor &A) override {
3925     // TODO: Once we have call site specific value information we can provide
3926     //       call site specific liveness information and then it makes
3927     //       sense to specialize attributes for call sites instead of
3928     //       redirecting requests to the callee.
3929     llvm_unreachable("Abstract attributes for liveness are not "
3930                      "supported for call sites yet!");
3931   }
3932 
3933   /// See AbstractAttribute::updateImpl(...).
3934   ChangeStatus updateImpl(Attributor &A) override {
3935     return indicatePessimisticFixpoint();
3936   }
3937 
3938   /// See AbstractAttribute::trackStatistics()
3939   void trackStatistics() const override {}
3940 };
3941 
3942 /// -------------------- Dereferenceable Argument Attribute --------------------
3943 
3944 struct AADereferenceableImpl : AADereferenceable {
3945   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3946       : AADereferenceable(IRP, A) {}
3947   using StateType = DerefState;
3948 
3949   /// See AbstractAttribute::initialize(...).
3950   void initialize(Attributor &A) override {
3951     SmallVector<Attribute, 4> Attrs;
3952     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3953              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3954     for (const Attribute &Attr : Attrs)
3955       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3956 
3957     const IRPosition &IRP = this->getIRPosition();
3958     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3959 
3960     bool CanBeNull, CanBeFreed;
3961     takeKnownDerefBytesMaximum(
3962         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3963             A.getDataLayout(), CanBeNull, CanBeFreed));
3964 
3965     bool IsFnInterface = IRP.isFnInterfaceKind();
3966     Function *FnScope = IRP.getAnchorScope();
3967     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3968       indicatePessimisticFixpoint();
3969       return;
3970     }
3971 
3972     if (Instruction *CtxI = getCtxI())
3973       followUsesInMBEC(*this, A, getState(), *CtxI);
3974   }
3975 
3976   /// See AbstractAttribute::getState()
3977   /// {
3978   StateType &getState() override { return *this; }
3979   const StateType &getState() const override { return *this; }
3980   /// }
3981 
3982   /// Helper function for collecting accessed bytes in must-be-executed-context
3983   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3984                               DerefState &State) {
3985     const Value *UseV = U->get();
3986     if (!UseV->getType()->isPointerTy())
3987       return;
3988 
3989     Type *PtrTy = UseV->getType();
3990     const DataLayout &DL = A.getDataLayout();
3991     int64_t Offset;
3992     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3993             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3994       if (Base == &getAssociatedValue() &&
3995           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3996         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3997         State.addAccessedBytes(Offset, Size);
3998       }
3999     }
4000   }
4001 
4002   /// See followUsesInMBEC
4003   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4004                        AADereferenceable::StateType &State) {
4005     bool IsNonNull = false;
4006     bool TrackUse = false;
4007     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4008         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4009     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4010                       << " for instruction " << *I << "\n");
4011 
4012     addAccessedBytesForUse(A, U, I, State);
4013     State.takeKnownDerefBytesMaximum(DerefBytes);
4014     return TrackUse;
4015   }
4016 
4017   /// See AbstractAttribute::manifest(...).
4018   ChangeStatus manifest(Attributor &A) override {
4019     ChangeStatus Change = AADereferenceable::manifest(A);
4020     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4021       removeAttrs({Attribute::DereferenceableOrNull});
4022       return ChangeStatus::CHANGED;
4023     }
4024     return Change;
4025   }
4026 
4027   void getDeducedAttributes(LLVMContext &Ctx,
4028                             SmallVectorImpl<Attribute> &Attrs) const override {
4029     // TODO: Add *_globally support
4030     if (isAssumedNonNull())
4031       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4032           Ctx, getAssumedDereferenceableBytes()));
4033     else
4034       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4035           Ctx, getAssumedDereferenceableBytes()));
4036   }
4037 
4038   /// See AbstractAttribute::getAsStr().
4039   const std::string getAsStr() const override {
4040     if (!getAssumedDereferenceableBytes())
4041       return "unknown-dereferenceable";
4042     return std::string("dereferenceable") +
4043            (isAssumedNonNull() ? "" : "_or_null") +
4044            (isAssumedGlobal() ? "_globally" : "") + "<" +
4045            std::to_string(getKnownDereferenceableBytes()) + "-" +
4046            std::to_string(getAssumedDereferenceableBytes()) + ">";
4047   }
4048 };
4049 
4050 /// Dereferenceable attribute for a floating value.
4051 struct AADereferenceableFloating : AADereferenceableImpl {
4052   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4053       : AADereferenceableImpl(IRP, A) {}
4054 
4055   /// See AbstractAttribute::updateImpl(...).
4056   ChangeStatus updateImpl(Attributor &A) override {
4057     const DataLayout &DL = A.getDataLayout();
4058 
4059     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4060                             bool Stripped) -> bool {
4061       unsigned IdxWidth =
4062           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4063       APInt Offset(IdxWidth, 0);
4064       const Value *Base =
4065           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4066 
4067       const auto &AA = A.getAAFor<AADereferenceable>(
4068           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4069       int64_t DerefBytes = 0;
4070       if (!Stripped && this == &AA) {
4071         // Use IR information if we did not strip anything.
4072         // TODO: track globally.
4073         bool CanBeNull, CanBeFreed;
4074         DerefBytes =
4075             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4076         T.GlobalState.indicatePessimisticFixpoint();
4077       } else {
4078         const DerefState &DS = AA.getState();
4079         DerefBytes = DS.DerefBytesState.getAssumed();
4080         T.GlobalState &= DS.GlobalState;
4081       }
4082 
4083       // For now we do not try to "increase" dereferenceability due to negative
4084       // indices as we first have to come up with code to deal with loops and
4085       // for overflows of the dereferenceable bytes.
4086       int64_t OffsetSExt = Offset.getSExtValue();
4087       if (OffsetSExt < 0)
4088         OffsetSExt = 0;
4089 
4090       T.takeAssumedDerefBytesMinimum(
4091           std::max(int64_t(0), DerefBytes - OffsetSExt));
4092 
4093       if (this == &AA) {
4094         if (!Stripped) {
4095           // If nothing was stripped IR information is all we got.
4096           T.takeKnownDerefBytesMaximum(
4097               std::max(int64_t(0), DerefBytes - OffsetSExt));
4098           T.indicatePessimisticFixpoint();
4099         } else if (OffsetSExt > 0) {
4100           // If something was stripped but there is circular reasoning we look
4101           // for the offset. If it is positive we basically decrease the
4102           // dereferenceable bytes in a circluar loop now, which will simply
4103           // drive them down to the known value in a very slow way which we
4104           // can accelerate.
4105           T.indicatePessimisticFixpoint();
4106         }
4107       }
4108 
4109       return T.isValidState();
4110     };
4111 
4112     DerefState T;
4113     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4114                                            VisitValueCB, getCtxI()))
4115       return indicatePessimisticFixpoint();
4116 
4117     return clampStateAndIndicateChange(getState(), T);
4118   }
4119 
4120   /// See AbstractAttribute::trackStatistics()
4121   void trackStatistics() const override {
4122     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4123   }
4124 };
4125 
4126 /// Dereferenceable attribute for a return value.
4127 struct AADereferenceableReturned final
4128     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4129   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4130       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4131             IRP, A) {}
4132 
4133   /// See AbstractAttribute::trackStatistics()
4134   void trackStatistics() const override {
4135     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4136   }
4137 };
4138 
4139 /// Dereferenceable attribute for an argument
4140 struct AADereferenceableArgument final
4141     : AAArgumentFromCallSiteArguments<AADereferenceable,
4142                                       AADereferenceableImpl> {
4143   using Base =
4144       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4145   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4146       : Base(IRP, A) {}
4147 
4148   /// See AbstractAttribute::trackStatistics()
4149   void trackStatistics() const override {
4150     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4151   }
4152 };
4153 
4154 /// Dereferenceable attribute for a call site argument.
4155 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4156   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4157       : AADereferenceableFloating(IRP, A) {}
4158 
4159   /// See AbstractAttribute::trackStatistics()
4160   void trackStatistics() const override {
4161     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4162   }
4163 };
4164 
4165 /// Dereferenceable attribute deduction for a call site return value.
4166 struct AADereferenceableCallSiteReturned final
4167     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4168   using Base =
4169       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4170   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4171       : Base(IRP, A) {}
4172 
4173   /// See AbstractAttribute::trackStatistics()
4174   void trackStatistics() const override {
4175     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4176   }
4177 };
4178 
4179 // ------------------------ Align Argument Attribute ------------------------
4180 
4181 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4182                                     Value &AssociatedValue, const Use *U,
4183                                     const Instruction *I, bool &TrackUse) {
4184   // We need to follow common pointer manipulation uses to the accesses they
4185   // feed into.
4186   if (isa<CastInst>(I)) {
4187     // Follow all but ptr2int casts.
4188     TrackUse = !isa<PtrToIntInst>(I);
4189     return 0;
4190   }
4191   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4192     if (GEP->hasAllConstantIndices())
4193       TrackUse = true;
4194     return 0;
4195   }
4196 
4197   MaybeAlign MA;
4198   if (const auto *CB = dyn_cast<CallBase>(I)) {
4199     if (CB->isBundleOperand(U) || CB->isCallee(U))
4200       return 0;
4201 
4202     unsigned ArgNo = CB->getArgOperandNo(U);
4203     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4204     // As long as we only use known information there is no need to track
4205     // dependences here.
4206     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4207     MA = MaybeAlign(AlignAA.getKnownAlign());
4208   }
4209 
4210   const DataLayout &DL = A.getDataLayout();
4211   const Value *UseV = U->get();
4212   if (auto *SI = dyn_cast<StoreInst>(I)) {
4213     if (SI->getPointerOperand() == UseV)
4214       MA = SI->getAlign();
4215   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4216     if (LI->getPointerOperand() == UseV)
4217       MA = LI->getAlign();
4218   }
4219 
4220   if (!MA || *MA <= QueryingAA.getKnownAlign())
4221     return 0;
4222 
4223   unsigned Alignment = MA->value();
4224   int64_t Offset;
4225 
4226   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4227     if (Base == &AssociatedValue) {
4228       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4229       // So we can say that the maximum power of two which is a divisor of
4230       // gcd(Offset, Alignment) is an alignment.
4231 
4232       uint32_t gcd =
4233           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4234       Alignment = llvm::PowerOf2Floor(gcd);
4235     }
4236   }
4237 
4238   return Alignment;
4239 }
4240 
4241 struct AAAlignImpl : AAAlign {
4242   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4243 
4244   /// See AbstractAttribute::initialize(...).
4245   void initialize(Attributor &A) override {
4246     SmallVector<Attribute, 4> Attrs;
4247     getAttrs({Attribute::Alignment}, Attrs);
4248     for (const Attribute &Attr : Attrs)
4249       takeKnownMaximum(Attr.getValueAsInt());
4250 
4251     Value &V = getAssociatedValue();
4252     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4253     //       use of the function pointer. This was caused by D73131. We want to
4254     //       avoid this for function pointers especially because we iterate
4255     //       their uses and int2ptr is not handled. It is not a correctness
4256     //       problem though!
4257     if (!V.getType()->getPointerElementType()->isFunctionTy())
4258       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4259 
4260     if (getIRPosition().isFnInterfaceKind() &&
4261         (!getAnchorScope() ||
4262          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4263       indicatePessimisticFixpoint();
4264       return;
4265     }
4266 
4267     if (Instruction *CtxI = getCtxI())
4268       followUsesInMBEC(*this, A, getState(), *CtxI);
4269   }
4270 
4271   /// See AbstractAttribute::manifest(...).
4272   ChangeStatus manifest(Attributor &A) override {
4273     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4274 
4275     // Check for users that allow alignment annotations.
4276     Value &AssociatedValue = getAssociatedValue();
4277     for (const Use &U : AssociatedValue.uses()) {
4278       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4279         if (SI->getPointerOperand() == &AssociatedValue)
4280           if (SI->getAlignment() < getAssumedAlign()) {
4281             STATS_DECLTRACK(AAAlign, Store,
4282                             "Number of times alignment added to a store");
4283             SI->setAlignment(Align(getAssumedAlign()));
4284             LoadStoreChanged = ChangeStatus::CHANGED;
4285           }
4286       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4287         if (LI->getPointerOperand() == &AssociatedValue)
4288           if (LI->getAlignment() < getAssumedAlign()) {
4289             LI->setAlignment(Align(getAssumedAlign()));
4290             STATS_DECLTRACK(AAAlign, Load,
4291                             "Number of times alignment added to a load");
4292             LoadStoreChanged = ChangeStatus::CHANGED;
4293           }
4294       }
4295     }
4296 
4297     ChangeStatus Changed = AAAlign::manifest(A);
4298 
4299     Align InheritAlign =
4300         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4301     if (InheritAlign >= getAssumedAlign())
4302       return LoadStoreChanged;
4303     return Changed | LoadStoreChanged;
4304   }
4305 
4306   // TODO: Provide a helper to determine the implied ABI alignment and check in
4307   //       the existing manifest method and a new one for AAAlignImpl that value
4308   //       to avoid making the alignment explicit if it did not improve.
4309 
4310   /// See AbstractAttribute::getDeducedAttributes
4311   virtual void
4312   getDeducedAttributes(LLVMContext &Ctx,
4313                        SmallVectorImpl<Attribute> &Attrs) const override {
4314     if (getAssumedAlign() > 1)
4315       Attrs.emplace_back(
4316           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4317   }
4318 
4319   /// See followUsesInMBEC
4320   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4321                        AAAlign::StateType &State) {
4322     bool TrackUse = false;
4323 
4324     unsigned int KnownAlign =
4325         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4326     State.takeKnownMaximum(KnownAlign);
4327 
4328     return TrackUse;
4329   }
4330 
4331   /// See AbstractAttribute::getAsStr().
4332   const std::string getAsStr() const override {
4333     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4334                                 "-" + std::to_string(getAssumedAlign()) + ">")
4335                              : "unknown-align";
4336   }
4337 };
4338 
4339 /// Align attribute for a floating value.
4340 struct AAAlignFloating : AAAlignImpl {
4341   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4342 
4343   /// See AbstractAttribute::updateImpl(...).
4344   ChangeStatus updateImpl(Attributor &A) override {
4345     const DataLayout &DL = A.getDataLayout();
4346 
4347     auto VisitValueCB = [&](Value &V, const Instruction *,
4348                             AAAlign::StateType &T, bool Stripped) -> bool {
4349       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4350                                            DepClassTy::REQUIRED);
4351       if (!Stripped && this == &AA) {
4352         int64_t Offset;
4353         unsigned Alignment = 1;
4354         if (const Value *Base =
4355                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4356           Align PA = Base->getPointerAlignment(DL);
4357           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4358           // So we can say that the maximum power of two which is a divisor of
4359           // gcd(Offset, Alignment) is an alignment.
4360 
4361           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4362                                                uint32_t(PA.value()));
4363           Alignment = llvm::PowerOf2Floor(gcd);
4364         } else {
4365           Alignment = V.getPointerAlignment(DL).value();
4366         }
4367         // Use only IR information if we did not strip anything.
4368         T.takeKnownMaximum(Alignment);
4369         T.indicatePessimisticFixpoint();
4370       } else {
4371         // Use abstract attribute information.
4372         const AAAlign::StateType &DS = AA.getState();
4373         T ^= DS;
4374       }
4375       return T.isValidState();
4376     };
4377 
4378     StateType T;
4379     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4380                                           VisitValueCB, getCtxI()))
4381       return indicatePessimisticFixpoint();
4382 
4383     // TODO: If we know we visited all incoming values, thus no are assumed
4384     // dead, we can take the known information from the state T.
4385     return clampStateAndIndicateChange(getState(), T);
4386   }
4387 
4388   /// See AbstractAttribute::trackStatistics()
4389   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4390 };
4391 
4392 /// Align attribute for function return value.
4393 struct AAAlignReturned final
4394     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4395   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4396   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4397 
4398   /// See AbstractAttribute::initialize(...).
4399   void initialize(Attributor &A) override {
4400     Base::initialize(A);
4401     Function *F = getAssociatedFunction();
4402     if (!F || F->isDeclaration())
4403       indicatePessimisticFixpoint();
4404   }
4405 
4406   /// See AbstractAttribute::trackStatistics()
4407   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4408 };
4409 
4410 /// Align attribute for function argument.
4411 struct AAAlignArgument final
4412     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4413   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4414   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4415 
4416   /// See AbstractAttribute::manifest(...).
4417   ChangeStatus manifest(Attributor &A) override {
4418     // If the associated argument is involved in a must-tail call we give up
4419     // because we would need to keep the argument alignments of caller and
4420     // callee in-sync. Just does not seem worth the trouble right now.
4421     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4422       return ChangeStatus::UNCHANGED;
4423     return Base::manifest(A);
4424   }
4425 
4426   /// See AbstractAttribute::trackStatistics()
4427   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4428 };
4429 
4430 struct AAAlignCallSiteArgument final : AAAlignFloating {
4431   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4432       : AAAlignFloating(IRP, A) {}
4433 
4434   /// See AbstractAttribute::manifest(...).
4435   ChangeStatus manifest(Attributor &A) override {
4436     // If the associated argument is involved in a must-tail call we give up
4437     // because we would need to keep the argument alignments of caller and
4438     // callee in-sync. Just does not seem worth the trouble right now.
4439     if (Argument *Arg = getAssociatedArgument())
4440       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4441         return ChangeStatus::UNCHANGED;
4442     ChangeStatus Changed = AAAlignImpl::manifest(A);
4443     Align InheritAlign =
4444         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4445     if (InheritAlign >= getAssumedAlign())
4446       Changed = ChangeStatus::UNCHANGED;
4447     return Changed;
4448   }
4449 
4450   /// See AbstractAttribute::updateImpl(Attributor &A).
4451   ChangeStatus updateImpl(Attributor &A) override {
4452     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4453     if (Argument *Arg = getAssociatedArgument()) {
4454       // We only take known information from the argument
4455       // so we do not need to track a dependence.
4456       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4457           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4458       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4459     }
4460     return Changed;
4461   }
4462 
4463   /// See AbstractAttribute::trackStatistics()
4464   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4465 };
4466 
4467 /// Align attribute deduction for a call site return value.
4468 struct AAAlignCallSiteReturned final
4469     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4470   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4471   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4472       : Base(IRP, A) {}
4473 
4474   /// See AbstractAttribute::initialize(...).
4475   void initialize(Attributor &A) override {
4476     Base::initialize(A);
4477     Function *F = getAssociatedFunction();
4478     if (!F || F->isDeclaration())
4479       indicatePessimisticFixpoint();
4480   }
4481 
4482   /// See AbstractAttribute::trackStatistics()
4483   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4484 };
4485 
4486 /// ------------------ Function No-Return Attribute ----------------------------
4487 struct AANoReturnImpl : public AANoReturn {
4488   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4489 
4490   /// See AbstractAttribute::initialize(...).
4491   void initialize(Attributor &A) override {
4492     AANoReturn::initialize(A);
4493     Function *F = getAssociatedFunction();
4494     if (!F || F->isDeclaration())
4495       indicatePessimisticFixpoint();
4496   }
4497 
4498   /// See AbstractAttribute::getAsStr().
4499   const std::string getAsStr() const override {
4500     return getAssumed() ? "noreturn" : "may-return";
4501   }
4502 
4503   /// See AbstractAttribute::updateImpl(Attributor &A).
4504   virtual ChangeStatus updateImpl(Attributor &A) override {
4505     auto CheckForNoReturn = [](Instruction &) { return false; };
4506     bool UsedAssumedInformation = false;
4507     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4508                                    {(unsigned)Instruction::Ret},
4509                                    UsedAssumedInformation))
4510       return indicatePessimisticFixpoint();
4511     return ChangeStatus::UNCHANGED;
4512   }
4513 };
4514 
4515 struct AANoReturnFunction final : AANoReturnImpl {
4516   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4517       : AANoReturnImpl(IRP, A) {}
4518 
4519   /// See AbstractAttribute::trackStatistics()
4520   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4521 };
4522 
4523 /// NoReturn attribute deduction for a call sites.
4524 struct AANoReturnCallSite final : AANoReturnImpl {
4525   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4526       : AANoReturnImpl(IRP, A) {}
4527 
4528   /// See AbstractAttribute::initialize(...).
4529   void initialize(Attributor &A) override {
4530     AANoReturnImpl::initialize(A);
4531     if (Function *F = getAssociatedFunction()) {
4532       const IRPosition &FnPos = IRPosition::function(*F);
4533       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4534       if (!FnAA.isAssumedNoReturn())
4535         indicatePessimisticFixpoint();
4536     }
4537   }
4538 
4539   /// See AbstractAttribute::updateImpl(...).
4540   ChangeStatus updateImpl(Attributor &A) override {
4541     // TODO: Once we have call site specific value information we can provide
4542     //       call site specific liveness information and then it makes
4543     //       sense to specialize attributes for call sites arguments instead of
4544     //       redirecting requests to the callee argument.
4545     Function *F = getAssociatedFunction();
4546     const IRPosition &FnPos = IRPosition::function(*F);
4547     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4548     return clampStateAndIndicateChange(getState(), FnAA.getState());
4549   }
4550 
4551   /// See AbstractAttribute::trackStatistics()
4552   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4553 };
4554 
4555 /// ----------------------- Variable Capturing ---------------------------------
4556 
4557 /// A class to hold the state of for no-capture attributes.
4558 struct AANoCaptureImpl : public AANoCapture {
4559   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4560 
4561   /// See AbstractAttribute::initialize(...).
4562   void initialize(Attributor &A) override {
4563     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4564       indicateOptimisticFixpoint();
4565       return;
4566     }
4567     Function *AnchorScope = getAnchorScope();
4568     if (isFnInterfaceKind() &&
4569         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4570       indicatePessimisticFixpoint();
4571       return;
4572     }
4573 
4574     // You cannot "capture" null in the default address space.
4575     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4576         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4577       indicateOptimisticFixpoint();
4578       return;
4579     }
4580 
4581     const Function *F =
4582         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4583 
4584     // Check what state the associated function can actually capture.
4585     if (F)
4586       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4587     else
4588       indicatePessimisticFixpoint();
4589   }
4590 
4591   /// See AbstractAttribute::updateImpl(...).
4592   ChangeStatus updateImpl(Attributor &A) override;
4593 
4594   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4595   virtual void
4596   getDeducedAttributes(LLVMContext &Ctx,
4597                        SmallVectorImpl<Attribute> &Attrs) const override {
4598     if (!isAssumedNoCaptureMaybeReturned())
4599       return;
4600 
4601     if (isArgumentPosition()) {
4602       if (isAssumedNoCapture())
4603         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4604       else if (ManifestInternal)
4605         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4606     }
4607   }
4608 
4609   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4610   /// depending on the ability of the function associated with \p IRP to capture
4611   /// state in memory and through "returning/throwing", respectively.
4612   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4613                                                    const Function &F,
4614                                                    BitIntegerState &State) {
4615     // TODO: Once we have memory behavior attributes we should use them here.
4616 
4617     // If we know we cannot communicate or write to memory, we do not care about
4618     // ptr2int anymore.
4619     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4620         F.getReturnType()->isVoidTy()) {
4621       State.addKnownBits(NO_CAPTURE);
4622       return;
4623     }
4624 
4625     // A function cannot capture state in memory if it only reads memory, it can
4626     // however return/throw state and the state might be influenced by the
4627     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4628     if (F.onlyReadsMemory())
4629       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4630 
4631     // A function cannot communicate state back if it does not through
4632     // exceptions and doesn not return values.
4633     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4634       State.addKnownBits(NOT_CAPTURED_IN_RET);
4635 
4636     // Check existing "returned" attributes.
4637     int ArgNo = IRP.getCalleeArgNo();
4638     if (F.doesNotThrow() && ArgNo >= 0) {
4639       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4640         if (F.hasParamAttribute(u, Attribute::Returned)) {
4641           if (u == unsigned(ArgNo))
4642             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4643           else if (F.onlyReadsMemory())
4644             State.addKnownBits(NO_CAPTURE);
4645           else
4646             State.addKnownBits(NOT_CAPTURED_IN_RET);
4647           break;
4648         }
4649     }
4650   }
4651 
4652   /// See AbstractState::getAsStr().
4653   const std::string getAsStr() const override {
4654     if (isKnownNoCapture())
4655       return "known not-captured";
4656     if (isAssumedNoCapture())
4657       return "assumed not-captured";
4658     if (isKnownNoCaptureMaybeReturned())
4659       return "known not-captured-maybe-returned";
4660     if (isAssumedNoCaptureMaybeReturned())
4661       return "assumed not-captured-maybe-returned";
4662     return "assumed-captured";
4663   }
4664 };
4665 
4666 /// Attributor-aware capture tracker.
4667 struct AACaptureUseTracker final : public CaptureTracker {
4668 
4669   /// Create a capture tracker that can lookup in-flight abstract attributes
4670   /// through the Attributor \p A.
4671   ///
4672   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4673   /// search is stopped. If a use leads to a return instruction,
4674   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4675   /// If a use leads to a ptr2int which may capture the value,
4676   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4677   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4678   /// set. All values in \p PotentialCopies are later tracked as well. For every
4679   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4680   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4681   /// conservatively set to true.
4682   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4683                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4684                       SmallSetVector<Value *, 4> &PotentialCopies,
4685                       unsigned &RemainingUsesToExplore)
4686       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4687         PotentialCopies(PotentialCopies),
4688         RemainingUsesToExplore(RemainingUsesToExplore) {}
4689 
4690   /// Determine if \p V maybe captured. *Also updates the state!*
4691   bool valueMayBeCaptured(const Value *V) {
4692     if (V->getType()->isPointerTy()) {
4693       PointerMayBeCaptured(V, this);
4694     } else {
4695       State.indicatePessimisticFixpoint();
4696     }
4697     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4698   }
4699 
4700   /// See CaptureTracker::tooManyUses().
4701   void tooManyUses() override {
4702     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4703   }
4704 
4705   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4706     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4707       return true;
4708     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4709         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4710     return DerefAA.getAssumedDereferenceableBytes();
4711   }
4712 
4713   /// See CaptureTracker::captured(...).
4714   bool captured(const Use *U) override {
4715     Instruction *UInst = cast<Instruction>(U->getUser());
4716     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4717                       << "\n");
4718 
4719     // Because we may reuse the tracker multiple times we keep track of the
4720     // number of explored uses ourselves as well.
4721     if (RemainingUsesToExplore-- == 0) {
4722       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4723       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4724                           /* Return */ true);
4725     }
4726 
4727     // Deal with ptr2int by following uses.
4728     if (isa<PtrToIntInst>(UInst)) {
4729       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4730       return valueMayBeCaptured(UInst);
4731     }
4732 
4733     // Explicitly catch return instructions.
4734     if (isa<ReturnInst>(UInst))
4735       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4736                           /* Return */ true);
4737 
4738     // For now we only use special logic for call sites. However, the tracker
4739     // itself knows about a lot of other non-capturing cases already.
4740     auto *CB = dyn_cast<CallBase>(UInst);
4741     if (!CB || !CB->isArgOperand(U))
4742       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4743                           /* Return */ true);
4744 
4745     unsigned ArgNo = CB->getArgOperandNo(U);
4746     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4747     // If we have a abstract no-capture attribute for the argument we can use
4748     // it to justify a non-capture attribute here. This allows recursion!
4749     auto &ArgNoCaptureAA =
4750         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4751     if (ArgNoCaptureAA.isAssumedNoCapture())
4752       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4753                           /* Return */ false);
4754     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4755       addPotentialCopy(*CB);
4756       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4757                           /* Return */ false);
4758     }
4759 
4760     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4761     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4762                         /* Return */ true);
4763   }
4764 
4765   /// Register \p CS as potential copy of the value we are checking.
4766   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4767 
4768   /// See CaptureTracker::shouldExplore(...).
4769   bool shouldExplore(const Use *U) override {
4770     // Check liveness and ignore droppable users.
4771     bool UsedAssumedInformation = false;
4772     return !U->getUser()->isDroppable() &&
4773            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4774                             UsedAssumedInformation);
4775   }
4776 
4777   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4778   /// \p CapturedInRet, then return the appropriate value for use in the
4779   /// CaptureTracker::captured() interface.
4780   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4781                     bool CapturedInRet) {
4782     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4783                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4784     if (CapturedInMem)
4785       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4786     if (CapturedInInt)
4787       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4788     if (CapturedInRet)
4789       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4790     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4791   }
4792 
4793 private:
4794   /// The attributor providing in-flight abstract attributes.
4795   Attributor &A;
4796 
4797   /// The abstract attribute currently updated.
4798   AANoCapture &NoCaptureAA;
4799 
4800   /// The abstract liveness state.
4801   const AAIsDead &IsDeadAA;
4802 
4803   /// The state currently updated.
4804   AANoCapture::StateType &State;
4805 
4806   /// Set of potential copies of the tracked value.
4807   SmallSetVector<Value *, 4> &PotentialCopies;
4808 
4809   /// Global counter to limit the number of explored uses.
4810   unsigned &RemainingUsesToExplore;
4811 };
4812 
4813 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4814   const IRPosition &IRP = getIRPosition();
4815   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4816                                   : &IRP.getAssociatedValue();
4817   if (!V)
4818     return indicatePessimisticFixpoint();
4819 
4820   const Function *F =
4821       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4822   assert(F && "Expected a function!");
4823   const IRPosition &FnPos = IRPosition::function(*F);
4824   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4825 
4826   AANoCapture::StateType T;
4827 
4828   // Readonly means we cannot capture through memory.
4829   const auto &FnMemAA =
4830       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4831   if (FnMemAA.isAssumedReadOnly()) {
4832     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4833     if (FnMemAA.isKnownReadOnly())
4834       addKnownBits(NOT_CAPTURED_IN_MEM);
4835     else
4836       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4837   }
4838 
4839   // Make sure all returned values are different than the underlying value.
4840   // TODO: we could do this in a more sophisticated way inside
4841   //       AAReturnedValues, e.g., track all values that escape through returns
4842   //       directly somehow.
4843   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4844     bool SeenConstant = false;
4845     for (auto &It : RVAA.returned_values()) {
4846       if (isa<Constant>(It.first)) {
4847         if (SeenConstant)
4848           return false;
4849         SeenConstant = true;
4850       } else if (!isa<Argument>(It.first) ||
4851                  It.first == getAssociatedArgument())
4852         return false;
4853     }
4854     return true;
4855   };
4856 
4857   const auto &NoUnwindAA =
4858       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4859   if (NoUnwindAA.isAssumedNoUnwind()) {
4860     bool IsVoidTy = F->getReturnType()->isVoidTy();
4861     const AAReturnedValues *RVAA =
4862         IsVoidTy ? nullptr
4863                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4864 
4865                                                  DepClassTy::OPTIONAL);
4866     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4867       T.addKnownBits(NOT_CAPTURED_IN_RET);
4868       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4869         return ChangeStatus::UNCHANGED;
4870       if (NoUnwindAA.isKnownNoUnwind() &&
4871           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4872         addKnownBits(NOT_CAPTURED_IN_RET);
4873         if (isKnown(NOT_CAPTURED_IN_MEM))
4874           return indicateOptimisticFixpoint();
4875       }
4876     }
4877   }
4878 
4879   // Use the CaptureTracker interface and logic with the specialized tracker,
4880   // defined in AACaptureUseTracker, that can look at in-flight abstract
4881   // attributes and directly updates the assumed state.
4882   SmallSetVector<Value *, 4> PotentialCopies;
4883   unsigned RemainingUsesToExplore =
4884       getDefaultMaxUsesToExploreForCaptureTracking();
4885   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4886                               RemainingUsesToExplore);
4887 
4888   // Check all potential copies of the associated value until we can assume
4889   // none will be captured or we have to assume at least one might be.
4890   unsigned Idx = 0;
4891   PotentialCopies.insert(V);
4892   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4893     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4894 
4895   AANoCapture::StateType &S = getState();
4896   auto Assumed = S.getAssumed();
4897   S.intersectAssumedBits(T.getAssumed());
4898   if (!isAssumedNoCaptureMaybeReturned())
4899     return indicatePessimisticFixpoint();
4900   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4901                                    : ChangeStatus::CHANGED;
4902 }
4903 
4904 /// NoCapture attribute for function arguments.
4905 struct AANoCaptureArgument final : AANoCaptureImpl {
4906   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4907       : AANoCaptureImpl(IRP, A) {}
4908 
4909   /// See AbstractAttribute::trackStatistics()
4910   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4911 };
4912 
4913 /// NoCapture attribute for call site arguments.
4914 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4915   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4916       : AANoCaptureImpl(IRP, A) {}
4917 
4918   /// See AbstractAttribute::initialize(...).
4919   void initialize(Attributor &A) override {
4920     if (Argument *Arg = getAssociatedArgument())
4921       if (Arg->hasByValAttr())
4922         indicateOptimisticFixpoint();
4923     AANoCaptureImpl::initialize(A);
4924   }
4925 
4926   /// See AbstractAttribute::updateImpl(...).
4927   ChangeStatus updateImpl(Attributor &A) override {
4928     // TODO: Once we have call site specific value information we can provide
4929     //       call site specific liveness information and then it makes
4930     //       sense to specialize attributes for call sites arguments instead of
4931     //       redirecting requests to the callee argument.
4932     Argument *Arg = getAssociatedArgument();
4933     if (!Arg)
4934       return indicatePessimisticFixpoint();
4935     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4936     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4937     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4938   }
4939 
4940   /// See AbstractAttribute::trackStatistics()
4941   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4942 };
4943 
4944 /// NoCapture attribute for floating values.
4945 struct AANoCaptureFloating final : AANoCaptureImpl {
4946   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4947       : AANoCaptureImpl(IRP, A) {}
4948 
4949   /// See AbstractAttribute::trackStatistics()
4950   void trackStatistics() const override {
4951     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4952   }
4953 };
4954 
4955 /// NoCapture attribute for function return value.
4956 struct AANoCaptureReturned final : AANoCaptureImpl {
4957   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4958       : AANoCaptureImpl(IRP, A) {
4959     llvm_unreachable("NoCapture is not applicable to function returns!");
4960   }
4961 
4962   /// See AbstractAttribute::initialize(...).
4963   void initialize(Attributor &A) override {
4964     llvm_unreachable("NoCapture is not applicable to function returns!");
4965   }
4966 
4967   /// See AbstractAttribute::updateImpl(...).
4968   ChangeStatus updateImpl(Attributor &A) override {
4969     llvm_unreachable("NoCapture is not applicable to function returns!");
4970   }
4971 
4972   /// See AbstractAttribute::trackStatistics()
4973   void trackStatistics() const override {}
4974 };
4975 
4976 /// NoCapture attribute deduction for a call site return value.
4977 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4978   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4979       : AANoCaptureImpl(IRP, A) {}
4980 
4981   /// See AbstractAttribute::initialize(...).
4982   void initialize(Attributor &A) override {
4983     const Function *F = getAnchorScope();
4984     // Check what state the associated function can actually capture.
4985     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4986   }
4987 
4988   /// See AbstractAttribute::trackStatistics()
4989   void trackStatistics() const override {
4990     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4991   }
4992 };
4993 
4994 /// ------------------ Value Simplify Attribute ----------------------------
4995 
4996 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
4997   // FIXME: Add a typecast support.
4998   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
4999       SimplifiedAssociatedValue, Other, Ty);
5000   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5001     return false;
5002 
5003   LLVM_DEBUG({
5004     if (SimplifiedAssociatedValue.hasValue())
5005       dbgs() << "[ValueSimplify] is assumed to be "
5006              << **SimplifiedAssociatedValue << "\n";
5007     else
5008       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5009   });
5010   return true;
5011 }
5012 
5013 struct AAValueSimplifyImpl : AAValueSimplify {
5014   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5015       : AAValueSimplify(IRP, A) {}
5016 
5017   /// See AbstractAttribute::initialize(...).
5018   void initialize(Attributor &A) override {
5019     if (getAssociatedValue().getType()->isVoidTy())
5020       indicatePessimisticFixpoint();
5021   }
5022 
5023   /// See AbstractAttribute::getAsStr().
5024   const std::string getAsStr() const override {
5025     LLVM_DEBUG({
5026       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5027       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5028         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5029     });
5030     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5031                           : "not-simple";
5032   }
5033 
5034   /// See AbstractAttribute::trackStatistics()
5035   void trackStatistics() const override {}
5036 
5037   /// See AAValueSimplify::getAssumedSimplifiedValue()
5038   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5039     return SimplifiedAssociatedValue;
5040   }
5041 
5042   /// Return a value we can use as replacement for the associated one, or
5043   /// nullptr if we don't have one that makes sense.
5044   Value *getReplacementValue(Attributor &A) const {
5045     Value *NewV;
5046     NewV = SimplifiedAssociatedValue.hasValue()
5047                ? SimplifiedAssociatedValue.getValue()
5048                : UndefValue::get(getAssociatedType());
5049     if (!NewV)
5050       return nullptr;
5051     NewV = AA::getWithType(*NewV, *getAssociatedType());
5052     if (!NewV || NewV == &getAssociatedValue())
5053       return nullptr;
5054     const Instruction *CtxI = getCtxI();
5055     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5056       return nullptr;
5057     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5058       return nullptr;
5059     return NewV;
5060   }
5061 
5062   /// Helper function for querying AAValueSimplify and updating candicate.
5063   /// \param IRP The value position we are trying to unify with SimplifiedValue
5064   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5065                       const IRPosition &IRP, bool Simplify = true) {
5066     bool UsedAssumedInformation = false;
5067     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5068     if (Simplify)
5069       QueryingValueSimplified =
5070           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5071     return unionAssumed(QueryingValueSimplified);
5072   }
5073 
5074   /// Returns a candidate is found or not
5075   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5076     if (!getAssociatedValue().getType()->isIntegerTy())
5077       return false;
5078 
5079     // This will also pass the call base context.
5080     const auto &AA =
5081         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5082 
5083     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5084 
5085     if (!COpt.hasValue()) {
5086       SimplifiedAssociatedValue = llvm::None;
5087       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5088       return true;
5089     }
5090     if (auto *C = COpt.getValue()) {
5091       SimplifiedAssociatedValue = C;
5092       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5093       return true;
5094     }
5095     return false;
5096   }
5097 
5098   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5099     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5100       return true;
5101     if (askSimplifiedValueFor<AAPotentialValues>(A))
5102       return true;
5103     return false;
5104   }
5105 
5106   /// See AbstractAttribute::manifest(...).
5107   ChangeStatus manifest(Attributor &A) override {
5108     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5109     if (getAssociatedValue().user_empty())
5110       return Changed;
5111 
5112     if (auto *NewV = getReplacementValue(A)) {
5113       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5114                         << *NewV << " :: " << *this << "\n");
5115       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5116         Changed = ChangeStatus::CHANGED;
5117     }
5118 
5119     return Changed | AAValueSimplify::manifest(A);
5120   }
5121 
5122   /// See AbstractState::indicatePessimisticFixpoint(...).
5123   ChangeStatus indicatePessimisticFixpoint() override {
5124     SimplifiedAssociatedValue = &getAssociatedValue();
5125     return AAValueSimplify::indicatePessimisticFixpoint();
5126   }
5127 
5128   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5129                          LoadInst &L, function_ref<bool(Value &)> Union) {
5130     auto UnionWrapper = [&](Value &V, Value &Obj) {
5131       if (isa<AllocaInst>(Obj))
5132         return Union(V);
5133       if (!AA::isDynamicallyUnique(A, AA, V))
5134         return false;
5135       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5136         return false;
5137       return Union(V);
5138     };
5139 
5140     Value &Ptr = *L.getPointerOperand();
5141     SmallVector<Value *, 8> Objects;
5142     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5143       return false;
5144 
5145     for (Value *Obj : Objects) {
5146       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5147       if (isa<UndefValue>(Obj))
5148         continue;
5149       if (isa<ConstantPointerNull>(Obj)) {
5150         // A null pointer access can be undefined but any offset from null may
5151         // be OK. We do not try to optimize the latter.
5152         bool UsedAssumedInformation = false;
5153         if (!NullPointerIsDefined(L.getFunction(),
5154                                   Ptr.getType()->getPointerAddressSpace()) &&
5155             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5156           continue;
5157         return false;
5158       }
5159       if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
5160         return false;
5161       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
5162       if (!InitialVal || !Union(*InitialVal))
5163         return false;
5164 
5165       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5166                            "propagation, checking accesses next.\n");
5167 
5168       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5169         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5170         if (!Acc.isWrite())
5171           return true;
5172         if (Acc.isWrittenValueYetUndetermined())
5173           return true;
5174         Value *Content = Acc.getWrittenValue();
5175         if (!Content)
5176           return false;
5177         Value *CastedContent =
5178             AA::getWithType(*Content, *AA.getAssociatedType());
5179         if (!CastedContent)
5180           return false;
5181         if (IsExact)
5182           return UnionWrapper(*CastedContent, *Obj);
5183         if (auto *C = dyn_cast<Constant>(CastedContent))
5184           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5185             return UnionWrapper(*CastedContent, *Obj);
5186         return false;
5187       };
5188 
5189       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5190                                            DepClassTy::REQUIRED);
5191       if (!PI.forallInterferingAccesses(L, CheckAccess))
5192         return false;
5193     }
5194     return true;
5195   }
5196 };
5197 
5198 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5199   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5200       : AAValueSimplifyImpl(IRP, A) {}
5201 
5202   void initialize(Attributor &A) override {
5203     AAValueSimplifyImpl::initialize(A);
5204     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5205       indicatePessimisticFixpoint();
5206     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5207                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5208                 /* IgnoreSubsumingPositions */ true))
5209       indicatePessimisticFixpoint();
5210 
5211     // FIXME: This is a hack to prevent us from propagating function poiner in
5212     // the new pass manager CGSCC pass as it creates call edges the
5213     // CallGraphUpdater cannot handle yet.
5214     Value &V = getAssociatedValue();
5215     if (V.getType()->isPointerTy() &&
5216         V.getType()->getPointerElementType()->isFunctionTy() &&
5217         !A.isModulePass())
5218       indicatePessimisticFixpoint();
5219   }
5220 
5221   /// See AbstractAttribute::updateImpl(...).
5222   ChangeStatus updateImpl(Attributor &A) override {
5223     // Byval is only replacable if it is readonly otherwise we would write into
5224     // the replaced value and not the copy that byval creates implicitly.
5225     Argument *Arg = getAssociatedArgument();
5226     if (Arg->hasByValAttr()) {
5227       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5228       //       there is no race by not copying a constant byval.
5229       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5230                                                        DepClassTy::REQUIRED);
5231       if (!MemAA.isAssumedReadOnly())
5232         return indicatePessimisticFixpoint();
5233     }
5234 
5235     auto Before = SimplifiedAssociatedValue;
5236 
5237     auto PredForCallSite = [&](AbstractCallSite ACS) {
5238       const IRPosition &ACSArgPos =
5239           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5240       // Check if a coresponding argument was found or if it is on not
5241       // associated (which can happen for callback calls).
5242       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5243         return false;
5244 
5245       // Simplify the argument operand explicitly and check if the result is
5246       // valid in the current scope. This avoids refering to simplified values
5247       // in other functions, e.g., we don't want to say a an argument in a
5248       // static function is actually an argument in a different function.
5249       bool UsedAssumedInformation = false;
5250       Optional<Constant *> SimpleArgOp =
5251           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5252       if (!SimpleArgOp.hasValue())
5253         return true;
5254       if (!SimpleArgOp.getValue())
5255         return false;
5256       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5257         return false;
5258       return unionAssumed(*SimpleArgOp);
5259     };
5260 
5261     // Generate a answer specific to a call site context.
5262     bool Success;
5263     bool AllCallSitesKnown;
5264     if (hasCallBaseContext() &&
5265         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5266       Success = PredForCallSite(
5267           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5268     else
5269       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5270                                        AllCallSitesKnown);
5271 
5272     if (!Success)
5273       if (!askSimplifiedValueForOtherAAs(A))
5274         return indicatePessimisticFixpoint();
5275 
5276     // If a candicate was found in this update, return CHANGED.
5277     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5278                                                : ChangeStatus ::CHANGED;
5279   }
5280 
5281   /// See AbstractAttribute::trackStatistics()
5282   void trackStatistics() const override {
5283     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5284   }
5285 };
5286 
5287 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5288   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5289       : AAValueSimplifyImpl(IRP, A) {}
5290 
5291   /// See AAValueSimplify::getAssumedSimplifiedValue()
5292   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5293     if (!isValidState())
5294       return nullptr;
5295     return SimplifiedAssociatedValue;
5296   }
5297 
5298   /// See AbstractAttribute::updateImpl(...).
5299   ChangeStatus updateImpl(Attributor &A) override {
5300     auto Before = SimplifiedAssociatedValue;
5301 
5302     auto PredForReturned = [&](Value &V) {
5303       return checkAndUpdate(A, *this,
5304                             IRPosition::value(V, getCallBaseContext()));
5305     };
5306 
5307     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5308       if (!askSimplifiedValueForOtherAAs(A))
5309         return indicatePessimisticFixpoint();
5310 
5311     // If a candicate was found in this update, return CHANGED.
5312     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5313                                                : ChangeStatus ::CHANGED;
5314   }
5315 
5316   ChangeStatus manifest(Attributor &A) override {
5317     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5318 
5319     if (auto *NewV = getReplacementValue(A)) {
5320       auto PredForReturned =
5321           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5322             for (ReturnInst *RI : RetInsts) {
5323               Value *ReturnedVal = RI->getReturnValue();
5324               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5325                 return true;
5326               assert(RI->getFunction() == getAnchorScope() &&
5327                      "ReturnInst in wrong function!");
5328               LLVM_DEBUG(dbgs()
5329                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5330                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5331               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5332                 Changed = ChangeStatus::CHANGED;
5333             }
5334             return true;
5335           };
5336       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5337     }
5338 
5339     return Changed | AAValueSimplify::manifest(A);
5340   }
5341 
5342   /// See AbstractAttribute::trackStatistics()
5343   void trackStatistics() const override {
5344     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5345   }
5346 };
5347 
5348 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5349   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5350       : AAValueSimplifyImpl(IRP, A) {}
5351 
5352   /// See AbstractAttribute::initialize(...).
5353   void initialize(Attributor &A) override {
5354     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
5355     //        Needs investigation.
5356     // AAValueSimplifyImpl::initialize(A);
5357     Value &V = getAnchorValue();
5358 
5359     // TODO: add other stuffs
5360     if (isa<Constant>(V))
5361       indicatePessimisticFixpoint();
5362   }
5363 
5364   /// Check if \p Cmp is a comparison we can simplify.
5365   ///
5366   /// We handle multiple cases, one in which at least one operand is an
5367   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5368   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5369   /// will be updated.
5370   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5371     auto Union = [&](Value &V) {
5372       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5373           SimplifiedAssociatedValue, &V, V.getType());
5374       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5375     };
5376 
5377     Value *LHS = Cmp.getOperand(0);
5378     Value *RHS = Cmp.getOperand(1);
5379 
5380     // Simplify the operands first.
5381     bool UsedAssumedInformation = false;
5382     const auto &SimplifiedLHS =
5383         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5384                                *this, UsedAssumedInformation);
5385     if (!SimplifiedLHS.hasValue())
5386       return true;
5387     if (!SimplifiedLHS.getValue())
5388       return false;
5389     LHS = *SimplifiedLHS;
5390 
5391     const auto &SimplifiedRHS =
5392         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5393                                *this, UsedAssumedInformation);
5394     if (!SimplifiedRHS.hasValue())
5395       return true;
5396     if (!SimplifiedRHS.getValue())
5397       return false;
5398     RHS = *SimplifiedRHS;
5399 
5400     LLVMContext &Ctx = Cmp.getContext();
5401     // Handle the trivial case first in which we don't even need to think about
5402     // null or non-null.
5403     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5404       Constant *NewVal =
5405           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5406       if (!Union(*NewVal))
5407         return false;
5408       if (!UsedAssumedInformation)
5409         indicateOptimisticFixpoint();
5410       return true;
5411     }
5412 
5413     // From now on we only handle equalities (==, !=).
5414     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5415     if (!ICmp || !ICmp->isEquality())
5416       return false;
5417 
5418     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5419     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5420     if (!LHSIsNull && !RHSIsNull)
5421       return false;
5422 
5423     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5424     // non-nullptr operand and if we assume it's non-null we can conclude the
5425     // result of the comparison.
5426     assert((LHSIsNull || RHSIsNull) &&
5427            "Expected nullptr versus non-nullptr comparison at this point");
5428 
5429     // The index is the operand that we assume is not null.
5430     unsigned PtrIdx = LHSIsNull;
5431     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5432         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5433         DepClassTy::REQUIRED);
5434     if (!PtrNonNullAA.isAssumedNonNull())
5435       return false;
5436     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5437 
5438     // The new value depends on the predicate, true for != and false for ==.
5439     Constant *NewVal = ConstantInt::get(
5440         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5441     if (!Union(*NewVal))
5442       return false;
5443 
5444     if (!UsedAssumedInformation)
5445       indicateOptimisticFixpoint();
5446 
5447     return true;
5448   }
5449 
5450   bool updateWithLoad(Attributor &A, LoadInst &L) {
5451     auto Union = [&](Value &V) {
5452       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5453           SimplifiedAssociatedValue, &V, L.getType());
5454       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5455     };
5456     return handleLoad(A, *this, L, Union);
5457   }
5458 
5459   /// See AbstractAttribute::updateImpl(...).
5460   ChangeStatus updateImpl(Attributor &A) override {
5461     auto Before = SimplifiedAssociatedValue;
5462 
5463     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5464                             bool Stripped) -> bool {
5465       auto &AA = A.getAAFor<AAValueSimplify>(
5466           *this, IRPosition::value(V, getCallBaseContext()),
5467           DepClassTy::REQUIRED);
5468       if (!Stripped && this == &AA) {
5469         if (auto *LI = dyn_cast<LoadInst>(&V))
5470           return updateWithLoad(A, *LI);
5471         if (auto *Cmp = dyn_cast<CmpInst>(&V))
5472           return handleCmp(A, *Cmp);
5473         // TODO: Look the instruction and check recursively.
5474 
5475         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5476                           << "\n");
5477         return false;
5478       }
5479       return checkAndUpdate(A, *this,
5480                             IRPosition::value(V, getCallBaseContext()));
5481     };
5482 
5483     bool Dummy = false;
5484     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5485                                      VisitValueCB, getCtxI(),
5486                                      /* UseValueSimplify */ false))
5487       if (!askSimplifiedValueForOtherAAs(A))
5488         return indicatePessimisticFixpoint();
5489 
5490     // If a candicate was found in this update, return CHANGED.
5491     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5492                                                : ChangeStatus ::CHANGED;
5493   }
5494 
5495   /// See AbstractAttribute::trackStatistics()
5496   void trackStatistics() const override {
5497     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5498   }
5499 };
5500 
5501 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5502   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5503       : AAValueSimplifyImpl(IRP, A) {}
5504 
5505   /// See AbstractAttribute::initialize(...).
5506   void initialize(Attributor &A) override {
5507     SimplifiedAssociatedValue = nullptr;
5508     indicateOptimisticFixpoint();
5509   }
5510   /// See AbstractAttribute::initialize(...).
5511   ChangeStatus updateImpl(Attributor &A) override {
5512     llvm_unreachable(
5513         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5514   }
5515   /// See AbstractAttribute::trackStatistics()
5516   void trackStatistics() const override {
5517     STATS_DECLTRACK_FN_ATTR(value_simplify)
5518   }
5519 };
5520 
5521 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5522   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5523       : AAValueSimplifyFunction(IRP, A) {}
5524   /// See AbstractAttribute::trackStatistics()
5525   void trackStatistics() const override {
5526     STATS_DECLTRACK_CS_ATTR(value_simplify)
5527   }
5528 };
5529 
5530 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5531   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5532       : AAValueSimplifyImpl(IRP, A) {}
5533 
5534   void initialize(Attributor &A) override {
5535     if (!getAssociatedFunction())
5536       indicatePessimisticFixpoint();
5537   }
5538 
5539   /// See AbstractAttribute::updateImpl(...).
5540   ChangeStatus updateImpl(Attributor &A) override {
5541     auto Before = SimplifiedAssociatedValue;
5542     auto &RetAA = A.getAAFor<AAReturnedValues>(
5543         *this, IRPosition::function(*getAssociatedFunction()),
5544         DepClassTy::REQUIRED);
5545     auto PredForReturned =
5546         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5547           bool UsedAssumedInformation = false;
5548           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5549               &RetVal, *cast<CallBase>(getCtxI()), *this,
5550               UsedAssumedInformation);
5551           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5552               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5553           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5554         };
5555     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5556       if (!askSimplifiedValueForOtherAAs(A))
5557         return indicatePessimisticFixpoint();
5558     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5559                                                : ChangeStatus ::CHANGED;
5560   }
5561 
5562   void trackStatistics() const override {
5563     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5564   }
5565 };
5566 
5567 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5568   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5569       : AAValueSimplifyFloating(IRP, A) {}
5570 
5571   /// See AbstractAttribute::manifest(...).
5572   ChangeStatus manifest(Attributor &A) override {
5573     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5574 
5575     if (auto *NewV = getReplacementValue(A)) {
5576       Use &U = cast<CallBase>(&getAnchorValue())
5577                    ->getArgOperandUse(getCallSiteArgNo());
5578       if (A.changeUseAfterManifest(U, *NewV))
5579         Changed = ChangeStatus::CHANGED;
5580     }
5581 
5582     return Changed | AAValueSimplify::manifest(A);
5583   }
5584 
5585   void trackStatistics() const override {
5586     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5587   }
5588 };
5589 
5590 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5591 struct AAHeapToStackFunction final : public AAHeapToStack {
5592 
5593   struct AllocationInfo {
5594     /// The call that allocates the memory.
5595     CallBase *const CB;
5596 
5597     /// The kind of allocation.
5598     const enum class AllocationKind {
5599       MALLOC,
5600       CALLOC,
5601       ALIGNED_ALLOC,
5602     } Kind;
5603 
5604     /// The library function id for the allocation.
5605     LibFunc LibraryFunctionId = NotLibFunc;
5606 
5607     /// The status wrt. a rewrite.
5608     enum {
5609       STACK_DUE_TO_USE,
5610       STACK_DUE_TO_FREE,
5611       INVALID,
5612     } Status = STACK_DUE_TO_USE;
5613 
5614     /// Flag to indicate if we encountered a use that might free this allocation
5615     /// but which is not in the deallocation infos.
5616     bool HasPotentiallyFreeingUnknownUses = false;
5617 
5618     /// The set of free calls that use this allocation.
5619     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5620   };
5621 
5622   struct DeallocationInfo {
5623     /// The call that deallocates the memory.
5624     CallBase *const CB;
5625 
5626     /// Flag to indicate if we don't know all objects this deallocation might
5627     /// free.
5628     bool MightFreeUnknownObjects = false;
5629 
5630     /// The set of allocation calls that are potentially freed.
5631     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5632   };
5633 
5634   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5635       : AAHeapToStack(IRP, A) {}
5636 
5637   ~AAHeapToStackFunction() {
5638     // Ensure we call the destructor so we release any memory allocated in the
5639     // sets.
5640     for (auto &It : AllocationInfos)
5641       It.getSecond()->~AllocationInfo();
5642     for (auto &It : DeallocationInfos)
5643       It.getSecond()->~DeallocationInfo();
5644   }
5645 
5646   void initialize(Attributor &A) override {
5647     AAHeapToStack::initialize(A);
5648 
5649     const Function *F = getAnchorScope();
5650     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5651 
5652     auto AllocationIdentifierCB = [&](Instruction &I) {
5653       CallBase *CB = dyn_cast<CallBase>(&I);
5654       if (!CB)
5655         return true;
5656       if (isFreeCall(CB, TLI)) {
5657         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5658         return true;
5659       }
5660       bool IsMalloc = isMallocLikeFn(CB, TLI);
5661       bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
5662       bool IsCalloc =
5663           !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
5664       if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
5665         return true;
5666       auto Kind =
5667           IsMalloc ? AllocationInfo::AllocationKind::MALLOC
5668                    : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
5669                                : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
5670 
5671       AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
5672       AllocationInfos[CB] = AI;
5673       TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5674       return true;
5675     };
5676 
5677     bool UsedAssumedInformation = false;
5678     bool Success = A.checkForAllCallLikeInstructions(
5679         AllocationIdentifierCB, *this, UsedAssumedInformation,
5680         /* CheckBBLivenessOnly */ false,
5681         /* CheckPotentiallyDead */ true);
5682     (void)Success;
5683     assert(Success && "Did not expect the call base visit callback to fail!");
5684   }
5685 
5686   const std::string getAsStr() const override {
5687     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5688     for (const auto &It : AllocationInfos) {
5689       if (It.second->Status == AllocationInfo::INVALID)
5690         ++NumInvalidMallocs;
5691       else
5692         ++NumH2SMallocs;
5693     }
5694     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5695            std::to_string(NumInvalidMallocs);
5696   }
5697 
5698   /// See AbstractAttribute::trackStatistics().
5699   void trackStatistics() const override {
5700     STATS_DECL(
5701         MallocCalls, Function,
5702         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5703     for (auto &It : AllocationInfos)
5704       if (It.second->Status != AllocationInfo::INVALID)
5705         ++BUILD_STAT_NAME(MallocCalls, Function);
5706   }
5707 
5708   bool isAssumedHeapToStack(const CallBase &CB) const override {
5709     if (isValidState())
5710       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5711         return AI->Status != AllocationInfo::INVALID;
5712     return false;
5713   }
5714 
5715   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5716     if (!isValidState())
5717       return false;
5718 
5719     for (auto &It : AllocationInfos) {
5720       AllocationInfo &AI = *It.second;
5721       if (AI.Status == AllocationInfo::INVALID)
5722         continue;
5723 
5724       if (AI.PotentialFreeCalls.count(&CB))
5725         return true;
5726     }
5727 
5728     return false;
5729   }
5730 
5731   ChangeStatus manifest(Attributor &A) override {
5732     assert(getState().isValidState() &&
5733            "Attempted to manifest an invalid state!");
5734 
5735     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5736     Function *F = getAnchorScope();
5737     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5738 
5739     for (auto &It : AllocationInfos) {
5740       AllocationInfo &AI = *It.second;
5741       if (AI.Status == AllocationInfo::INVALID)
5742         continue;
5743 
5744       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5745         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5746         A.deleteAfterManifest(*FreeCall);
5747         HasChanged = ChangeStatus::CHANGED;
5748       }
5749 
5750       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5751                         << "\n");
5752 
5753       auto Remark = [&](OptimizationRemark OR) {
5754         LibFunc IsAllocShared;
5755         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5756           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5757             return OR << "Moving globalized variable to the stack.";
5758         return OR << "Moving memory allocation from the heap to the stack.";
5759       };
5760       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5761         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5762       else
5763         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5764 
5765       Value *Size;
5766       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5767       if (SizeAPI.hasValue()) {
5768         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5769       } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5770         auto *Num = AI.CB->getOperand(0);
5771         auto *SizeT = AI.CB->getOperand(1);
5772         IRBuilder<> B(AI.CB);
5773         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5774       } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5775         Size = AI.CB->getOperand(1);
5776       } else {
5777         Size = AI.CB->getOperand(0);
5778       }
5779 
5780       Align Alignment(1);
5781       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5782         Optional<APInt> AlignmentAPI =
5783             getAPInt(A, *this, *AI.CB->getArgOperand(0));
5784         assert(AlignmentAPI.hasValue() &&
5785                "Expected an alignment during manifest!");
5786         Alignment =
5787             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
5788       }
5789 
5790       unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
5791       Instruction *Alloca =
5792           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5793                          "", AI.CB->getNextNode());
5794 
5795       if (Alloca->getType() != AI.CB->getType())
5796         Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
5797                                  Alloca->getNextNode());
5798 
5799       A.changeValueAfterManifest(*AI.CB, *Alloca);
5800 
5801       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5802         auto *NBB = II->getNormalDest();
5803         BranchInst::Create(NBB, AI.CB->getParent());
5804         A.deleteAfterManifest(*AI.CB);
5805       } else {
5806         A.deleteAfterManifest(*AI.CB);
5807       }
5808 
5809       // Zero out the allocated memory if it was a calloc.
5810       if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5811         auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
5812                                    Alloca->getNextNode());
5813         Value *Ops[] = {
5814             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5815             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5816 
5817         Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
5818         Module *M = F->getParent();
5819         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5820         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5821       }
5822       HasChanged = ChangeStatus::CHANGED;
5823     }
5824 
5825     return HasChanged;
5826   }
5827 
5828   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5829                            Value &V) {
5830     bool UsedAssumedInformation = false;
5831     Optional<Constant *> SimpleV =
5832         A.getAssumedConstant(V, AA, UsedAssumedInformation);
5833     if (!SimpleV.hasValue())
5834       return APInt(64, 0);
5835     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
5836       return CI->getValue();
5837     return llvm::None;
5838   }
5839 
5840   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
5841                           AllocationInfo &AI) {
5842 
5843     if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
5844       return getAPInt(A, AA, *AI.CB->getArgOperand(0));
5845 
5846     if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
5847       // Only if the alignment is also constant we return a size.
5848       return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
5849                  ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
5850                  : llvm::None;
5851 
5852     assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
5853            "Expected only callocs are left");
5854     Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
5855     Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
5856     if (!Num.hasValue() || !Size.hasValue())
5857       return llvm::None;
5858     bool Overflow = false;
5859     Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
5860     return Overflow ? llvm::None : Size;
5861   }
5862 
5863   /// Collection of all malloc-like calls in a function with associated
5864   /// information.
5865   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
5866 
5867   /// Collection of all free-like calls in a function with associated
5868   /// information.
5869   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
5870 
5871   ChangeStatus updateImpl(Attributor &A) override;
5872 };
5873 
5874 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
5875   ChangeStatus Changed = ChangeStatus::UNCHANGED;
5876   const Function *F = getAnchorScope();
5877 
5878   const auto &LivenessAA =
5879       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
5880 
5881   MustBeExecutedContextExplorer &Explorer =
5882       A.getInfoCache().getMustBeExecutedContextExplorer();
5883 
5884   bool StackIsAccessibleByOtherThreads =
5885       A.getInfoCache().stackIsAccessibleByOtherThreads();
5886 
5887   // Flag to ensure we update our deallocation information at most once per
5888   // updateImpl call and only if we use the free check reasoning.
5889   bool HasUpdatedFrees = false;
5890 
5891   auto UpdateFrees = [&]() {
5892     HasUpdatedFrees = true;
5893 
5894     for (auto &It : DeallocationInfos) {
5895       DeallocationInfo &DI = *It.second;
5896       // For now we cannot use deallocations that have unknown inputs, skip
5897       // them.
5898       if (DI.MightFreeUnknownObjects)
5899         continue;
5900 
5901       // No need to analyze dead calls, ignore them instead.
5902       bool UsedAssumedInformation = false;
5903       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
5904                           /* CheckBBLivenessOnly */ true))
5905         continue;
5906 
5907       // Use the optimistic version to get the freed objects, ignoring dead
5908       // branches etc.
5909       SmallVector<Value *, 8> Objects;
5910       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
5911                                            *this, DI.CB)) {
5912         LLVM_DEBUG(
5913             dbgs()
5914             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
5915         DI.MightFreeUnknownObjects = true;
5916         continue;
5917       }
5918 
5919       // Check each object explicitly.
5920       for (auto *Obj : Objects) {
5921         // Free of null and undef can be ignored as no-ops (or UB in the latter
5922         // case).
5923         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
5924           continue;
5925 
5926         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
5927         if (!ObjCB) {
5928           LLVM_DEBUG(dbgs()
5929                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
5930           DI.MightFreeUnknownObjects = true;
5931           continue;
5932         }
5933 
5934         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
5935         if (!AI) {
5936           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
5937                             << "\n");
5938           DI.MightFreeUnknownObjects = true;
5939           continue;
5940         }
5941 
5942         DI.PotentialAllocationCalls.insert(ObjCB);
5943       }
5944     }
5945   };
5946 
5947   auto FreeCheck = [&](AllocationInfo &AI) {
5948     // If the stack is not accessible by other threads, the "must-free" logic
5949     // doesn't apply as the pointer could be shared and needs to be places in
5950     // "shareable" memory.
5951     if (!StackIsAccessibleByOtherThreads) {
5952       auto &NoSyncAA =
5953           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
5954       if (!NoSyncAA.isAssumedNoSync()) {
5955         LLVM_DEBUG(
5956             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
5957                       "other threads and function is not nosync:\n");
5958         return false;
5959       }
5960     }
5961     if (!HasUpdatedFrees)
5962       UpdateFrees();
5963 
5964     // TODO: Allow multi exit functions that have different free calls.
5965     if (AI.PotentialFreeCalls.size() != 1) {
5966       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
5967                         << AI.PotentialFreeCalls.size() << "\n");
5968       return false;
5969     }
5970     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
5971     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
5972     if (!DI) {
5973       LLVM_DEBUG(
5974           dbgs() << "[H2S] unique free call was not known as deallocation call "
5975                  << *UniqueFree << "\n");
5976       return false;
5977     }
5978     if (DI->MightFreeUnknownObjects) {
5979       LLVM_DEBUG(
5980           dbgs() << "[H2S] unique free call might free unknown allocations\n");
5981       return false;
5982     }
5983     if (DI->PotentialAllocationCalls.size() > 1) {
5984       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
5985                         << DI->PotentialAllocationCalls.size()
5986                         << " different allocations\n");
5987       return false;
5988     }
5989     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
5990       LLVM_DEBUG(
5991           dbgs()
5992           << "[H2S] unique free call not known to free this allocation but "
5993           << **DI->PotentialAllocationCalls.begin() << "\n");
5994       return false;
5995     }
5996     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
5997     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
5998       LLVM_DEBUG(
5999           dbgs()
6000           << "[H2S] unique free call might not be executed with the allocation "
6001           << *UniqueFree << "\n");
6002       return false;
6003     }
6004     return true;
6005   };
6006 
6007   auto UsesCheck = [&](AllocationInfo &AI) {
6008     bool ValidUsesOnly = true;
6009 
6010     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6011       Instruction *UserI = cast<Instruction>(U.getUser());
6012       if (isa<LoadInst>(UserI))
6013         return true;
6014       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6015         if (SI->getValueOperand() == U.get()) {
6016           LLVM_DEBUG(dbgs()
6017                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6018           ValidUsesOnly = false;
6019         } else {
6020           // A store into the malloc'ed memory is fine.
6021         }
6022         return true;
6023       }
6024       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6025         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6026           return true;
6027         if (DeallocationInfos.count(CB)) {
6028           AI.PotentialFreeCalls.insert(CB);
6029           return true;
6030         }
6031 
6032         unsigned ArgNo = CB->getArgOperandNo(&U);
6033 
6034         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6035             *this, IRPosition::callsite_argument(*CB, ArgNo),
6036             DepClassTy::OPTIONAL);
6037 
6038         // If a call site argument use is nofree, we are fine.
6039         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6040             *this, IRPosition::callsite_argument(*CB, ArgNo),
6041             DepClassTy::OPTIONAL);
6042 
6043         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6044         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6045         if (MaybeCaptured ||
6046             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6047              MaybeFreed)) {
6048           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6049 
6050           // Emit a missed remark if this is missed OpenMP globalization.
6051           auto Remark = [&](OptimizationRemarkMissed ORM) {
6052             return ORM
6053                    << "Could not move globalized variable to the stack. "
6054                       "Variable is potentially captured in call. Mark "
6055                       "parameter as `__attribute__((noescape))` to override.";
6056           };
6057 
6058           if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6059             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6060 
6061           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6062           ValidUsesOnly = false;
6063         }
6064         return true;
6065       }
6066 
6067       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6068           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6069         Follow = true;
6070         return true;
6071       }
6072       // Unknown user for which we can not track uses further (in a way that
6073       // makes sense).
6074       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6075       ValidUsesOnly = false;
6076       return true;
6077     };
6078     A.checkForAllUses(Pred, *this, *AI.CB);
6079     return ValidUsesOnly;
6080   };
6081 
6082   // The actual update starts here. We look at all allocations and depending on
6083   // their status perform the appropriate check(s).
6084   for (auto &It : AllocationInfos) {
6085     AllocationInfo &AI = *It.second;
6086     if (AI.Status == AllocationInfo::INVALID)
6087       continue;
6088 
6089     if (MaxHeapToStackSize == -1) {
6090       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6091         if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
6092           LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6093                             << "\n");
6094           AI.Status = AllocationInfo::INVALID;
6095           Changed = ChangeStatus::CHANGED;
6096           continue;
6097         }
6098     } else {
6099       Optional<APInt> Size = getSize(A, *this, AI);
6100       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6101         LLVM_DEBUG({
6102           if (!Size.hasValue())
6103             dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
6104                    << "\n";
6105           else
6106             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6107                    << MaxHeapToStackSize << "\n";
6108         });
6109 
6110         AI.Status = AllocationInfo::INVALID;
6111         Changed = ChangeStatus::CHANGED;
6112         continue;
6113       }
6114     }
6115 
6116     switch (AI.Status) {
6117     case AllocationInfo::STACK_DUE_TO_USE:
6118       if (UsesCheck(AI))
6119         continue;
6120       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6121       LLVM_FALLTHROUGH;
6122     case AllocationInfo::STACK_DUE_TO_FREE:
6123       if (FreeCheck(AI))
6124         continue;
6125       AI.Status = AllocationInfo::INVALID;
6126       Changed = ChangeStatus::CHANGED;
6127       continue;
6128     case AllocationInfo::INVALID:
6129       llvm_unreachable("Invalid allocations should never reach this point!");
6130     };
6131   }
6132 
6133   return Changed;
6134 }
6135 
6136 /// ----------------------- Privatizable Pointers ------------------------------
6137 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6138   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6139       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6140 
6141   ChangeStatus indicatePessimisticFixpoint() override {
6142     AAPrivatizablePtr::indicatePessimisticFixpoint();
6143     PrivatizableType = nullptr;
6144     return ChangeStatus::CHANGED;
6145   }
6146 
6147   /// Identify the type we can chose for a private copy of the underlying
6148   /// argument. None means it is not clear yet, nullptr means there is none.
6149   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6150 
6151   /// Return a privatizable type that encloses both T0 and T1.
6152   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6153   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6154     if (!T0.hasValue())
6155       return T1;
6156     if (!T1.hasValue())
6157       return T0;
6158     if (T0 == T1)
6159       return T0;
6160     return nullptr;
6161   }
6162 
6163   Optional<Type *> getPrivatizableType() const override {
6164     return PrivatizableType;
6165   }
6166 
6167   const std::string getAsStr() const override {
6168     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6169   }
6170 
6171 protected:
6172   Optional<Type *> PrivatizableType;
6173 };
6174 
6175 // TODO: Do this for call site arguments (probably also other values) as well.
6176 
6177 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6178   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6179       : AAPrivatizablePtrImpl(IRP, A) {}
6180 
6181   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6182   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6183     // If this is a byval argument and we know all the call sites (so we can
6184     // rewrite them), there is no need to check them explicitly.
6185     bool AllCallSitesKnown;
6186     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6187         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6188                                true, AllCallSitesKnown))
6189       return getAssociatedValue().getType()->getPointerElementType();
6190 
6191     Optional<Type *> Ty;
6192     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6193 
6194     // Make sure the associated call site argument has the same type at all call
6195     // sites and it is an allocation we know is safe to privatize, for now that
6196     // means we only allow alloca instructions.
6197     // TODO: We can additionally analyze the accesses in the callee to  create
6198     //       the type from that information instead. That is a little more
6199     //       involved and will be done in a follow up patch.
6200     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6201       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6202       // Check if a coresponding argument was found or if it is one not
6203       // associated (which can happen for callback calls).
6204       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6205         return false;
6206 
6207       // Check that all call sites agree on a type.
6208       auto &PrivCSArgAA =
6209           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6210       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6211 
6212       LLVM_DEBUG({
6213         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6214         if (CSTy.hasValue() && CSTy.getValue())
6215           CSTy.getValue()->print(dbgs());
6216         else if (CSTy.hasValue())
6217           dbgs() << "<nullptr>";
6218         else
6219           dbgs() << "<none>";
6220       });
6221 
6222       Ty = combineTypes(Ty, CSTy);
6223 
6224       LLVM_DEBUG({
6225         dbgs() << " : New Type: ";
6226         if (Ty.hasValue() && Ty.getValue())
6227           Ty.getValue()->print(dbgs());
6228         else if (Ty.hasValue())
6229           dbgs() << "<nullptr>";
6230         else
6231           dbgs() << "<none>";
6232         dbgs() << "\n";
6233       });
6234 
6235       return !Ty.hasValue() || Ty.getValue();
6236     };
6237 
6238     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6239       return nullptr;
6240     return Ty;
6241   }
6242 
6243   /// See AbstractAttribute::updateImpl(...).
6244   ChangeStatus updateImpl(Attributor &A) override {
6245     PrivatizableType = identifyPrivatizableType(A);
6246     if (!PrivatizableType.hasValue())
6247       return ChangeStatus::UNCHANGED;
6248     if (!PrivatizableType.getValue())
6249       return indicatePessimisticFixpoint();
6250 
6251     // The dependence is optional so we don't give up once we give up on the
6252     // alignment.
6253     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6254                         DepClassTy::OPTIONAL);
6255 
6256     // Avoid arguments with padding for now.
6257     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6258         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6259                                                 A.getInfoCache().getDL())) {
6260       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6261       return indicatePessimisticFixpoint();
6262     }
6263 
6264     // Verify callee and caller agree on how the promoted argument would be
6265     // passed.
6266     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
6267     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
6268     // which doesn't require the arguments ArgumentPromotion wanted to pass.
6269     Function &Fn = *getIRPosition().getAnchorScope();
6270     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
6271     ArgsToPromote.insert(getAssociatedArgument());
6272     const auto *TTI =
6273         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6274     if (!TTI ||
6275         !ArgumentPromotionPass::areFunctionArgsABICompatible(
6276             Fn, *TTI, ArgsToPromote, Dummy) ||
6277         ArgsToPromote.empty()) {
6278       LLVM_DEBUG(
6279           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6280                  << Fn.getName() << "\n");
6281       return indicatePessimisticFixpoint();
6282     }
6283 
6284     // Collect the types that will replace the privatizable type in the function
6285     // signature.
6286     SmallVector<Type *, 16> ReplacementTypes;
6287     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6288 
6289     // Register a rewrite of the argument.
6290     Argument *Arg = getAssociatedArgument();
6291     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6292       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6293       return indicatePessimisticFixpoint();
6294     }
6295 
6296     unsigned ArgNo = Arg->getArgNo();
6297 
6298     // Helper to check if for the given call site the associated argument is
6299     // passed to a callback where the privatization would be different.
6300     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6301       SmallVector<const Use *, 4> CallbackUses;
6302       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6303       for (const Use *U : CallbackUses) {
6304         AbstractCallSite CBACS(U);
6305         assert(CBACS && CBACS.isCallbackCall());
6306         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6307           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6308 
6309           LLVM_DEBUG({
6310             dbgs()
6311                 << "[AAPrivatizablePtr] Argument " << *Arg
6312                 << "check if can be privatized in the context of its parent ("
6313                 << Arg->getParent()->getName()
6314                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6315                    "callback ("
6316                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6317                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6318                 << CBACS.getCallArgOperand(CBArg) << " vs "
6319                 << CB.getArgOperand(ArgNo) << "\n"
6320                 << "[AAPrivatizablePtr] " << CBArg << " : "
6321                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6322           });
6323 
6324           if (CBArgNo != int(ArgNo))
6325             continue;
6326           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6327               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6328           if (CBArgPrivAA.isValidState()) {
6329             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6330             if (!CBArgPrivTy.hasValue())
6331               continue;
6332             if (CBArgPrivTy.getValue() == PrivatizableType)
6333               continue;
6334           }
6335 
6336           LLVM_DEBUG({
6337             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6338                    << " cannot be privatized in the context of its parent ("
6339                    << Arg->getParent()->getName()
6340                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6341                       "callback ("
6342                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6343                    << ").\n[AAPrivatizablePtr] for which the argument "
6344                       "privatization is not compatible.\n";
6345           });
6346           return false;
6347         }
6348       }
6349       return true;
6350     };
6351 
6352     // Helper to check if for the given call site the associated argument is
6353     // passed to a direct call where the privatization would be different.
6354     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6355       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6356       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6357       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
6358              "Expected a direct call operand for callback call operand");
6359 
6360       LLVM_DEBUG({
6361         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6362                << " check if be privatized in the context of its parent ("
6363                << Arg->getParent()->getName()
6364                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6365                   "direct call of ("
6366                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6367                << ").\n";
6368       });
6369 
6370       Function *DCCallee = DC->getCalledFunction();
6371       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6372         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6373             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6374             DepClassTy::REQUIRED);
6375         if (DCArgPrivAA.isValidState()) {
6376           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6377           if (!DCArgPrivTy.hasValue())
6378             return true;
6379           if (DCArgPrivTy.getValue() == PrivatizableType)
6380             return true;
6381         }
6382       }
6383 
6384       LLVM_DEBUG({
6385         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6386                << " cannot be privatized in the context of its parent ("
6387                << Arg->getParent()->getName()
6388                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6389                   "direct call of ("
6390                << ACS.getInstruction()->getCalledFunction()->getName()
6391                << ").\n[AAPrivatizablePtr] for which the argument "
6392                   "privatization is not compatible.\n";
6393       });
6394       return false;
6395     };
6396 
6397     // Helper to check if the associated argument is used at the given abstract
6398     // call site in a way that is incompatible with the privatization assumed
6399     // here.
6400     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6401       if (ACS.isDirectCall())
6402         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6403       if (ACS.isCallbackCall())
6404         return IsCompatiblePrivArgOfDirectCS(ACS);
6405       return false;
6406     };
6407 
6408     bool AllCallSitesKnown;
6409     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6410                                 AllCallSitesKnown))
6411       return indicatePessimisticFixpoint();
6412 
6413     return ChangeStatus::UNCHANGED;
6414   }
6415 
6416   /// Given a type to private \p PrivType, collect the constituates (which are
6417   /// used) in \p ReplacementTypes.
6418   static void
6419   identifyReplacementTypes(Type *PrivType,
6420                            SmallVectorImpl<Type *> &ReplacementTypes) {
6421     // TODO: For now we expand the privatization type to the fullest which can
6422     //       lead to dead arguments that need to be removed later.
6423     assert(PrivType && "Expected privatizable type!");
6424 
6425     // Traverse the type, extract constituate types on the outermost level.
6426     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6427       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6428         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6429     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6430       ReplacementTypes.append(PrivArrayType->getNumElements(),
6431                               PrivArrayType->getElementType());
6432     } else {
6433       ReplacementTypes.push_back(PrivType);
6434     }
6435   }
6436 
6437   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6438   /// The values needed are taken from the arguments of \p F starting at
6439   /// position \p ArgNo.
6440   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6441                                    unsigned ArgNo, Instruction &IP) {
6442     assert(PrivType && "Expected privatizable type!");
6443 
6444     IRBuilder<NoFolder> IRB(&IP);
6445     const DataLayout &DL = F.getParent()->getDataLayout();
6446 
6447     // Traverse the type, build GEPs and stores.
6448     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6449       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6450       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6451         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6452         Value *Ptr =
6453             constructPointer(PointeeTy, PrivType, &Base,
6454                              PrivStructLayout->getElementOffset(u), IRB, DL);
6455         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6456       }
6457     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6458       Type *PointeeTy = PrivArrayType->getElementType();
6459       Type *PointeePtrTy = PointeeTy->getPointerTo();
6460       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6461       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6462         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6463                                       u * PointeeTySize, IRB, DL);
6464         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6465       }
6466     } else {
6467       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6468     }
6469   }
6470 
6471   /// Extract values from \p Base according to the type \p PrivType at the
6472   /// call position \p ACS. The values are appended to \p ReplacementValues.
6473   void createReplacementValues(Align Alignment, Type *PrivType,
6474                                AbstractCallSite ACS, Value *Base,
6475                                SmallVectorImpl<Value *> &ReplacementValues) {
6476     assert(Base && "Expected base value!");
6477     assert(PrivType && "Expected privatizable type!");
6478     Instruction *IP = ACS.getInstruction();
6479 
6480     IRBuilder<NoFolder> IRB(IP);
6481     const DataLayout &DL = IP->getModule()->getDataLayout();
6482 
6483     if (Base->getType()->getPointerElementType() != PrivType)
6484       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
6485                                                  "", ACS.getInstruction());
6486 
6487     // Traverse the type, build GEPs and loads.
6488     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6489       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6490       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6491         Type *PointeeTy = PrivStructType->getElementType(u);
6492         Value *Ptr =
6493             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6494                              PrivStructLayout->getElementOffset(u), IRB, DL);
6495         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6496         L->setAlignment(Alignment);
6497         ReplacementValues.push_back(L);
6498       }
6499     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6500       Type *PointeeTy = PrivArrayType->getElementType();
6501       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6502       Type *PointeePtrTy = PointeeTy->getPointerTo();
6503       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6504         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6505                                       u * PointeeTySize, IRB, DL);
6506         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6507         L->setAlignment(Alignment);
6508         ReplacementValues.push_back(L);
6509       }
6510     } else {
6511       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6512       L->setAlignment(Alignment);
6513       ReplacementValues.push_back(L);
6514     }
6515   }
6516 
6517   /// See AbstractAttribute::manifest(...)
6518   ChangeStatus manifest(Attributor &A) override {
6519     if (!PrivatizableType.hasValue())
6520       return ChangeStatus::UNCHANGED;
6521     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6522 
6523     // Collect all tail calls in the function as we cannot allow new allocas to
6524     // escape into tail recursion.
6525     // TODO: Be smarter about new allocas escaping into tail calls.
6526     SmallVector<CallInst *, 16> TailCalls;
6527     bool UsedAssumedInformation = false;
6528     if (!A.checkForAllInstructions(
6529             [&](Instruction &I) {
6530               CallInst &CI = cast<CallInst>(I);
6531               if (CI.isTailCall())
6532                 TailCalls.push_back(&CI);
6533               return true;
6534             },
6535             *this, {Instruction::Call}, UsedAssumedInformation))
6536       return ChangeStatus::UNCHANGED;
6537 
6538     Argument *Arg = getAssociatedArgument();
6539     // Query AAAlign attribute for alignment of associated argument to
6540     // determine the best alignment of loads.
6541     const auto &AlignAA =
6542         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6543 
6544     // Callback to repair the associated function. A new alloca is placed at the
6545     // beginning and initialized with the values passed through arguments. The
6546     // new alloca replaces the use of the old pointer argument.
6547     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6548         [=](const Attributor::ArgumentReplacementInfo &ARI,
6549             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6550           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6551           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6552           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
6553                                            Arg->getName() + ".priv", IP);
6554           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6555                                ArgIt->getArgNo(), *IP);
6556 
6557           if (AI->getType() != Arg->getType())
6558             AI =
6559                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
6560           Arg->replaceAllUsesWith(AI);
6561 
6562           for (CallInst *CI : TailCalls)
6563             CI->setTailCall(false);
6564         };
6565 
6566     // Callback to repair a call site of the associated function. The elements
6567     // of the privatizable type are loaded prior to the call and passed to the
6568     // new function version.
6569     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6570         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6571                       AbstractCallSite ACS,
6572                       SmallVectorImpl<Value *> &NewArgOperands) {
6573           // When no alignment is specified for the load instruction,
6574           // natural alignment is assumed.
6575           createReplacementValues(
6576               assumeAligned(AlignAA.getAssumedAlign()),
6577               PrivatizableType.getValue(), ACS,
6578               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6579               NewArgOperands);
6580         };
6581 
6582     // Collect the types that will replace the privatizable type in the function
6583     // signature.
6584     SmallVector<Type *, 16> ReplacementTypes;
6585     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6586 
6587     // Register a rewrite of the argument.
6588     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6589                                            std::move(FnRepairCB),
6590                                            std::move(ACSRepairCB)))
6591       return ChangeStatus::CHANGED;
6592     return ChangeStatus::UNCHANGED;
6593   }
6594 
6595   /// See AbstractAttribute::trackStatistics()
6596   void trackStatistics() const override {
6597     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6598   }
6599 };
6600 
6601 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6602   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6603       : AAPrivatizablePtrImpl(IRP, A) {}
6604 
6605   /// See AbstractAttribute::initialize(...).
6606   virtual void initialize(Attributor &A) override {
6607     // TODO: We can privatize more than arguments.
6608     indicatePessimisticFixpoint();
6609   }
6610 
6611   ChangeStatus updateImpl(Attributor &A) override {
6612     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6613                      "updateImpl will not be called");
6614   }
6615 
6616   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6617   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6618     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6619     if (!Obj) {
6620       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6621       return nullptr;
6622     }
6623 
6624     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6625       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6626         if (CI->isOne())
6627           return Obj->getType()->getPointerElementType();
6628     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6629       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6630           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6631       if (PrivArgAA.isAssumedPrivatizablePtr())
6632         return Obj->getType()->getPointerElementType();
6633     }
6634 
6635     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6636                          "alloca nor privatizable argument: "
6637                       << *Obj << "!\n");
6638     return nullptr;
6639   }
6640 
6641   /// See AbstractAttribute::trackStatistics()
6642   void trackStatistics() const override {
6643     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6644   }
6645 };
6646 
6647 struct AAPrivatizablePtrCallSiteArgument final
6648     : public AAPrivatizablePtrFloating {
6649   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6650       : AAPrivatizablePtrFloating(IRP, A) {}
6651 
6652   /// See AbstractAttribute::initialize(...).
6653   void initialize(Attributor &A) override {
6654     if (getIRPosition().hasAttr(Attribute::ByVal))
6655       indicateOptimisticFixpoint();
6656   }
6657 
6658   /// See AbstractAttribute::updateImpl(...).
6659   ChangeStatus updateImpl(Attributor &A) override {
6660     PrivatizableType = identifyPrivatizableType(A);
6661     if (!PrivatizableType.hasValue())
6662       return ChangeStatus::UNCHANGED;
6663     if (!PrivatizableType.getValue())
6664       return indicatePessimisticFixpoint();
6665 
6666     const IRPosition &IRP = getIRPosition();
6667     auto &NoCaptureAA =
6668         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6669     if (!NoCaptureAA.isAssumedNoCapture()) {
6670       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6671       return indicatePessimisticFixpoint();
6672     }
6673 
6674     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6675     if (!NoAliasAA.isAssumedNoAlias()) {
6676       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6677       return indicatePessimisticFixpoint();
6678     }
6679 
6680     const auto &MemBehaviorAA =
6681         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
6682     if (!MemBehaviorAA.isAssumedReadOnly()) {
6683       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6684       return indicatePessimisticFixpoint();
6685     }
6686 
6687     return ChangeStatus::UNCHANGED;
6688   }
6689 
6690   /// See AbstractAttribute::trackStatistics()
6691   void trackStatistics() const override {
6692     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6693   }
6694 };
6695 
6696 struct AAPrivatizablePtrCallSiteReturned final
6697     : public AAPrivatizablePtrFloating {
6698   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6699       : AAPrivatizablePtrFloating(IRP, A) {}
6700 
6701   /// See AbstractAttribute::initialize(...).
6702   void initialize(Attributor &A) override {
6703     // TODO: We can privatize more than arguments.
6704     indicatePessimisticFixpoint();
6705   }
6706 
6707   /// See AbstractAttribute::trackStatistics()
6708   void trackStatistics() const override {
6709     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6710   }
6711 };
6712 
6713 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6714   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6715       : AAPrivatizablePtrFloating(IRP, A) {}
6716 
6717   /// See AbstractAttribute::initialize(...).
6718   void initialize(Attributor &A) override {
6719     // TODO: We can privatize more than arguments.
6720     indicatePessimisticFixpoint();
6721   }
6722 
6723   /// See AbstractAttribute::trackStatistics()
6724   void trackStatistics() const override {
6725     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6726   }
6727 };
6728 
6729 /// -------------------- Memory Behavior Attributes ----------------------------
6730 /// Includes read-none, read-only, and write-only.
6731 /// ----------------------------------------------------------------------------
6732 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6733   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6734       : AAMemoryBehavior(IRP, A) {}
6735 
6736   /// See AbstractAttribute::initialize(...).
6737   void initialize(Attributor &A) override {
6738     intersectAssumedBits(BEST_STATE);
6739     getKnownStateFromValue(getIRPosition(), getState());
6740     AAMemoryBehavior::initialize(A);
6741   }
6742 
6743   /// Return the memory behavior information encoded in the IR for \p IRP.
6744   static void getKnownStateFromValue(const IRPosition &IRP,
6745                                      BitIntegerState &State,
6746                                      bool IgnoreSubsumingPositions = false) {
6747     SmallVector<Attribute, 2> Attrs;
6748     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6749     for (const Attribute &Attr : Attrs) {
6750       switch (Attr.getKindAsEnum()) {
6751       case Attribute::ReadNone:
6752         State.addKnownBits(NO_ACCESSES);
6753         break;
6754       case Attribute::ReadOnly:
6755         State.addKnownBits(NO_WRITES);
6756         break;
6757       case Attribute::WriteOnly:
6758         State.addKnownBits(NO_READS);
6759         break;
6760       default:
6761         llvm_unreachable("Unexpected attribute!");
6762       }
6763     }
6764 
6765     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6766       if (!I->mayReadFromMemory())
6767         State.addKnownBits(NO_READS);
6768       if (!I->mayWriteToMemory())
6769         State.addKnownBits(NO_WRITES);
6770     }
6771   }
6772 
6773   /// See AbstractAttribute::getDeducedAttributes(...).
6774   void getDeducedAttributes(LLVMContext &Ctx,
6775                             SmallVectorImpl<Attribute> &Attrs) const override {
6776     assert(Attrs.size() == 0);
6777     if (isAssumedReadNone())
6778       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6779     else if (isAssumedReadOnly())
6780       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6781     else if (isAssumedWriteOnly())
6782       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6783     assert(Attrs.size() <= 1);
6784   }
6785 
6786   /// See AbstractAttribute::manifest(...).
6787   ChangeStatus manifest(Attributor &A) override {
6788     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6789       return ChangeStatus::UNCHANGED;
6790 
6791     const IRPosition &IRP = getIRPosition();
6792 
6793     // Check if we would improve the existing attributes first.
6794     SmallVector<Attribute, 4> DeducedAttrs;
6795     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6796     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6797           return IRP.hasAttr(Attr.getKindAsEnum(),
6798                              /* IgnoreSubsumingPositions */ true);
6799         }))
6800       return ChangeStatus::UNCHANGED;
6801 
6802     // Clear existing attributes.
6803     IRP.removeAttrs(AttrKinds);
6804 
6805     // Use the generic manifest method.
6806     return IRAttribute::manifest(A);
6807   }
6808 
6809   /// See AbstractState::getAsStr().
6810   const std::string getAsStr() const override {
6811     if (isAssumedReadNone())
6812       return "readnone";
6813     if (isAssumedReadOnly())
6814       return "readonly";
6815     if (isAssumedWriteOnly())
6816       return "writeonly";
6817     return "may-read/write";
6818   }
6819 
6820   /// The set of IR attributes AAMemoryBehavior deals with.
6821   static const Attribute::AttrKind AttrKinds[3];
6822 };
6823 
6824 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6825     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6826 
6827 /// Memory behavior attribute for a floating value.
6828 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6829   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6830       : AAMemoryBehaviorImpl(IRP, A) {}
6831 
6832   /// See AbstractAttribute::updateImpl(...).
6833   ChangeStatus updateImpl(Attributor &A) override;
6834 
6835   /// See AbstractAttribute::trackStatistics()
6836   void trackStatistics() const override {
6837     if (isAssumedReadNone())
6838       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6839     else if (isAssumedReadOnly())
6840       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6841     else if (isAssumedWriteOnly())
6842       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6843   }
6844 
6845 private:
6846   /// Return true if users of \p UserI might access the underlying
6847   /// variable/location described by \p U and should therefore be analyzed.
6848   bool followUsersOfUseIn(Attributor &A, const Use &U,
6849                           const Instruction *UserI);
6850 
6851   /// Update the state according to the effect of use \p U in \p UserI.
6852   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
6853 };
6854 
6855 /// Memory behavior attribute for function argument.
6856 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6857   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6858       : AAMemoryBehaviorFloating(IRP, A) {}
6859 
6860   /// See AbstractAttribute::initialize(...).
6861   void initialize(Attributor &A) override {
6862     intersectAssumedBits(BEST_STATE);
6863     const IRPosition &IRP = getIRPosition();
6864     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6865     // can query it when we use has/getAttr. That would allow us to reuse the
6866     // initialize of the base class here.
6867     bool HasByVal =
6868         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6869     getKnownStateFromValue(IRP, getState(),
6870                            /* IgnoreSubsumingPositions */ HasByVal);
6871 
6872     // Initialize the use vector with all direct uses of the associated value.
6873     Argument *Arg = getAssociatedArgument();
6874     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
6875       indicatePessimisticFixpoint();
6876   }
6877 
6878   ChangeStatus manifest(Attributor &A) override {
6879     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6880     if (!getAssociatedValue().getType()->isPointerTy())
6881       return ChangeStatus::UNCHANGED;
6882 
6883     // TODO: From readattrs.ll: "inalloca parameters are always
6884     //                           considered written"
6885     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6886       removeKnownBits(NO_WRITES);
6887       removeAssumedBits(NO_WRITES);
6888     }
6889     return AAMemoryBehaviorFloating::manifest(A);
6890   }
6891 
6892   /// See AbstractAttribute::trackStatistics()
6893   void trackStatistics() const override {
6894     if (isAssumedReadNone())
6895       STATS_DECLTRACK_ARG_ATTR(readnone)
6896     else if (isAssumedReadOnly())
6897       STATS_DECLTRACK_ARG_ATTR(readonly)
6898     else if (isAssumedWriteOnly())
6899       STATS_DECLTRACK_ARG_ATTR(writeonly)
6900   }
6901 };
6902 
6903 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6904   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6905       : AAMemoryBehaviorArgument(IRP, A) {}
6906 
6907   /// See AbstractAttribute::initialize(...).
6908   void initialize(Attributor &A) override {
6909     // If we don't have an associated attribute this is either a variadic call
6910     // or an indirect call, either way, nothing to do here.
6911     Argument *Arg = getAssociatedArgument();
6912     if (!Arg) {
6913       indicatePessimisticFixpoint();
6914       return;
6915     }
6916     if (Arg->hasByValAttr()) {
6917       addKnownBits(NO_WRITES);
6918       removeKnownBits(NO_READS);
6919       removeAssumedBits(NO_READS);
6920     }
6921     AAMemoryBehaviorArgument::initialize(A);
6922     if (getAssociatedFunction()->isDeclaration())
6923       indicatePessimisticFixpoint();
6924   }
6925 
6926   /// See AbstractAttribute::updateImpl(...).
6927   ChangeStatus updateImpl(Attributor &A) override {
6928     // TODO: Once we have call site specific value information we can provide
6929     //       call site specific liveness liveness information and then it makes
6930     //       sense to specialize attributes for call sites arguments instead of
6931     //       redirecting requests to the callee argument.
6932     Argument *Arg = getAssociatedArgument();
6933     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6934     auto &ArgAA =
6935         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6936     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6937   }
6938 
6939   /// See AbstractAttribute::trackStatistics()
6940   void trackStatistics() const override {
6941     if (isAssumedReadNone())
6942       STATS_DECLTRACK_CSARG_ATTR(readnone)
6943     else if (isAssumedReadOnly())
6944       STATS_DECLTRACK_CSARG_ATTR(readonly)
6945     else if (isAssumedWriteOnly())
6946       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6947   }
6948 };
6949 
6950 /// Memory behavior attribute for a call site return position.
6951 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6952   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6953       : AAMemoryBehaviorFloating(IRP, A) {}
6954 
6955   /// See AbstractAttribute::initialize(...).
6956   void initialize(Attributor &A) override {
6957     AAMemoryBehaviorImpl::initialize(A);
6958     Function *F = getAssociatedFunction();
6959     if (!F || F->isDeclaration())
6960       indicatePessimisticFixpoint();
6961   }
6962 
6963   /// See AbstractAttribute::manifest(...).
6964   ChangeStatus manifest(Attributor &A) override {
6965     // We do not annotate returned values.
6966     return ChangeStatus::UNCHANGED;
6967   }
6968 
6969   /// See AbstractAttribute::trackStatistics()
6970   void trackStatistics() const override {}
6971 };
6972 
6973 /// An AA to represent the memory behavior function attributes.
6974 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6975   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6976       : AAMemoryBehaviorImpl(IRP, A) {}
6977 
6978   /// See AbstractAttribute::updateImpl(Attributor &A).
6979   virtual ChangeStatus updateImpl(Attributor &A) override;
6980 
6981   /// See AbstractAttribute::manifest(...).
6982   ChangeStatus manifest(Attributor &A) override {
6983     Function &F = cast<Function>(getAnchorValue());
6984     if (isAssumedReadNone()) {
6985       F.removeFnAttr(Attribute::ArgMemOnly);
6986       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6987       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6988     }
6989     return AAMemoryBehaviorImpl::manifest(A);
6990   }
6991 
6992   /// See AbstractAttribute::trackStatistics()
6993   void trackStatistics() const override {
6994     if (isAssumedReadNone())
6995       STATS_DECLTRACK_FN_ATTR(readnone)
6996     else if (isAssumedReadOnly())
6997       STATS_DECLTRACK_FN_ATTR(readonly)
6998     else if (isAssumedWriteOnly())
6999       STATS_DECLTRACK_FN_ATTR(writeonly)
7000   }
7001 };
7002 
7003 /// AAMemoryBehavior attribute for call sites.
7004 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7005   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7006       : AAMemoryBehaviorImpl(IRP, A) {}
7007 
7008   /// See AbstractAttribute::initialize(...).
7009   void initialize(Attributor &A) override {
7010     AAMemoryBehaviorImpl::initialize(A);
7011     Function *F = getAssociatedFunction();
7012     if (!F || F->isDeclaration())
7013       indicatePessimisticFixpoint();
7014   }
7015 
7016   /// See AbstractAttribute::updateImpl(...).
7017   ChangeStatus updateImpl(Attributor &A) override {
7018     // TODO: Once we have call site specific value information we can provide
7019     //       call site specific liveness liveness information and then it makes
7020     //       sense to specialize attributes for call sites arguments instead of
7021     //       redirecting requests to the callee argument.
7022     Function *F = getAssociatedFunction();
7023     const IRPosition &FnPos = IRPosition::function(*F);
7024     auto &FnAA =
7025         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7026     return clampStateAndIndicateChange(getState(), FnAA.getState());
7027   }
7028 
7029   /// See AbstractAttribute::trackStatistics()
7030   void trackStatistics() const override {
7031     if (isAssumedReadNone())
7032       STATS_DECLTRACK_CS_ATTR(readnone)
7033     else if (isAssumedReadOnly())
7034       STATS_DECLTRACK_CS_ATTR(readonly)
7035     else if (isAssumedWriteOnly())
7036       STATS_DECLTRACK_CS_ATTR(writeonly)
7037   }
7038 };
7039 
7040 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7041 
7042   // The current assumed state used to determine a change.
7043   auto AssumedState = getAssumed();
7044 
7045   auto CheckRWInst = [&](Instruction &I) {
7046     // If the instruction has an own memory behavior state, use it to restrict
7047     // the local state. No further analysis is required as the other memory
7048     // state is as optimistic as it gets.
7049     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7050       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7051           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7052       intersectAssumedBits(MemBehaviorAA.getAssumed());
7053       return !isAtFixpoint();
7054     }
7055 
7056     // Remove access kind modifiers if necessary.
7057     if (I.mayReadFromMemory())
7058       removeAssumedBits(NO_READS);
7059     if (I.mayWriteToMemory())
7060       removeAssumedBits(NO_WRITES);
7061     return !isAtFixpoint();
7062   };
7063 
7064   bool UsedAssumedInformation = false;
7065   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7066                                           UsedAssumedInformation))
7067     return indicatePessimisticFixpoint();
7068 
7069   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7070                                         : ChangeStatus::UNCHANGED;
7071 }
7072 
7073 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7074 
7075   const IRPosition &IRP = getIRPosition();
7076   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7077   AAMemoryBehavior::StateType &S = getState();
7078 
7079   // First, check the function scope. We take the known information and we avoid
7080   // work if the assumed information implies the current assumed information for
7081   // this attribute. This is a valid for all but byval arguments.
7082   Argument *Arg = IRP.getAssociatedArgument();
7083   AAMemoryBehavior::base_t FnMemAssumedState =
7084       AAMemoryBehavior::StateType::getWorstState();
7085   if (!Arg || !Arg->hasByValAttr()) {
7086     const auto &FnMemAA =
7087         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7088     FnMemAssumedState = FnMemAA.getAssumed();
7089     S.addKnownBits(FnMemAA.getKnown());
7090     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7091       return ChangeStatus::UNCHANGED;
7092   }
7093 
7094   // Make sure the value is not captured (except through "return"), if
7095   // it is, any information derived would be irrelevant anyway as we cannot
7096   // check the potential aliases introduced by the capture. However, no need
7097   // to fall back to anythign less optimistic than the function state.
7098   const auto &ArgNoCaptureAA =
7099       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7100   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7101     S.intersectAssumedBits(FnMemAssumedState);
7102     return ChangeStatus::CHANGED;
7103   }
7104 
7105   // The current assumed state used to determine a change.
7106   auto AssumedState = S.getAssumed();
7107 
7108   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7109   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7110     Instruction *UserI = cast<Instruction>(U.getUser());
7111     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7112                       << " \n");
7113 
7114     // Droppable users, e.g., llvm::assume does not actually perform any action.
7115     if (UserI->isDroppable())
7116       return true;
7117 
7118     // Check if the users of UserI should also be visited.
7119     Follow = followUsersOfUseIn(A, U, UserI);
7120 
7121     // If UserI might touch memory we analyze the use in detail.
7122     if (UserI->mayReadOrWriteMemory())
7123       analyzeUseIn(A, U, UserI);
7124 
7125     return !isAtFixpoint();
7126   };
7127 
7128   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7129     return indicatePessimisticFixpoint();
7130 
7131   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7132                                         : ChangeStatus::UNCHANGED;
7133 }
7134 
7135 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7136                                                   const Instruction *UserI) {
7137   // The loaded value is unrelated to the pointer argument, no need to
7138   // follow the users of the load.
7139   if (isa<LoadInst>(UserI))
7140     return false;
7141 
7142   // By default we follow all uses assuming UserI might leak information on U,
7143   // we have special handling for call sites operands though.
7144   const auto *CB = dyn_cast<CallBase>(UserI);
7145   if (!CB || !CB->isArgOperand(&U))
7146     return true;
7147 
7148   // If the use is a call argument known not to be captured, the users of
7149   // the call do not need to be visited because they have to be unrelated to
7150   // the input. Note that this check is not trivial even though we disallow
7151   // general capturing of the underlying argument. The reason is that the
7152   // call might the argument "through return", which we allow and for which we
7153   // need to check call users.
7154   if (U.get()->getType()->isPointerTy()) {
7155     unsigned ArgNo = CB->getArgOperandNo(&U);
7156     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7157         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7158     return !ArgNoCaptureAA.isAssumedNoCapture();
7159   }
7160 
7161   return true;
7162 }
7163 
7164 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7165                                             const Instruction *UserI) {
7166   assert(UserI->mayReadOrWriteMemory());
7167 
7168   switch (UserI->getOpcode()) {
7169   default:
7170     // TODO: Handle all atomics and other side-effect operations we know of.
7171     break;
7172   case Instruction::Load:
7173     // Loads cause the NO_READS property to disappear.
7174     removeAssumedBits(NO_READS);
7175     return;
7176 
7177   case Instruction::Store:
7178     // Stores cause the NO_WRITES property to disappear if the use is the
7179     // pointer operand. Note that we do assume that capturing was taken care of
7180     // somewhere else.
7181     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7182       removeAssumedBits(NO_WRITES);
7183     return;
7184 
7185   case Instruction::Call:
7186   case Instruction::CallBr:
7187   case Instruction::Invoke: {
7188     // For call sites we look at the argument memory behavior attribute (this
7189     // could be recursive!) in order to restrict our own state.
7190     const auto *CB = cast<CallBase>(UserI);
7191 
7192     // Give up on operand bundles.
7193     if (CB->isBundleOperand(&U)) {
7194       indicatePessimisticFixpoint();
7195       return;
7196     }
7197 
7198     // Calling a function does read the function pointer, maybe write it if the
7199     // function is self-modifying.
7200     if (CB->isCallee(&U)) {
7201       removeAssumedBits(NO_READS);
7202       break;
7203     }
7204 
7205     // Adjust the possible access behavior based on the information on the
7206     // argument.
7207     IRPosition Pos;
7208     if (U.get()->getType()->isPointerTy())
7209       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7210     else
7211       Pos = IRPosition::callsite_function(*CB);
7212     const auto &MemBehaviorAA =
7213         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7214     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7215     // and at least "known".
7216     intersectAssumedBits(MemBehaviorAA.getAssumed());
7217     return;
7218   }
7219   };
7220 
7221   // Generally, look at the "may-properties" and adjust the assumed state if we
7222   // did not trigger special handling before.
7223   if (UserI->mayReadFromMemory())
7224     removeAssumedBits(NO_READS);
7225   if (UserI->mayWriteToMemory())
7226     removeAssumedBits(NO_WRITES);
7227 }
7228 
7229 /// -------------------- Memory Locations Attributes ---------------------------
7230 /// Includes read-none, argmemonly, inaccessiblememonly,
7231 /// inaccessiblememorargmemonly
7232 /// ----------------------------------------------------------------------------
7233 
7234 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7235     AAMemoryLocation::MemoryLocationsKind MLK) {
7236   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7237     return "all memory";
7238   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7239     return "no memory";
7240   std::string S = "memory:";
7241   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7242     S += "stack,";
7243   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7244     S += "constant,";
7245   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7246     S += "internal global,";
7247   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7248     S += "external global,";
7249   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7250     S += "argument,";
7251   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7252     S += "inaccessible,";
7253   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7254     S += "malloced,";
7255   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7256     S += "unknown,";
7257   S.pop_back();
7258   return S;
7259 }
7260 
7261 namespace {
7262 struct AAMemoryLocationImpl : public AAMemoryLocation {
7263 
7264   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7265       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7266     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7267       AccessKind2Accesses[u] = nullptr;
7268   }
7269 
7270   ~AAMemoryLocationImpl() {
7271     // The AccessSets are allocated via a BumpPtrAllocator, we call
7272     // the destructor manually.
7273     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7274       if (AccessKind2Accesses[u])
7275         AccessKind2Accesses[u]->~AccessSet();
7276   }
7277 
7278   /// See AbstractAttribute::initialize(...).
7279   void initialize(Attributor &A) override {
7280     intersectAssumedBits(BEST_STATE);
7281     getKnownStateFromValue(A, getIRPosition(), getState());
7282     AAMemoryLocation::initialize(A);
7283   }
7284 
7285   /// Return the memory behavior information encoded in the IR for \p IRP.
7286   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7287                                      BitIntegerState &State,
7288                                      bool IgnoreSubsumingPositions = false) {
7289     // For internal functions we ignore `argmemonly` and
7290     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7291     // constant propagation. It is unclear if this is the best way but it is
7292     // unlikely this will cause real performance problems. If we are deriving
7293     // attributes for the anchor function we even remove the attribute in
7294     // addition to ignoring it.
7295     bool UseArgMemOnly = true;
7296     Function *AnchorFn = IRP.getAnchorScope();
7297     if (AnchorFn && A.isRunOn(*AnchorFn))
7298       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7299 
7300     SmallVector<Attribute, 2> Attrs;
7301     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7302     for (const Attribute &Attr : Attrs) {
7303       switch (Attr.getKindAsEnum()) {
7304       case Attribute::ReadNone:
7305         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7306         break;
7307       case Attribute::InaccessibleMemOnly:
7308         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7309         break;
7310       case Attribute::ArgMemOnly:
7311         if (UseArgMemOnly)
7312           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7313         else
7314           IRP.removeAttrs({Attribute::ArgMemOnly});
7315         break;
7316       case Attribute::InaccessibleMemOrArgMemOnly:
7317         if (UseArgMemOnly)
7318           State.addKnownBits(inverseLocation(
7319               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7320         else
7321           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7322         break;
7323       default:
7324         llvm_unreachable("Unexpected attribute!");
7325       }
7326     }
7327   }
7328 
7329   /// See AbstractAttribute::getDeducedAttributes(...).
7330   void getDeducedAttributes(LLVMContext &Ctx,
7331                             SmallVectorImpl<Attribute> &Attrs) const override {
7332     assert(Attrs.size() == 0);
7333     if (isAssumedReadNone()) {
7334       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7335     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7336       if (isAssumedInaccessibleMemOnly())
7337         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7338       else if (isAssumedArgMemOnly())
7339         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7340       else if (isAssumedInaccessibleOrArgMemOnly())
7341         Attrs.push_back(
7342             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7343     }
7344     assert(Attrs.size() <= 1);
7345   }
7346 
7347   /// See AbstractAttribute::manifest(...).
7348   ChangeStatus manifest(Attributor &A) override {
7349     const IRPosition &IRP = getIRPosition();
7350 
7351     // Check if we would improve the existing attributes first.
7352     SmallVector<Attribute, 4> DeducedAttrs;
7353     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7354     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7355           return IRP.hasAttr(Attr.getKindAsEnum(),
7356                              /* IgnoreSubsumingPositions */ true);
7357         }))
7358       return ChangeStatus::UNCHANGED;
7359 
7360     // Clear existing attributes.
7361     IRP.removeAttrs(AttrKinds);
7362     if (isAssumedReadNone())
7363       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7364 
7365     // Use the generic manifest method.
7366     return IRAttribute::manifest(A);
7367   }
7368 
7369   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7370   bool checkForAllAccessesToMemoryKind(
7371       function_ref<bool(const Instruction *, const Value *, AccessKind,
7372                         MemoryLocationsKind)>
7373           Pred,
7374       MemoryLocationsKind RequestedMLK) const override {
7375     if (!isValidState())
7376       return false;
7377 
7378     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7379     if (AssumedMLK == NO_LOCATIONS)
7380       return true;
7381 
7382     unsigned Idx = 0;
7383     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7384          CurMLK *= 2, ++Idx) {
7385       if (CurMLK & RequestedMLK)
7386         continue;
7387 
7388       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7389         for (const AccessInfo &AI : *Accesses)
7390           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7391             return false;
7392     }
7393 
7394     return true;
7395   }
7396 
7397   ChangeStatus indicatePessimisticFixpoint() override {
7398     // If we give up and indicate a pessimistic fixpoint this instruction will
7399     // become an access for all potential access kinds:
7400     // TODO: Add pointers for argmemonly and globals to improve the results of
7401     //       checkForAllAccessesToMemoryKind.
7402     bool Changed = false;
7403     MemoryLocationsKind KnownMLK = getKnown();
7404     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7405     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7406       if (!(CurMLK & KnownMLK))
7407         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7408                                   getAccessKindFromInst(I));
7409     return AAMemoryLocation::indicatePessimisticFixpoint();
7410   }
7411 
7412 protected:
7413   /// Helper struct to tie together an instruction that has a read or write
7414   /// effect with the pointer it accesses (if any).
7415   struct AccessInfo {
7416 
7417     /// The instruction that caused the access.
7418     const Instruction *I;
7419 
7420     /// The base pointer that is accessed, or null if unknown.
7421     const Value *Ptr;
7422 
7423     /// The kind of access (read/write/read+write).
7424     AccessKind Kind;
7425 
7426     bool operator==(const AccessInfo &RHS) const {
7427       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7428     }
7429     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7430       if (LHS.I != RHS.I)
7431         return LHS.I < RHS.I;
7432       if (LHS.Ptr != RHS.Ptr)
7433         return LHS.Ptr < RHS.Ptr;
7434       if (LHS.Kind != RHS.Kind)
7435         return LHS.Kind < RHS.Kind;
7436       return false;
7437     }
7438   };
7439 
7440   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7441   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7442   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7443   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7444 
7445   /// Categorize the pointer arguments of CB that might access memory in
7446   /// AccessedLoc and update the state and access map accordingly.
7447   void
7448   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7449                                      AAMemoryLocation::StateType &AccessedLocs,
7450                                      bool &Changed);
7451 
7452   /// Return the kind(s) of location that may be accessed by \p V.
7453   AAMemoryLocation::MemoryLocationsKind
7454   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7455 
7456   /// Return the access kind as determined by \p I.
7457   AccessKind getAccessKindFromInst(const Instruction *I) {
7458     AccessKind AK = READ_WRITE;
7459     if (I) {
7460       AK = I->mayReadFromMemory() ? READ : NONE;
7461       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7462     }
7463     return AK;
7464   }
7465 
7466   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7467   /// an access of kind \p AK to a \p MLK memory location with the access
7468   /// pointer \p Ptr.
7469   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7470                                  MemoryLocationsKind MLK, const Instruction *I,
7471                                  const Value *Ptr, bool &Changed,
7472                                  AccessKind AK = READ_WRITE) {
7473 
7474     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7475     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7476     if (!Accesses)
7477       Accesses = new (Allocator) AccessSet();
7478     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7479     State.removeAssumedBits(MLK);
7480   }
7481 
7482   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7483   /// arguments, and update the state and access map accordingly.
7484   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7485                           AAMemoryLocation::StateType &State, bool &Changed);
7486 
7487   /// Used to allocate access sets.
7488   BumpPtrAllocator &Allocator;
7489 
7490   /// The set of IR attributes AAMemoryLocation deals with.
7491   static const Attribute::AttrKind AttrKinds[4];
7492 };
7493 
7494 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7495     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7496     Attribute::InaccessibleMemOrArgMemOnly};
7497 
7498 void AAMemoryLocationImpl::categorizePtrValue(
7499     Attributor &A, const Instruction &I, const Value &Ptr,
7500     AAMemoryLocation::StateType &State, bool &Changed) {
7501   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7502                     << Ptr << " ["
7503                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7504 
7505   SmallVector<Value *, 8> Objects;
7506   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7507     LLVM_DEBUG(
7508         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7509     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7510                               getAccessKindFromInst(&I));
7511     return;
7512   }
7513 
7514   for (Value *Obj : Objects) {
7515     // TODO: recognize the TBAA used for constant accesses.
7516     MemoryLocationsKind MLK = NO_LOCATIONS;
7517     assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7518     if (isa<UndefValue>(Obj))
7519       continue;
7520     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7521       if (Arg->hasByValAttr())
7522         MLK = NO_LOCAL_MEM;
7523       else
7524         MLK = NO_ARGUMENT_MEM;
7525     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7526       // Reading constant memory is not treated as a read "effect" by the
7527       // function attr pass so we won't neither. Constants defined by TBAA are
7528       // similar. (We know we do not write it because it is constant.)
7529       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7530         if (GVar->isConstant())
7531           continue;
7532 
7533       if (GV->hasLocalLinkage())
7534         MLK = NO_GLOBAL_INTERNAL_MEM;
7535       else
7536         MLK = NO_GLOBAL_EXTERNAL_MEM;
7537     } else if (isa<ConstantPointerNull>(Obj) &&
7538                !NullPointerIsDefined(getAssociatedFunction(),
7539                                      Ptr.getType()->getPointerAddressSpace())) {
7540       continue;
7541     } else if (isa<AllocaInst>(Obj)) {
7542       MLK = NO_LOCAL_MEM;
7543     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7544       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7545           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7546       if (NoAliasAA.isAssumedNoAlias())
7547         MLK = NO_MALLOCED_MEM;
7548       else
7549         MLK = NO_UNKOWN_MEM;
7550     } else {
7551       MLK = NO_UNKOWN_MEM;
7552     }
7553 
7554     assert(MLK != NO_LOCATIONS && "No location specified!");
7555     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7556                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7557                       << "\n");
7558     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7559                               getAccessKindFromInst(&I));
7560   }
7561 
7562   LLVM_DEBUG(
7563       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7564              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7565 }
7566 
7567 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7568     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7569     bool &Changed) {
7570   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
7571 
7572     // Skip non-pointer arguments.
7573     const Value *ArgOp = CB.getArgOperand(ArgNo);
7574     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7575       continue;
7576 
7577     // Skip readnone arguments.
7578     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7579     const auto &ArgOpMemLocationAA =
7580         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7581 
7582     if (ArgOpMemLocationAA.isAssumedReadNone())
7583       continue;
7584 
7585     // Categorize potentially accessed pointer arguments as if there was an
7586     // access instruction with them as pointer.
7587     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7588   }
7589 }
7590 
7591 AAMemoryLocation::MemoryLocationsKind
7592 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7593                                                   bool &Changed) {
7594   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7595                     << I << "\n");
7596 
7597   AAMemoryLocation::StateType AccessedLocs;
7598   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7599 
7600   if (auto *CB = dyn_cast<CallBase>(&I)) {
7601 
7602     // First check if we assume any memory is access is visible.
7603     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7604         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7605     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7606                       << " [" << CBMemLocationAA << "]\n");
7607 
7608     if (CBMemLocationAA.isAssumedReadNone())
7609       return NO_LOCATIONS;
7610 
7611     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7612       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7613                                 Changed, getAccessKindFromInst(&I));
7614       return AccessedLocs.getAssumed();
7615     }
7616 
7617     uint32_t CBAssumedNotAccessedLocs =
7618         CBMemLocationAA.getAssumedNotAccessedLocation();
7619 
7620     // Set the argmemonly and global bit as we handle them separately below.
7621     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7622         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7623 
7624     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7625       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7626         continue;
7627       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7628                                 getAccessKindFromInst(&I));
7629     }
7630 
7631     // Now handle global memory if it might be accessed. This is slightly tricky
7632     // as NO_GLOBAL_MEM has multiple bits set.
7633     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7634     if (HasGlobalAccesses) {
7635       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7636                             AccessKind Kind, MemoryLocationsKind MLK) {
7637         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7638                                   getAccessKindFromInst(&I));
7639         return true;
7640       };
7641       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7642               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7643         return AccessedLocs.getWorstState();
7644     }
7645 
7646     LLVM_DEBUG(
7647         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7648                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7649 
7650     // Now handle argument memory if it might be accessed.
7651     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7652     if (HasArgAccesses)
7653       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7654 
7655     LLVM_DEBUG(
7656         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7657                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7658 
7659     return AccessedLocs.getAssumed();
7660   }
7661 
7662   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7663     LLVM_DEBUG(
7664         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7665                << I << " [" << *Ptr << "]\n");
7666     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7667     return AccessedLocs.getAssumed();
7668   }
7669 
7670   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7671                     << I << "\n");
7672   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7673                             getAccessKindFromInst(&I));
7674   return AccessedLocs.getAssumed();
7675 }
7676 
7677 /// An AA to represent the memory behavior function attributes.
7678 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7679   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7680       : AAMemoryLocationImpl(IRP, A) {}
7681 
7682   /// See AbstractAttribute::updateImpl(Attributor &A).
7683   virtual ChangeStatus updateImpl(Attributor &A) override {
7684 
7685     const auto &MemBehaviorAA =
7686         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7687     if (MemBehaviorAA.isAssumedReadNone()) {
7688       if (MemBehaviorAA.isKnownReadNone())
7689         return indicateOptimisticFixpoint();
7690       assert(isAssumedReadNone() &&
7691              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7692       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7693       return ChangeStatus::UNCHANGED;
7694     }
7695 
7696     // The current assumed state used to determine a change.
7697     auto AssumedState = getAssumed();
7698     bool Changed = false;
7699 
7700     auto CheckRWInst = [&](Instruction &I) {
7701       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7702       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7703                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7704       removeAssumedBits(inverseLocation(MLK, false, false));
7705       // Stop once only the valid bit set in the *not assumed location*, thus
7706       // once we don't actually exclude any memory locations in the state.
7707       return getAssumedNotAccessedLocation() != VALID_STATE;
7708     };
7709 
7710     bool UsedAssumedInformation = false;
7711     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7712                                             UsedAssumedInformation))
7713       return indicatePessimisticFixpoint();
7714 
7715     Changed |= AssumedState != getAssumed();
7716     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7717   }
7718 
7719   /// See AbstractAttribute::trackStatistics()
7720   void trackStatistics() const override {
7721     if (isAssumedReadNone())
7722       STATS_DECLTRACK_FN_ATTR(readnone)
7723     else if (isAssumedArgMemOnly())
7724       STATS_DECLTRACK_FN_ATTR(argmemonly)
7725     else if (isAssumedInaccessibleMemOnly())
7726       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7727     else if (isAssumedInaccessibleOrArgMemOnly())
7728       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7729   }
7730 };
7731 
7732 /// AAMemoryLocation attribute for call sites.
7733 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7734   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7735       : AAMemoryLocationImpl(IRP, A) {}
7736 
7737   /// See AbstractAttribute::initialize(...).
7738   void initialize(Attributor &A) override {
7739     AAMemoryLocationImpl::initialize(A);
7740     Function *F = getAssociatedFunction();
7741     if (!F || F->isDeclaration())
7742       indicatePessimisticFixpoint();
7743   }
7744 
7745   /// See AbstractAttribute::updateImpl(...).
7746   ChangeStatus updateImpl(Attributor &A) override {
7747     // TODO: Once we have call site specific value information we can provide
7748     //       call site specific liveness liveness information and then it makes
7749     //       sense to specialize attributes for call sites arguments instead of
7750     //       redirecting requests to the callee argument.
7751     Function *F = getAssociatedFunction();
7752     const IRPosition &FnPos = IRPosition::function(*F);
7753     auto &FnAA =
7754         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7755     bool Changed = false;
7756     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7757                           AccessKind Kind, MemoryLocationsKind MLK) {
7758       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7759                                 getAccessKindFromInst(I));
7760       return true;
7761     };
7762     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7763       return indicatePessimisticFixpoint();
7764     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7765   }
7766 
7767   /// See AbstractAttribute::trackStatistics()
7768   void trackStatistics() const override {
7769     if (isAssumedReadNone())
7770       STATS_DECLTRACK_CS_ATTR(readnone)
7771   }
7772 };
7773 
7774 /// ------------------ Value Constant Range Attribute -------------------------
7775 
7776 struct AAValueConstantRangeImpl : AAValueConstantRange {
7777   using StateType = IntegerRangeState;
7778   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7779       : AAValueConstantRange(IRP, A) {}
7780 
7781   /// See AbstractAttribute::getAsStr().
7782   const std::string getAsStr() const override {
7783     std::string Str;
7784     llvm::raw_string_ostream OS(Str);
7785     OS << "range(" << getBitWidth() << ")<";
7786     getKnown().print(OS);
7787     OS << " / ";
7788     getAssumed().print(OS);
7789     OS << ">";
7790     return OS.str();
7791   }
7792 
7793   /// Helper function to get a SCEV expr for the associated value at program
7794   /// point \p I.
7795   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7796     if (!getAnchorScope())
7797       return nullptr;
7798 
7799     ScalarEvolution *SE =
7800         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7801             *getAnchorScope());
7802 
7803     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7804         *getAnchorScope());
7805 
7806     if (!SE || !LI)
7807       return nullptr;
7808 
7809     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7810     if (!I)
7811       return S;
7812 
7813     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7814   }
7815 
7816   /// Helper function to get a range from SCEV for the associated value at
7817   /// program point \p I.
7818   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7819                                          const Instruction *I = nullptr) const {
7820     if (!getAnchorScope())
7821       return getWorstState(getBitWidth());
7822 
7823     ScalarEvolution *SE =
7824         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7825             *getAnchorScope());
7826 
7827     const SCEV *S = getSCEV(A, I);
7828     if (!SE || !S)
7829       return getWorstState(getBitWidth());
7830 
7831     return SE->getUnsignedRange(S);
7832   }
7833 
7834   /// Helper function to get a range from LVI for the associated value at
7835   /// program point \p I.
7836   ConstantRange
7837   getConstantRangeFromLVI(Attributor &A,
7838                           const Instruction *CtxI = nullptr) const {
7839     if (!getAnchorScope())
7840       return getWorstState(getBitWidth());
7841 
7842     LazyValueInfo *LVI =
7843         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7844             *getAnchorScope());
7845 
7846     if (!LVI || !CtxI)
7847       return getWorstState(getBitWidth());
7848     return LVI->getConstantRange(&getAssociatedValue(),
7849                                  const_cast<Instruction *>(CtxI));
7850   }
7851 
7852   /// Return true if \p CtxI is valid for querying outside analyses.
7853   /// This basically makes sure we do not ask intra-procedural analysis
7854   /// about a context in the wrong function or a context that violates
7855   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
7856   /// if the original context of this AA is OK or should be considered invalid.
7857   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
7858                                                const Instruction *CtxI,
7859                                                bool AllowAACtxI) const {
7860     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
7861       return false;
7862 
7863     // Our context might be in a different function, neither intra-procedural
7864     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
7865     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
7866       return false;
7867 
7868     // If the context is not dominated by the value there are paths to the
7869     // context that do not define the value. This cannot be handled by
7870     // LazyValueInfo so we need to bail.
7871     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
7872       InformationCache &InfoCache = A.getInfoCache();
7873       const DominatorTree *DT =
7874           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
7875               *I->getFunction());
7876       return DT && DT->dominates(I, CtxI);
7877     }
7878 
7879     return true;
7880   }
7881 
7882   /// See AAValueConstantRange::getKnownConstantRange(..).
7883   ConstantRange
7884   getKnownConstantRange(Attributor &A,
7885                         const Instruction *CtxI = nullptr) const override {
7886     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
7887                                                  /* AllowAACtxI */ false))
7888       return getKnown();
7889 
7890     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7891     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7892     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7893   }
7894 
7895   /// See AAValueConstantRange::getAssumedConstantRange(..).
7896   ConstantRange
7897   getAssumedConstantRange(Attributor &A,
7898                           const Instruction *CtxI = nullptr) const override {
7899     // TODO: Make SCEV use Attributor assumption.
7900     //       We may be able to bound a variable range via assumptions in
7901     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7902     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7903     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
7904                                                  /* AllowAACtxI */ false))
7905       return getAssumed();
7906 
7907     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7908     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7909     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7910   }
7911 
7912   /// See AbstractAttribute::initialize(..).
7913   void initialize(Attributor &A) override {
7914     // Intersect a range given by SCEV.
7915     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7916 
7917     // Intersect a range given by LVI.
7918     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7919   }
7920 
7921   /// Helper function to create MDNode for range metadata.
7922   static MDNode *
7923   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7924                             const ConstantRange &AssumedConstantRange) {
7925     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7926                                   Ty, AssumedConstantRange.getLower())),
7927                               ConstantAsMetadata::get(ConstantInt::get(
7928                                   Ty, AssumedConstantRange.getUpper()))};
7929     return MDNode::get(Ctx, LowAndHigh);
7930   }
7931 
7932   /// Return true if \p Assumed is included in \p KnownRanges.
7933   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7934 
7935     if (Assumed.isFullSet())
7936       return false;
7937 
7938     if (!KnownRanges)
7939       return true;
7940 
7941     // If multiple ranges are annotated in IR, we give up to annotate assumed
7942     // range for now.
7943 
7944     // TODO:  If there exists a known range which containts assumed range, we
7945     // can say assumed range is better.
7946     if (KnownRanges->getNumOperands() > 2)
7947       return false;
7948 
7949     ConstantInt *Lower =
7950         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7951     ConstantInt *Upper =
7952         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7953 
7954     ConstantRange Known(Lower->getValue(), Upper->getValue());
7955     return Known.contains(Assumed) && Known != Assumed;
7956   }
7957 
7958   /// Helper function to set range metadata.
7959   static bool
7960   setRangeMetadataIfisBetterRange(Instruction *I,
7961                                   const ConstantRange &AssumedConstantRange) {
7962     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7963     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7964       if (!AssumedConstantRange.isEmptySet()) {
7965         I->setMetadata(LLVMContext::MD_range,
7966                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7967                                                  AssumedConstantRange));
7968         return true;
7969       }
7970     }
7971     return false;
7972   }
7973 
7974   /// See AbstractAttribute::manifest()
7975   ChangeStatus manifest(Attributor &A) override {
7976     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7977     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7978     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7979 
7980     auto &V = getAssociatedValue();
7981     if (!AssumedConstantRange.isEmptySet() &&
7982         !AssumedConstantRange.isSingleElement()) {
7983       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7984         assert(I == getCtxI() && "Should not annotate an instruction which is "
7985                                  "not the context instruction");
7986         if (isa<CallInst>(I) || isa<LoadInst>(I))
7987           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7988             Changed = ChangeStatus::CHANGED;
7989       }
7990     }
7991 
7992     return Changed;
7993   }
7994 };
7995 
7996 struct AAValueConstantRangeArgument final
7997     : AAArgumentFromCallSiteArguments<
7998           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7999           true /* BridgeCallBaseContext */> {
8000   using Base = AAArgumentFromCallSiteArguments<
8001       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8002       true /* BridgeCallBaseContext */>;
8003   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8004       : Base(IRP, A) {}
8005 
8006   /// See AbstractAttribute::initialize(..).
8007   void initialize(Attributor &A) override {
8008     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8009       indicatePessimisticFixpoint();
8010     } else {
8011       Base::initialize(A);
8012     }
8013   }
8014 
8015   /// See AbstractAttribute::trackStatistics()
8016   void trackStatistics() const override {
8017     STATS_DECLTRACK_ARG_ATTR(value_range)
8018   }
8019 };
8020 
8021 struct AAValueConstantRangeReturned
8022     : AAReturnedFromReturnedValues<AAValueConstantRange,
8023                                    AAValueConstantRangeImpl,
8024                                    AAValueConstantRangeImpl::StateType,
8025                                    /* PropogateCallBaseContext */ true> {
8026   using Base =
8027       AAReturnedFromReturnedValues<AAValueConstantRange,
8028                                    AAValueConstantRangeImpl,
8029                                    AAValueConstantRangeImpl::StateType,
8030                                    /* PropogateCallBaseContext */ true>;
8031   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8032       : Base(IRP, A) {}
8033 
8034   /// See AbstractAttribute::initialize(...).
8035   void initialize(Attributor &A) override {}
8036 
8037   /// See AbstractAttribute::trackStatistics()
8038   void trackStatistics() const override {
8039     STATS_DECLTRACK_FNRET_ATTR(value_range)
8040   }
8041 };
8042 
8043 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8044   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8045       : AAValueConstantRangeImpl(IRP, A) {}
8046 
8047   /// See AbstractAttribute::initialize(...).
8048   void initialize(Attributor &A) override {
8049     AAValueConstantRangeImpl::initialize(A);
8050     Value &V = getAssociatedValue();
8051 
8052     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8053       unionAssumed(ConstantRange(C->getValue()));
8054       indicateOptimisticFixpoint();
8055       return;
8056     }
8057 
8058     if (isa<UndefValue>(&V)) {
8059       // Collapse the undef state to 0.
8060       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8061       indicateOptimisticFixpoint();
8062       return;
8063     }
8064 
8065     if (isa<CallBase>(&V))
8066       return;
8067 
8068     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8069       return;
8070     // If it is a load instruction with range metadata, use it.
8071     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8072       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8073         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8074         return;
8075       }
8076 
8077     // We can work with PHI and select instruction as we traverse their operands
8078     // during update.
8079     if (isa<SelectInst>(V) || isa<PHINode>(V))
8080       return;
8081 
8082     // Otherwise we give up.
8083     indicatePessimisticFixpoint();
8084 
8085     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8086                       << getAssociatedValue() << "\n");
8087   }
8088 
8089   bool calculateBinaryOperator(
8090       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8091       const Instruction *CtxI,
8092       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8093     Value *LHS = BinOp->getOperand(0);
8094     Value *RHS = BinOp->getOperand(1);
8095 
8096     // Simplify the operands first.
8097     bool UsedAssumedInformation = false;
8098     const auto &SimplifiedLHS =
8099         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8100                                *this, UsedAssumedInformation);
8101     if (!SimplifiedLHS.hasValue())
8102       return true;
8103     if (!SimplifiedLHS.getValue())
8104       return false;
8105     LHS = *SimplifiedLHS;
8106 
8107     const auto &SimplifiedRHS =
8108         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8109                                *this, UsedAssumedInformation);
8110     if (!SimplifiedRHS.hasValue())
8111       return true;
8112     if (!SimplifiedRHS.getValue())
8113       return false;
8114     RHS = *SimplifiedRHS;
8115 
8116     // TODO: Allow non integers as well.
8117     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8118       return false;
8119 
8120     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8121         *this, IRPosition::value(*LHS, getCallBaseContext()),
8122         DepClassTy::REQUIRED);
8123     QuerriedAAs.push_back(&LHSAA);
8124     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8125 
8126     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8127         *this, IRPosition::value(*RHS, getCallBaseContext()),
8128         DepClassTy::REQUIRED);
8129     QuerriedAAs.push_back(&RHSAA);
8130     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8131 
8132     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8133 
8134     T.unionAssumed(AssumedRange);
8135 
8136     // TODO: Track a known state too.
8137 
8138     return T.isValidState();
8139   }
8140 
8141   bool calculateCastInst(
8142       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8143       const Instruction *CtxI,
8144       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8145     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8146     // TODO: Allow non integers as well.
8147     Value *OpV = CastI->getOperand(0);
8148 
8149     // Simplify the operand first.
8150     bool UsedAssumedInformation = false;
8151     const auto &SimplifiedOpV =
8152         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8153                                *this, UsedAssumedInformation);
8154     if (!SimplifiedOpV.hasValue())
8155       return true;
8156     if (!SimplifiedOpV.getValue())
8157       return false;
8158     OpV = *SimplifiedOpV;
8159 
8160     if (!OpV->getType()->isIntegerTy())
8161       return false;
8162 
8163     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8164         *this, IRPosition::value(*OpV, getCallBaseContext()),
8165         DepClassTy::REQUIRED);
8166     QuerriedAAs.push_back(&OpAA);
8167     T.unionAssumed(
8168         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8169     return T.isValidState();
8170   }
8171 
8172   bool
8173   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8174                    const Instruction *CtxI,
8175                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8176     Value *LHS = CmpI->getOperand(0);
8177     Value *RHS = CmpI->getOperand(1);
8178 
8179     // Simplify the operands first.
8180     bool UsedAssumedInformation = false;
8181     const auto &SimplifiedLHS =
8182         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8183                                *this, UsedAssumedInformation);
8184     if (!SimplifiedLHS.hasValue())
8185       return true;
8186     if (!SimplifiedLHS.getValue())
8187       return false;
8188     LHS = *SimplifiedLHS;
8189 
8190     const auto &SimplifiedRHS =
8191         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8192                                *this, UsedAssumedInformation);
8193     if (!SimplifiedRHS.hasValue())
8194       return true;
8195     if (!SimplifiedRHS.getValue())
8196       return false;
8197     RHS = *SimplifiedRHS;
8198 
8199     // TODO: Allow non integers as well.
8200     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8201       return false;
8202 
8203     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8204         *this, IRPosition::value(*LHS, getCallBaseContext()),
8205         DepClassTy::REQUIRED);
8206     QuerriedAAs.push_back(&LHSAA);
8207     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8208         *this, IRPosition::value(*RHS, getCallBaseContext()),
8209         DepClassTy::REQUIRED);
8210     QuerriedAAs.push_back(&RHSAA);
8211     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8212     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8213 
8214     // If one of them is empty set, we can't decide.
8215     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8216       return true;
8217 
8218     bool MustTrue = false, MustFalse = false;
8219 
8220     auto AllowedRegion =
8221         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8222 
8223     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8224       MustFalse = true;
8225 
8226     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8227       MustTrue = true;
8228 
8229     assert((!MustTrue || !MustFalse) &&
8230            "Either MustTrue or MustFalse should be false!");
8231 
8232     if (MustTrue)
8233       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8234     else if (MustFalse)
8235       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8236     else
8237       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8238 
8239     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8240                       << " " << RHSAA << "\n");
8241 
8242     // TODO: Track a known state too.
8243     return T.isValidState();
8244   }
8245 
8246   /// See AbstractAttribute::updateImpl(...).
8247   ChangeStatus updateImpl(Attributor &A) override {
8248     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8249                             IntegerRangeState &T, bool Stripped) -> bool {
8250       Instruction *I = dyn_cast<Instruction>(&V);
8251       if (!I || isa<CallBase>(I)) {
8252 
8253         // Simplify the operand first.
8254         bool UsedAssumedInformation = false;
8255         const auto &SimplifiedOpV =
8256             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8257                                    *this, UsedAssumedInformation);
8258         if (!SimplifiedOpV.hasValue())
8259           return true;
8260         if (!SimplifiedOpV.getValue())
8261           return false;
8262         Value *VPtr = *SimplifiedOpV;
8263 
8264         // If the value is not instruction, we query AA to Attributor.
8265         const auto &AA = A.getAAFor<AAValueConstantRange>(
8266             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8267             DepClassTy::REQUIRED);
8268 
8269         // Clamp operator is not used to utilize a program point CtxI.
8270         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8271 
8272         return T.isValidState();
8273       }
8274 
8275       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8276       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8277         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8278           return false;
8279       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8280         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8281           return false;
8282       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8283         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8284           return false;
8285       } else {
8286         // Give up with other instructions.
8287         // TODO: Add other instructions
8288 
8289         T.indicatePessimisticFixpoint();
8290         return false;
8291       }
8292 
8293       // Catch circular reasoning in a pessimistic way for now.
8294       // TODO: Check how the range evolves and if we stripped anything, see also
8295       //       AADereferenceable or AAAlign for similar situations.
8296       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8297         if (QueriedAA != this)
8298           continue;
8299         // If we are in a stady state we do not need to worry.
8300         if (T.getAssumed() == getState().getAssumed())
8301           continue;
8302         T.indicatePessimisticFixpoint();
8303       }
8304 
8305       return T.isValidState();
8306     };
8307 
8308     IntegerRangeState T(getBitWidth());
8309 
8310     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8311                                                   VisitValueCB, getCtxI(),
8312                                                   /* UseValueSimplify */ false))
8313       return indicatePessimisticFixpoint();
8314 
8315     return clampStateAndIndicateChange(getState(), T);
8316   }
8317 
8318   /// See AbstractAttribute::trackStatistics()
8319   void trackStatistics() const override {
8320     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8321   }
8322 };
8323 
8324 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8325   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8326       : AAValueConstantRangeImpl(IRP, A) {}
8327 
8328   /// See AbstractAttribute::initialize(...).
8329   ChangeStatus updateImpl(Attributor &A) override {
8330     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8331                      "not be called");
8332   }
8333 
8334   /// See AbstractAttribute::trackStatistics()
8335   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8336 };
8337 
8338 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8339   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8340       : AAValueConstantRangeFunction(IRP, A) {}
8341 
8342   /// See AbstractAttribute::trackStatistics()
8343   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8344 };
8345 
8346 struct AAValueConstantRangeCallSiteReturned
8347     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8348                                      AAValueConstantRangeImpl,
8349                                      AAValueConstantRangeImpl::StateType,
8350                                      /* IntroduceCallBaseContext */ true> {
8351   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8352       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8353                                        AAValueConstantRangeImpl,
8354                                        AAValueConstantRangeImpl::StateType,
8355                                        /* IntroduceCallBaseContext */ true>(IRP,
8356                                                                             A) {
8357   }
8358 
8359   /// See AbstractAttribute::initialize(...).
8360   void initialize(Attributor &A) override {
8361     // If it is a load instruction with range metadata, use the metadata.
8362     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8363       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8364         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8365 
8366     AAValueConstantRangeImpl::initialize(A);
8367   }
8368 
8369   /// See AbstractAttribute::trackStatistics()
8370   void trackStatistics() const override {
8371     STATS_DECLTRACK_CSRET_ATTR(value_range)
8372   }
8373 };
8374 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8375   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8376       : AAValueConstantRangeFloating(IRP, A) {}
8377 
8378   /// See AbstractAttribute::manifest()
8379   ChangeStatus manifest(Attributor &A) override {
8380     return ChangeStatus::UNCHANGED;
8381   }
8382 
8383   /// See AbstractAttribute::trackStatistics()
8384   void trackStatistics() const override {
8385     STATS_DECLTRACK_CSARG_ATTR(value_range)
8386   }
8387 };
8388 
8389 /// ------------------ Potential Values Attribute -------------------------
8390 
8391 struct AAPotentialValuesImpl : AAPotentialValues {
8392   using StateType = PotentialConstantIntValuesState;
8393 
8394   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8395       : AAPotentialValues(IRP, A) {}
8396 
8397   /// See AbstractAttribute::getAsStr().
8398   const std::string getAsStr() const override {
8399     std::string Str;
8400     llvm::raw_string_ostream OS(Str);
8401     OS << getState();
8402     return OS.str();
8403   }
8404 
8405   /// See AbstractAttribute::updateImpl(...).
8406   ChangeStatus updateImpl(Attributor &A) override {
8407     return indicatePessimisticFixpoint();
8408   }
8409 };
8410 
8411 struct AAPotentialValuesArgument final
8412     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8413                                       PotentialConstantIntValuesState> {
8414   using Base =
8415       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8416                                       PotentialConstantIntValuesState>;
8417   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8418       : Base(IRP, A) {}
8419 
8420   /// See AbstractAttribute::initialize(..).
8421   void initialize(Attributor &A) override {
8422     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8423       indicatePessimisticFixpoint();
8424     } else {
8425       Base::initialize(A);
8426     }
8427   }
8428 
8429   /// See AbstractAttribute::trackStatistics()
8430   void trackStatistics() const override {
8431     STATS_DECLTRACK_ARG_ATTR(potential_values)
8432   }
8433 };
8434 
8435 struct AAPotentialValuesReturned
8436     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8437   using Base =
8438       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8439   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8440       : Base(IRP, A) {}
8441 
8442   /// See AbstractAttribute::trackStatistics()
8443   void trackStatistics() const override {
8444     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8445   }
8446 };
8447 
8448 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8449   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8450       : AAPotentialValuesImpl(IRP, A) {}
8451 
8452   /// See AbstractAttribute::initialize(..).
8453   void initialize(Attributor &A) override {
8454     Value &V = getAssociatedValue();
8455 
8456     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8457       unionAssumed(C->getValue());
8458       indicateOptimisticFixpoint();
8459       return;
8460     }
8461 
8462     if (isa<UndefValue>(&V)) {
8463       unionAssumedWithUndef();
8464       indicateOptimisticFixpoint();
8465       return;
8466     }
8467 
8468     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8469       return;
8470 
8471     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8472       return;
8473 
8474     indicatePessimisticFixpoint();
8475 
8476     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8477                       << getAssociatedValue() << "\n");
8478   }
8479 
8480   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8481                                 const APInt &RHS) {
8482     ICmpInst::Predicate Pred = ICI->getPredicate();
8483     switch (Pred) {
8484     case ICmpInst::ICMP_UGT:
8485       return LHS.ugt(RHS);
8486     case ICmpInst::ICMP_SGT:
8487       return LHS.sgt(RHS);
8488     case ICmpInst::ICMP_EQ:
8489       return LHS.eq(RHS);
8490     case ICmpInst::ICMP_UGE:
8491       return LHS.uge(RHS);
8492     case ICmpInst::ICMP_SGE:
8493       return LHS.sge(RHS);
8494     case ICmpInst::ICMP_ULT:
8495       return LHS.ult(RHS);
8496     case ICmpInst::ICMP_SLT:
8497       return LHS.slt(RHS);
8498     case ICmpInst::ICMP_NE:
8499       return LHS.ne(RHS);
8500     case ICmpInst::ICMP_ULE:
8501       return LHS.ule(RHS);
8502     case ICmpInst::ICMP_SLE:
8503       return LHS.sle(RHS);
8504     default:
8505       llvm_unreachable("Invalid ICmp predicate!");
8506     }
8507   }
8508 
8509   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8510                                  uint32_t ResultBitWidth) {
8511     Instruction::CastOps CastOp = CI->getOpcode();
8512     switch (CastOp) {
8513     default:
8514       llvm_unreachable("unsupported or not integer cast");
8515     case Instruction::Trunc:
8516       return Src.trunc(ResultBitWidth);
8517     case Instruction::SExt:
8518       return Src.sext(ResultBitWidth);
8519     case Instruction::ZExt:
8520       return Src.zext(ResultBitWidth);
8521     case Instruction::BitCast:
8522       return Src;
8523     }
8524   }
8525 
8526   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8527                                        const APInt &LHS, const APInt &RHS,
8528                                        bool &SkipOperation, bool &Unsupported) {
8529     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8530     // Unsupported is set to true when the binary operator is not supported.
8531     // SkipOperation is set to true when UB occur with the given operand pair
8532     // (LHS, RHS).
8533     // TODO: we should look at nsw and nuw keywords to handle operations
8534     //       that create poison or undef value.
8535     switch (BinOpcode) {
8536     default:
8537       Unsupported = true;
8538       return LHS;
8539     case Instruction::Add:
8540       return LHS + RHS;
8541     case Instruction::Sub:
8542       return LHS - RHS;
8543     case Instruction::Mul:
8544       return LHS * RHS;
8545     case Instruction::UDiv:
8546       if (RHS.isNullValue()) {
8547         SkipOperation = true;
8548         return LHS;
8549       }
8550       return LHS.udiv(RHS);
8551     case Instruction::SDiv:
8552       if (RHS.isNullValue()) {
8553         SkipOperation = true;
8554         return LHS;
8555       }
8556       return LHS.sdiv(RHS);
8557     case Instruction::URem:
8558       if (RHS.isNullValue()) {
8559         SkipOperation = true;
8560         return LHS;
8561       }
8562       return LHS.urem(RHS);
8563     case Instruction::SRem:
8564       if (RHS.isNullValue()) {
8565         SkipOperation = true;
8566         return LHS;
8567       }
8568       return LHS.srem(RHS);
8569     case Instruction::Shl:
8570       return LHS.shl(RHS);
8571     case Instruction::LShr:
8572       return LHS.lshr(RHS);
8573     case Instruction::AShr:
8574       return LHS.ashr(RHS);
8575     case Instruction::And:
8576       return LHS & RHS;
8577     case Instruction::Or:
8578       return LHS | RHS;
8579     case Instruction::Xor:
8580       return LHS ^ RHS;
8581     }
8582   }
8583 
8584   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8585                                            const APInt &LHS, const APInt &RHS) {
8586     bool SkipOperation = false;
8587     bool Unsupported = false;
8588     APInt Result =
8589         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8590     if (Unsupported)
8591       return false;
8592     // If SkipOperation is true, we can ignore this operand pair (L, R).
8593     if (!SkipOperation)
8594       unionAssumed(Result);
8595     return isValidState();
8596   }
8597 
8598   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8599     auto AssumedBefore = getAssumed();
8600     Value *LHS = ICI->getOperand(0);
8601     Value *RHS = ICI->getOperand(1);
8602 
8603     // Simplify the operands first.
8604     bool UsedAssumedInformation = false;
8605     const auto &SimplifiedLHS =
8606         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8607                                *this, UsedAssumedInformation);
8608     if (!SimplifiedLHS.hasValue())
8609       return ChangeStatus::UNCHANGED;
8610     if (!SimplifiedLHS.getValue())
8611       return indicatePessimisticFixpoint();
8612     LHS = *SimplifiedLHS;
8613 
8614     const auto &SimplifiedRHS =
8615         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8616                                *this, UsedAssumedInformation);
8617     if (!SimplifiedRHS.hasValue())
8618       return ChangeStatus::UNCHANGED;
8619     if (!SimplifiedRHS.getValue())
8620       return indicatePessimisticFixpoint();
8621     RHS = *SimplifiedRHS;
8622 
8623     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8624       return indicatePessimisticFixpoint();
8625 
8626     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8627                                                 DepClassTy::REQUIRED);
8628     if (!LHSAA.isValidState())
8629       return indicatePessimisticFixpoint();
8630 
8631     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8632                                                 DepClassTy::REQUIRED);
8633     if (!RHSAA.isValidState())
8634       return indicatePessimisticFixpoint();
8635 
8636     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8637     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8638 
8639     // TODO: make use of undef flag to limit potential values aggressively.
8640     bool MaybeTrue = false, MaybeFalse = false;
8641     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8642     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8643       // The result of any comparison between undefs can be soundly replaced
8644       // with undef.
8645       unionAssumedWithUndef();
8646     } else if (LHSAA.undefIsContained()) {
8647       for (const APInt &R : RHSAAPVS) {
8648         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8649         MaybeTrue |= CmpResult;
8650         MaybeFalse |= !CmpResult;
8651         if (MaybeTrue & MaybeFalse)
8652           return indicatePessimisticFixpoint();
8653       }
8654     } else if (RHSAA.undefIsContained()) {
8655       for (const APInt &L : LHSAAPVS) {
8656         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8657         MaybeTrue |= CmpResult;
8658         MaybeFalse |= !CmpResult;
8659         if (MaybeTrue & MaybeFalse)
8660           return indicatePessimisticFixpoint();
8661       }
8662     } else {
8663       for (const APInt &L : LHSAAPVS) {
8664         for (const APInt &R : RHSAAPVS) {
8665           bool CmpResult = calculateICmpInst(ICI, L, R);
8666           MaybeTrue |= CmpResult;
8667           MaybeFalse |= !CmpResult;
8668           if (MaybeTrue & MaybeFalse)
8669             return indicatePessimisticFixpoint();
8670         }
8671       }
8672     }
8673     if (MaybeTrue)
8674       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8675     if (MaybeFalse)
8676       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8677     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8678                                          : ChangeStatus::CHANGED;
8679   }
8680 
8681   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8682     auto AssumedBefore = getAssumed();
8683     Value *LHS = SI->getTrueValue();
8684     Value *RHS = SI->getFalseValue();
8685 
8686     // Simplify the operands first.
8687     bool UsedAssumedInformation = false;
8688     const auto &SimplifiedLHS =
8689         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8690                                *this, UsedAssumedInformation);
8691     if (!SimplifiedLHS.hasValue())
8692       return ChangeStatus::UNCHANGED;
8693     if (!SimplifiedLHS.getValue())
8694       return indicatePessimisticFixpoint();
8695     LHS = *SimplifiedLHS;
8696 
8697     const auto &SimplifiedRHS =
8698         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8699                                *this, UsedAssumedInformation);
8700     if (!SimplifiedRHS.hasValue())
8701       return ChangeStatus::UNCHANGED;
8702     if (!SimplifiedRHS.getValue())
8703       return indicatePessimisticFixpoint();
8704     RHS = *SimplifiedRHS;
8705 
8706     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8707       return indicatePessimisticFixpoint();
8708 
8709     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8710                                                   UsedAssumedInformation);
8711 
8712     // Check if we only need one operand.
8713     bool OnlyLeft = false, OnlyRight = false;
8714     if (C.hasValue() && *C && (*C)->isOneValue())
8715       OnlyLeft = true;
8716     else if (C.hasValue() && *C && (*C)->isZeroValue())
8717       OnlyRight = true;
8718 
8719     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
8720     if (!OnlyRight) {
8721       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8722                                              DepClassTy::REQUIRED);
8723       if (!LHSAA->isValidState())
8724         return indicatePessimisticFixpoint();
8725     }
8726     if (!OnlyLeft) {
8727       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8728                                              DepClassTy::REQUIRED);
8729       if (!RHSAA->isValidState())
8730         return indicatePessimisticFixpoint();
8731     }
8732 
8733     if (!LHSAA || !RHSAA) {
8734       // select (true/false), lhs, rhs
8735       auto *OpAA = LHSAA ? LHSAA : RHSAA;
8736 
8737       if (OpAA->undefIsContained())
8738         unionAssumedWithUndef();
8739       else
8740         unionAssumed(*OpAA);
8741 
8742     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
8743       // select i1 *, undef , undef => undef
8744       unionAssumedWithUndef();
8745     } else {
8746       unionAssumed(*LHSAA);
8747       unionAssumed(*RHSAA);
8748     }
8749     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8750                                          : ChangeStatus::CHANGED;
8751   }
8752 
8753   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8754     auto AssumedBefore = getAssumed();
8755     if (!CI->isIntegerCast())
8756       return indicatePessimisticFixpoint();
8757     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8758     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8759     Value *Src = CI->getOperand(0);
8760 
8761     // Simplify the operand first.
8762     bool UsedAssumedInformation = false;
8763     const auto &SimplifiedSrc =
8764         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
8765                                *this, UsedAssumedInformation);
8766     if (!SimplifiedSrc.hasValue())
8767       return ChangeStatus::UNCHANGED;
8768     if (!SimplifiedSrc.getValue())
8769       return indicatePessimisticFixpoint();
8770     Src = *SimplifiedSrc;
8771 
8772     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
8773                                                 DepClassTy::REQUIRED);
8774     if (!SrcAA.isValidState())
8775       return indicatePessimisticFixpoint();
8776     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
8777     if (SrcAA.undefIsContained())
8778       unionAssumedWithUndef();
8779     else {
8780       for (const APInt &S : SrcAAPVS) {
8781         APInt T = calculateCastInst(CI, S, ResultBitWidth);
8782         unionAssumed(T);
8783       }
8784     }
8785     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8786                                          : ChangeStatus::CHANGED;
8787   }
8788 
8789   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
8790     auto AssumedBefore = getAssumed();
8791     Value *LHS = BinOp->getOperand(0);
8792     Value *RHS = BinOp->getOperand(1);
8793 
8794     // Simplify the operands first.
8795     bool UsedAssumedInformation = false;
8796     const auto &SimplifiedLHS =
8797         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8798                                *this, UsedAssumedInformation);
8799     if (!SimplifiedLHS.hasValue())
8800       return ChangeStatus::UNCHANGED;
8801     if (!SimplifiedLHS.getValue())
8802       return indicatePessimisticFixpoint();
8803     LHS = *SimplifiedLHS;
8804 
8805     const auto &SimplifiedRHS =
8806         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8807                                *this, UsedAssumedInformation);
8808     if (!SimplifiedRHS.hasValue())
8809       return ChangeStatus::UNCHANGED;
8810     if (!SimplifiedRHS.getValue())
8811       return indicatePessimisticFixpoint();
8812     RHS = *SimplifiedRHS;
8813 
8814     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8815       return indicatePessimisticFixpoint();
8816 
8817     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8818                                                 DepClassTy::REQUIRED);
8819     if (!LHSAA.isValidState())
8820       return indicatePessimisticFixpoint();
8821 
8822     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8823                                                 DepClassTy::REQUIRED);
8824     if (!RHSAA.isValidState())
8825       return indicatePessimisticFixpoint();
8826 
8827     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8828     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8829     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
8830 
8831     // TODO: make use of undef flag to limit potential values aggressively.
8832     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8833       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
8834         return indicatePessimisticFixpoint();
8835     } else if (LHSAA.undefIsContained()) {
8836       for (const APInt &R : RHSAAPVS) {
8837         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
8838           return indicatePessimisticFixpoint();
8839       }
8840     } else if (RHSAA.undefIsContained()) {
8841       for (const APInt &L : LHSAAPVS) {
8842         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
8843           return indicatePessimisticFixpoint();
8844       }
8845     } else {
8846       for (const APInt &L : LHSAAPVS) {
8847         for (const APInt &R : RHSAAPVS) {
8848           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
8849             return indicatePessimisticFixpoint();
8850         }
8851       }
8852     }
8853     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8854                                          : ChangeStatus::CHANGED;
8855   }
8856 
8857   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
8858     auto AssumedBefore = getAssumed();
8859     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
8860       Value *IncomingValue = PHI->getIncomingValue(u);
8861 
8862       // Simplify the operand first.
8863       bool UsedAssumedInformation = false;
8864       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
8865           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
8866           UsedAssumedInformation);
8867       if (!SimplifiedIncomingValue.hasValue())
8868         continue;
8869       if (!SimplifiedIncomingValue.getValue())
8870         return indicatePessimisticFixpoint();
8871       IncomingValue = *SimplifiedIncomingValue;
8872 
8873       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
8874           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
8875       if (!PotentialValuesAA.isValidState())
8876         return indicatePessimisticFixpoint();
8877       if (PotentialValuesAA.undefIsContained())
8878         unionAssumedWithUndef();
8879       else
8880         unionAssumed(PotentialValuesAA.getAssumed());
8881     }
8882     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8883                                          : ChangeStatus::CHANGED;
8884   }
8885 
8886   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
8887     if (!L.getType()->isIntegerTy())
8888       return indicatePessimisticFixpoint();
8889 
8890     auto Union = [&](Value &V) {
8891       if (isa<UndefValue>(V)) {
8892         unionAssumedWithUndef();
8893         return true;
8894       }
8895       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
8896         unionAssumed(CI->getValue());
8897         return true;
8898       }
8899       return false;
8900     };
8901     auto AssumedBefore = getAssumed();
8902 
8903     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
8904       return indicatePessimisticFixpoint();
8905 
8906     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8907                                          : ChangeStatus::CHANGED;
8908   }
8909 
8910   /// See AbstractAttribute::updateImpl(...).
8911   ChangeStatus updateImpl(Attributor &A) override {
8912     Value &V = getAssociatedValue();
8913     Instruction *I = dyn_cast<Instruction>(&V);
8914 
8915     if (auto *ICI = dyn_cast<ICmpInst>(I))
8916       return updateWithICmpInst(A, ICI);
8917 
8918     if (auto *SI = dyn_cast<SelectInst>(I))
8919       return updateWithSelectInst(A, SI);
8920 
8921     if (auto *CI = dyn_cast<CastInst>(I))
8922       return updateWithCastInst(A, CI);
8923 
8924     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
8925       return updateWithBinaryOperator(A, BinOp);
8926 
8927     if (auto *PHI = dyn_cast<PHINode>(I))
8928       return updateWithPHINode(A, PHI);
8929 
8930     if (auto *L = dyn_cast<LoadInst>(I))
8931       return updateWithLoad(A, *L);
8932 
8933     return indicatePessimisticFixpoint();
8934   }
8935 
8936   /// See AbstractAttribute::trackStatistics()
8937   void trackStatistics() const override {
8938     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
8939   }
8940 };
8941 
8942 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
8943   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
8944       : AAPotentialValuesImpl(IRP, A) {}
8945 
8946   /// See AbstractAttribute::initialize(...).
8947   ChangeStatus updateImpl(Attributor &A) override {
8948     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
8949                      "not be called");
8950   }
8951 
8952   /// See AbstractAttribute::trackStatistics()
8953   void trackStatistics() const override {
8954     STATS_DECLTRACK_FN_ATTR(potential_values)
8955   }
8956 };
8957 
8958 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
8959   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
8960       : AAPotentialValuesFunction(IRP, A) {}
8961 
8962   /// See AbstractAttribute::trackStatistics()
8963   void trackStatistics() const override {
8964     STATS_DECLTRACK_CS_ATTR(potential_values)
8965   }
8966 };
8967 
8968 struct AAPotentialValuesCallSiteReturned
8969     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
8970   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
8971       : AACallSiteReturnedFromReturned<AAPotentialValues,
8972                                        AAPotentialValuesImpl>(IRP, A) {}
8973 
8974   /// See AbstractAttribute::trackStatistics()
8975   void trackStatistics() const override {
8976     STATS_DECLTRACK_CSRET_ATTR(potential_values)
8977   }
8978 };
8979 
8980 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
8981   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
8982       : AAPotentialValuesFloating(IRP, A) {}
8983 
8984   /// See AbstractAttribute::initialize(..).
8985   void initialize(Attributor &A) override {
8986     Value &V = getAssociatedValue();
8987 
8988     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8989       unionAssumed(C->getValue());
8990       indicateOptimisticFixpoint();
8991       return;
8992     }
8993 
8994     if (isa<UndefValue>(&V)) {
8995       unionAssumedWithUndef();
8996       indicateOptimisticFixpoint();
8997       return;
8998     }
8999   }
9000 
9001   /// See AbstractAttribute::updateImpl(...).
9002   ChangeStatus updateImpl(Attributor &A) override {
9003     Value &V = getAssociatedValue();
9004     auto AssumedBefore = getAssumed();
9005     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9006                                              DepClassTy::REQUIRED);
9007     const auto &S = AA.getAssumed();
9008     unionAssumed(S);
9009     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9010                                          : ChangeStatus::CHANGED;
9011   }
9012 
9013   /// See AbstractAttribute::trackStatistics()
9014   void trackStatistics() const override {
9015     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9016   }
9017 };
9018 
9019 /// ------------------------ NoUndef Attribute ---------------------------------
9020 struct AANoUndefImpl : AANoUndef {
9021   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9022 
9023   /// See AbstractAttribute::initialize(...).
9024   void initialize(Attributor &A) override {
9025     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9026       indicateOptimisticFixpoint();
9027       return;
9028     }
9029     Value &V = getAssociatedValue();
9030     if (isa<UndefValue>(V))
9031       indicatePessimisticFixpoint();
9032     else if (isa<FreezeInst>(V))
9033       indicateOptimisticFixpoint();
9034     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9035              isGuaranteedNotToBeUndefOrPoison(&V))
9036       indicateOptimisticFixpoint();
9037     else
9038       AANoUndef::initialize(A);
9039   }
9040 
9041   /// See followUsesInMBEC
9042   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9043                        AANoUndef::StateType &State) {
9044     const Value *UseV = U->get();
9045     const DominatorTree *DT = nullptr;
9046     AssumptionCache *AC = nullptr;
9047     InformationCache &InfoCache = A.getInfoCache();
9048     if (Function *F = getAnchorScope()) {
9049       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9050       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9051     }
9052     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9053     bool TrackUse = false;
9054     // Track use for instructions which must produce undef or poison bits when
9055     // at least one operand contains such bits.
9056     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9057       TrackUse = true;
9058     return TrackUse;
9059   }
9060 
9061   /// See AbstractAttribute::getAsStr().
9062   const std::string getAsStr() const override {
9063     return getAssumed() ? "noundef" : "may-undef-or-poison";
9064   }
9065 
9066   ChangeStatus manifest(Attributor &A) override {
9067     // We don't manifest noundef attribute for dead positions because the
9068     // associated values with dead positions would be replaced with undef
9069     // values.
9070     bool UsedAssumedInformation = false;
9071     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9072                         UsedAssumedInformation))
9073       return ChangeStatus::UNCHANGED;
9074     // A position whose simplified value does not have any value is
9075     // considered to be dead. We don't manifest noundef in such positions for
9076     // the same reason above.
9077     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9078              .hasValue())
9079       return ChangeStatus::UNCHANGED;
9080     return AANoUndef::manifest(A);
9081   }
9082 };
9083 
9084 struct AANoUndefFloating : public AANoUndefImpl {
9085   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9086       : AANoUndefImpl(IRP, A) {}
9087 
9088   /// See AbstractAttribute::initialize(...).
9089   void initialize(Attributor &A) override {
9090     AANoUndefImpl::initialize(A);
9091     if (!getState().isAtFixpoint())
9092       if (Instruction *CtxI = getCtxI())
9093         followUsesInMBEC(*this, A, getState(), *CtxI);
9094   }
9095 
9096   /// See AbstractAttribute::updateImpl(...).
9097   ChangeStatus updateImpl(Attributor &A) override {
9098     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9099                             AANoUndef::StateType &T, bool Stripped) -> bool {
9100       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9101                                              DepClassTy::REQUIRED);
9102       if (!Stripped && this == &AA) {
9103         T.indicatePessimisticFixpoint();
9104       } else {
9105         const AANoUndef::StateType &S =
9106             static_cast<const AANoUndef::StateType &>(AA.getState());
9107         T ^= S;
9108       }
9109       return T.isValidState();
9110     };
9111 
9112     StateType T;
9113     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9114                                           VisitValueCB, getCtxI()))
9115       return indicatePessimisticFixpoint();
9116 
9117     return clampStateAndIndicateChange(getState(), T);
9118   }
9119 
9120   /// See AbstractAttribute::trackStatistics()
9121   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9122 };
9123 
9124 struct AANoUndefReturned final
9125     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9126   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9127       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9128 
9129   /// See AbstractAttribute::trackStatistics()
9130   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9131 };
9132 
9133 struct AANoUndefArgument final
9134     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9135   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9136       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9137 
9138   /// See AbstractAttribute::trackStatistics()
9139   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9140 };
9141 
9142 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9143   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9144       : AANoUndefFloating(IRP, A) {}
9145 
9146   /// See AbstractAttribute::trackStatistics()
9147   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9148 };
9149 
9150 struct AANoUndefCallSiteReturned final
9151     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9152   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9153       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9154 
9155   /// See AbstractAttribute::trackStatistics()
9156   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9157 };
9158 
9159 struct AACallEdgesFunction : public AACallEdges {
9160   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9161       : AACallEdges(IRP, A) {}
9162 
9163   /// See AbstractAttribute::updateImpl(...).
9164   ChangeStatus updateImpl(Attributor &A) override {
9165     ChangeStatus Change = ChangeStatus::UNCHANGED;
9166     bool OldHasUnknownCallee = HasUnknownCallee;
9167     bool OldHasUnknownCalleeNonAsm = HasUnknownCalleeNonAsm;
9168 
9169     auto AddCalledFunction = [&](Function *Fn) {
9170       if (CalledFunctions.insert(Fn)) {
9171         Change = ChangeStatus::CHANGED;
9172         LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9173                           << "\n");
9174       }
9175     };
9176 
9177     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9178                           bool Stripped) -> bool {
9179       if (Function *Fn = dyn_cast<Function>(&V)) {
9180         AddCalledFunction(Fn);
9181       } else {
9182         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9183         HasUnknown = true;
9184         HasUnknownCalleeNonAsm = true;
9185       }
9186 
9187       // Explore all values.
9188       return true;
9189     };
9190 
9191     // Process any value that we might call.
9192     auto ProcessCalledOperand = [&](Value *V, Instruction *Ctx) {
9193       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9194                                        HasUnknownCallee, VisitValue, nullptr,
9195                                        false)) {
9196         // If we haven't gone through all values, assume that there are unknown
9197         // callees.
9198         HasUnknownCallee = true;
9199         HasUnknownCalleeNonAsm = true;
9200       }
9201     };
9202 
9203     auto ProcessCallInst = [&](Instruction &Inst) {
9204       CallBase &CB = static_cast<CallBase &>(Inst);
9205       if (CB.isInlineAsm()) {
9206         HasUnknownCallee = true;
9207         return true;
9208       }
9209 
9210       // Process callee metadata if available.
9211       if (auto *MD = Inst.getMetadata(LLVMContext::MD_callees)) {
9212         for (auto &Op : MD->operands()) {
9213           Function *Callee = mdconst::extract_or_null<Function>(Op);
9214           if (Callee)
9215             AddCalledFunction(Callee);
9216         }
9217         // Callees metadata grantees that the called function is one of its
9218         // operands, So we are done.
9219         return true;
9220       }
9221 
9222       // The most simple case.
9223       ProcessCalledOperand(CB.getCalledOperand(), &Inst);
9224 
9225       // Process callback functions.
9226       SmallVector<const Use *, 4u> CallbackUses;
9227       AbstractCallSite::getCallbackUses(CB, CallbackUses);
9228       for (const Use *U : CallbackUses)
9229         ProcessCalledOperand(U->get(), &Inst);
9230 
9231       return true;
9232     };
9233 
9234     // Visit all callable instructions.
9235     bool UsedAssumedInformation = false;
9236     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9237                                            UsedAssumedInformation)) {
9238       // If we haven't looked at all call like instructions, assume that there
9239       // are unknown callees.
9240       HasUnknownCallee = true;
9241       HasUnknownCalleeNonAsm = true;
9242     }
9243 
9244     // Track changes.
9245     if (OldHasUnknownCallee != HasUnknownCallee ||
9246         OldHasUnknownCalleeNonAsm != HasUnknownCalleeNonAsm)
9247       Change = ChangeStatus::CHANGED;
9248 
9249     return Change;
9250   }
9251 
9252   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9253     return CalledFunctions;
9254   };
9255 
9256   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9257 
9258   virtual bool hasNonAsmUnknownCallee() const override {
9259     return HasUnknownCalleeNonAsm;
9260   }
9261 
9262   const std::string getAsStr() const override {
9263     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9264            std::to_string(CalledFunctions.size()) + "]";
9265   }
9266 
9267   void trackStatistics() const override {}
9268 
9269   /// Optimistic set of functions that might be called by this function.
9270   SetVector<Function *> CalledFunctions;
9271 
9272   /// Is there any call with a unknown callee.
9273   bool HasUnknownCallee = false;
9274 
9275   /// Is there any call with a unknown callee, excluding any inline asm.
9276   bool HasUnknownCalleeNonAsm = false;
9277 };
9278 
9279 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9280   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9281       : AAFunctionReachability(IRP, A) {}
9282 
9283   bool canReach(Attributor &A, Function *Fn) const override {
9284     // Assume that we can reach any function if we can reach a call with
9285     // unknown callee.
9286     if (CanReachUnknownCallee)
9287       return true;
9288 
9289     if (ReachableQueries.count(Fn))
9290       return true;
9291 
9292     if (UnreachableQueries.count(Fn))
9293       return false;
9294 
9295     const AACallEdges &AAEdges =
9296         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9297 
9298     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
9299     bool Result = checkIfReachable(A, Edges, Fn);
9300 
9301     // Attributor returns attributes as const, so this function has to be
9302     // const for users of this attribute to use it without having to do
9303     // a const_cast.
9304     // This is a hack for us to be able to cache queries.
9305     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9306 
9307     if (Result)
9308       NonConstThis->ReachableQueries.insert(Fn);
9309     else
9310       NonConstThis->UnreachableQueries.insert(Fn);
9311 
9312     return Result;
9313   }
9314 
9315   /// See AbstractAttribute::updateImpl(...).
9316   ChangeStatus updateImpl(Attributor &A) override {
9317     if (CanReachUnknownCallee)
9318       return ChangeStatus::UNCHANGED;
9319 
9320     const AACallEdges &AAEdges =
9321         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9322     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
9323     ChangeStatus Change = ChangeStatus::UNCHANGED;
9324 
9325     if (AAEdges.hasUnknownCallee()) {
9326       bool OldCanReachUnknown = CanReachUnknownCallee;
9327       CanReachUnknownCallee = true;
9328       return OldCanReachUnknown ? ChangeStatus::UNCHANGED
9329                                 : ChangeStatus::CHANGED;
9330     }
9331 
9332     // Check if any of the unreachable functions become reachable.
9333     for (auto Current = UnreachableQueries.begin();
9334          Current != UnreachableQueries.end();) {
9335       if (!checkIfReachable(A, Edges, *Current)) {
9336         Current++;
9337         continue;
9338       }
9339       ReachableQueries.insert(*Current);
9340       UnreachableQueries.erase(*Current++);
9341       Change = ChangeStatus::CHANGED;
9342     }
9343 
9344     return Change;
9345   }
9346 
9347   const std::string getAsStr() const override {
9348     size_t QueryCount = ReachableQueries.size() + UnreachableQueries.size();
9349 
9350     return "FunctionReachability [" + std::to_string(ReachableQueries.size()) +
9351            "," + std::to_string(QueryCount) + "]";
9352   }
9353 
9354   void trackStatistics() const override {}
9355 
9356 private:
9357   bool canReachUnknownCallee() const override { return CanReachUnknownCallee; }
9358 
9359   bool checkIfReachable(Attributor &A, const SetVector<Function *> &Edges,
9360                         Function *Fn) const {
9361     if (Edges.count(Fn))
9362       return true;
9363 
9364     for (Function *Edge : Edges) {
9365       // We don't need a dependency if the result is reachable.
9366       const AAFunctionReachability &EdgeReachability =
9367           A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Edge),
9368                                              DepClassTy::NONE);
9369 
9370       if (EdgeReachability.canReach(A, Fn))
9371         return true;
9372     }
9373     for (Function *Fn : Edges)
9374       A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Fn),
9375                                          DepClassTy::REQUIRED);
9376 
9377     return false;
9378   }
9379 
9380   /// Set of functions that we know for sure is reachable.
9381   SmallPtrSet<Function *, 8> ReachableQueries;
9382 
9383   /// Set of functions that are unreachable, but might become reachable.
9384   SmallPtrSet<Function *, 8> UnreachableQueries;
9385 
9386   /// If we can reach a function with a call to a unknown function we assume
9387   /// that we can reach any function.
9388   bool CanReachUnknownCallee = false;
9389 };
9390 
9391 } // namespace
9392 
9393 AACallGraphNode *AACallEdgeIterator::operator*() const {
9394   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
9395       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
9396 }
9397 
9398 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
9399 
9400 const char AAReturnedValues::ID = 0;
9401 const char AANoUnwind::ID = 0;
9402 const char AANoSync::ID = 0;
9403 const char AANoFree::ID = 0;
9404 const char AANonNull::ID = 0;
9405 const char AANoRecurse::ID = 0;
9406 const char AAWillReturn::ID = 0;
9407 const char AAUndefinedBehavior::ID = 0;
9408 const char AANoAlias::ID = 0;
9409 const char AAReachability::ID = 0;
9410 const char AANoReturn::ID = 0;
9411 const char AAIsDead::ID = 0;
9412 const char AADereferenceable::ID = 0;
9413 const char AAAlign::ID = 0;
9414 const char AANoCapture::ID = 0;
9415 const char AAValueSimplify::ID = 0;
9416 const char AAHeapToStack::ID = 0;
9417 const char AAPrivatizablePtr::ID = 0;
9418 const char AAMemoryBehavior::ID = 0;
9419 const char AAMemoryLocation::ID = 0;
9420 const char AAValueConstantRange::ID = 0;
9421 const char AAPotentialValues::ID = 0;
9422 const char AANoUndef::ID = 0;
9423 const char AACallEdges::ID = 0;
9424 const char AAFunctionReachability::ID = 0;
9425 const char AAPointerInfo::ID = 0;
9426 
9427 // Macro magic to create the static generator function for attributes that
9428 // follow the naming scheme.
9429 
9430 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
9431   case IRPosition::PK:                                                         \
9432     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
9433 
9434 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
9435   case IRPosition::PK:                                                         \
9436     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
9437     ++NumAAs;                                                                  \
9438     break;
9439 
9440 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
9441   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9442     CLASS *AA = nullptr;                                                       \
9443     switch (IRP.getPositionKind()) {                                           \
9444       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9445       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9446       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9447       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9448       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9449       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9450       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9451       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9452     }                                                                          \
9453     return *AA;                                                                \
9454   }
9455 
9456 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
9457   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9458     CLASS *AA = nullptr;                                                       \
9459     switch (IRP.getPositionKind()) {                                           \
9460       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9461       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
9462       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9463       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9464       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9465       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9466       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9467       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9468     }                                                                          \
9469     return *AA;                                                                \
9470   }
9471 
9472 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
9473   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9474     CLASS *AA = nullptr;                                                       \
9475     switch (IRP.getPositionKind()) {                                           \
9476       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9477       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9478       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9479       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9480       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9481       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9482       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9483       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9484     }                                                                          \
9485     return *AA;                                                                \
9486   }
9487 
9488 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
9489   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9490     CLASS *AA = nullptr;                                                       \
9491     switch (IRP.getPositionKind()) {                                           \
9492       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9493       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9494       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9495       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9496       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9497       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9498       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9499       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9500     }                                                                          \
9501     return *AA;                                                                \
9502   }
9503 
9504 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
9505   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9506     CLASS *AA = nullptr;                                                       \
9507     switch (IRP.getPositionKind()) {                                           \
9508       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9509       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9510       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9511       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9512       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9513       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9514       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9515       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9516     }                                                                          \
9517     return *AA;                                                                \
9518   }
9519 
9520 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
9521 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
9522 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
9523 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
9524 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
9525 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
9526 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
9527 
9528 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
9529 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
9530 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
9531 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
9532 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
9533 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
9534 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
9535 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
9536 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
9537 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
9538 
9539 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
9540 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
9541 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
9542 
9543 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
9544 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
9545 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
9546 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
9547 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
9548 
9549 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
9550 
9551 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
9552 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
9553 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
9554 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
9555 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
9556 #undef SWITCH_PK_CREATE
9557 #undef SWITCH_PK_INV
9558