1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an inter procedural pass that deduces and/or propagating
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CFG.h"
32 #include "llvm/IR/InstIterator.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/InitializePasses.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/raw_ostream.h"
38 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
39 #include "llvm/Transforms/Utils/Local.h"
40 
41 #include <cassert>
42 
43 using namespace llvm;
44 
45 #define DEBUG_TYPE "attributor"
46 
47 STATISTIC(NumFnWithExactDefinition,
48           "Number of function with exact definitions");
49 STATISTIC(NumFnWithoutExactDefinition,
50           "Number of function without exact definitions");
51 STATISTIC(NumAttributesTimedOut,
52           "Number of abstract attributes timed out before fixpoint");
53 STATISTIC(NumAttributesValidFixpoint,
54           "Number of abstract attributes in a valid fixpoint state");
55 STATISTIC(NumAttributesManifested,
56           "Number of abstract attributes manifested in IR");
57 STATISTIC(NumAttributesFixedDueToRequiredDependences,
58           "Number of abstract attributes fixed due to required dependences");
59 
60 // Some helper macros to deal with statistics tracking.
61 //
62 // Usage:
63 // For simple IR attribute tracking overload trackStatistics in the abstract
64 // attribute and choose the right STATS_DECLTRACK_********* macro,
65 // e.g.,:
66 //  void trackStatistics() const override {
67 //    STATS_DECLTRACK_ARG_ATTR(returned)
68 //  }
69 // If there is a single "increment" side one can use the macro
70 // STATS_DECLTRACK with a custom message. If there are multiple increment
71 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
72 //
73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
74   ("Number of " #TYPE " marked '" #NAME "'")
75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
77 #define STATS_DECL(NAME, TYPE, MSG)                                            \
78   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
80 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
81   {                                                                            \
82     STATS_DECL(NAME, TYPE, MSG)                                                \
83     STATS_TRACK(NAME, TYPE)                                                    \
84   }
85 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
86   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
87 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
88   STATS_DECLTRACK(NAME, CSArguments,                                           \
89                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
90 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
91   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
92 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
94 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
95   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
96                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
97 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, CSReturn,                                              \
99                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
101   STATS_DECLTRACK(NAME, Floating,                                              \
102                   ("Number of floating values known to be '" #NAME "'"))
103 
104 // TODO: Determine a good default value.
105 //
106 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
107 // (when run with the first 5 abstract attributes). The results also indicate
108 // that we never reach 32 iterations but always find a fixpoint sooner.
109 //
110 // This will become more evolved once we perform two interleaved fixpoint
111 // iterations: bottom-up and top-down.
112 static cl::opt<unsigned>
113     MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
114                           cl::desc("Maximal number of fixpoint iterations."),
115                           cl::init(32));
116 static cl::opt<bool> VerifyMaxFixpointIterations(
117     "attributor-max-iterations-verify", cl::Hidden,
118     cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
119     cl::init(false));
120 
121 static cl::opt<bool> DisableAttributor(
122     "attributor-disable", cl::Hidden,
123     cl::desc("Disable the attributor inter-procedural deduction pass."),
124     cl::init(true));
125 
126 static cl::opt<bool> AnnotateDeclarationCallSites(
127     "attributor-annotate-decl-cs", cl::Hidden,
128     cl::desc("Annoate call sites of function declarations."), cl::init(false));
129 
130 static cl::opt<bool> ManifestInternal(
131     "attributor-manifest-internal", cl::Hidden,
132     cl::desc("Manifest Attributor internal string attributes."),
133     cl::init(false));
134 
135 static cl::opt<unsigned> DepRecInterval(
136     "attributor-dependence-recompute-interval", cl::Hidden,
137     cl::desc("Number of iterations until dependences are recomputed."),
138     cl::init(4));
139 
140 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
141                                        cl::init(true), cl::Hidden);
142 
143 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
144                                        cl::Hidden);
145 
146 /// Logic operators for the change status enum class.
147 ///
148 ///{
149 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
150   return l == ChangeStatus::CHANGED ? l : r;
151 }
152 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
153   return l == ChangeStatus::UNCHANGED ? l : r;
154 }
155 ///}
156 
157 /// Recursively visit all values that might become \p IRP at some point. This
158 /// will be done by looking through cast instructions, selects, phis, and calls
159 /// with the "returned" attribute. Once we cannot look through the value any
160 /// further, the callback \p VisitValueCB is invoked and passed the current
161 /// value, the \p State, and a flag to indicate if we stripped anything. To
162 /// limit how much effort is invested, we will never visit more values than
163 /// specified by \p MaxValues.
164 template <typename AAType, typename StateTy>
165 static bool genericValueTraversal(
166     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
167     const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
168     int MaxValues = 8) {
169 
170   const AAIsDead *LivenessAA = nullptr;
171   if (IRP.getAnchorScope())
172     LivenessAA = &A.getAAFor<AAIsDead>(
173         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
174         /* TrackDependence */ false);
175   bool AnyDead = false;
176 
177   // TODO: Use Positions here to allow context sensitivity in VisitValueCB
178   SmallPtrSet<Value *, 16> Visited;
179   SmallVector<Value *, 16> Worklist;
180   Worklist.push_back(&IRP.getAssociatedValue());
181 
182   int Iteration = 0;
183   do {
184     Value *V = Worklist.pop_back_val();
185 
186     // Check if we should process the current value. To prevent endless
187     // recursion keep a record of the values we followed!
188     if (!Visited.insert(V).second)
189       continue;
190 
191     // Make sure we limit the compile time for complex expressions.
192     if (Iteration++ >= MaxValues)
193       return false;
194 
195     // Explicitly look through calls with a "returned" attribute if we do
196     // not have a pointer as stripPointerCasts only works on them.
197     Value *NewV = nullptr;
198     if (V->getType()->isPointerTy()) {
199       NewV = V->stripPointerCasts();
200     } else {
201       CallSite CS(V);
202       if (CS && CS.getCalledFunction()) {
203         for (Argument &Arg : CS.getCalledFunction()->args())
204           if (Arg.hasReturnedAttr()) {
205             NewV = CS.getArgOperand(Arg.getArgNo());
206             break;
207           }
208       }
209     }
210     if (NewV && NewV != V) {
211       Worklist.push_back(NewV);
212       continue;
213     }
214 
215     // Look through select instructions, visit both potential values.
216     if (auto *SI = dyn_cast<SelectInst>(V)) {
217       Worklist.push_back(SI->getTrueValue());
218       Worklist.push_back(SI->getFalseValue());
219       continue;
220     }
221 
222     // Look through phi nodes, visit all live operands.
223     if (auto *PHI = dyn_cast<PHINode>(V)) {
224       assert(LivenessAA &&
225              "Expected liveness in the presence of instructions!");
226       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
227         const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
228         if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
229           AnyDead = true;
230           continue;
231         }
232         Worklist.push_back(PHI->getIncomingValue(u));
233       }
234       continue;
235     }
236 
237     // Once a leaf is reached we inform the user through the callback.
238     if (!VisitValueCB(*V, State, Iteration > 1))
239       return false;
240   } while (!Worklist.empty());
241 
242   // If we actually used liveness information so we have to record a dependence.
243   if (AnyDead)
244     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
245 
246   // All values have been visited.
247   return true;
248 }
249 
250 /// Return true if \p New is equal or worse than \p Old.
251 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
252   if (!Old.isIntAttribute())
253     return true;
254 
255   return Old.getValueAsInt() >= New.getValueAsInt();
256 }
257 
258 /// Return true if the information provided by \p Attr was added to the
259 /// attribute list \p Attrs. This is only the case if it was not already present
260 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
261 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
262                              AttributeList &Attrs, int AttrIdx) {
263 
264   if (Attr.isEnumAttribute()) {
265     Attribute::AttrKind Kind = Attr.getKindAsEnum();
266     if (Attrs.hasAttribute(AttrIdx, Kind))
267       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
268         return false;
269     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
270     return true;
271   }
272   if (Attr.isStringAttribute()) {
273     StringRef Kind = Attr.getKindAsString();
274     if (Attrs.hasAttribute(AttrIdx, Kind))
275       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
276         return false;
277     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
278     return true;
279   }
280   if (Attr.isIntAttribute()) {
281     Attribute::AttrKind Kind = Attr.getKindAsEnum();
282     if (Attrs.hasAttribute(AttrIdx, Kind))
283       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
284         return false;
285     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
286     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
287     return true;
288   }
289 
290   llvm_unreachable("Expected enum or string attribute!");
291 }
292 static const Value *getPointerOperand(const Instruction *I) {
293   if (auto *LI = dyn_cast<LoadInst>(I))
294     if (!LI->isVolatile())
295       return LI->getPointerOperand();
296 
297   if (auto *SI = dyn_cast<StoreInst>(I))
298     if (!SI->isVolatile())
299       return SI->getPointerOperand();
300 
301   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
302     if (!CXI->isVolatile())
303       return CXI->getPointerOperand();
304 
305   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
306     if (!RMWI->isVolatile())
307       return RMWI->getPointerOperand();
308 
309   return nullptr;
310 }
311 static const Value *
312 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
313                                      const DataLayout &DL,
314                                      bool AllowNonInbounds = false) {
315   const Value *Ptr = getPointerOperand(I);
316   if (!Ptr)
317     return nullptr;
318 
319   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
320                                           AllowNonInbounds);
321 }
322 
323 ChangeStatus AbstractAttribute::update(Attributor &A) {
324   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
325   if (getState().isAtFixpoint())
326     return HasChanged;
327 
328   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
329 
330   HasChanged = updateImpl(A);
331 
332   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
333                     << "\n");
334 
335   return HasChanged;
336 }
337 
338 ChangeStatus
339 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
340                                    const ArrayRef<Attribute> &DeducedAttrs) {
341   Function *ScopeFn = IRP.getAssociatedFunction();
342   IRPosition::Kind PK = IRP.getPositionKind();
343 
344   // In the following some generic code that will manifest attributes in
345   // DeducedAttrs if they improve the current IR. Due to the different
346   // annotation positions we use the underlying AttributeList interface.
347 
348   AttributeList Attrs;
349   switch (PK) {
350   case IRPosition::IRP_INVALID:
351   case IRPosition::IRP_FLOAT:
352     return ChangeStatus::UNCHANGED;
353   case IRPosition::IRP_ARGUMENT:
354   case IRPosition::IRP_FUNCTION:
355   case IRPosition::IRP_RETURNED:
356     Attrs = ScopeFn->getAttributes();
357     break;
358   case IRPosition::IRP_CALL_SITE:
359   case IRPosition::IRP_CALL_SITE_RETURNED:
360   case IRPosition::IRP_CALL_SITE_ARGUMENT:
361     Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
362     break;
363   }
364 
365   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
366   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
367   for (const Attribute &Attr : DeducedAttrs) {
368     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
369       continue;
370 
371     HasChanged = ChangeStatus::CHANGED;
372   }
373 
374   if (HasChanged == ChangeStatus::UNCHANGED)
375     return HasChanged;
376 
377   switch (PK) {
378   case IRPosition::IRP_ARGUMENT:
379   case IRPosition::IRP_FUNCTION:
380   case IRPosition::IRP_RETURNED:
381     ScopeFn->setAttributes(Attrs);
382     break;
383   case IRPosition::IRP_CALL_SITE:
384   case IRPosition::IRP_CALL_SITE_RETURNED:
385   case IRPosition::IRP_CALL_SITE_ARGUMENT:
386     CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
387     break;
388   case IRPosition::IRP_INVALID:
389   case IRPosition::IRP_FLOAT:
390     break;
391   }
392 
393   return HasChanged;
394 }
395 
396 const IRPosition IRPosition::EmptyKey(255);
397 const IRPosition IRPosition::TombstoneKey(256);
398 
399 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
400   IRPositions.emplace_back(IRP);
401 
402   ImmutableCallSite ICS(&IRP.getAnchorValue());
403   switch (IRP.getPositionKind()) {
404   case IRPosition::IRP_INVALID:
405   case IRPosition::IRP_FLOAT:
406   case IRPosition::IRP_FUNCTION:
407     return;
408   case IRPosition::IRP_ARGUMENT:
409   case IRPosition::IRP_RETURNED:
410     IRPositions.emplace_back(
411         IRPosition::function(*IRP.getAssociatedFunction()));
412     return;
413   case IRPosition::IRP_CALL_SITE:
414     assert(ICS && "Expected call site!");
415     // TODO: We need to look at the operand bundles similar to the redirection
416     //       in CallBase.
417     if (!ICS.hasOperandBundles())
418       if (const Function *Callee = ICS.getCalledFunction())
419         IRPositions.emplace_back(IRPosition::function(*Callee));
420     return;
421   case IRPosition::IRP_CALL_SITE_RETURNED:
422     assert(ICS && "Expected call site!");
423     // TODO: We need to look at the operand bundles similar to the redirection
424     //       in CallBase.
425     if (!ICS.hasOperandBundles()) {
426       if (const Function *Callee = ICS.getCalledFunction()) {
427         IRPositions.emplace_back(IRPosition::returned(*Callee));
428         IRPositions.emplace_back(IRPosition::function(*Callee));
429       }
430     }
431     IRPositions.emplace_back(
432         IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
433     return;
434   case IRPosition::IRP_CALL_SITE_ARGUMENT: {
435     int ArgNo = IRP.getArgNo();
436     assert(ICS && ArgNo >= 0 && "Expected call site!");
437     // TODO: We need to look at the operand bundles similar to the redirection
438     //       in CallBase.
439     if (!ICS.hasOperandBundles()) {
440       const Function *Callee = ICS.getCalledFunction();
441       if (Callee && Callee->arg_size() > unsigned(ArgNo))
442         IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
443       if (Callee)
444         IRPositions.emplace_back(IRPosition::function(*Callee));
445     }
446     IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
447     return;
448   }
449   }
450 }
451 
452 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
453                          bool IgnoreSubsumingPositions) const {
454   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
455     for (Attribute::AttrKind AK : AKs)
456       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
457         return true;
458     // The first position returned by the SubsumingPositionIterator is
459     // always the position itself. If we ignore subsuming positions we
460     // are done after the first iteration.
461     if (IgnoreSubsumingPositions)
462       break;
463   }
464   return false;
465 }
466 
467 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
468                           SmallVectorImpl<Attribute> &Attrs) const {
469   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
470     for (Attribute::AttrKind AK : AKs) {
471       const Attribute &Attr = EquivIRP.getAttr(AK);
472       if (Attr.getKindAsEnum() == AK)
473         Attrs.push_back(Attr);
474     }
475 }
476 
477 void IRPosition::verify() {
478   switch (KindOrArgNo) {
479   default:
480     assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
481     assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
482            "Expected call base or argument for positive attribute index!");
483     if (isa<Argument>(AnchorVal)) {
484       assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
485              "Argument number mismatch!");
486       assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
487              "Associated value mismatch!");
488     } else {
489       assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
490              "Call site argument number mismatch!");
491       assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
492                  &getAssociatedValue() &&
493              "Associated value mismatch!");
494     }
495     break;
496   case IRP_INVALID:
497     assert(!AnchorVal && "Expected no value for an invalid position!");
498     break;
499   case IRP_FLOAT:
500     assert((!isa<CallBase>(&getAssociatedValue()) &&
501             !isa<Argument>(&getAssociatedValue())) &&
502            "Expected specialized kind for call base and argument values!");
503     break;
504   case IRP_RETURNED:
505     assert(isa<Function>(AnchorVal) &&
506            "Expected function for a 'returned' position!");
507     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
508     break;
509   case IRP_CALL_SITE_RETURNED:
510     assert((isa<CallBase>(AnchorVal)) &&
511            "Expected call base for 'call site returned' position!");
512     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
513     break;
514   case IRP_CALL_SITE:
515     assert((isa<CallBase>(AnchorVal)) &&
516            "Expected call base for 'call site function' position!");
517     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
518     break;
519   case IRP_FUNCTION:
520     assert(isa<Function>(AnchorVal) &&
521            "Expected function for a 'function' position!");
522     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
523     break;
524   }
525 }
526 
527 namespace {
528 /// Helper function to clamp a state \p S of type \p StateType with the
529 /// information in \p R and indicate/return if \p S did change (as-in update is
530 /// required to be run again).
531 template <typename StateType>
532 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
533   auto Assumed = S.getAssumed();
534   S ^= R;
535   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
536                                    : ChangeStatus::CHANGED;
537 }
538 
539 /// Clamp the information known for all returned values of a function
540 /// (identified by \p QueryingAA) into \p S.
541 template <typename AAType, typename StateType = typename AAType::StateType>
542 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
543                                      StateType &S) {
544   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
545                     << static_cast<const AbstractAttribute &>(QueryingAA)
546                     << " into " << S << "\n");
547 
548   assert((QueryingAA.getIRPosition().getPositionKind() ==
549               IRPosition::IRP_RETURNED ||
550           QueryingAA.getIRPosition().getPositionKind() ==
551               IRPosition::IRP_CALL_SITE_RETURNED) &&
552          "Can only clamp returned value states for a function returned or call "
553          "site returned position!");
554 
555   // Use an optional state as there might not be any return values and we want
556   // to join (IntegerState::operator&) the state of all there are.
557   Optional<StateType> T;
558 
559   // Callback for each possibly returned value.
560   auto CheckReturnValue = [&](Value &RV) -> bool {
561     const IRPosition &RVPos = IRPosition::value(RV);
562     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
563     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
564                       << " @ " << RVPos << "\n");
565     const StateType &AAS = static_cast<const StateType &>(AA.getState());
566     if (T.hasValue())
567       *T &= AAS;
568     else
569       T = AAS;
570     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
571                       << "\n");
572     return T->isValidState();
573   };
574 
575   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
576     S.indicatePessimisticFixpoint();
577   else if (T.hasValue())
578     S ^= *T;
579 }
580 
581 /// Helper class to compose two generic deduction
582 template <typename AAType, typename Base, typename StateType,
583           template <typename...> class F, template <typename...> class G>
584 struct AAComposeTwoGenericDeduction
585     : public F<AAType, G<AAType, Base, StateType>, StateType> {
586   AAComposeTwoGenericDeduction(const IRPosition &IRP)
587       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
588 
589   /// See AbstractAttribute::updateImpl(...).
590   ChangeStatus updateImpl(Attributor &A) override {
591     ChangeStatus ChangedF =
592         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
593     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
594     return ChangedF | ChangedG;
595   }
596 };
597 
598 /// Helper class for generic deduction: return value -> returned position.
599 template <typename AAType, typename Base,
600           typename StateType = typename AAType::StateType>
601 struct AAReturnedFromReturnedValues : public Base {
602   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
603 
604   /// See AbstractAttribute::updateImpl(...).
605   ChangeStatus updateImpl(Attributor &A) override {
606     StateType S;
607     clampReturnedValueStates<AAType, StateType>(A, *this, S);
608     // TODO: If we know we visited all returned values, thus no are assumed
609     // dead, we can take the known information from the state T.
610     return clampStateAndIndicateChange<StateType>(this->getState(), S);
611   }
612 };
613 
614 /// Clamp the information known at all call sites for a given argument
615 /// (identified by \p QueryingAA) into \p S.
616 template <typename AAType, typename StateType = typename AAType::StateType>
617 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
618                                         StateType &S) {
619   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
620                     << static_cast<const AbstractAttribute &>(QueryingAA)
621                     << " into " << S << "\n");
622 
623   assert(QueryingAA.getIRPosition().getPositionKind() ==
624              IRPosition::IRP_ARGUMENT &&
625          "Can only clamp call site argument states for an argument position!");
626 
627   // Use an optional state as there might not be any return values and we want
628   // to join (IntegerState::operator&) the state of all there are.
629   Optional<StateType> T;
630 
631   // The argument number which is also the call site argument number.
632   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
633 
634   auto CallSiteCheck = [&](AbstractCallSite ACS) {
635     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
636     // Check if a coresponding argument was found or if it is on not associated
637     // (which can happen for callback calls).
638     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
639       return false;
640 
641     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
642     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
643                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
644     const StateType &AAS = static_cast<const StateType &>(AA.getState());
645     if (T.hasValue())
646       *T &= AAS;
647     else
648       T = AAS;
649     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
650                       << "\n");
651     return T->isValidState();
652   };
653 
654   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
655     S.indicatePessimisticFixpoint();
656   else if (T.hasValue())
657     S ^= *T;
658 }
659 
660 /// Helper class for generic deduction: call site argument -> argument position.
661 template <typename AAType, typename Base,
662           typename StateType = typename AAType::StateType>
663 struct AAArgumentFromCallSiteArguments : public Base {
664   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
665 
666   /// See AbstractAttribute::updateImpl(...).
667   ChangeStatus updateImpl(Attributor &A) override {
668     StateType S;
669     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
670     // TODO: If we know we visited all incoming values, thus no are assumed
671     // dead, we can take the known information from the state T.
672     return clampStateAndIndicateChange<StateType>(this->getState(), S);
673   }
674 };
675 
676 /// Helper class for generic replication: function returned -> cs returned.
677 template <typename AAType, typename Base,
678           typename StateType = typename AAType::StateType>
679 struct AACallSiteReturnedFromReturned : public Base {
680   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
681 
682   /// See AbstractAttribute::updateImpl(...).
683   ChangeStatus updateImpl(Attributor &A) override {
684     assert(this->getIRPosition().getPositionKind() ==
685                IRPosition::IRP_CALL_SITE_RETURNED &&
686            "Can only wrap function returned positions for call site returned "
687            "positions!");
688     auto &S = this->getState();
689 
690     const Function *AssociatedFunction =
691         this->getIRPosition().getAssociatedFunction();
692     if (!AssociatedFunction)
693       return S.indicatePessimisticFixpoint();
694 
695     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
696     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
697     return clampStateAndIndicateChange(
698         S, static_cast<const typename AAType::StateType &>(AA.getState()));
699   }
700 };
701 
702 /// Helper class for generic deduction using must-be-executed-context
703 /// Base class is required to have `followUse` method.
704 
705 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
706 /// U - Underlying use.
707 /// I - The user of the \p U.
708 /// `followUse` returns true if the value should be tracked transitively.
709 
710 template <typename AAType, typename Base,
711           typename StateType = typename AAType::StateType>
712 struct AAFromMustBeExecutedContext : public Base {
713   AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
714 
715   void initialize(Attributor &A) override {
716     Base::initialize(A);
717     const IRPosition &IRP = this->getIRPosition();
718     Instruction *CtxI = IRP.getCtxI();
719 
720     if (!CtxI)
721       return;
722 
723     for (const Use &U : IRP.getAssociatedValue().uses())
724       Uses.insert(&U);
725   }
726 
727   /// See AbstractAttribute::updateImpl(...).
728   ChangeStatus updateImpl(Attributor &A) override {
729     auto BeforeState = this->getState();
730     auto &S = this->getState();
731     Instruction *CtxI = this->getIRPosition().getCtxI();
732     if (!CtxI)
733       return ChangeStatus::UNCHANGED;
734 
735     MustBeExecutedContextExplorer &Explorer =
736         A.getInfoCache().getMustBeExecutedContextExplorer();
737 
738     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
739     for (unsigned u = 0; u < Uses.size(); ++u) {
740       const Use *U = Uses[u];
741       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
742         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
743         if (Found && Base::followUse(A, U, UserI))
744           for (const Use &Us : UserI->uses())
745             Uses.insert(&Us);
746       }
747     }
748 
749     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
750   }
751 
752 private:
753   /// Container for (transitive) uses of the associated value.
754   SetVector<const Use *> Uses;
755 };
756 
757 template <typename AAType, typename Base,
758           typename StateType = typename AAType::StateType>
759 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
760     AAComposeTwoGenericDeduction<AAType, Base, StateType,
761                                  AAFromMustBeExecutedContext,
762                                  AAArgumentFromCallSiteArguments>;
763 
764 template <typename AAType, typename Base,
765           typename StateType = typename AAType::StateType>
766 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
767     AAComposeTwoGenericDeduction<AAType, Base, StateType,
768                                  AAFromMustBeExecutedContext,
769                                  AACallSiteReturnedFromReturned>;
770 
771 /// -----------------------NoUnwind Function Attribute--------------------------
772 
773 struct AANoUnwindImpl : AANoUnwind {
774   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
775 
776   const std::string getAsStr() const override {
777     return getAssumed() ? "nounwind" : "may-unwind";
778   }
779 
780   /// See AbstractAttribute::updateImpl(...).
781   ChangeStatus updateImpl(Attributor &A) override {
782     auto Opcodes = {
783         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
784         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
785         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
786 
787     auto CheckForNoUnwind = [&](Instruction &I) {
788       if (!I.mayThrow())
789         return true;
790 
791       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
792         const auto &NoUnwindAA =
793             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
794         return NoUnwindAA.isAssumedNoUnwind();
795       }
796       return false;
797     };
798 
799     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
800       return indicatePessimisticFixpoint();
801 
802     return ChangeStatus::UNCHANGED;
803   }
804 };
805 
806 struct AANoUnwindFunction final : public AANoUnwindImpl {
807   AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
808 
809   /// See AbstractAttribute::trackStatistics()
810   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
811 };
812 
813 /// NoUnwind attribute deduction for a call sites.
814 struct AANoUnwindCallSite final : AANoUnwindImpl {
815   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
816 
817   /// See AbstractAttribute::initialize(...).
818   void initialize(Attributor &A) override {
819     AANoUnwindImpl::initialize(A);
820     Function *F = getAssociatedFunction();
821     if (!F)
822       indicatePessimisticFixpoint();
823   }
824 
825   /// See AbstractAttribute::updateImpl(...).
826   ChangeStatus updateImpl(Attributor &A) override {
827     // TODO: Once we have call site specific value information we can provide
828     //       call site specific liveness information and then it makes
829     //       sense to specialize attributes for call sites arguments instead of
830     //       redirecting requests to the callee argument.
831     Function *F = getAssociatedFunction();
832     const IRPosition &FnPos = IRPosition::function(*F);
833     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
834     return clampStateAndIndicateChange(
835         getState(),
836         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
837   }
838 
839   /// See AbstractAttribute::trackStatistics()
840   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
841 };
842 
843 /// --------------------- Function Return Values -------------------------------
844 
845 /// "Attribute" that collects all potential returned values and the return
846 /// instructions that they arise from.
847 ///
848 /// If there is a unique returned value R, the manifest method will:
849 ///   - mark R with the "returned" attribute, if R is an argument.
850 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
851 
852   /// Mapping of values potentially returned by the associated function to the
853   /// return instructions that might return them.
854   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
855 
856   /// Mapping to remember the number of returned values for a call site such
857   /// that we can avoid updates if nothing changed.
858   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
859 
860   /// Set of unresolved calls returned by the associated function.
861   SmallSetVector<CallBase *, 4> UnresolvedCalls;
862 
863   /// State flags
864   ///
865   ///{
866   bool IsFixed = false;
867   bool IsValidState = true;
868   ///}
869 
870 public:
871   AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
872 
873   /// See AbstractAttribute::initialize(...).
874   void initialize(Attributor &A) override {
875     // Reset the state.
876     IsFixed = false;
877     IsValidState = true;
878     ReturnedValues.clear();
879 
880     Function *F = getAssociatedFunction();
881     if (!F) {
882       indicatePessimisticFixpoint();
883       return;
884     }
885 
886     // The map from instruction opcodes to those instructions in the function.
887     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
888 
889     // Look through all arguments, if one is marked as returned we are done.
890     for (Argument &Arg : F->args()) {
891       if (Arg.hasReturnedAttr()) {
892         auto &ReturnInstSet = ReturnedValues[&Arg];
893         for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
894           ReturnInstSet.insert(cast<ReturnInst>(RI));
895 
896         indicateOptimisticFixpoint();
897         return;
898       }
899     }
900 
901     if (!F->hasExactDefinition())
902       indicatePessimisticFixpoint();
903   }
904 
905   /// See AbstractAttribute::manifest(...).
906   ChangeStatus manifest(Attributor &A) override;
907 
908   /// See AbstractAttribute::getState(...).
909   AbstractState &getState() override { return *this; }
910 
911   /// See AbstractAttribute::getState(...).
912   const AbstractState &getState() const override { return *this; }
913 
914   /// See AbstractAttribute::updateImpl(Attributor &A).
915   ChangeStatus updateImpl(Attributor &A) override;
916 
917   llvm::iterator_range<iterator> returned_values() override {
918     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
919   }
920 
921   llvm::iterator_range<const_iterator> returned_values() const override {
922     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
923   }
924 
925   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
926     return UnresolvedCalls;
927   }
928 
929   /// Return the number of potential return values, -1 if unknown.
930   size_t getNumReturnValues() const override {
931     return isValidState() ? ReturnedValues.size() : -1;
932   }
933 
934   /// Return an assumed unique return value if a single candidate is found. If
935   /// there cannot be one, return a nullptr. If it is not clear yet, return the
936   /// Optional::NoneType.
937   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
938 
939   /// See AbstractState::checkForAllReturnedValues(...).
940   bool checkForAllReturnedValuesAndReturnInsts(
941       const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
942           &Pred) const override;
943 
944   /// Pretty print the attribute similar to the IR representation.
945   const std::string getAsStr() const override;
946 
947   /// See AbstractState::isAtFixpoint().
948   bool isAtFixpoint() const override { return IsFixed; }
949 
950   /// See AbstractState::isValidState().
951   bool isValidState() const override { return IsValidState; }
952 
953   /// See AbstractState::indicateOptimisticFixpoint(...).
954   ChangeStatus indicateOptimisticFixpoint() override {
955     IsFixed = true;
956     return ChangeStatus::UNCHANGED;
957   }
958 
959   ChangeStatus indicatePessimisticFixpoint() override {
960     IsFixed = true;
961     IsValidState = false;
962     return ChangeStatus::CHANGED;
963   }
964 };
965 
966 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
967   ChangeStatus Changed = ChangeStatus::UNCHANGED;
968 
969   // Bookkeeping.
970   assert(isValidState());
971   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
972                   "Number of function with known return values");
973 
974   // Check if we have an assumed unique return value that we could manifest.
975   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
976 
977   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
978     return Changed;
979 
980   // Bookkeeping.
981   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
982                   "Number of function with unique return");
983 
984   // Callback to replace the uses of CB with the constant C.
985   auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
986     if (CB.getNumUses() == 0 || CB.isMustTailCall())
987       return ChangeStatus::UNCHANGED;
988     CB.replaceAllUsesWith(&C);
989     return ChangeStatus::CHANGED;
990   };
991 
992   // If the assumed unique return value is an argument, annotate it.
993   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
994     // TODO: This should be handled differently!
995     this->AnchorVal = UniqueRVArg;
996     this->KindOrArgNo = UniqueRVArg->getArgNo();
997     Changed = IRAttribute::manifest(A);
998   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
999     // We can replace the returned value with the unique returned constant.
1000     Value &AnchorValue = getAnchorValue();
1001     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1002       for (const Use &U : F->uses())
1003         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1004           if (CB->isCallee(&U)) {
1005             Constant *RVCCast =
1006                 CB->getType() == RVC->getType()
1007                     ? RVC
1008                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1009             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1010           }
1011     } else {
1012       assert(isa<CallBase>(AnchorValue) &&
1013              "Expcected a function or call base anchor!");
1014       Constant *RVCCast =
1015           AnchorValue.getType() == RVC->getType()
1016               ? RVC
1017               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1018       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1019     }
1020     if (Changed == ChangeStatus::CHANGED)
1021       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1022                       "Number of function returns replaced by constant return");
1023   }
1024 
1025   return Changed;
1026 }
1027 
1028 const std::string AAReturnedValuesImpl::getAsStr() const {
1029   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1030          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1031          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1032 }
1033 
1034 Optional<Value *>
1035 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1036   // If checkForAllReturnedValues provides a unique value, ignoring potential
1037   // undef values that can also be present, it is assumed to be the actual
1038   // return value and forwarded to the caller of this method. If there are
1039   // multiple, a nullptr is returned indicating there cannot be a unique
1040   // returned value.
1041   Optional<Value *> UniqueRV;
1042 
1043   auto Pred = [&](Value &RV) -> bool {
1044     // If we found a second returned value and neither the current nor the saved
1045     // one is an undef, there is no unique returned value. Undefs are special
1046     // since we can pretend they have any value.
1047     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1048         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1049       UniqueRV = nullptr;
1050       return false;
1051     }
1052 
1053     // Do not overwrite a value with an undef.
1054     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1055       UniqueRV = &RV;
1056 
1057     return true;
1058   };
1059 
1060   if (!A.checkForAllReturnedValues(Pred, *this))
1061     UniqueRV = nullptr;
1062 
1063   return UniqueRV;
1064 }
1065 
1066 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1067     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1068         &Pred) const {
1069   if (!isValidState())
1070     return false;
1071 
1072   // Check all returned values but ignore call sites as long as we have not
1073   // encountered an overdefined one during an update.
1074   for (auto &It : ReturnedValues) {
1075     Value *RV = It.first;
1076 
1077     CallBase *CB = dyn_cast<CallBase>(RV);
1078     if (CB && !UnresolvedCalls.count(CB))
1079       continue;
1080 
1081     if (!Pred(*RV, It.second))
1082       return false;
1083   }
1084 
1085   return true;
1086 }
1087 
1088 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1089   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1090   bool Changed = false;
1091 
1092   // State used in the value traversals starting in returned values.
1093   struct RVState {
1094     // The map in which we collect return values -> return instrs.
1095     decltype(ReturnedValues) &RetValsMap;
1096     // The flag to indicate a change.
1097     bool &Changed;
1098     // The return instrs we come from.
1099     SmallSetVector<ReturnInst *, 4> RetInsts;
1100   };
1101 
1102   // Callback for a leaf value returned by the associated function.
1103   auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1104     auto Size = RVS.RetValsMap[&Val].size();
1105     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1106     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1107     RVS.Changed |= Inserted;
1108     LLVM_DEBUG({
1109       if (Inserted)
1110         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1111                << " => " << RVS.RetInsts.size() << "\n";
1112     });
1113     return true;
1114   };
1115 
1116   // Helper method to invoke the generic value traversal.
1117   auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1118     IRPosition RetValPos = IRPosition::value(RV);
1119     return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1120                                                             RVS, VisitValueCB);
1121   };
1122 
1123   // Callback for all "return intructions" live in the associated function.
1124   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1125     ReturnInst &Ret = cast<ReturnInst>(I);
1126     RVState RVS({ReturnedValues, Changed, {}});
1127     RVS.RetInsts.insert(&Ret);
1128     return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1129   };
1130 
1131   // Start by discovering returned values from all live returned instructions in
1132   // the associated function.
1133   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1134     return indicatePessimisticFixpoint();
1135 
1136   // Once returned values "directly" present in the code are handled we try to
1137   // resolve returned calls.
1138   decltype(ReturnedValues) NewRVsMap;
1139   for (auto &It : ReturnedValues) {
1140     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1141                       << " by #" << It.second.size() << " RIs\n");
1142     CallBase *CB = dyn_cast<CallBase>(It.first);
1143     if (!CB || UnresolvedCalls.count(CB))
1144       continue;
1145 
1146     if (!CB->getCalledFunction()) {
1147       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1148                         << "\n");
1149       UnresolvedCalls.insert(CB);
1150       continue;
1151     }
1152 
1153     // TODO: use the function scope once we have call site AAReturnedValues.
1154     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1155         *this, IRPosition::function(*CB->getCalledFunction()));
1156     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1157                       << static_cast<const AbstractAttribute &>(RetValAA)
1158                       << "\n");
1159 
1160     // Skip dead ends, thus if we do not know anything about the returned
1161     // call we mark it as unresolved and it will stay that way.
1162     if (!RetValAA.getState().isValidState()) {
1163       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1164                         << "\n");
1165       UnresolvedCalls.insert(CB);
1166       continue;
1167     }
1168 
1169     // Do not try to learn partial information. If the callee has unresolved
1170     // return values we will treat the call as unresolved/opaque.
1171     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1172     if (!RetValAAUnresolvedCalls.empty()) {
1173       UnresolvedCalls.insert(CB);
1174       continue;
1175     }
1176 
1177     // Now check if we can track transitively returned values. If possible, thus
1178     // if all return value can be represented in the current scope, do so.
1179     bool Unresolved = false;
1180     for (auto &RetValAAIt : RetValAA.returned_values()) {
1181       Value *RetVal = RetValAAIt.first;
1182       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1183           isa<Constant>(RetVal))
1184         continue;
1185       // Anything that did not fit in the above categories cannot be resolved,
1186       // mark the call as unresolved.
1187       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1188                            "cannot be translated: "
1189                         << *RetVal << "\n");
1190       UnresolvedCalls.insert(CB);
1191       Unresolved = true;
1192       break;
1193     }
1194 
1195     if (Unresolved)
1196       continue;
1197 
1198     // Now track transitively returned values.
1199     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1200     if (NumRetAA == RetValAA.getNumReturnValues()) {
1201       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1202                            "changed since it was seen last\n");
1203       continue;
1204     }
1205     NumRetAA = RetValAA.getNumReturnValues();
1206 
1207     for (auto &RetValAAIt : RetValAA.returned_values()) {
1208       Value *RetVal = RetValAAIt.first;
1209       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1210         // Arguments are mapped to call site operands and we begin the traversal
1211         // again.
1212         bool Unused = false;
1213         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1214         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1215         continue;
1216       } else if (isa<CallBase>(RetVal)) {
1217         // Call sites are resolved by the callee attribute over time, no need to
1218         // do anything for us.
1219         continue;
1220       } else if (isa<Constant>(RetVal)) {
1221         // Constants are valid everywhere, we can simply take them.
1222         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1223         continue;
1224       }
1225     }
1226   }
1227 
1228   // To avoid modifications to the ReturnedValues map while we iterate over it
1229   // we kept record of potential new entries in a copy map, NewRVsMap.
1230   for (auto &It : NewRVsMap) {
1231     assert(!It.second.empty() && "Entry does not add anything.");
1232     auto &ReturnInsts = ReturnedValues[It.first];
1233     for (ReturnInst *RI : It.second)
1234       if (ReturnInsts.insert(RI)) {
1235         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1236                           << *It.first << " => " << *RI << "\n");
1237         Changed = true;
1238       }
1239   }
1240 
1241   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1242   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1243 }
1244 
1245 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1246   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1247 
1248   /// See AbstractAttribute::trackStatistics()
1249   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1250 };
1251 
1252 /// Returned values information for a call sites.
1253 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1254   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1255 
1256   /// See AbstractAttribute::initialize(...).
1257   void initialize(Attributor &A) override {
1258     // TODO: Once we have call site specific value information we can provide
1259     //       call site specific liveness information and then it makes
1260     //       sense to specialize attributes for call sites instead of
1261     //       redirecting requests to the callee.
1262     llvm_unreachable("Abstract attributes for returned values are not "
1263                      "supported for call sites yet!");
1264   }
1265 
1266   /// See AbstractAttribute::updateImpl(...).
1267   ChangeStatus updateImpl(Attributor &A) override {
1268     return indicatePessimisticFixpoint();
1269   }
1270 
1271   /// See AbstractAttribute::trackStatistics()
1272   void trackStatistics() const override {}
1273 };
1274 
1275 /// ------------------------ NoSync Function Attribute -------------------------
1276 
1277 struct AANoSyncImpl : AANoSync {
1278   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1279 
1280   const std::string getAsStr() const override {
1281     return getAssumed() ? "nosync" : "may-sync";
1282   }
1283 
1284   /// See AbstractAttribute::updateImpl(...).
1285   ChangeStatus updateImpl(Attributor &A) override;
1286 
1287   /// Helper function used to determine whether an instruction is non-relaxed
1288   /// atomic. In other words, if an atomic instruction does not have unordered
1289   /// or monotonic ordering
1290   static bool isNonRelaxedAtomic(Instruction *I);
1291 
1292   /// Helper function used to determine whether an instruction is volatile.
1293   static bool isVolatile(Instruction *I);
1294 
1295   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1296   /// memset).
1297   static bool isNoSyncIntrinsic(Instruction *I);
1298 };
1299 
1300 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1301   if (!I->isAtomic())
1302     return false;
1303 
1304   AtomicOrdering Ordering;
1305   switch (I->getOpcode()) {
1306   case Instruction::AtomicRMW:
1307     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1308     break;
1309   case Instruction::Store:
1310     Ordering = cast<StoreInst>(I)->getOrdering();
1311     break;
1312   case Instruction::Load:
1313     Ordering = cast<LoadInst>(I)->getOrdering();
1314     break;
1315   case Instruction::Fence: {
1316     auto *FI = cast<FenceInst>(I);
1317     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1318       return false;
1319     Ordering = FI->getOrdering();
1320     break;
1321   }
1322   case Instruction::AtomicCmpXchg: {
1323     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1324     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1325     // Only if both are relaxed, than it can be treated as relaxed.
1326     // Otherwise it is non-relaxed.
1327     if (Success != AtomicOrdering::Unordered &&
1328         Success != AtomicOrdering::Monotonic)
1329       return true;
1330     if (Failure != AtomicOrdering::Unordered &&
1331         Failure != AtomicOrdering::Monotonic)
1332       return true;
1333     return false;
1334   }
1335   default:
1336     llvm_unreachable(
1337         "New atomic operations need to be known in the attributor.");
1338   }
1339 
1340   // Relaxed.
1341   if (Ordering == AtomicOrdering::Unordered ||
1342       Ordering == AtomicOrdering::Monotonic)
1343     return false;
1344   return true;
1345 }
1346 
1347 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1348 /// FIXME: We should ipmrove the handling of intrinsics.
1349 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1350   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1351     switch (II->getIntrinsicID()) {
1352     /// Element wise atomic memory intrinsics are can only be unordered,
1353     /// therefore nosync.
1354     case Intrinsic::memset_element_unordered_atomic:
1355     case Intrinsic::memmove_element_unordered_atomic:
1356     case Intrinsic::memcpy_element_unordered_atomic:
1357       return true;
1358     case Intrinsic::memset:
1359     case Intrinsic::memmove:
1360     case Intrinsic::memcpy:
1361       if (!cast<MemIntrinsic>(II)->isVolatile())
1362         return true;
1363       return false;
1364     default:
1365       return false;
1366     }
1367   }
1368   return false;
1369 }
1370 
1371 bool AANoSyncImpl::isVolatile(Instruction *I) {
1372   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1373          "Calls should not be checked here");
1374 
1375   switch (I->getOpcode()) {
1376   case Instruction::AtomicRMW:
1377     return cast<AtomicRMWInst>(I)->isVolatile();
1378   case Instruction::Store:
1379     return cast<StoreInst>(I)->isVolatile();
1380   case Instruction::Load:
1381     return cast<LoadInst>(I)->isVolatile();
1382   case Instruction::AtomicCmpXchg:
1383     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1384   default:
1385     return false;
1386   }
1387 }
1388 
1389 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1390 
1391   auto CheckRWInstForNoSync = [&](Instruction &I) {
1392     /// We are looking for volatile instructions or Non-Relaxed atomics.
1393     /// FIXME: We should improve the handling of intrinsics.
1394 
1395     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1396       return true;
1397 
1398     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1399       if (ICS.hasFnAttr(Attribute::NoSync))
1400         return true;
1401 
1402       const auto &NoSyncAA =
1403           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1404       if (NoSyncAA.isAssumedNoSync())
1405         return true;
1406       return false;
1407     }
1408 
1409     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1410       return true;
1411 
1412     return false;
1413   };
1414 
1415   auto CheckForNoSync = [&](Instruction &I) {
1416     // At this point we handled all read/write effects and they are all
1417     // nosync, so they can be skipped.
1418     if (I.mayReadOrWriteMemory())
1419       return true;
1420 
1421     // non-convergent and readnone imply nosync.
1422     return !ImmutableCallSite(&I).isConvergent();
1423   };
1424 
1425   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1426       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1427     return indicatePessimisticFixpoint();
1428 
1429   return ChangeStatus::UNCHANGED;
1430 }
1431 
1432 struct AANoSyncFunction final : public AANoSyncImpl {
1433   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1434 
1435   /// See AbstractAttribute::trackStatistics()
1436   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1437 };
1438 
1439 /// NoSync attribute deduction for a call sites.
1440 struct AANoSyncCallSite final : AANoSyncImpl {
1441   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1442 
1443   /// See AbstractAttribute::initialize(...).
1444   void initialize(Attributor &A) override {
1445     AANoSyncImpl::initialize(A);
1446     Function *F = getAssociatedFunction();
1447     if (!F)
1448       indicatePessimisticFixpoint();
1449   }
1450 
1451   /// See AbstractAttribute::updateImpl(...).
1452   ChangeStatus updateImpl(Attributor &A) override {
1453     // TODO: Once we have call site specific value information we can provide
1454     //       call site specific liveness information and then it makes
1455     //       sense to specialize attributes for call sites arguments instead of
1456     //       redirecting requests to the callee argument.
1457     Function *F = getAssociatedFunction();
1458     const IRPosition &FnPos = IRPosition::function(*F);
1459     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1460     return clampStateAndIndicateChange(
1461         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1462   }
1463 
1464   /// See AbstractAttribute::trackStatistics()
1465   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1466 };
1467 
1468 /// ------------------------ No-Free Attributes ----------------------------
1469 
1470 struct AANoFreeImpl : public AANoFree {
1471   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1472 
1473   /// See AbstractAttribute::updateImpl(...).
1474   ChangeStatus updateImpl(Attributor &A) override {
1475     auto CheckForNoFree = [&](Instruction &I) {
1476       ImmutableCallSite ICS(&I);
1477       if (ICS.hasFnAttr(Attribute::NoFree))
1478         return true;
1479 
1480       const auto &NoFreeAA =
1481           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1482       return NoFreeAA.isAssumedNoFree();
1483     };
1484 
1485     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1486       return indicatePessimisticFixpoint();
1487     return ChangeStatus::UNCHANGED;
1488   }
1489 
1490   /// See AbstractAttribute::getAsStr().
1491   const std::string getAsStr() const override {
1492     return getAssumed() ? "nofree" : "may-free";
1493   }
1494 };
1495 
1496 struct AANoFreeFunction final : public AANoFreeImpl {
1497   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1498 
1499   /// See AbstractAttribute::trackStatistics()
1500   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1501 };
1502 
1503 /// NoFree attribute deduction for a call sites.
1504 struct AANoFreeCallSite final : AANoFreeImpl {
1505   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1506 
1507   /// See AbstractAttribute::initialize(...).
1508   void initialize(Attributor &A) override {
1509     AANoFreeImpl::initialize(A);
1510     Function *F = getAssociatedFunction();
1511     if (!F)
1512       indicatePessimisticFixpoint();
1513   }
1514 
1515   /// See AbstractAttribute::updateImpl(...).
1516   ChangeStatus updateImpl(Attributor &A) override {
1517     // TODO: Once we have call site specific value information we can provide
1518     //       call site specific liveness information and then it makes
1519     //       sense to specialize attributes for call sites arguments instead of
1520     //       redirecting requests to the callee argument.
1521     Function *F = getAssociatedFunction();
1522     const IRPosition &FnPos = IRPosition::function(*F);
1523     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1524     return clampStateAndIndicateChange(
1525         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1526   }
1527 
1528   /// See AbstractAttribute::trackStatistics()
1529   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1530 };
1531 
1532 /// NoFree attribute for floating values.
1533 struct AANoFreeFloating : AANoFreeImpl {
1534   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1535 
1536   /// See AbstractAttribute::trackStatistics()
1537   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1538 
1539   /// See Abstract Attribute::updateImpl(...).
1540   ChangeStatus updateImpl(Attributor &A) override {
1541     const IRPosition &IRP = getIRPosition();
1542 
1543     const auto &NoFreeAA =
1544         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1545     if (NoFreeAA.isAssumedNoFree())
1546       return ChangeStatus::UNCHANGED;
1547 
1548     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1549     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1550       Instruction *UserI = cast<Instruction>(U.getUser());
1551       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1552         if (CB->isBundleOperand(&U))
1553           return false;
1554         if (!CB->isArgOperand(&U))
1555           return true;
1556         unsigned ArgNo = CB->getArgOperandNo(&U);
1557 
1558         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1559             *this, IRPosition::callsite_argument(*CB, ArgNo));
1560         return NoFreeArg.isAssumedNoFree();
1561       }
1562 
1563       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1564           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1565         Follow = true;
1566         return true;
1567       }
1568 
1569       // Unknown user.
1570       return false;
1571     };
1572     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1573       return indicatePessimisticFixpoint();
1574 
1575     return ChangeStatus::UNCHANGED;
1576   }
1577 };
1578 
1579 /// NoFree attribute for a call site argument.
1580 struct AANoFreeArgument final : AANoFreeFloating {
1581   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1582 
1583   /// See AbstractAttribute::trackStatistics()
1584   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1585 };
1586 
1587 /// NoFree attribute for call site arguments.
1588 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1589   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1590 
1591   /// See AbstractAttribute::updateImpl(...).
1592   ChangeStatus updateImpl(Attributor &A) override {
1593     // TODO: Once we have call site specific value information we can provide
1594     //       call site specific liveness information and then it makes
1595     //       sense to specialize attributes for call sites arguments instead of
1596     //       redirecting requests to the callee argument.
1597     Argument *Arg = getAssociatedArgument();
1598     if (!Arg)
1599       return indicatePessimisticFixpoint();
1600     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1601     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1602     return clampStateAndIndicateChange(
1603         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1604   }
1605 
1606   /// See AbstractAttribute::trackStatistics()
1607   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1608 };
1609 
1610 /// NoFree attribute for function return value.
1611 struct AANoFreeReturned final : AANoFreeFloating {
1612   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1613     llvm_unreachable("NoFree is not applicable to function returns!");
1614   }
1615 
1616   /// See AbstractAttribute::initialize(...).
1617   void initialize(Attributor &A) override {
1618     llvm_unreachable("NoFree is not applicable to function returns!");
1619   }
1620 
1621   /// See AbstractAttribute::updateImpl(...).
1622   ChangeStatus updateImpl(Attributor &A) override {
1623     llvm_unreachable("NoFree is not applicable to function returns!");
1624   }
1625 
1626   /// See AbstractAttribute::trackStatistics()
1627   void trackStatistics() const override {}
1628 };
1629 
1630 /// NoFree attribute deduction for a call site return value.
1631 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1632   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1633 
1634   ChangeStatus manifest(Attributor &A) override {
1635     return ChangeStatus::UNCHANGED;
1636   }
1637   /// See AbstractAttribute::trackStatistics()
1638   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1639 };
1640 
1641 /// ------------------------ NonNull Argument Attribute ------------------------
1642 static int64_t getKnownNonNullAndDerefBytesForUse(
1643     Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
1644     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1645   TrackUse = false;
1646 
1647   const Value *UseV = U->get();
1648   if (!UseV->getType()->isPointerTy())
1649     return 0;
1650 
1651   Type *PtrTy = UseV->getType();
1652   const Function *F = I->getFunction();
1653   bool NullPointerIsDefined =
1654       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1655   const DataLayout &DL = A.getInfoCache().getDL();
1656   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1657     if (ICS.isBundleOperand(U))
1658       return 0;
1659 
1660     if (ICS.isCallee(U)) {
1661       IsNonNull |= !NullPointerIsDefined;
1662       return 0;
1663     }
1664 
1665     unsigned ArgNo = ICS.getArgumentNo(U);
1666     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1667     // As long as we only use known information there is no need to track
1668     // dependences here.
1669     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1670                                                   /* TrackDependence */ false);
1671     IsNonNull |= DerefAA.isKnownNonNull();
1672     return DerefAA.getKnownDereferenceableBytes();
1673   }
1674 
1675   // We need to follow common pointer manipulation uses to the accesses they
1676   // feed into. We can try to be smart to avoid looking through things we do not
1677   // like for now, e.g., non-inbounds GEPs.
1678   if (isa<CastInst>(I)) {
1679     TrackUse = true;
1680     return 0;
1681   }
1682   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1683     if (GEP->hasAllConstantIndices()) {
1684       TrackUse = true;
1685       return 0;
1686     }
1687 
1688   int64_t Offset;
1689   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1690     if (Base == &AssociatedValue && getPointerOperand(I) == UseV) {
1691       int64_t DerefBytes =
1692           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1693 
1694       IsNonNull |= !NullPointerIsDefined;
1695       return std::max(int64_t(0), DerefBytes);
1696     }
1697   }
1698 
1699   /// Corner case when an offset is 0.
1700   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1701           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1702     if (Offset == 0 && Base == &AssociatedValue &&
1703         getPointerOperand(I) == UseV) {
1704       int64_t DerefBytes =
1705           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1706       IsNonNull |= !NullPointerIsDefined;
1707       return std::max(int64_t(0), DerefBytes);
1708     }
1709   }
1710   if (const Value *Base =
1711           GetPointerBaseWithConstantOffset(UseV, Offset, DL,
1712                                            /*AllowNonInbounds*/ false)) {
1713     if (Base == &AssociatedValue) {
1714       // As long as we only use known information there is no need to track
1715       // dependences here.
1716       auto &DerefAA = A.getAAFor<AADereferenceable>(
1717           QueryingAA, IRPosition::value(*Base), /* TrackDependence */ false);
1718       IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
1719       IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
1720       int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
1721       return std::max(int64_t(0), DerefBytes - std::max(int64_t(0), Offset));
1722     }
1723   }
1724 
1725   return 0;
1726 }
1727 
1728 struct AANonNullImpl : AANonNull {
1729   AANonNullImpl(const IRPosition &IRP)
1730       : AANonNull(IRP),
1731         NullIsDefined(NullPointerIsDefined(
1732             getAnchorScope(),
1733             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1734 
1735   /// See AbstractAttribute::initialize(...).
1736   void initialize(Attributor &A) override {
1737     if (!NullIsDefined &&
1738         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1739       indicateOptimisticFixpoint();
1740     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1741       indicatePessimisticFixpoint();
1742     else
1743       AANonNull::initialize(A);
1744   }
1745 
1746   /// See AAFromMustBeExecutedContext
1747   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1748     bool IsNonNull = false;
1749     bool TrackUse = false;
1750     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1751                                        IsNonNull, TrackUse);
1752     setKnown(IsNonNull);
1753     return TrackUse;
1754   }
1755 
1756   /// See AbstractAttribute::getAsStr().
1757   const std::string getAsStr() const override {
1758     return getAssumed() ? "nonnull" : "may-null";
1759   }
1760 
1761   /// Flag to determine if the underlying value can be null and still allow
1762   /// valid accesses.
1763   const bool NullIsDefined;
1764 };
1765 
1766 /// NonNull attribute for a floating value.
1767 struct AANonNullFloating
1768     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1769   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1770   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1771 
1772   /// See AbstractAttribute::updateImpl(...).
1773   ChangeStatus updateImpl(Attributor &A) override {
1774     ChangeStatus Change = Base::updateImpl(A);
1775     if (isKnownNonNull())
1776       return Change;
1777 
1778     if (!NullIsDefined) {
1779       const auto &DerefAA =
1780           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1781       if (DerefAA.getAssumedDereferenceableBytes())
1782         return Change;
1783     }
1784 
1785     const DataLayout &DL = A.getDataLayout();
1786 
1787     DominatorTree *DT = nullptr;
1788     InformationCache &InfoCache = A.getInfoCache();
1789     if (const Function *Fn = getAnchorScope())
1790       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1791 
1792     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1793                             bool Stripped) -> bool {
1794       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1795       if (!Stripped && this == &AA) {
1796         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1797           T.indicatePessimisticFixpoint();
1798       } else {
1799         // Use abstract attribute information.
1800         const AANonNull::StateType &NS =
1801             static_cast<const AANonNull::StateType &>(AA.getState());
1802         T ^= NS;
1803       }
1804       return T.isValidState();
1805     };
1806 
1807     StateType T;
1808     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1809                                                      T, VisitValueCB))
1810       return indicatePessimisticFixpoint();
1811 
1812     return clampStateAndIndicateChange(getState(), T);
1813   }
1814 
1815   /// See AbstractAttribute::trackStatistics()
1816   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1817 };
1818 
1819 /// NonNull attribute for function return value.
1820 struct AANonNullReturned final
1821     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1822   AANonNullReturned(const IRPosition &IRP)
1823       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1824 
1825   /// See AbstractAttribute::trackStatistics()
1826   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1827 };
1828 
1829 /// NonNull attribute for function argument.
1830 struct AANonNullArgument final
1831     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1832                                                               AANonNullImpl> {
1833   AANonNullArgument(const IRPosition &IRP)
1834       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1835                                                                 AANonNullImpl>(
1836             IRP) {}
1837 
1838   /// See AbstractAttribute::trackStatistics()
1839   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1840 };
1841 
1842 struct AANonNullCallSiteArgument final : AANonNullFloating {
1843   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1844 
1845   /// See AbstractAttribute::trackStatistics()
1846   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1847 };
1848 
1849 /// NonNull attribute for a call site return position.
1850 struct AANonNullCallSiteReturned final
1851     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1852                                                              AANonNullImpl> {
1853   AANonNullCallSiteReturned(const IRPosition &IRP)
1854       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1855                                                                AANonNullImpl>(
1856             IRP) {}
1857 
1858   /// See AbstractAttribute::trackStatistics()
1859   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1860 };
1861 
1862 /// ------------------------ No-Recurse Attributes ----------------------------
1863 
1864 struct AANoRecurseImpl : public AANoRecurse {
1865   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1866 
1867   /// See AbstractAttribute::getAsStr()
1868   const std::string getAsStr() const override {
1869     return getAssumed() ? "norecurse" : "may-recurse";
1870   }
1871 };
1872 
1873 struct AANoRecurseFunction final : AANoRecurseImpl {
1874   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1875 
1876   /// See AbstractAttribute::initialize(...).
1877   void initialize(Attributor &A) override {
1878     AANoRecurseImpl::initialize(A);
1879     if (const Function *F = getAnchorScope())
1880       if (A.getInfoCache().getSccSize(*F) == 1)
1881         return;
1882     indicatePessimisticFixpoint();
1883   }
1884 
1885   /// See AbstractAttribute::updateImpl(...).
1886   ChangeStatus updateImpl(Attributor &A) override {
1887 
1888     auto CheckForNoRecurse = [&](Instruction &I) {
1889       ImmutableCallSite ICS(&I);
1890       if (ICS.hasFnAttr(Attribute::NoRecurse))
1891         return true;
1892 
1893       const auto &NoRecurseAA =
1894           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1895       if (!NoRecurseAA.isAssumedNoRecurse())
1896         return false;
1897 
1898       // Recursion to the same function
1899       if (ICS.getCalledFunction() == getAnchorScope())
1900         return false;
1901 
1902       return true;
1903     };
1904 
1905     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1906       return indicatePessimisticFixpoint();
1907     return ChangeStatus::UNCHANGED;
1908   }
1909 
1910   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1911 };
1912 
1913 /// NoRecurse attribute deduction for a call sites.
1914 struct AANoRecurseCallSite final : AANoRecurseImpl {
1915   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1916 
1917   /// See AbstractAttribute::initialize(...).
1918   void initialize(Attributor &A) override {
1919     AANoRecurseImpl::initialize(A);
1920     Function *F = getAssociatedFunction();
1921     if (!F)
1922       indicatePessimisticFixpoint();
1923   }
1924 
1925   /// See AbstractAttribute::updateImpl(...).
1926   ChangeStatus updateImpl(Attributor &A) override {
1927     // TODO: Once we have call site specific value information we can provide
1928     //       call site specific liveness information and then it makes
1929     //       sense to specialize attributes for call sites arguments instead of
1930     //       redirecting requests to the callee argument.
1931     Function *F = getAssociatedFunction();
1932     const IRPosition &FnPos = IRPosition::function(*F);
1933     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1934     return clampStateAndIndicateChange(
1935         getState(),
1936         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1937   }
1938 
1939   /// See AbstractAttribute::trackStatistics()
1940   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1941 };
1942 
1943 /// ------------------------ Will-Return Attributes ----------------------------
1944 
1945 // Helper function that checks whether a function has any cycle.
1946 // TODO: Replace with more efficent code
1947 static bool containsCycle(Function &F) {
1948   SmallPtrSet<BasicBlock *, 32> Visited;
1949 
1950   // Traverse BB by dfs and check whether successor is already visited.
1951   for (BasicBlock *BB : depth_first(&F)) {
1952     Visited.insert(BB);
1953     for (auto *SuccBB : successors(BB)) {
1954       if (Visited.count(SuccBB))
1955         return true;
1956     }
1957   }
1958   return false;
1959 }
1960 
1961 // Helper function that checks the function have a loop which might become an
1962 // endless loop
1963 // FIXME: Any cycle is regarded as endless loop for now.
1964 //        We have to allow some patterns.
1965 static bool containsPossiblyEndlessLoop(Function *F) {
1966   return !F || !F->hasExactDefinition() || containsCycle(*F);
1967 }
1968 
1969 struct AAWillReturnImpl : public AAWillReturn {
1970   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
1971 
1972   /// See AbstractAttribute::initialize(...).
1973   void initialize(Attributor &A) override {
1974     AAWillReturn::initialize(A);
1975 
1976     Function *F = getAssociatedFunction();
1977     if (containsPossiblyEndlessLoop(F))
1978       indicatePessimisticFixpoint();
1979   }
1980 
1981   /// See AbstractAttribute::updateImpl(...).
1982   ChangeStatus updateImpl(Attributor &A) override {
1983     auto CheckForWillReturn = [&](Instruction &I) {
1984       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
1985       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
1986       if (WillReturnAA.isKnownWillReturn())
1987         return true;
1988       if (!WillReturnAA.isAssumedWillReturn())
1989         return false;
1990       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
1991       return NoRecurseAA.isAssumedNoRecurse();
1992     };
1993 
1994     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
1995       return indicatePessimisticFixpoint();
1996 
1997     return ChangeStatus::UNCHANGED;
1998   }
1999 
2000   /// See AbstractAttribute::getAsStr()
2001   const std::string getAsStr() const override {
2002     return getAssumed() ? "willreturn" : "may-noreturn";
2003   }
2004 };
2005 
2006 struct AAWillReturnFunction final : AAWillReturnImpl {
2007   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2008 
2009   /// See AbstractAttribute::trackStatistics()
2010   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2011 };
2012 
2013 /// WillReturn attribute deduction for a call sites.
2014 struct AAWillReturnCallSite final : AAWillReturnImpl {
2015   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2016 
2017   /// See AbstractAttribute::initialize(...).
2018   void initialize(Attributor &A) override {
2019     AAWillReturnImpl::initialize(A);
2020     Function *F = getAssociatedFunction();
2021     if (!F)
2022       indicatePessimisticFixpoint();
2023   }
2024 
2025   /// See AbstractAttribute::updateImpl(...).
2026   ChangeStatus updateImpl(Attributor &A) override {
2027     // TODO: Once we have call site specific value information we can provide
2028     //       call site specific liveness information and then it makes
2029     //       sense to specialize attributes for call sites arguments instead of
2030     //       redirecting requests to the callee argument.
2031     Function *F = getAssociatedFunction();
2032     const IRPosition &FnPos = IRPosition::function(*F);
2033     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2034     return clampStateAndIndicateChange(
2035         getState(),
2036         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2037   }
2038 
2039   /// See AbstractAttribute::trackStatistics()
2040   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2041 };
2042 
2043 /// -------------------AAReachability Attribute--------------------------
2044 
2045 struct AAReachabilityImpl : AAReachability {
2046   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2047 
2048   const std::string getAsStr() const override {
2049     // TODO: Return the number of reachable queries.
2050     return "reachable";
2051   }
2052 
2053   /// See AbstractAttribute::initialize(...).
2054   void initialize(Attributor &A) override {
2055     indicatePessimisticFixpoint();
2056   }
2057 
2058   /// See AbstractAttribute::updateImpl(...).
2059   ChangeStatus updateImpl(Attributor &A) override {
2060     return indicatePessimisticFixpoint();
2061   }
2062 };
2063 
2064 struct AAReachabilityFunction final : public AAReachabilityImpl {
2065   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2066 
2067   /// See AbstractAttribute::trackStatistics()
2068   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2069 };
2070 
2071 /// ------------------------ NoAlias Argument Attribute ------------------------
2072 
2073 struct AANoAliasImpl : AANoAlias {
2074   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2075 
2076   const std::string getAsStr() const override {
2077     return getAssumed() ? "noalias" : "may-alias";
2078   }
2079 };
2080 
2081 /// NoAlias attribute for a floating value.
2082 struct AANoAliasFloating final : AANoAliasImpl {
2083   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2084 
2085   /// See AbstractAttribute::initialize(...).
2086   void initialize(Attributor &A) override {
2087     AANoAliasImpl::initialize(A);
2088     Value &Val = getAssociatedValue();
2089     if (isa<AllocaInst>(Val))
2090       indicateOptimisticFixpoint();
2091     if (isa<ConstantPointerNull>(Val) &&
2092         Val.getType()->getPointerAddressSpace() == 0)
2093       indicateOptimisticFixpoint();
2094   }
2095 
2096   /// See AbstractAttribute::updateImpl(...).
2097   ChangeStatus updateImpl(Attributor &A) override {
2098     // TODO: Implement this.
2099     return indicatePessimisticFixpoint();
2100   }
2101 
2102   /// See AbstractAttribute::trackStatistics()
2103   void trackStatistics() const override {
2104     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2105   }
2106 };
2107 
2108 /// NoAlias attribute for an argument.
2109 struct AANoAliasArgument final
2110     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2111   AANoAliasArgument(const IRPosition &IRP)
2112       : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}
2113 
2114   /// See AbstractAttribute::trackStatistics()
2115   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2116 };
2117 
2118 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2119   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2120 
2121   /// See AbstractAttribute::initialize(...).
2122   void initialize(Attributor &A) override {
2123     // See callsite argument attribute and callee argument attribute.
2124     ImmutableCallSite ICS(&getAnchorValue());
2125     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2126       indicateOptimisticFixpoint();
2127   }
2128 
2129   /// See AbstractAttribute::updateImpl(...).
2130   ChangeStatus updateImpl(Attributor &A) override {
2131     // We can deduce "noalias" if the following conditions hold.
2132     // (i)   Associated value is assumed to be noalias in the definition.
2133     // (ii)  Associated value is assumed to be no-capture in all the uses
2134     //       possibly executed before this callsite.
2135     // (iii) There is no other pointer argument which could alias with the
2136     //       value.
2137 
2138     const Value &V = getAssociatedValue();
2139     const IRPosition IRP = IRPosition::value(V);
2140 
2141     // (i) Check whether noalias holds in the definition.
2142 
2143     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
2144 
2145     if (!NoAliasAA.isAssumedNoAlias())
2146       return indicatePessimisticFixpoint();
2147 
2148     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
2149                       << " is assumed NoAlias in the definition\n");
2150 
2151     // (ii) Check whether the value is captured in the scope using AANoCapture.
2152     //      FIXME: This is conservative though, it is better to look at CFG and
2153     //             check only uses possibly executed before this callsite.
2154 
2155     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2156     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2157       LLVM_DEBUG(
2158           dbgs() << "[Attributor][AANoAliasCSArg] " << V
2159                  << " cannot be noalias as it is potentially captured\n");
2160       return indicatePessimisticFixpoint();
2161     }
2162 
2163     // (iii) Check there is no other pointer argument which could alias with the
2164     // value.
2165     ImmutableCallSite ICS(&getAnchorValue());
2166     for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2167       if (getArgNo() == (int)i)
2168         continue;
2169       const Value *ArgOp = ICS.getArgOperand(i);
2170       if (!ArgOp->getType()->isPointerTy())
2171         continue;
2172 
2173       if (const Function *F = getAnchorScope()) {
2174         if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2175           bool IsAliasing = AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2176           LLVM_DEBUG(dbgs()
2177                      << "[Attributor][NoAliasCSArg] Check alias between "
2178                         "callsite arguments "
2179                      << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
2180                      << getAssociatedValue() << " " << *ArgOp << " => "
2181                      << (IsAliasing ? "" : "no-") << "alias \n");
2182 
2183           if (IsAliasing)
2184             continue;
2185         }
2186       }
2187       return indicatePessimisticFixpoint();
2188     }
2189 
2190     return ChangeStatus::UNCHANGED;
2191   }
2192 
2193   /// See AbstractAttribute::trackStatistics()
2194   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2195 };
2196 
2197 /// NoAlias attribute for function return value.
2198 struct AANoAliasReturned final : AANoAliasImpl {
2199   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2200 
2201   /// See AbstractAttribute::updateImpl(...).
2202   virtual ChangeStatus updateImpl(Attributor &A) override {
2203 
2204     auto CheckReturnValue = [&](Value &RV) -> bool {
2205       if (Constant *C = dyn_cast<Constant>(&RV))
2206         if (C->isNullValue() || isa<UndefValue>(C))
2207           return true;
2208 
2209       /// For now, we can only deduce noalias if we have call sites.
2210       /// FIXME: add more support.
2211       ImmutableCallSite ICS(&RV);
2212       if (!ICS)
2213         return false;
2214 
2215       const IRPosition &RVPos = IRPosition::value(RV);
2216       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2217       if (!NoAliasAA.isAssumedNoAlias())
2218         return false;
2219 
2220       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2221       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2222     };
2223 
2224     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2225       return indicatePessimisticFixpoint();
2226 
2227     return ChangeStatus::UNCHANGED;
2228   }
2229 
2230   /// See AbstractAttribute::trackStatistics()
2231   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2232 };
2233 
2234 /// NoAlias attribute deduction for a call site return value.
2235 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2236   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2237 
2238   /// See AbstractAttribute::initialize(...).
2239   void initialize(Attributor &A) override {
2240     AANoAliasImpl::initialize(A);
2241     Function *F = getAssociatedFunction();
2242     if (!F)
2243       indicatePessimisticFixpoint();
2244   }
2245 
2246   /// See AbstractAttribute::updateImpl(...).
2247   ChangeStatus updateImpl(Attributor &A) override {
2248     // TODO: Once we have call site specific value information we can provide
2249     //       call site specific liveness information and then it makes
2250     //       sense to specialize attributes for call sites arguments instead of
2251     //       redirecting requests to the callee argument.
2252     Function *F = getAssociatedFunction();
2253     const IRPosition &FnPos = IRPosition::returned(*F);
2254     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2255     return clampStateAndIndicateChange(
2256         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2257   }
2258 
2259   /// See AbstractAttribute::trackStatistics()
2260   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2261 };
2262 
2263 /// -------------------AAIsDead Function Attribute-----------------------
2264 
2265 struct AAIsDeadValueImpl : public AAIsDead {
2266   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2267 
2268   /// See AAIsDead::isAssumedDead().
2269   bool isAssumedDead() const override { return getAssumed(); }
2270 
2271   /// See AAIsDead::isAssumedDead(BasicBlock *).
2272   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2273 
2274   /// See AAIsDead::isKnownDead(BasicBlock *).
2275   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2276 
2277   /// See AAIsDead::isAssumedDead(Instruction *I).
2278   bool isAssumedDead(const Instruction *I) const override {
2279     return I == getCtxI() && isAssumedDead();
2280   }
2281 
2282   /// See AAIsDead::isKnownDead(Instruction *I).
2283   bool isKnownDead(const Instruction *I) const override {
2284     return I == getCtxI() && getKnown();
2285   }
2286 
2287   /// See AbstractAttribute::getAsStr().
2288   const std::string getAsStr() const override {
2289     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2290   }
2291 };
2292 
2293 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2294   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2295 
2296   /// See AbstractAttribute::initialize(...).
2297   void initialize(Attributor &A) override {
2298     if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2299       if (!wouldInstructionBeTriviallyDead(I))
2300         indicatePessimisticFixpoint();
2301     if (isa<UndefValue>(getAssociatedValue()))
2302       indicatePessimisticFixpoint();
2303   }
2304 
2305   /// See AbstractAttribute::updateImpl(...).
2306   ChangeStatus updateImpl(Attributor &A) override {
2307     auto UsePred = [&](const Use &U, bool &Follow) {
2308       Instruction *UserI = cast<Instruction>(U.getUser());
2309       if (CallSite CS = CallSite(UserI)) {
2310         if (!CS.isArgOperand(&U))
2311           return false;
2312         const IRPosition &CSArgPos =
2313             IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2314         const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2315         return CSArgIsDead.isAssumedDead();
2316       }
2317       if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2318         const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2319         const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2320         return RetIsDeadAA.isAssumedDead();
2321       }
2322       Follow = true;
2323       return wouldInstructionBeTriviallyDead(UserI);
2324     };
2325 
2326     if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2327       return indicatePessimisticFixpoint();
2328     return ChangeStatus::UNCHANGED;
2329   }
2330 
2331   /// See AbstractAttribute::manifest(...).
2332   ChangeStatus manifest(Attributor &A) override {
2333     Value &V = getAssociatedValue();
2334     if (auto *I = dyn_cast<Instruction>(&V))
2335       if (wouldInstructionBeTriviallyDead(I)) {
2336         A.deleteAfterManifest(*I);
2337         return ChangeStatus::CHANGED;
2338       }
2339 
2340     if (V.use_empty())
2341       return ChangeStatus::UNCHANGED;
2342 
2343     UndefValue &UV = *UndefValue::get(V.getType());
2344     bool AnyChange = false;
2345     for (Use &U : V.uses())
2346       AnyChange |= A.changeUseAfterManifest(U, UV);
2347     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2348   }
2349 
2350   /// See AbstractAttribute::trackStatistics()
2351   void trackStatistics() const override {
2352     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2353   }
2354 };
2355 
2356 struct AAIsDeadArgument : public AAIsDeadFloating {
2357   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2358 
2359   /// See AbstractAttribute::initialize(...).
2360   void initialize(Attributor &A) override {
2361     if (!getAssociatedFunction()->hasExactDefinition())
2362       indicatePessimisticFixpoint();
2363   }
2364 
2365   /// See AbstractAttribute::trackStatistics()
2366   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2367 };
2368 
2369 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2370   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2371 
2372   /// See AbstractAttribute::initialize(...).
2373   void initialize(Attributor &A) override {
2374     if (isa<UndefValue>(getAssociatedValue()))
2375       indicatePessimisticFixpoint();
2376   }
2377 
2378   /// See AbstractAttribute::updateImpl(...).
2379   ChangeStatus updateImpl(Attributor &A) override {
2380     // TODO: Once we have call site specific value information we can provide
2381     //       call site specific liveness information and then it makes
2382     //       sense to specialize attributes for call sites arguments instead of
2383     //       redirecting requests to the callee argument.
2384     Argument *Arg = getAssociatedArgument();
2385     if (!Arg)
2386       return indicatePessimisticFixpoint();
2387     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2388     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2389     return clampStateAndIndicateChange(
2390         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2391   }
2392 
2393   /// See AbstractAttribute::manifest(...).
2394   ChangeStatus manifest(Attributor &A) override {
2395     CallBase &CB = cast<CallBase>(getAnchorValue());
2396     Use &U = CB.getArgOperandUse(getArgNo());
2397     assert(!isa<UndefValue>(U.get()) &&
2398            "Expected undef values to be filtered out!");
2399     UndefValue &UV = *UndefValue::get(U->getType());
2400     if (A.changeUseAfterManifest(U, UV))
2401       return ChangeStatus::CHANGED;
2402     return ChangeStatus::UNCHANGED;
2403   }
2404 
2405   /// See AbstractAttribute::trackStatistics()
2406   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2407 };
2408 
2409 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2410   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2411 
2412   /// See AbstractAttribute::updateImpl(...).
2413   ChangeStatus updateImpl(Attributor &A) override {
2414 
2415     auto PredForCallSite = [&](AbstractCallSite ACS) {
2416       if (ACS.isCallbackCall())
2417         return false;
2418       const IRPosition &CSRetPos =
2419           IRPosition::callsite_returned(ACS.getCallSite());
2420       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2421       return RetIsDeadAA.isAssumedDead();
2422     };
2423 
2424     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
2425       return indicatePessimisticFixpoint();
2426 
2427     return ChangeStatus::UNCHANGED;
2428   }
2429 
2430   /// See AbstractAttribute::manifest(...).
2431   ChangeStatus manifest(Attributor &A) override {
2432     // TODO: Rewrite the signature to return void?
2433     bool AnyChange = false;
2434     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2435     auto RetInstPred = [&](Instruction &I) {
2436       ReturnInst &RI = cast<ReturnInst>(I);
2437       if (!isa<UndefValue>(RI.getReturnValue()))
2438         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2439       return true;
2440     };
2441     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2442     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2443   }
2444 
2445   /// See AbstractAttribute::trackStatistics()
2446   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2447 };
2448 
2449 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2450   AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2451 
2452   /// See AbstractAttribute::initialize(...).
2453   void initialize(Attributor &A) override {}
2454 
2455   /// See AbstractAttribute::trackStatistics()
2456   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
2457 };
2458 
2459 struct AAIsDeadFunction : public AAIsDead {
2460   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2461 
2462   /// See AbstractAttribute::initialize(...).
2463   void initialize(Attributor &A) override {
2464     const Function *F = getAssociatedFunction();
2465     if (F && !F->isDeclaration()) {
2466       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2467       assumeLive(A, F->getEntryBlock());
2468     }
2469   }
2470 
2471   /// See AbstractAttribute::getAsStr().
2472   const std::string getAsStr() const override {
2473     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2474            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
2475            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2476            std::to_string(KnownDeadEnds.size()) + "]";
2477   }
2478 
2479   /// See AbstractAttribute::manifest(...).
2480   ChangeStatus manifest(Attributor &A) override {
2481     assert(getState().isValidState() &&
2482            "Attempted to manifest an invalid state!");
2483 
2484     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2485     Function &F = *getAssociatedFunction();
2486 
2487     if (AssumedLiveBlocks.empty()) {
2488       A.deleteAfterManifest(F);
2489       return ChangeStatus::CHANGED;
2490     }
2491 
2492     // Flag to determine if we can change an invoke to a call assuming the
2493     // callee is nounwind. This is not possible if the personality of the
2494     // function allows to catch asynchronous exceptions.
2495     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2496 
2497     KnownDeadEnds.set_union(ToBeExploredFrom);
2498     for (const Instruction *DeadEndI : KnownDeadEnds) {
2499       auto *CB = dyn_cast<CallBase>(DeadEndI);
2500       if (!CB)
2501         continue;
2502       const auto &NoReturnAA =
2503           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2504       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2505       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2506         continue;
2507       Instruction *I = const_cast<Instruction *>(DeadEndI);
2508       BasicBlock *BB = I->getParent();
2509       Instruction *SplitPos = I->getNextNode();
2510       // TODO: mark stuff before unreachable instructions as dead.
2511 
2512       if (auto *II = dyn_cast<InvokeInst>(I)) {
2513         // If we keep the invoke the split position is at the beginning of the
2514         // normal desitination block (it invokes a noreturn function after all).
2515         BasicBlock *NormalDestBB = II->getNormalDest();
2516         SplitPos = &NormalDestBB->front();
2517 
2518         /// Invoke is replaced with a call and unreachable is placed after it if
2519         /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
2520         /// and only place an unreachable in the normal successor.
2521         if (Invoke2CallAllowed) {
2522           if (II->getCalledFunction()) {
2523             const IRPosition &IPos = IRPosition::callsite_function(*II);
2524             const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2525             if (AANoUnw.isAssumedNoUnwind()) {
2526               LLVM_DEBUG(dbgs()
2527                          << "[AAIsDead] Replace invoke with call inst\n");
2528               CallInst *CI = createCallMatchingInvoke(II);
2529               CI->insertBefore(II);
2530               CI->takeName(II);
2531               II->replaceAllUsesWith(CI);
2532 
2533               // If this is a nounwind + mayreturn invoke we only remove the unwind edge.
2534               // This is done by moving the invoke into a new and dead block and connecting
2535               // the normal destination of the invoke with a branch that follows the call
2536               // replacement we created above.
2537               if (MayReturn) {
2538                 BasicBlock *NewDeadBB = SplitBlock(BB, II, nullptr, nullptr, nullptr, ".i2c");
2539                 assert(isa<BranchInst>(BB->getTerminator()) &&
2540                        BB->getTerminator()->getNumSuccessors() == 1 &&
2541                        BB->getTerminator()->getSuccessor(0) == NewDeadBB);
2542                 new UnreachableInst(I->getContext(), NewDeadBB);
2543                 BB->getTerminator()->setOperand(0, NormalDestBB);
2544                 A.deleteAfterManifest(*II);
2545                 continue;
2546               }
2547 
2548               // We do not need an invoke (II) but instead want a call followed
2549               // by an unreachable. However, we do not remove II as other
2550               // abstract attributes might have it cached as part of their
2551               // results. Given that we modify the CFG anyway, we simply keep II
2552               // around but in a new dead block. To avoid II being live through
2553               // a different edge we have to ensure the block we place it in is
2554               // only reached from the current block of II and then not reached
2555               // at all when we insert the unreachable.
2556               SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
2557               SplitPos = CI->getNextNode();
2558             }
2559           }
2560         }
2561 
2562         if (SplitPos == &NormalDestBB->front()) {
2563           // If this is an invoke of a noreturn function the edge to the normal
2564           // destination block is dead but not necessarily the block itself.
2565           // TODO: We need to move to an edge based system during deduction and
2566           //       also manifest.
2567           assert(!NormalDestBB->isLandingPad() &&
2568                  "Expected the normal destination not to be a landingpad!");
2569           if (NormalDestBB->getUniquePredecessor() == BB) {
2570             assumeLive(A, *NormalDestBB);
2571           } else {
2572             BasicBlock *SplitBB =
2573                 SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2574             // The split block is live even if it contains only an unreachable
2575             // instruction at the end.
2576             assumeLive(A, *SplitBB);
2577             SplitPos = SplitBB->getTerminator();
2578             HasChanged = ChangeStatus::CHANGED;
2579           }
2580         }
2581       }
2582 
2583       if (isa_and_nonnull<UnreachableInst>(SplitPos))
2584         continue;
2585 
2586       BB = SplitPos->getParent();
2587       SplitBlock(BB, SplitPos);
2588       changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
2589       HasChanged = ChangeStatus::CHANGED;
2590     }
2591 
2592     for (BasicBlock &BB : F)
2593       if (!AssumedLiveBlocks.count(&BB))
2594         A.deleteAfterManifest(BB);
2595 
2596     return HasChanged;
2597   }
2598 
2599   /// See AbstractAttribute::updateImpl(...).
2600   ChangeStatus updateImpl(Attributor &A) override;
2601 
2602   /// See AbstractAttribute::trackStatistics()
2603   void trackStatistics() const override {}
2604 
2605   /// Returns true if the function is assumed dead.
2606   bool isAssumedDead() const override { return false; }
2607 
2608   /// See AAIsDead::isAssumedDead(BasicBlock *).
2609   bool isAssumedDead(const BasicBlock *BB) const override {
2610     assert(BB->getParent() == getAssociatedFunction() &&
2611            "BB must be in the same anchor scope function.");
2612 
2613     if (!getAssumed())
2614       return false;
2615     return !AssumedLiveBlocks.count(BB);
2616   }
2617 
2618   /// See AAIsDead::isKnownDead(BasicBlock *).
2619   bool isKnownDead(const BasicBlock *BB) const override {
2620     return getKnown() && isAssumedDead(BB);
2621   }
2622 
2623   /// See AAIsDead::isAssumed(Instruction *I).
2624   bool isAssumedDead(const Instruction *I) const override {
2625     assert(I->getParent()->getParent() == getAssociatedFunction() &&
2626            "Instruction must be in the same anchor scope function.");
2627 
2628     if (!getAssumed())
2629       return false;
2630 
2631     // If it is not in AssumedLiveBlocks then it for sure dead.
2632     // Otherwise, it can still be after noreturn call in a live block.
2633     if (!AssumedLiveBlocks.count(I->getParent()))
2634       return true;
2635 
2636     // If it is not after a liveness barrier it is live.
2637     const Instruction *PrevI = I->getPrevNode();
2638     while (PrevI) {
2639       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2640         return true;
2641       PrevI = PrevI->getPrevNode();
2642     }
2643     return false;
2644   }
2645 
2646   /// See AAIsDead::isKnownDead(Instruction *I).
2647   bool isKnownDead(const Instruction *I) const override {
2648     return getKnown() && isAssumedDead(I);
2649   }
2650 
2651   /// Determine if \p F might catch asynchronous exceptions.
2652   static bool mayCatchAsynchronousExceptions(const Function &F) {
2653     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2654   }
2655 
2656   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2657   /// that internal function called from \p BB should now be looked at.
2658   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2659     if (!AssumedLiveBlocks.insert(&BB).second)
2660       return false;
2661 
2662     // We assume that all of BB is (probably) live now and if there are calls to
2663     // internal functions we will assume that those are now live as well. This
2664     // is a performance optimization for blocks with calls to a lot of internal
2665     // functions. It can however cause dead functions to be treated as live.
2666     for (const Instruction &I : BB)
2667       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2668         if (const Function *F = ICS.getCalledFunction())
2669           if (F->hasLocalLinkage())
2670             A.markLiveInternalFunction(*F);
2671     return true;
2672   }
2673 
2674   /// Collection of instructions that need to be explored again, e.g., we
2675   /// did assume they do not transfer control to (one of their) successors.
2676   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2677 
2678   /// Collection of instructions that are known to not transfer control.
2679   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2680 
2681   /// Collection of all assumed live BasicBlocks.
2682   DenseSet<const BasicBlock *> AssumedLiveBlocks;
2683 };
2684 
2685 static bool
2686 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2687                         AbstractAttribute &AA,
2688                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2689   const IRPosition &IPos = IRPosition::callsite_function(CB);
2690 
2691   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2692   if (NoReturnAA.isAssumedNoReturn())
2693     return !NoReturnAA.isKnownNoReturn();
2694   if (CB.isTerminator())
2695     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
2696   else
2697     AliveSuccessors.push_back(CB.getNextNode());
2698   return false;
2699 }
2700 
2701 static bool
2702 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
2703                         AbstractAttribute &AA,
2704                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2705   bool UsedAssumedInformation =
2706       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
2707 
2708   // First, determine if we can change an invoke to a call assuming the
2709   // callee is nounwind. This is not possible if the personality of the
2710   // function allows to catch asynchronous exceptions.
2711   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
2712     AliveSuccessors.push_back(&II.getUnwindDest()->front());
2713   } else {
2714     const IRPosition &IPos = IRPosition::callsite_function(II);
2715     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
2716     if (AANoUnw.isAssumedNoUnwind()) {
2717       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
2718     } else {
2719       AliveSuccessors.push_back(&II.getUnwindDest()->front());
2720     }
2721   }
2722   return UsedAssumedInformation;
2723 }
2724 
2725 static Optional<ConstantInt *>
2726 getAssumedConstant(Attributor &A, const Value &V, AbstractAttribute &AA,
2727                    bool &UsedAssumedInformation) {
2728   const auto &ValueSimplifyAA =
2729       A.getAAFor<AAValueSimplify>(AA, IRPosition::value(V));
2730   Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
2731   UsedAssumedInformation |= !ValueSimplifyAA.isKnown();
2732   if (!SimplifiedV.hasValue())
2733     return llvm::None;
2734   if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue()))
2735     return llvm::None;
2736   return dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
2737 }
2738 
2739 static bool
2740 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
2741                         AbstractAttribute &AA,
2742                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2743   bool UsedAssumedInformation = false;
2744   if (BI.getNumSuccessors() == 1) {
2745     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2746   } else {
2747     Optional<ConstantInt *> CI =
2748         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
2749     if (!CI.hasValue()) {
2750       // No value yet, assume both edges are dead.
2751     } else if (CI.getValue()) {
2752       const BasicBlock *SuccBB =
2753           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
2754       AliveSuccessors.push_back(&SuccBB->front());
2755     } else {
2756       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2757       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
2758       UsedAssumedInformation = false;
2759     }
2760   }
2761   return UsedAssumedInformation;
2762 }
2763 
2764 static bool
2765 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
2766                         AbstractAttribute &AA,
2767                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2768   bool UsedAssumedInformation = false;
2769   Optional<ConstantInt *> CI =
2770       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
2771   if (!CI.hasValue()) {
2772     // No value yet, assume all edges are dead.
2773   } else if (CI.getValue()) {
2774     for (auto &CaseIt : SI.cases()) {
2775       if (CaseIt.getCaseValue() == CI.getValue()) {
2776         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
2777         return UsedAssumedInformation;
2778       }
2779     }
2780     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
2781     return UsedAssumedInformation;
2782   } else {
2783     for (const BasicBlock *SuccBB : successors(SI.getParent()))
2784       AliveSuccessors.push_back(&SuccBB->front());
2785   }
2786   return UsedAssumedInformation;
2787 }
2788 
2789 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
2790   ChangeStatus Change = ChangeStatus::UNCHANGED;
2791 
2792   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
2793                     << getAssociatedFunction()->size() << "] BBs and "
2794                     << ToBeExploredFrom.size() << " exploration points and "
2795                     << KnownDeadEnds.size() << " known dead ends\n");
2796 
2797   // Copy and clear the list of instructions we need to explore from. It is
2798   // refilled with instructions the next update has to look at.
2799   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
2800                                                ToBeExploredFrom.end());
2801   decltype(ToBeExploredFrom) NewToBeExploredFrom;
2802 
2803   SmallVector<const Instruction *, 8> AliveSuccessors;
2804   while (!Worklist.empty()) {
2805     const Instruction *I = Worklist.pop_back_val();
2806     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
2807 
2808     AliveSuccessors.clear();
2809 
2810     bool UsedAssumedInformation = false;
2811     switch (I->getOpcode()) {
2812     // TODO: look for (assumed) UB to backwards propagate "deadness".
2813     default:
2814       if (I->isTerminator()) {
2815         for (const BasicBlock *SuccBB : successors(I->getParent()))
2816           AliveSuccessors.push_back(&SuccBB->front());
2817       } else {
2818         AliveSuccessors.push_back(I->getNextNode());
2819       }
2820       break;
2821     case Instruction::Call:
2822       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
2823                                                        *this, AliveSuccessors);
2824       break;
2825     case Instruction::Invoke:
2826       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
2827                                                        *this, AliveSuccessors);
2828       break;
2829     case Instruction::Br:
2830       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
2831                                                        *this, AliveSuccessors);
2832       break;
2833     case Instruction::Switch:
2834       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
2835                                                        *this, AliveSuccessors);
2836       break;
2837     }
2838 
2839     if (UsedAssumedInformation) {
2840       NewToBeExploredFrom.insert(I);
2841     } else {
2842       Change = ChangeStatus::CHANGED;
2843       if (AliveSuccessors.empty() ||
2844           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
2845         KnownDeadEnds.insert(I);
2846     }
2847 
2848     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
2849                       << AliveSuccessors.size() << " UsedAssumedInformation: "
2850                       << UsedAssumedInformation << "\n");
2851 
2852     for (const Instruction *AliveSuccessor : AliveSuccessors) {
2853       if (!I->isTerminator()) {
2854         assert(AliveSuccessors.size() == 1 &&
2855                "Non-terminator expected to have a single successor!");
2856         Worklist.push_back(AliveSuccessor);
2857       } else {
2858         if (assumeLive(A, *AliveSuccessor->getParent()))
2859           Worklist.push_back(AliveSuccessor);
2860       }
2861     }
2862   }
2863 
2864   ToBeExploredFrom = std::move(NewToBeExploredFrom);
2865 
2866   // If we know everything is live there is no need to query for liveness.
2867   // Instead, indicating a pessimistic fixpoint will cause the state to be
2868   // "invalid" and all queries to be answered conservatively without lookups.
2869   // To be in this state we have to (1) finished the exploration and (3) not
2870   // discovered any non-trivial dead end and (2) not ruled unreachable code
2871   // dead.
2872   if (ToBeExploredFrom.empty() &&
2873       getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
2874       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
2875         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
2876       }))
2877     return indicatePessimisticFixpoint();
2878   return Change;
2879 }
2880 
2881 /// Liveness information for a call sites.
2882 struct AAIsDeadCallSite final : AAIsDeadFunction {
2883   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
2884 
2885   /// See AbstractAttribute::initialize(...).
2886   void initialize(Attributor &A) override {
2887     // TODO: Once we have call site specific value information we can provide
2888     //       call site specific liveness information and then it makes
2889     //       sense to specialize attributes for call sites instead of
2890     //       redirecting requests to the callee.
2891     llvm_unreachable("Abstract attributes for liveness are not "
2892                      "supported for call sites yet!");
2893   }
2894 
2895   /// See AbstractAttribute::updateImpl(...).
2896   ChangeStatus updateImpl(Attributor &A) override {
2897     return indicatePessimisticFixpoint();
2898   }
2899 
2900   /// See AbstractAttribute::trackStatistics()
2901   void trackStatistics() const override {}
2902 };
2903 
2904 /// -------------------- Dereferenceable Argument Attribute --------------------
2905 
2906 template <>
2907 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
2908                                                      const DerefState &R) {
2909   ChangeStatus CS0 =
2910       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
2911   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
2912   return CS0 | CS1;
2913 }
2914 
2915 struct AADereferenceableImpl : AADereferenceable {
2916   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
2917   using StateType = DerefState;
2918 
2919   void initialize(Attributor &A) override {
2920     SmallVector<Attribute, 4> Attrs;
2921     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
2922              Attrs);
2923     for (const Attribute &Attr : Attrs)
2924       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
2925 
2926     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
2927 
2928     const IRPosition &IRP = this->getIRPosition();
2929     bool IsFnInterface = IRP.isFnInterfaceKind();
2930     const Function *FnScope = IRP.getAnchorScope();
2931     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
2932       indicatePessimisticFixpoint();
2933   }
2934 
2935   /// See AbstractAttribute::getState()
2936   /// {
2937   StateType &getState() override { return *this; }
2938   const StateType &getState() const override { return *this; }
2939   /// }
2940 
2941   /// Helper function for collecting accessed bytes in must-be-executed-context
2942   void addAccessedBytesForUse(Attributor &A, const Use *U,
2943                               const Instruction *I) {
2944     const Value *UseV = U->get();
2945     if (!UseV->getType()->isPointerTy())
2946       return;
2947 
2948     Type *PtrTy = UseV->getType();
2949     const DataLayout &DL = A.getDataLayout();
2950     int64_t Offset;
2951     if (const Value *Base = getBasePointerOfAccessPointerOperand(
2952             I, Offset, DL, /*AllowNonInbounds*/ true)) {
2953       if (Base == &getAssociatedValue() && getPointerOperand(I) == UseV) {
2954         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
2955         addAccessedBytes(Offset, Size);
2956       }
2957     }
2958     return;
2959   }
2960 
2961   /// See AAFromMustBeExecutedContext
2962   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
2963     bool IsNonNull = false;
2964     bool TrackUse = false;
2965     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
2966         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
2967 
2968     addAccessedBytesForUse(A, U, I);
2969     takeKnownDerefBytesMaximum(DerefBytes);
2970     return TrackUse;
2971   }
2972 
2973   /// See AbstractAttribute::manifest(...).
2974   ChangeStatus manifest(Attributor &A) override {
2975     ChangeStatus Change = AADereferenceable::manifest(A);
2976     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
2977       removeAttrs({Attribute::DereferenceableOrNull});
2978       return ChangeStatus::CHANGED;
2979     }
2980     return Change;
2981   }
2982 
2983   void getDeducedAttributes(LLVMContext &Ctx,
2984                             SmallVectorImpl<Attribute> &Attrs) const override {
2985     // TODO: Add *_globally support
2986     if (isAssumedNonNull())
2987       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
2988           Ctx, getAssumedDereferenceableBytes()));
2989     else
2990       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
2991           Ctx, getAssumedDereferenceableBytes()));
2992   }
2993 
2994   /// See AbstractAttribute::getAsStr().
2995   const std::string getAsStr() const override {
2996     if (!getAssumedDereferenceableBytes())
2997       return "unknown-dereferenceable";
2998     return std::string("dereferenceable") +
2999            (isAssumedNonNull() ? "" : "_or_null") +
3000            (isAssumedGlobal() ? "_globally" : "") + "<" +
3001            std::to_string(getKnownDereferenceableBytes()) + "-" +
3002            std::to_string(getAssumedDereferenceableBytes()) + ">";
3003   }
3004 };
3005 
3006 /// Dereferenceable attribute for a floating value.
3007 struct AADereferenceableFloating
3008     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3009   using Base =
3010       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3011   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
3012 
3013   /// See AbstractAttribute::updateImpl(...).
3014   ChangeStatus updateImpl(Attributor &A) override {
3015     ChangeStatus Change = Base::updateImpl(A);
3016 
3017     const DataLayout &DL = A.getDataLayout();
3018 
3019     auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
3020       unsigned IdxWidth =
3021           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3022       APInt Offset(IdxWidth, 0);
3023       const Value *Base =
3024           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3025 
3026       const auto &AA =
3027           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3028       int64_t DerefBytes = 0;
3029       if (!Stripped && this == &AA) {
3030         // Use IR information if we did not strip anything.
3031         // TODO: track globally.
3032         bool CanBeNull;
3033         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3034         T.GlobalState.indicatePessimisticFixpoint();
3035       } else {
3036         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3037         DerefBytes = DS.DerefBytesState.getAssumed();
3038         T.GlobalState &= DS.GlobalState;
3039       }
3040 
3041       // For now we do not try to "increase" dereferenceability due to negative
3042       // indices as we first have to come up with code to deal with loops and
3043       // for overflows of the dereferenceable bytes.
3044       int64_t OffsetSExt = Offset.getSExtValue();
3045       if (OffsetSExt < 0)
3046         OffsetSExt = 0;
3047 
3048       T.takeAssumedDerefBytesMinimum(
3049           std::max(int64_t(0), DerefBytes - OffsetSExt));
3050 
3051       if (this == &AA) {
3052         if (!Stripped) {
3053           // If nothing was stripped IR information is all we got.
3054           T.takeKnownDerefBytesMaximum(
3055               std::max(int64_t(0), DerefBytes - OffsetSExt));
3056           T.indicatePessimisticFixpoint();
3057         } else if (OffsetSExt > 0) {
3058           // If something was stripped but there is circular reasoning we look
3059           // for the offset. If it is positive we basically decrease the
3060           // dereferenceable bytes in a circluar loop now, which will simply
3061           // drive them down to the known value in a very slow way which we
3062           // can accelerate.
3063           T.indicatePessimisticFixpoint();
3064         }
3065       }
3066 
3067       return T.isValidState();
3068     };
3069 
3070     DerefState T;
3071     if (!genericValueTraversal<AADereferenceable, DerefState>(
3072             A, getIRPosition(), *this, T, VisitValueCB))
3073       return indicatePessimisticFixpoint();
3074 
3075     return Change | clampStateAndIndicateChange(getState(), T);
3076   }
3077 
3078   /// See AbstractAttribute::trackStatistics()
3079   void trackStatistics() const override {
3080     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3081   }
3082 };
3083 
3084 /// Dereferenceable attribute for a return value.
3085 struct AADereferenceableReturned final
3086     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3087                                    DerefState> {
3088   AADereferenceableReturned(const IRPosition &IRP)
3089       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3090                                      DerefState>(IRP) {}
3091 
3092   /// See AbstractAttribute::trackStatistics()
3093   void trackStatistics() const override {
3094     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3095   }
3096 };
3097 
3098 /// Dereferenceable attribute for an argument
3099 struct AADereferenceableArgument final
3100     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3101           AADereferenceable, AADereferenceableImpl, DerefState> {
3102   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3103       AADereferenceable, AADereferenceableImpl, DerefState>;
3104   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3105 
3106   /// See AbstractAttribute::trackStatistics()
3107   void trackStatistics() const override {
3108     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3109   }
3110 };
3111 
3112 /// Dereferenceable attribute for a call site argument.
3113 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3114   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3115       : AADereferenceableFloating(IRP) {}
3116 
3117   /// See AbstractAttribute::trackStatistics()
3118   void trackStatistics() const override {
3119     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3120   }
3121 };
3122 
3123 /// Dereferenceable attribute deduction for a call site return value.
3124 struct AADereferenceableCallSiteReturned final
3125     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3126           AADereferenceable, AADereferenceableImpl> {
3127   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3128       AADereferenceable, AADereferenceableImpl>;
3129   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3130 
3131   /// See AbstractAttribute::trackStatistics()
3132   void trackStatistics() const override {
3133     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3134   }
3135 };
3136 
3137 // ------------------------ Align Argument Attribute ------------------------
3138 
3139 static unsigned int getKnownAlignForUse(Attributor &A,
3140                                         AbstractAttribute &QueryingAA,
3141                                         Value &AssociatedValue, const Use *U,
3142                                         const Instruction *I, bool &TrackUse) {
3143   // We need to follow common pointer manipulation uses to the accesses they
3144   // feed into.
3145   if (isa<CastInst>(I)) {
3146     TrackUse = true;
3147     return 0;
3148   }
3149   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3150     if (GEP->hasAllConstantIndices()) {
3151       TrackUse = true;
3152       return 0;
3153     }
3154   }
3155 
3156   unsigned Alignment = 0;
3157   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3158     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3159       return 0;
3160 
3161     unsigned ArgNo = ICS.getArgumentNo(U);
3162     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3163     // As long as we only use known information there is no need to track
3164     // dependences here.
3165     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3166                                         /* TrackDependence */ false);
3167     Alignment = AlignAA.getKnownAlign();
3168   }
3169 
3170   const Value *UseV = U->get();
3171   if (auto *SI = dyn_cast<StoreInst>(I))
3172     Alignment = SI->getAlignment();
3173   else if (auto *LI = dyn_cast<LoadInst>(I))
3174     Alignment = LI->getAlignment();
3175 
3176   if (Alignment <= 1)
3177     return 0;
3178 
3179   auto &DL = A.getDataLayout();
3180   int64_t Offset;
3181 
3182   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3183     if (Base == &AssociatedValue) {
3184       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3185       // So we can say that the maximum power of two which is a divisor of
3186       // gcd(Offset, Alignment) is an alignment.
3187 
3188       uint32_t gcd =
3189           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3190       Alignment = llvm::PowerOf2Floor(gcd);
3191     }
3192   }
3193 
3194   return Alignment;
3195 }
3196 struct AAAlignImpl : AAAlign {
3197   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3198 
3199   /// See AbstractAttribute::initialize(...).
3200   void initialize(Attributor &A) override {
3201     SmallVector<Attribute, 4> Attrs;
3202     getAttrs({Attribute::Alignment}, Attrs);
3203     for (const Attribute &Attr : Attrs)
3204       takeKnownMaximum(Attr.getValueAsInt());
3205 
3206     if (getIRPosition().isFnInterfaceKind() &&
3207         (!getAssociatedFunction() ||
3208          !getAssociatedFunction()->hasExactDefinition()))
3209       indicatePessimisticFixpoint();
3210   }
3211 
3212   /// See AbstractAttribute::manifest(...).
3213   ChangeStatus manifest(Attributor &A) override {
3214     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3215 
3216     // Check for users that allow alignment annotations.
3217     Value &AnchorVal = getIRPosition().getAnchorValue();
3218     for (const Use &U : AnchorVal.uses()) {
3219       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3220         if (SI->getPointerOperand() == &AnchorVal)
3221           if (SI->getAlignment() < getAssumedAlign()) {
3222             STATS_DECLTRACK(AAAlign, Store,
3223                             "Number of times alignemnt added to a store");
3224             SI->setAlignment(Align(getAssumedAlign()));
3225             Changed = ChangeStatus::CHANGED;
3226           }
3227       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3228         if (LI->getPointerOperand() == &AnchorVal)
3229           if (LI->getAlignment() < getAssumedAlign()) {
3230             LI->setAlignment(Align(getAssumedAlign()));
3231             STATS_DECLTRACK(AAAlign, Load,
3232                             "Number of times alignemnt added to a load");
3233             Changed = ChangeStatus::CHANGED;
3234           }
3235       }
3236     }
3237 
3238     return AAAlign::manifest(A) | Changed;
3239   }
3240 
3241   // TODO: Provide a helper to determine the implied ABI alignment and check in
3242   //       the existing manifest method and a new one for AAAlignImpl that value
3243   //       to avoid making the alignment explicit if it did not improve.
3244 
3245   /// See AbstractAttribute::getDeducedAttributes
3246   virtual void
3247   getDeducedAttributes(LLVMContext &Ctx,
3248                        SmallVectorImpl<Attribute> &Attrs) const override {
3249     if (getAssumedAlign() > 1)
3250       Attrs.emplace_back(
3251           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3252   }
3253   /// See AAFromMustBeExecutedContext
3254   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3255     bool TrackUse = false;
3256 
3257     unsigned int KnownAlign =
3258         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3259     takeKnownMaximum(KnownAlign);
3260 
3261     return TrackUse;
3262   }
3263 
3264   /// See AbstractAttribute::getAsStr().
3265   const std::string getAsStr() const override {
3266     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3267                                 "-" + std::to_string(getAssumedAlign()) + ">")
3268                              : "unknown-align";
3269   }
3270 };
3271 
3272 /// Align attribute for a floating value.
3273 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3274   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3275   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3276 
3277   /// See AbstractAttribute::updateImpl(...).
3278   ChangeStatus updateImpl(Attributor &A) override {
3279     Base::updateImpl(A);
3280 
3281     const DataLayout &DL = A.getDataLayout();
3282 
3283     auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3284                             bool Stripped) -> bool {
3285       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3286       if (!Stripped && this == &AA) {
3287         // Use only IR information if we did not strip anything.
3288         const MaybeAlign PA = V.getPointerAlignment(DL);
3289         T.takeKnownMaximum(PA ? PA->value() : 0);
3290         T.indicatePessimisticFixpoint();
3291       } else {
3292         // Use abstract attribute information.
3293         const AAAlign::StateType &DS =
3294             static_cast<const AAAlign::StateType &>(AA.getState());
3295         T ^= DS;
3296       }
3297       return T.isValidState();
3298     };
3299 
3300     StateType T;
3301     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3302                                                    VisitValueCB))
3303       return indicatePessimisticFixpoint();
3304 
3305     // TODO: If we know we visited all incoming values, thus no are assumed
3306     // dead, we can take the known information from the state T.
3307     return clampStateAndIndicateChange(getState(), T);
3308   }
3309 
3310   /// See AbstractAttribute::trackStatistics()
3311   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3312 };
3313 
3314 /// Align attribute for function return value.
3315 struct AAAlignReturned final
3316     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3317   AAAlignReturned(const IRPosition &IRP)
3318       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3319 
3320   /// See AbstractAttribute::trackStatistics()
3321   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3322 };
3323 
3324 /// Align attribute for function argument.
3325 struct AAAlignArgument final
3326     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3327                                                               AAAlignImpl> {
3328   AAAlignArgument(const IRPosition &IRP)
3329       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3330                                                                 AAAlignImpl>(
3331             IRP) {}
3332 
3333   /// See AbstractAttribute::trackStatistics()
3334   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3335 };
3336 
3337 struct AAAlignCallSiteArgument final : AAAlignFloating {
3338   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3339 
3340   /// See AbstractAttribute::manifest(...).
3341   ChangeStatus manifest(Attributor &A) override {
3342     return AAAlignImpl::manifest(A);
3343   }
3344 
3345   /// See AbstractAttribute::trackStatistics()
3346   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3347 };
3348 
3349 /// Align attribute deduction for a call site return value.
3350 struct AAAlignCallSiteReturned final
3351     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3352                                                              AAAlignImpl> {
3353   using Base =
3354       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3355                                                              AAAlignImpl>;
3356   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3357 
3358   /// See AbstractAttribute::initialize(...).
3359   void initialize(Attributor &A) override {
3360     Base::initialize(A);
3361     Function *F = getAssociatedFunction();
3362     if (!F)
3363       indicatePessimisticFixpoint();
3364   }
3365 
3366   /// See AbstractAttribute::trackStatistics()
3367   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3368 };
3369 
3370 /// ------------------ Function No-Return Attribute ----------------------------
3371 struct AANoReturnImpl : public AANoReturn {
3372   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3373 
3374   /// See AbstractAttribute::initialize(...).
3375   void initialize(Attributor &A) override {
3376     AANoReturn::initialize(A);
3377     Function *F = getAssociatedFunction();
3378     if (!F)
3379       indicatePessimisticFixpoint();
3380   }
3381 
3382   /// See AbstractAttribute::getAsStr().
3383   const std::string getAsStr() const override {
3384     return getAssumed() ? "noreturn" : "may-return";
3385   }
3386 
3387   /// See AbstractAttribute::updateImpl(Attributor &A).
3388   virtual ChangeStatus updateImpl(Attributor &A) override {
3389     auto CheckForNoReturn = [](Instruction &) { return false; };
3390     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3391                                    {(unsigned)Instruction::Ret}))
3392       return indicatePessimisticFixpoint();
3393     return ChangeStatus::UNCHANGED;
3394   }
3395 };
3396 
3397 struct AANoReturnFunction final : AANoReturnImpl {
3398   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3399 
3400   /// See AbstractAttribute::trackStatistics()
3401   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3402 };
3403 
3404 /// NoReturn attribute deduction for a call sites.
3405 struct AANoReturnCallSite final : AANoReturnImpl {
3406   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3407 
3408   /// See AbstractAttribute::updateImpl(...).
3409   ChangeStatus updateImpl(Attributor &A) override {
3410     // TODO: Once we have call site specific value information we can provide
3411     //       call site specific liveness information and then it makes
3412     //       sense to specialize attributes for call sites arguments instead of
3413     //       redirecting requests to the callee argument.
3414     Function *F = getAssociatedFunction();
3415     const IRPosition &FnPos = IRPosition::function(*F);
3416     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3417     return clampStateAndIndicateChange(
3418         getState(),
3419         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3420   }
3421 
3422   /// See AbstractAttribute::trackStatistics()
3423   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3424 };
3425 
3426 /// ----------------------- Variable Capturing ---------------------------------
3427 
3428 /// A class to hold the state of for no-capture attributes.
3429 struct AANoCaptureImpl : public AANoCapture {
3430   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3431 
3432   /// See AbstractAttribute::initialize(...).
3433   void initialize(Attributor &A) override {
3434     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3435       indicateOptimisticFixpoint();
3436       return;
3437     }
3438     Function *AnchorScope = getAnchorScope();
3439     if (isFnInterfaceKind() &&
3440         (!AnchorScope || !AnchorScope->hasExactDefinition())) {
3441       indicatePessimisticFixpoint();
3442       return;
3443     }
3444 
3445     // You cannot "capture" null in the default address space.
3446     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3447         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3448       indicateOptimisticFixpoint();
3449       return;
3450     }
3451 
3452     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3453 
3454     // Check what state the associated function can actually capture.
3455     if (F)
3456       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3457     else
3458       indicatePessimisticFixpoint();
3459   }
3460 
3461   /// See AbstractAttribute::updateImpl(...).
3462   ChangeStatus updateImpl(Attributor &A) override;
3463 
3464   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3465   virtual void
3466   getDeducedAttributes(LLVMContext &Ctx,
3467                        SmallVectorImpl<Attribute> &Attrs) const override {
3468     if (!isAssumedNoCaptureMaybeReturned())
3469       return;
3470 
3471     if (getArgNo() >= 0) {
3472       if (isAssumedNoCapture())
3473         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3474       else if (ManifestInternal)
3475         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3476     }
3477   }
3478 
3479   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3480   /// depending on the ability of the function associated with \p IRP to capture
3481   /// state in memory and through "returning/throwing", respectively.
3482   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3483                                                    const Function &F,
3484                                                    BitIntegerState &State) {
3485     // TODO: Once we have memory behavior attributes we should use them here.
3486 
3487     // If we know we cannot communicate or write to memory, we do not care about
3488     // ptr2int anymore.
3489     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3490         F.getReturnType()->isVoidTy()) {
3491       State.addKnownBits(NO_CAPTURE);
3492       return;
3493     }
3494 
3495     // A function cannot capture state in memory if it only reads memory, it can
3496     // however return/throw state and the state might be influenced by the
3497     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3498     if (F.onlyReadsMemory())
3499       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3500 
3501     // A function cannot communicate state back if it does not through
3502     // exceptions and doesn not return values.
3503     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3504       State.addKnownBits(NOT_CAPTURED_IN_RET);
3505 
3506     // Check existing "returned" attributes.
3507     int ArgNo = IRP.getArgNo();
3508     if (F.doesNotThrow() && ArgNo >= 0) {
3509       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3510         if (F.hasParamAttribute(u, Attribute::Returned)) {
3511           if (u == unsigned(ArgNo))
3512             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3513           else if (F.onlyReadsMemory())
3514             State.addKnownBits(NO_CAPTURE);
3515           else
3516             State.addKnownBits(NOT_CAPTURED_IN_RET);
3517           break;
3518         }
3519     }
3520   }
3521 
3522   /// See AbstractState::getAsStr().
3523   const std::string getAsStr() const override {
3524     if (isKnownNoCapture())
3525       return "known not-captured";
3526     if (isAssumedNoCapture())
3527       return "assumed not-captured";
3528     if (isKnownNoCaptureMaybeReturned())
3529       return "known not-captured-maybe-returned";
3530     if (isAssumedNoCaptureMaybeReturned())
3531       return "assumed not-captured-maybe-returned";
3532     return "assumed-captured";
3533   }
3534 };
3535 
3536 /// Attributor-aware capture tracker.
3537 struct AACaptureUseTracker final : public CaptureTracker {
3538 
3539   /// Create a capture tracker that can lookup in-flight abstract attributes
3540   /// through the Attributor \p A.
3541   ///
3542   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3543   /// search is stopped. If a use leads to a return instruction,
3544   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3545   /// If a use leads to a ptr2int which may capture the value,
3546   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3547   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3548   /// set. All values in \p PotentialCopies are later tracked as well. For every
3549   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3550   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3551   /// conservatively set to true.
3552   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3553                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3554                       SmallVectorImpl<const Value *> &PotentialCopies,
3555                       unsigned &RemainingUsesToExplore)
3556       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3557         PotentialCopies(PotentialCopies),
3558         RemainingUsesToExplore(RemainingUsesToExplore) {}
3559 
3560   /// Determine if \p V maybe captured. *Also updates the state!*
3561   bool valueMayBeCaptured(const Value *V) {
3562     if (V->getType()->isPointerTy()) {
3563       PointerMayBeCaptured(V, this);
3564     } else {
3565       State.indicatePessimisticFixpoint();
3566     }
3567     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3568   }
3569 
3570   /// See CaptureTracker::tooManyUses().
3571   void tooManyUses() override {
3572     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3573   }
3574 
3575   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3576     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3577       return true;
3578     const auto &DerefAA =
3579         A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
3580     return DerefAA.getAssumedDereferenceableBytes();
3581   }
3582 
3583   /// See CaptureTracker::captured(...).
3584   bool captured(const Use *U) override {
3585     Instruction *UInst = cast<Instruction>(U->getUser());
3586     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3587                       << "\n");
3588 
3589     // Because we may reuse the tracker multiple times we keep track of the
3590     // number of explored uses ourselves as well.
3591     if (RemainingUsesToExplore-- == 0) {
3592       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3593       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3594                           /* Return */ true);
3595     }
3596 
3597     // Deal with ptr2int by following uses.
3598     if (isa<PtrToIntInst>(UInst)) {
3599       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3600       return valueMayBeCaptured(UInst);
3601     }
3602 
3603     // Explicitly catch return instructions.
3604     if (isa<ReturnInst>(UInst))
3605       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3606                           /* Return */ true);
3607 
3608     // For now we only use special logic for call sites. However, the tracker
3609     // itself knows about a lot of other non-capturing cases already.
3610     CallSite CS(UInst);
3611     if (!CS || !CS.isArgOperand(U))
3612       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3613                           /* Return */ true);
3614 
3615     unsigned ArgNo = CS.getArgumentNo(U);
3616     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3617     // If we have a abstract no-capture attribute for the argument we can use
3618     // it to justify a non-capture attribute here. This allows recursion!
3619     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3620     if (ArgNoCaptureAA.isAssumedNoCapture())
3621       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3622                           /* Return */ false);
3623     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3624       addPotentialCopy(CS);
3625       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3626                           /* Return */ false);
3627     }
3628 
3629     // Lastly, we could not find a reason no-capture can be assumed so we don't.
3630     return isCapturedIn(/* Memory */ true, /* Integer */ true,
3631                         /* Return */ true);
3632   }
3633 
3634   /// Register \p CS as potential copy of the value we are checking.
3635   void addPotentialCopy(CallSite CS) {
3636     PotentialCopies.push_back(CS.getInstruction());
3637   }
3638 
3639   /// See CaptureTracker::shouldExplore(...).
3640   bool shouldExplore(const Use *U) override {
3641     // Check liveness.
3642     return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
3643   }
3644 
3645   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
3646   /// \p CapturedInRet, then return the appropriate value for use in the
3647   /// CaptureTracker::captured() interface.
3648   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
3649                     bool CapturedInRet) {
3650     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
3651                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
3652     if (CapturedInMem)
3653       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
3654     if (CapturedInInt)
3655       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
3656     if (CapturedInRet)
3657       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
3658     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3659   }
3660 
3661 private:
3662   /// The attributor providing in-flight abstract attributes.
3663   Attributor &A;
3664 
3665   /// The abstract attribute currently updated.
3666   AANoCapture &NoCaptureAA;
3667 
3668   /// The abstract liveness state.
3669   const AAIsDead &IsDeadAA;
3670 
3671   /// The state currently updated.
3672   AANoCapture::StateType &State;
3673 
3674   /// Set of potential copies of the tracked value.
3675   SmallVectorImpl<const Value *> &PotentialCopies;
3676 
3677   /// Global counter to limit the number of explored uses.
3678   unsigned &RemainingUsesToExplore;
3679 };
3680 
3681 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
3682   const IRPosition &IRP = getIRPosition();
3683   const Value *V =
3684       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
3685   if (!V)
3686     return indicatePessimisticFixpoint();
3687 
3688   const Function *F =
3689       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
3690   assert(F && "Expected a function!");
3691   const IRPosition &FnPos = IRPosition::function(*F);
3692   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos);
3693 
3694   AANoCapture::StateType T;
3695 
3696   // Readonly means we cannot capture through memory.
3697   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3698   if (FnMemAA.isAssumedReadOnly()) {
3699     T.addKnownBits(NOT_CAPTURED_IN_MEM);
3700     if (FnMemAA.isKnownReadOnly())
3701       addKnownBits(NOT_CAPTURED_IN_MEM);
3702   }
3703 
3704   // Make sure all returned values are different than the underlying value.
3705   // TODO: we could do this in a more sophisticated way inside
3706   //       AAReturnedValues, e.g., track all values that escape through returns
3707   //       directly somehow.
3708   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
3709     bool SeenConstant = false;
3710     for (auto &It : RVAA.returned_values()) {
3711       if (isa<Constant>(It.first)) {
3712         if (SeenConstant)
3713           return false;
3714         SeenConstant = true;
3715       } else if (!isa<Argument>(It.first) ||
3716                  It.first == getAssociatedArgument())
3717         return false;
3718     }
3719     return true;
3720   };
3721 
3722   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, FnPos);
3723   if (NoUnwindAA.isAssumedNoUnwind()) {
3724     bool IsVoidTy = F->getReturnType()->isVoidTy();
3725     const AAReturnedValues *RVAA =
3726         IsVoidTy ? nullptr : &A.getAAFor<AAReturnedValues>(*this, FnPos);
3727     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
3728       T.addKnownBits(NOT_CAPTURED_IN_RET);
3729       if (T.isKnown(NOT_CAPTURED_IN_MEM))
3730         return ChangeStatus::UNCHANGED;
3731       if (NoUnwindAA.isKnownNoUnwind() &&
3732           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
3733         addKnownBits(NOT_CAPTURED_IN_RET);
3734         if (isKnown(NOT_CAPTURED_IN_MEM))
3735           return indicateOptimisticFixpoint();
3736       }
3737     }
3738   }
3739 
3740   // Use the CaptureTracker interface and logic with the specialized tracker,
3741   // defined in AACaptureUseTracker, that can look at in-flight abstract
3742   // attributes and directly updates the assumed state.
3743   SmallVector<const Value *, 4> PotentialCopies;
3744   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
3745   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
3746                               RemainingUsesToExplore);
3747 
3748   // Check all potential copies of the associated value until we can assume
3749   // none will be captured or we have to assume at least one might be.
3750   unsigned Idx = 0;
3751   PotentialCopies.push_back(V);
3752   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
3753     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
3754 
3755   AANoCapture::StateType &S = getState();
3756   auto Assumed = S.getAssumed();
3757   S.intersectAssumedBits(T.getAssumed());
3758   if (!isAssumedNoCaptureMaybeReturned())
3759     return indicatePessimisticFixpoint();
3760   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
3761                                    : ChangeStatus::CHANGED;
3762 }
3763 
3764 /// NoCapture attribute for function arguments.
3765 struct AANoCaptureArgument final : AANoCaptureImpl {
3766   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3767 
3768   /// See AbstractAttribute::trackStatistics()
3769   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
3770 };
3771 
3772 /// NoCapture attribute for call site arguments.
3773 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
3774   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3775 
3776   /// See AbstractAttribute::updateImpl(...).
3777   ChangeStatus updateImpl(Attributor &A) override {
3778     // TODO: Once we have call site specific value information we can provide
3779     //       call site specific liveness information and then it makes
3780     //       sense to specialize attributes for call sites arguments instead of
3781     //       redirecting requests to the callee argument.
3782     Argument *Arg = getAssociatedArgument();
3783     if (!Arg)
3784       return indicatePessimisticFixpoint();
3785     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3786     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
3787     return clampStateAndIndicateChange(
3788         getState(),
3789         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3790   }
3791 
3792   /// See AbstractAttribute::trackStatistics()
3793   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
3794 };
3795 
3796 /// NoCapture attribute for floating values.
3797 struct AANoCaptureFloating final : AANoCaptureImpl {
3798   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3799 
3800   /// See AbstractAttribute::trackStatistics()
3801   void trackStatistics() const override {
3802     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
3803   }
3804 };
3805 
3806 /// NoCapture attribute for function return value.
3807 struct AANoCaptureReturned final : AANoCaptureImpl {
3808   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
3809     llvm_unreachable("NoCapture is not applicable to function returns!");
3810   }
3811 
3812   /// See AbstractAttribute::initialize(...).
3813   void initialize(Attributor &A) override {
3814     llvm_unreachable("NoCapture is not applicable to function returns!");
3815   }
3816 
3817   /// See AbstractAttribute::updateImpl(...).
3818   ChangeStatus updateImpl(Attributor &A) override {
3819     llvm_unreachable("NoCapture is not applicable to function returns!");
3820   }
3821 
3822   /// See AbstractAttribute::trackStatistics()
3823   void trackStatistics() const override {}
3824 };
3825 
3826 /// NoCapture attribute deduction for a call site return value.
3827 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
3828   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3829 
3830   /// See AbstractAttribute::trackStatistics()
3831   void trackStatistics() const override {
3832     STATS_DECLTRACK_CSRET_ATTR(nocapture)
3833   }
3834 };
3835 
3836 /// ------------------ Value Simplify Attribute ----------------------------
3837 struct AAValueSimplifyImpl : AAValueSimplify {
3838   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
3839 
3840   /// See AbstractAttribute::getAsStr().
3841   const std::string getAsStr() const override {
3842     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
3843                         : "not-simple";
3844   }
3845 
3846   /// See AbstractAttribute::trackStatistics()
3847   void trackStatistics() const override {}
3848 
3849   /// See AAValueSimplify::getAssumedSimplifiedValue()
3850   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
3851     if (!getAssumed())
3852       return const_cast<Value *>(&getAssociatedValue());
3853     return SimplifiedAssociatedValue;
3854   }
3855   void initialize(Attributor &A) override {}
3856 
3857   /// Helper function for querying AAValueSimplify and updating candicate.
3858   /// \param QueryingValue Value trying to unify with SimplifiedValue
3859   /// \param AccumulatedSimplifiedValue Current simplification result.
3860   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
3861                              Value &QueryingValue,
3862                              Optional<Value *> &AccumulatedSimplifiedValue) {
3863     // FIXME: Add a typecast support.
3864 
3865     auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
3866         QueryingAA, IRPosition::value(QueryingValue));
3867 
3868     Optional<Value *> QueryingValueSimplified =
3869         ValueSimpifyAA.getAssumedSimplifiedValue(A);
3870 
3871     if (!QueryingValueSimplified.hasValue())
3872       return true;
3873 
3874     if (!QueryingValueSimplified.getValue())
3875       return false;
3876 
3877     Value &QueryingValueSimplifiedUnwrapped =
3878         *QueryingValueSimplified.getValue();
3879 
3880     if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
3881       return true;
3882 
3883     if (AccumulatedSimplifiedValue.hasValue())
3884       return AccumulatedSimplifiedValue == QueryingValueSimplified;
3885 
3886     LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
3887                       << " is assumed to be "
3888                       << QueryingValueSimplifiedUnwrapped << "\n");
3889 
3890     AccumulatedSimplifiedValue = QueryingValueSimplified;
3891     return true;
3892   }
3893 
3894   /// See AbstractAttribute::manifest(...).
3895   ChangeStatus manifest(Attributor &A) override {
3896     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3897 
3898     if (!SimplifiedAssociatedValue.hasValue() ||
3899         !SimplifiedAssociatedValue.getValue())
3900       return Changed;
3901 
3902     if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
3903       // We can replace the AssociatedValue with the constant.
3904       Value &V = getAssociatedValue();
3905       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
3906         LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
3907                           << "\n");
3908         V.replaceAllUsesWith(C);
3909         Changed = ChangeStatus::CHANGED;
3910       }
3911     }
3912 
3913     return Changed | AAValueSimplify::manifest(A);
3914   }
3915 
3916 protected:
3917   // An assumed simplified value. Initially, it is set to Optional::None, which
3918   // means that the value is not clear under current assumption. If in the
3919   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
3920   // returns orignal associated value.
3921   Optional<Value *> SimplifiedAssociatedValue;
3922 };
3923 
3924 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
3925   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3926 
3927   void initialize(Attributor &A) override {
3928     AAValueSimplifyImpl::initialize(A);
3929     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
3930       indicatePessimisticFixpoint();
3931     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
3932                 /* IgnoreSubsumingPositions */ true))
3933       indicatePessimisticFixpoint();
3934   }
3935 
3936   /// See AbstractAttribute::updateImpl(...).
3937   ChangeStatus updateImpl(Attributor &A) override {
3938     // Byval is only replacable if it is readonly otherwise we would write into
3939     // the replaced value and not the copy that byval creates implicitly.
3940     Argument *Arg = getAssociatedArgument();
3941     if (Arg->hasByValAttr()) {
3942       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
3943       if (!MemAA.isAssumedReadOnly())
3944         return indicatePessimisticFixpoint();
3945     }
3946 
3947     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3948 
3949     auto PredForCallSite = [&](AbstractCallSite ACS) {
3950       // Check if we have an associated argument or not (which can happen for
3951       // callback calls).
3952       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
3953       if (!ArgOp)
3954         return false;
3955       // We can only propagate thread independent values through callbacks.
3956       // This is different to direct/indirect call sites because for them we
3957       // know the thread executing the caller and callee is the same. For
3958       // callbacks this is not guaranteed, thus a thread dependent value could
3959       // be different for the caller and callee, making it invalid to propagate.
3960       if (ACS.isCallbackCall())
3961         if (auto *C = dyn_cast<Constant>(ArgOp))
3962           if (C->isThreadDependent())
3963             return false;
3964       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
3965     };
3966 
3967     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
3968       return indicatePessimisticFixpoint();
3969 
3970     // If a candicate was found in this update, return CHANGED.
3971     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3972                ? ChangeStatus::UNCHANGED
3973                : ChangeStatus ::CHANGED;
3974   }
3975 
3976   /// See AbstractAttribute::trackStatistics()
3977   void trackStatistics() const override {
3978     STATS_DECLTRACK_ARG_ATTR(value_simplify)
3979   }
3980 };
3981 
3982 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
3983   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3984 
3985   /// See AbstractAttribute::updateImpl(...).
3986   ChangeStatus updateImpl(Attributor &A) override {
3987     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3988 
3989     auto PredForReturned = [&](Value &V) {
3990       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3991     };
3992 
3993     if (!A.checkForAllReturnedValues(PredForReturned, *this))
3994       return indicatePessimisticFixpoint();
3995 
3996     // If a candicate was found in this update, return CHANGED.
3997     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3998                ? ChangeStatus::UNCHANGED
3999                : ChangeStatus ::CHANGED;
4000   }
4001   /// See AbstractAttribute::trackStatistics()
4002   void trackStatistics() const override {
4003     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4004   }
4005 };
4006 
4007 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4008   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4009 
4010   /// See AbstractAttribute::initialize(...).
4011   void initialize(Attributor &A) override {
4012     Value &V = getAnchorValue();
4013 
4014     // TODO: add other stuffs
4015     if (isa<Constant>(V) || isa<UndefValue>(V))
4016       indicatePessimisticFixpoint();
4017   }
4018 
4019   /// See AbstractAttribute::updateImpl(...).
4020   ChangeStatus updateImpl(Attributor &A) override {
4021     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4022 
4023     auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
4024       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4025       if (!Stripped && this == &AA) {
4026         // TODO: Look the instruction and check recursively.
4027         LLVM_DEBUG(
4028             dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
4029                    << V << "\n");
4030         indicatePessimisticFixpoint();
4031         return false;
4032       }
4033       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4034     };
4035 
4036     if (!genericValueTraversal<AAValueSimplify, BooleanState>(
4037             A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
4038             VisitValueCB))
4039       return indicatePessimisticFixpoint();
4040 
4041     // If a candicate was found in this update, return CHANGED.
4042 
4043     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4044                ? ChangeStatus::UNCHANGED
4045                : ChangeStatus ::CHANGED;
4046   }
4047 
4048   /// See AbstractAttribute::trackStatistics()
4049   void trackStatistics() const override {
4050     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4051   }
4052 };
4053 
4054 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4055   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4056 
4057   /// See AbstractAttribute::initialize(...).
4058   void initialize(Attributor &A) override {
4059     SimplifiedAssociatedValue = &getAnchorValue();
4060     indicateOptimisticFixpoint();
4061   }
4062   /// See AbstractAttribute::initialize(...).
4063   ChangeStatus updateImpl(Attributor &A) override {
4064     llvm_unreachable(
4065         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4066   }
4067   /// See AbstractAttribute::trackStatistics()
4068   void trackStatistics() const override {
4069     STATS_DECLTRACK_FN_ATTR(value_simplify)
4070   }
4071 };
4072 
4073 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4074   AAValueSimplifyCallSite(const IRPosition &IRP)
4075       : AAValueSimplifyFunction(IRP) {}
4076   /// See AbstractAttribute::trackStatistics()
4077   void trackStatistics() const override {
4078     STATS_DECLTRACK_CS_ATTR(value_simplify)
4079   }
4080 };
4081 
4082 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4083   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4084       : AAValueSimplifyReturned(IRP) {}
4085 
4086   void trackStatistics() const override {
4087     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4088   }
4089 };
4090 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4091   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4092       : AAValueSimplifyFloating(IRP) {}
4093 
4094   void trackStatistics() const override {
4095     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4096   }
4097 };
4098 
4099 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4100 struct AAHeapToStackImpl : public AAHeapToStack {
4101   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4102 
4103   const std::string getAsStr() const override {
4104     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4105   }
4106 
4107   ChangeStatus manifest(Attributor &A) override {
4108     assert(getState().isValidState() &&
4109            "Attempted to manifest an invalid state!");
4110 
4111     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4112     Function *F = getAssociatedFunction();
4113     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4114 
4115     for (Instruction *MallocCall : MallocCalls) {
4116       // This malloc cannot be replaced.
4117       if (BadMallocCalls.count(MallocCall))
4118         continue;
4119 
4120       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4121         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4122         A.deleteAfterManifest(*FreeCall);
4123         HasChanged = ChangeStatus::CHANGED;
4124       }
4125 
4126       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4127                         << "\n");
4128 
4129       Constant *Size;
4130       if (isCallocLikeFn(MallocCall, TLI)) {
4131         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4132         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4133         APInt TotalSize = SizeT->getValue() * Num->getValue();
4134         Size =
4135             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4136       } else {
4137         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4138       }
4139 
4140       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4141       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4142                                        Size, "", MallocCall->getNextNode());
4143 
4144       if (AI->getType() != MallocCall->getType())
4145         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4146                              AI->getNextNode());
4147 
4148       MallocCall->replaceAllUsesWith(AI);
4149 
4150       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4151         auto *NBB = II->getNormalDest();
4152         BranchInst::Create(NBB, MallocCall->getParent());
4153         A.deleteAfterManifest(*MallocCall);
4154       } else {
4155         A.deleteAfterManifest(*MallocCall);
4156       }
4157 
4158       if (isCallocLikeFn(MallocCall, TLI)) {
4159         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4160                                    AI->getNextNode());
4161         Value *Ops[] = {
4162             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4163             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4164 
4165         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4166         Module *M = F->getParent();
4167         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4168         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4169       }
4170       HasChanged = ChangeStatus::CHANGED;
4171     }
4172 
4173     return HasChanged;
4174   }
4175 
4176   /// Collection of all malloc calls in a function.
4177   SmallSetVector<Instruction *, 4> MallocCalls;
4178 
4179   /// Collection of malloc calls that cannot be converted.
4180   DenseSet<const Instruction *> BadMallocCalls;
4181 
4182   /// A map for each malloc call to the set of associated free calls.
4183   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4184 
4185   ChangeStatus updateImpl(Attributor &A) override;
4186 };
4187 
4188 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4189   const Function *F = getAssociatedFunction();
4190   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4191 
4192   MustBeExecutedContextExplorer &Explorer =
4193       A.getInfoCache().getMustBeExecutedContextExplorer();
4194 
4195   auto FreeCheck = [&](Instruction &I) {
4196     const auto &Frees = FreesForMalloc.lookup(&I);
4197     if (Frees.size() != 1)
4198       return false;
4199     Instruction *UniqueFree = *Frees.begin();
4200     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4201   };
4202 
4203   auto UsesCheck = [&](Instruction &I) {
4204     bool ValidUsesOnly = true;
4205     bool MustUse = true;
4206     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4207       Instruction *UserI = cast<Instruction>(U.getUser());
4208       if (isa<LoadInst>(UserI))
4209         return true;
4210       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4211         if (SI->getValueOperand() == U.get()) {
4212           LLVM_DEBUG(dbgs()
4213                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4214           ValidUsesOnly = false;
4215         } else {
4216           // A store into the malloc'ed memory is fine.
4217         }
4218         return true;
4219       }
4220       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4221         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4222           return true;
4223         // Record malloc.
4224         if (isFreeCall(UserI, TLI)) {
4225           if (MustUse) {
4226             FreesForMalloc[&I].insert(UserI);
4227           } else {
4228             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4229                               << *UserI << "\n");
4230             ValidUsesOnly = false;
4231           }
4232           return true;
4233         }
4234 
4235         unsigned ArgNo = CB->getArgOperandNo(&U);
4236 
4237         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4238             *this, IRPosition::callsite_argument(*CB, ArgNo));
4239 
4240         // If a callsite argument use is nofree, we are fine.
4241         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4242             *this, IRPosition::callsite_argument(*CB, ArgNo));
4243 
4244         if (!NoCaptureAA.isAssumedNoCapture() ||
4245             !ArgNoFreeAA.isAssumedNoFree()) {
4246           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4247           ValidUsesOnly = false;
4248         }
4249         return true;
4250       }
4251 
4252       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4253           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4254         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4255         Follow = true;
4256         return true;
4257       }
4258       // Unknown user for which we can not track uses further (in a way that
4259       // makes sense).
4260       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4261       ValidUsesOnly = false;
4262       return true;
4263     };
4264     A.checkForAllUses(Pred, *this, I);
4265     return ValidUsesOnly;
4266   };
4267 
4268   auto MallocCallocCheck = [&](Instruction &I) {
4269     if (BadMallocCalls.count(&I))
4270       return true;
4271 
4272     bool IsMalloc = isMallocLikeFn(&I, TLI);
4273     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4274     if (!IsMalloc && !IsCalloc) {
4275       BadMallocCalls.insert(&I);
4276       return true;
4277     }
4278 
4279     if (IsMalloc) {
4280       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4281         if (Size->getValue().sle(MaxHeapToStackSize))
4282           if (UsesCheck(I) || FreeCheck(I)) {
4283             MallocCalls.insert(&I);
4284             return true;
4285           }
4286     } else if (IsCalloc) {
4287       bool Overflow = false;
4288       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4289         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4290           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4291                   .sle(MaxHeapToStackSize))
4292             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4293               MallocCalls.insert(&I);
4294               return true;
4295             }
4296     }
4297 
4298     BadMallocCalls.insert(&I);
4299     return true;
4300   };
4301 
4302   size_t NumBadMallocs = BadMallocCalls.size();
4303 
4304   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4305 
4306   if (NumBadMallocs != BadMallocCalls.size())
4307     return ChangeStatus::CHANGED;
4308 
4309   return ChangeStatus::UNCHANGED;
4310 }
4311 
4312 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4313   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4314 
4315   /// See AbstractAttribute::trackStatistics()
4316   void trackStatistics() const override {
4317     STATS_DECL(MallocCalls, Function,
4318                "Number of malloc calls converted to allocas");
4319     for (auto *C : MallocCalls)
4320       if (!BadMallocCalls.count(C))
4321         ++BUILD_STAT_NAME(MallocCalls, Function);
4322   }
4323 };
4324 
4325 /// -------------------- Memory Behavior Attributes ----------------------------
4326 /// Includes read-none, read-only, and write-only.
4327 /// ----------------------------------------------------------------------------
4328 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
4329   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
4330 
4331   /// See AbstractAttribute::initialize(...).
4332   void initialize(Attributor &A) override {
4333     intersectAssumedBits(BEST_STATE);
4334     getKnownStateFromValue(getIRPosition(), getState());
4335     IRAttribute::initialize(A);
4336   }
4337 
4338   /// Return the memory behavior information encoded in the IR for \p IRP.
4339   static void getKnownStateFromValue(const IRPosition &IRP,
4340                                      BitIntegerState &State) {
4341     SmallVector<Attribute, 2> Attrs;
4342     IRP.getAttrs(AttrKinds, Attrs);
4343     for (const Attribute &Attr : Attrs) {
4344       switch (Attr.getKindAsEnum()) {
4345       case Attribute::ReadNone:
4346         State.addKnownBits(NO_ACCESSES);
4347         break;
4348       case Attribute::ReadOnly:
4349         State.addKnownBits(NO_WRITES);
4350         break;
4351       case Attribute::WriteOnly:
4352         State.addKnownBits(NO_READS);
4353         break;
4354       default:
4355         llvm_unreachable("Unexpcted attribute!");
4356       }
4357     }
4358 
4359     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
4360       if (!I->mayReadFromMemory())
4361         State.addKnownBits(NO_READS);
4362       if (!I->mayWriteToMemory())
4363         State.addKnownBits(NO_WRITES);
4364     }
4365   }
4366 
4367   /// See AbstractAttribute::getDeducedAttributes(...).
4368   void getDeducedAttributes(LLVMContext &Ctx,
4369                             SmallVectorImpl<Attribute> &Attrs) const override {
4370     assert(Attrs.size() == 0);
4371     if (isAssumedReadNone())
4372       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
4373     else if (isAssumedReadOnly())
4374       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
4375     else if (isAssumedWriteOnly())
4376       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
4377     assert(Attrs.size() <= 1);
4378   }
4379 
4380   /// See AbstractAttribute::manifest(...).
4381   ChangeStatus manifest(Attributor &A) override {
4382     const IRPosition &IRP = getIRPosition();
4383 
4384     // Check if we would improve the existing attributes first.
4385     SmallVector<Attribute, 4> DeducedAttrs;
4386     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
4387     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
4388           return IRP.hasAttr(Attr.getKindAsEnum(),
4389                              /* IgnoreSubsumingPositions */ true);
4390         }))
4391       return ChangeStatus::UNCHANGED;
4392 
4393     // Clear existing attributes.
4394     IRP.removeAttrs(AttrKinds);
4395 
4396     // Use the generic manifest method.
4397     return IRAttribute::manifest(A);
4398   }
4399 
4400   /// See AbstractState::getAsStr().
4401   const std::string getAsStr() const override {
4402     if (isAssumedReadNone())
4403       return "readnone";
4404     if (isAssumedReadOnly())
4405       return "readonly";
4406     if (isAssumedWriteOnly())
4407       return "writeonly";
4408     return "may-read/write";
4409   }
4410 
4411   /// The set of IR attributes AAMemoryBehavior deals with.
4412   static const Attribute::AttrKind AttrKinds[3];
4413 };
4414 
4415 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
4416     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
4417 
4418 /// Memory behavior attribute for a floating value.
4419 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
4420   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4421 
4422   /// See AbstractAttribute::initialize(...).
4423   void initialize(Attributor &A) override {
4424     AAMemoryBehaviorImpl::initialize(A);
4425     // Initialize the use vector with all direct uses of the associated value.
4426     for (const Use &U : getAssociatedValue().uses())
4427       Uses.insert(&U);
4428   }
4429 
4430   /// See AbstractAttribute::updateImpl(...).
4431   ChangeStatus updateImpl(Attributor &A) override;
4432 
4433   /// See AbstractAttribute::trackStatistics()
4434   void trackStatistics() const override {
4435     if (isAssumedReadNone())
4436       STATS_DECLTRACK_FLOATING_ATTR(readnone)
4437     else if (isAssumedReadOnly())
4438       STATS_DECLTRACK_FLOATING_ATTR(readonly)
4439     else if (isAssumedWriteOnly())
4440       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
4441   }
4442 
4443 private:
4444   /// Return true if users of \p UserI might access the underlying
4445   /// variable/location described by \p U and should therefore be analyzed.
4446   bool followUsersOfUseIn(Attributor &A, const Use *U,
4447                           const Instruction *UserI);
4448 
4449   /// Update the state according to the effect of use \p U in \p UserI.
4450   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
4451 
4452 protected:
4453   /// Container for (transitive) uses of the associated argument.
4454   SetVector<const Use *> Uses;
4455 };
4456 
4457 /// Memory behavior attribute for function argument.
4458 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
4459   AAMemoryBehaviorArgument(const IRPosition &IRP)
4460       : AAMemoryBehaviorFloating(IRP) {}
4461 
4462   /// See AbstractAttribute::initialize(...).
4463   void initialize(Attributor &A) override {
4464     AAMemoryBehaviorFloating::initialize(A);
4465 
4466     // Initialize the use vector with all direct uses of the associated value.
4467     Argument *Arg = getAssociatedArgument();
4468     if (!Arg || !Arg->getParent()->hasExactDefinition())
4469       indicatePessimisticFixpoint();
4470   }
4471 
4472   ChangeStatus manifest(Attributor &A) override {
4473     // TODO: From readattrs.ll: "inalloca parameters are always
4474     //                           considered written"
4475     if (hasAttr({Attribute::InAlloca})) {
4476       removeKnownBits(NO_WRITES);
4477       removeAssumedBits(NO_WRITES);
4478     }
4479     return AAMemoryBehaviorFloating::manifest(A);
4480   }
4481 
4482   /// See AbstractAttribute::trackStatistics()
4483   void trackStatistics() const override {
4484     if (isAssumedReadNone())
4485       STATS_DECLTRACK_ARG_ATTR(readnone)
4486     else if (isAssumedReadOnly())
4487       STATS_DECLTRACK_ARG_ATTR(readonly)
4488     else if (isAssumedWriteOnly())
4489       STATS_DECLTRACK_ARG_ATTR(writeonly)
4490   }
4491 };
4492 
4493 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
4494   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
4495       : AAMemoryBehaviorArgument(IRP) {}
4496 
4497   /// See AbstractAttribute::updateImpl(...).
4498   ChangeStatus updateImpl(Attributor &A) override {
4499     // TODO: Once we have call site specific value information we can provide
4500     //       call site specific liveness liveness information and then it makes
4501     //       sense to specialize attributes for call sites arguments instead of
4502     //       redirecting requests to the callee argument.
4503     Argument *Arg = getAssociatedArgument();
4504     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4505     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4506     return clampStateAndIndicateChange(
4507         getState(),
4508         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
4509   }
4510 
4511   /// See AbstractAttribute::trackStatistics()
4512   void trackStatistics() const override {
4513     if (isAssumedReadNone())
4514       STATS_DECLTRACK_CSARG_ATTR(readnone)
4515     else if (isAssumedReadOnly())
4516       STATS_DECLTRACK_CSARG_ATTR(readonly)
4517     else if (isAssumedWriteOnly())
4518       STATS_DECLTRACK_CSARG_ATTR(writeonly)
4519   }
4520 };
4521 
4522 /// Memory behavior attribute for a call site return position.
4523 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
4524   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
4525       : AAMemoryBehaviorFloating(IRP) {}
4526 
4527   /// See AbstractAttribute::manifest(...).
4528   ChangeStatus manifest(Attributor &A) override {
4529     // We do not annotate returned values.
4530     return ChangeStatus::UNCHANGED;
4531   }
4532 
4533   /// See AbstractAttribute::trackStatistics()
4534   void trackStatistics() const override {}
4535 };
4536 
4537 /// An AA to represent the memory behavior function attributes.
4538 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
4539   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4540 
4541   /// See AbstractAttribute::updateImpl(Attributor &A).
4542   virtual ChangeStatus updateImpl(Attributor &A) override;
4543 
4544   /// See AbstractAttribute::manifest(...).
4545   ChangeStatus manifest(Attributor &A) override {
4546     Function &F = cast<Function>(getAnchorValue());
4547     if (isAssumedReadNone()) {
4548       F.removeFnAttr(Attribute::ArgMemOnly);
4549       F.removeFnAttr(Attribute::InaccessibleMemOnly);
4550       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4551     }
4552     return AAMemoryBehaviorImpl::manifest(A);
4553   }
4554 
4555   /// See AbstractAttribute::trackStatistics()
4556   void trackStatistics() const override {
4557     if (isAssumedReadNone())
4558       STATS_DECLTRACK_FN_ATTR(readnone)
4559     else if (isAssumedReadOnly())
4560       STATS_DECLTRACK_FN_ATTR(readonly)
4561     else if (isAssumedWriteOnly())
4562       STATS_DECLTRACK_FN_ATTR(writeonly)
4563   }
4564 };
4565 
4566 /// AAMemoryBehavior attribute for call sites.
4567 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
4568   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4569 
4570   /// See AbstractAttribute::initialize(...).
4571   void initialize(Attributor &A) override {
4572     AAMemoryBehaviorImpl::initialize(A);
4573     Function *F = getAssociatedFunction();
4574     if (!F || !F->hasExactDefinition())
4575       indicatePessimisticFixpoint();
4576   }
4577 
4578   /// See AbstractAttribute::updateImpl(...).
4579   ChangeStatus updateImpl(Attributor &A) override {
4580     // TODO: Once we have call site specific value information we can provide
4581     //       call site specific liveness liveness information and then it makes
4582     //       sense to specialize attributes for call sites arguments instead of
4583     //       redirecting requests to the callee argument.
4584     Function *F = getAssociatedFunction();
4585     const IRPosition &FnPos = IRPosition::function(*F);
4586     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4587     return clampStateAndIndicateChange(
4588         getState(),
4589         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
4590   }
4591 
4592   /// See AbstractAttribute::trackStatistics()
4593   void trackStatistics() const override {
4594     if (isAssumedReadNone())
4595       STATS_DECLTRACK_CS_ATTR(readnone)
4596     else if (isAssumedReadOnly())
4597       STATS_DECLTRACK_CS_ATTR(readonly)
4598     else if (isAssumedWriteOnly())
4599       STATS_DECLTRACK_CS_ATTR(writeonly)
4600   }
4601 };
4602 } // namespace
4603 
4604 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
4605 
4606   // The current assumed state used to determine a change.
4607   auto AssumedState = getAssumed();
4608 
4609   auto CheckRWInst = [&](Instruction &I) {
4610     // If the instruction has an own memory behavior state, use it to restrict
4611     // the local state. No further analysis is required as the other memory
4612     // state is as optimistic as it gets.
4613     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4614       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4615           *this, IRPosition::callsite_function(ICS));
4616       intersectAssumedBits(MemBehaviorAA.getAssumed());
4617       return !isAtFixpoint();
4618     }
4619 
4620     // Remove access kind modifiers if necessary.
4621     if (I.mayReadFromMemory())
4622       removeAssumedBits(NO_READS);
4623     if (I.mayWriteToMemory())
4624       removeAssumedBits(NO_WRITES);
4625     return !isAtFixpoint();
4626   };
4627 
4628   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
4629     return indicatePessimisticFixpoint();
4630 
4631   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4632                                         : ChangeStatus::UNCHANGED;
4633 }
4634 
4635 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
4636 
4637   const IRPosition &IRP = getIRPosition();
4638   const IRPosition &FnPos = IRPosition::function_scope(IRP);
4639   AAMemoryBehavior::StateType &S = getState();
4640 
4641   // First, check the function scope. We take the known information and we avoid
4642   // work if the assumed information implies the current assumed information for
4643   // this attribute.
4644   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4645   S.addKnownBits(FnMemAA.getKnown());
4646   if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
4647     return ChangeStatus::UNCHANGED;
4648 
4649   // Make sure the value is not captured (except through "return"), if
4650   // it is, any information derived would be irrelevant anyway as we cannot
4651   // check the potential aliases introduced by the capture. However, no need
4652   // to fall back to anythign less optimistic than the function state.
4653   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
4654   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4655     S.intersectAssumedBits(FnMemAA.getAssumed());
4656     return ChangeStatus::CHANGED;
4657   }
4658 
4659   // The current assumed state used to determine a change.
4660   auto AssumedState = S.getAssumed();
4661 
4662   // Liveness information to exclude dead users.
4663   // TODO: Take the FnPos once we have call site specific liveness information.
4664   const auto &LivenessAA = A.getAAFor<AAIsDead>(
4665       *this, IRPosition::function(*IRP.getAssociatedFunction()));
4666 
4667   // Visit and expand uses until all are analyzed or a fixpoint is reached.
4668   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
4669     const Use *U = Uses[i];
4670     Instruction *UserI = cast<Instruction>(U->getUser());
4671     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
4672                       << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
4673                       << "]\n");
4674     if (LivenessAA.isAssumedDead(UserI))
4675       continue;
4676 
4677     // Check if the users of UserI should also be visited.
4678     if (followUsersOfUseIn(A, U, UserI))
4679       for (const Use &UserIUse : UserI->uses())
4680         Uses.insert(&UserIUse);
4681 
4682     // If UserI might touch memory we analyze the use in detail.
4683     if (UserI->mayReadOrWriteMemory())
4684       analyzeUseIn(A, U, UserI);
4685   }
4686 
4687   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4688                                         : ChangeStatus::UNCHANGED;
4689 }
4690 
4691 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
4692                                                   const Instruction *UserI) {
4693   // The loaded value is unrelated to the pointer argument, no need to
4694   // follow the users of the load.
4695   if (isa<LoadInst>(UserI))
4696     return false;
4697 
4698   // By default we follow all uses assuming UserI might leak information on U,
4699   // we have special handling for call sites operands though.
4700   ImmutableCallSite ICS(UserI);
4701   if (!ICS || !ICS.isArgOperand(U))
4702     return true;
4703 
4704   // If the use is a call argument known not to be captured, the users of
4705   // the call do not need to be visited because they have to be unrelated to
4706   // the input. Note that this check is not trivial even though we disallow
4707   // general capturing of the underlying argument. The reason is that the
4708   // call might the argument "through return", which we allow and for which we
4709   // need to check call users.
4710   unsigned ArgNo = ICS.getArgumentNo(U);
4711   const auto &ArgNoCaptureAA =
4712       A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
4713   return !ArgNoCaptureAA.isAssumedNoCapture();
4714 }
4715 
4716 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
4717                                             const Instruction *UserI) {
4718   assert(UserI->mayReadOrWriteMemory());
4719 
4720   switch (UserI->getOpcode()) {
4721   default:
4722     // TODO: Handle all atomics and other side-effect operations we know of.
4723     break;
4724   case Instruction::Load:
4725     // Loads cause the NO_READS property to disappear.
4726     removeAssumedBits(NO_READS);
4727     return;
4728 
4729   case Instruction::Store:
4730     // Stores cause the NO_WRITES property to disappear if the use is the
4731     // pointer operand. Note that we do assume that capturing was taken care of
4732     // somewhere else.
4733     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
4734       removeAssumedBits(NO_WRITES);
4735     return;
4736 
4737   case Instruction::Call:
4738   case Instruction::CallBr:
4739   case Instruction::Invoke: {
4740     // For call sites we look at the argument memory behavior attribute (this
4741     // could be recursive!) in order to restrict our own state.
4742     ImmutableCallSite ICS(UserI);
4743 
4744     // Give up on operand bundles.
4745     if (ICS.isBundleOperand(U)) {
4746       indicatePessimisticFixpoint();
4747       return;
4748     }
4749 
4750     // Calling a function does read the function pointer, maybe write it if the
4751     // function is self-modifying.
4752     if (ICS.isCallee(U)) {
4753       removeAssumedBits(NO_READS);
4754       break;
4755     }
4756 
4757     // Adjust the possible access behavior based on the information on the
4758     // argument.
4759     unsigned ArgNo = ICS.getArgumentNo(U);
4760     const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
4761     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4762     // "assumed" has at most the same bits as the MemBehaviorAA assumed
4763     // and at least "known".
4764     intersectAssumedBits(MemBehaviorAA.getAssumed());
4765     return;
4766   }
4767   };
4768 
4769   // Generally, look at the "may-properties" and adjust the assumed state if we
4770   // did not trigger special handling before.
4771   if (UserI->mayReadFromMemory())
4772     removeAssumedBits(NO_READS);
4773   if (UserI->mayWriteToMemory())
4774     removeAssumedBits(NO_WRITES);
4775 }
4776 
4777 /// ----------------------------------------------------------------------------
4778 ///                               Attributor
4779 /// ----------------------------------------------------------------------------
4780 
4781 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
4782                                const AAIsDead *LivenessAA) {
4783   const Instruction *CtxI = AA.getIRPosition().getCtxI();
4784   if (!CtxI)
4785     return false;
4786 
4787   // TODO: Find a good way to utilize fine and coarse grained liveness
4788   // information.
4789   if (!LivenessAA)
4790     LivenessAA =
4791         &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
4792                             /* TrackDependence */ false);
4793 
4794   // Don't check liveness for AAIsDead.
4795   if (&AA == LivenessAA)
4796     return false;
4797 
4798   if (!LivenessAA->isAssumedDead(CtxI))
4799     return false;
4800 
4801   // We actually used liveness information so we have to record a dependence.
4802   recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
4803 
4804   return true;
4805 }
4806 
4807 bool Attributor::checkForAllUses(
4808     const function_ref<bool(const Use &, bool &)> &Pred,
4809     const AbstractAttribute &QueryingAA, const Value &V) {
4810   const IRPosition &IRP = QueryingAA.getIRPosition();
4811   SmallVector<const Use *, 16> Worklist;
4812   SmallPtrSet<const Use *, 16> Visited;
4813 
4814   for (const Use &U : V.uses())
4815     Worklist.push_back(&U);
4816 
4817   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
4818                     << " initial uses to check\n");
4819 
4820   if (Worklist.empty())
4821     return true;
4822 
4823   bool AnyDead = false;
4824   const Function *ScopeFn = IRP.getAnchorScope();
4825   const auto *LivenessAA =
4826       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
4827                                     /* TrackDependence */ false)
4828               : nullptr;
4829 
4830   while (!Worklist.empty()) {
4831     const Use *U = Worklist.pop_back_val();
4832     if (!Visited.insert(U).second)
4833       continue;
4834     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
4835     if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
4836       if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
4837         LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
4838                           << static_cast<const AbstractAttribute &>(*LivenessAA)
4839                           << "\n");
4840         AnyDead = true;
4841         continue;
4842       }
4843 
4844     bool Follow = false;
4845     if (!Pred(*U, Follow))
4846       return false;
4847     if (!Follow)
4848       continue;
4849     for (const Use &UU : U->getUser()->uses())
4850       Worklist.push_back(&UU);
4851   }
4852 
4853   if (AnyDead)
4854     recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4855 
4856   return true;
4857 }
4858 
4859 bool Attributor::checkForAllCallSites(
4860     const function_ref<bool(AbstractCallSite)> &Pred,
4861     const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
4862   // We can try to determine information from
4863   // the call sites. However, this is only possible all call sites are known,
4864   // hence the function has internal linkage.
4865   const IRPosition &IRP = QueryingAA.getIRPosition();
4866   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4867   if (!AssociatedFunction) {
4868     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
4869                       << "\n");
4870     return false;
4871   }
4872 
4873   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
4874                               &QueryingAA);
4875 }
4876 
4877 bool Attributor::checkForAllCallSites(
4878     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
4879     bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
4880   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
4881     LLVM_DEBUG(
4882         dbgs()
4883         << "[Attributor] Function " << Fn.getName()
4884         << " has no internal linkage, hence not all call sites are known\n");
4885     return false;
4886   }
4887 
4888   for (const Use &U : Fn.uses()) {
4889     AbstractCallSite ACS(&U);
4890     if (!ACS) {
4891       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
4892                         << " has non call site use " << *U.get() << " in "
4893                         << *U.getUser() << "\n");
4894       // BlockAddress users are allowed.
4895       if (isa<BlockAddress>(U.getUser()))
4896         continue;
4897       return false;
4898     }
4899 
4900     Instruction *I = ACS.getInstruction();
4901     Function *Caller = I->getFunction();
4902 
4903     const auto *LivenessAA =
4904         lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
4905                               /* TrackDependence */ false);
4906 
4907     // Skip dead calls.
4908     if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4909       // We actually used liveness information so we have to record a
4910       // dependence.
4911       if (QueryingAA)
4912         recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
4913       continue;
4914     }
4915 
4916     const Use *EffectiveUse =
4917         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
4918     if (!ACS.isCallee(EffectiveUse)) {
4919       if (!RequireAllCallSites)
4920         continue;
4921       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
4922                         << " is an invalid use of " << Fn.getName() << "\n");
4923       return false;
4924     }
4925 
4926     if (Pred(ACS))
4927       continue;
4928 
4929     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
4930                       << *ACS.getInstruction() << "\n");
4931     return false;
4932   }
4933 
4934   return true;
4935 }
4936 
4937 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
4938     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
4939         &Pred,
4940     const AbstractAttribute &QueryingAA) {
4941 
4942   const IRPosition &IRP = QueryingAA.getIRPosition();
4943   // Since we need to provide return instructions we have to have an exact
4944   // definition.
4945   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4946   if (!AssociatedFunction)
4947     return false;
4948 
4949   // If this is a call site query we use the call site specific return values
4950   // and liveness information.
4951   // TODO: use the function scope once we have call site AAReturnedValues.
4952   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4953   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4954   if (!AARetVal.getState().isValidState())
4955     return false;
4956 
4957   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
4958 }
4959 
4960 bool Attributor::checkForAllReturnedValues(
4961     const function_ref<bool(Value &)> &Pred,
4962     const AbstractAttribute &QueryingAA) {
4963 
4964   const IRPosition &IRP = QueryingAA.getIRPosition();
4965   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4966   if (!AssociatedFunction)
4967     return false;
4968 
4969   // TODO: use the function scope once we have call site AAReturnedValues.
4970   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4971   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4972   if (!AARetVal.getState().isValidState())
4973     return false;
4974 
4975   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
4976       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
4977         return Pred(RV);
4978       });
4979 }
4980 
4981 static bool
4982 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
4983                             const function_ref<bool(Instruction &)> &Pred,
4984                             const AAIsDead *LivenessAA, bool &AnyDead,
4985                             const ArrayRef<unsigned> &Opcodes) {
4986   for (unsigned Opcode : Opcodes) {
4987     for (Instruction *I : OpcodeInstMap[Opcode]) {
4988       // Skip dead instructions.
4989       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4990         AnyDead = true;
4991         continue;
4992       }
4993 
4994       if (!Pred(*I))
4995         return false;
4996     }
4997   }
4998   return true;
4999 }
5000 
5001 bool Attributor::checkForAllInstructions(
5002     const llvm::function_ref<bool(Instruction &)> &Pred,
5003     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
5004 
5005   const IRPosition &IRP = QueryingAA.getIRPosition();
5006   // Since we need to provide instructions we have to have an exact definition.
5007   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5008   if (!AssociatedFunction)
5009     return false;
5010 
5011   // TODO: use the function scope once we have call site AAReturnedValues.
5012   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5013   const auto &LivenessAA =
5014       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5015   bool AnyDead = false;
5016 
5017   auto &OpcodeInstMap =
5018       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
5019   if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
5020                                    Opcodes))
5021     return false;
5022 
5023   // If we actually used liveness information so we have to record a dependence.
5024   if (AnyDead)
5025     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5026 
5027   return true;
5028 }
5029 
5030 bool Attributor::checkForAllReadWriteInstructions(
5031     const llvm::function_ref<bool(Instruction &)> &Pred,
5032     AbstractAttribute &QueryingAA) {
5033 
5034   const Function *AssociatedFunction =
5035       QueryingAA.getIRPosition().getAssociatedFunction();
5036   if (!AssociatedFunction)
5037     return false;
5038 
5039   // TODO: use the function scope once we have call site AAReturnedValues.
5040   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5041   const auto &LivenessAA =
5042       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5043   bool AnyDead = false;
5044 
5045   for (Instruction *I :
5046        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
5047     // Skip dead instructions.
5048     if (LivenessAA.isAssumedDead(I)) {
5049       AnyDead = true;
5050       continue;
5051     }
5052 
5053     if (!Pred(*I))
5054       return false;
5055   }
5056 
5057   // If we actually used liveness information so we have to record a dependence.
5058   if (AnyDead)
5059     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5060 
5061   return true;
5062 }
5063 
5064 ChangeStatus Attributor::run(Module &M) {
5065   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
5066                     << AllAbstractAttributes.size()
5067                     << " abstract attributes.\n");
5068 
5069   // Now that all abstract attributes are collected and initialized we start
5070   // the abstract analysis.
5071 
5072   unsigned IterationCounter = 1;
5073 
5074   SmallVector<AbstractAttribute *, 64> ChangedAAs;
5075   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
5076   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
5077 
5078   bool RecomputeDependences = false;
5079 
5080   do {
5081     // Remember the size to determine new attributes.
5082     size_t NumAAs = AllAbstractAttributes.size();
5083     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
5084                       << ", Worklist size: " << Worklist.size() << "\n");
5085 
5086     // For invalid AAs we can fix dependent AAs that have a required dependence,
5087     // thereby folding long dependence chains in a single step without the need
5088     // to run updates.
5089     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
5090       AbstractAttribute *InvalidAA = InvalidAAs[u];
5091       auto &QuerriedAAs = QueryMap[InvalidAA];
5092       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
5093                         << QuerriedAAs.RequiredAAs.size() << "/"
5094                         << QuerriedAAs.OptionalAAs.size()
5095                         << " required/optional dependences\n");
5096       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
5097         AbstractState &DOIAAState = DepOnInvalidAA->getState();
5098         DOIAAState.indicatePessimisticFixpoint();
5099         ++NumAttributesFixedDueToRequiredDependences;
5100         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
5101         if (!DOIAAState.isValidState())
5102           InvalidAAs.insert(DepOnInvalidAA);
5103       }
5104       if (!RecomputeDependences)
5105         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5106                         QuerriedAAs.OptionalAAs.end());
5107     }
5108 
5109     // If dependences (=QueryMap) are recomputed we have to look at all abstract
5110     // attributes again, regardless of what changed in the last iteration.
5111     if (RecomputeDependences) {
5112       LLVM_DEBUG(
5113           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
5114       QueryMap.clear();
5115       ChangedAAs.clear();
5116       Worklist.insert(AllAbstractAttributes.begin(),
5117                       AllAbstractAttributes.end());
5118     }
5119 
5120     // Add all abstract attributes that are potentially dependent on one that
5121     // changed to the work list.
5122     for (AbstractAttribute *ChangedAA : ChangedAAs) {
5123       auto &QuerriedAAs = QueryMap[ChangedAA];
5124       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5125                       QuerriedAAs.OptionalAAs.end());
5126       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
5127                       QuerriedAAs.RequiredAAs.end());
5128     }
5129 
5130     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
5131                       << ", Worklist+Dependent size: " << Worklist.size()
5132                       << "\n");
5133 
5134     // Reset the changed and invalid set.
5135     ChangedAAs.clear();
5136     InvalidAAs.clear();
5137 
5138     // Update all abstract attribute in the work list and record the ones that
5139     // changed.
5140     for (AbstractAttribute *AA : Worklist)
5141       if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
5142         QueriedNonFixAA = false;
5143         if (AA->update(*this) == ChangeStatus::CHANGED) {
5144           ChangedAAs.push_back(AA);
5145           if (!AA->getState().isValidState())
5146             InvalidAAs.insert(AA);
5147         } else if (!QueriedNonFixAA) {
5148           // If the attribute did not query any non-fix information, the state
5149           // will not change and we can indicate that right away.
5150           AA->getState().indicateOptimisticFixpoint();
5151         }
5152       }
5153 
5154     // Check if we recompute the dependences in the next iteration.
5155     RecomputeDependences = (DepRecomputeInterval > 0 &&
5156                             IterationCounter % DepRecomputeInterval == 0);
5157 
5158     // Add attributes to the changed set if they have been created in the last
5159     // iteration.
5160     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
5161                       AllAbstractAttributes.end());
5162 
5163     // Reset the work list and repopulate with the changed abstract attributes.
5164     // Note that dependent ones are added above.
5165     Worklist.clear();
5166     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
5167 
5168   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
5169                                  VerifyMaxFixpointIterations));
5170 
5171   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
5172                     << IterationCounter << "/" << MaxFixpointIterations
5173                     << " iterations\n");
5174 
5175   size_t NumFinalAAs = AllAbstractAttributes.size();
5176 
5177   // Reset abstract arguments not settled in a sound fixpoint by now. This
5178   // happens when we stopped the fixpoint iteration early. Note that only the
5179   // ones marked as "changed" *and* the ones transitively depending on them
5180   // need to be reverted to a pessimistic state. Others might not be in a
5181   // fixpoint state but we can use the optimistic results for them anyway.
5182   SmallPtrSet<AbstractAttribute *, 32> Visited;
5183   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
5184     AbstractAttribute *ChangedAA = ChangedAAs[u];
5185     if (!Visited.insert(ChangedAA).second)
5186       continue;
5187 
5188     AbstractState &State = ChangedAA->getState();
5189     if (!State.isAtFixpoint()) {
5190       State.indicatePessimisticFixpoint();
5191 
5192       NumAttributesTimedOut++;
5193     }
5194 
5195     auto &QuerriedAAs = QueryMap[ChangedAA];
5196     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
5197                       QuerriedAAs.OptionalAAs.end());
5198     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
5199                       QuerriedAAs.RequiredAAs.end());
5200   }
5201 
5202   LLVM_DEBUG({
5203     if (!Visited.empty())
5204       dbgs() << "\n[Attributor] Finalized " << Visited.size()
5205              << " abstract attributes.\n";
5206   });
5207 
5208   unsigned NumManifested = 0;
5209   unsigned NumAtFixpoint = 0;
5210   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
5211   for (AbstractAttribute *AA : AllAbstractAttributes) {
5212     AbstractState &State = AA->getState();
5213 
5214     // If there is not already a fixpoint reached, we can now take the
5215     // optimistic state. This is correct because we enforced a pessimistic one
5216     // on abstract attributes that were transitively dependent on a changed one
5217     // already above.
5218     if (!State.isAtFixpoint())
5219       State.indicateOptimisticFixpoint();
5220 
5221     // If the state is invalid, we do not try to manifest it.
5222     if (!State.isValidState())
5223       continue;
5224 
5225     // Skip dead code.
5226     if (isAssumedDead(*AA, nullptr))
5227       continue;
5228     // Manifest the state and record if we changed the IR.
5229     ChangeStatus LocalChange = AA->manifest(*this);
5230     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
5231       AA->trackStatistics();
5232 
5233     ManifestChange = ManifestChange | LocalChange;
5234 
5235     NumAtFixpoint++;
5236     NumManifested += (LocalChange == ChangeStatus::CHANGED);
5237   }
5238 
5239   (void)NumManifested;
5240   (void)NumAtFixpoint;
5241   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
5242                     << " arguments while " << NumAtFixpoint
5243                     << " were in a valid fixpoint state\n");
5244 
5245   NumAttributesManifested += NumManifested;
5246   NumAttributesValidFixpoint += NumAtFixpoint;
5247 
5248   (void)NumFinalAAs;
5249   assert(
5250       NumFinalAAs == AllAbstractAttributes.size() &&
5251       "Expected the final number of abstract attributes to remain unchanged!");
5252 
5253   // Delete stuff at the end to avoid invalid references and a nice order.
5254   {
5255     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
5256                       << ToBeDeletedFunctions.size() << " functions and "
5257                       << ToBeDeletedBlocks.size() << " blocks and "
5258                       << ToBeDeletedInsts.size() << " instructions and "
5259                       << ToBeChangedUses.size() << " uses\n");
5260 
5261     SmallVector<Instruction *, 32> DeadInsts;
5262     SmallVector<Instruction *, 32> TerminatorsToFold;
5263     SmallVector<Instruction *, 32> UnreachablesToInsert;
5264 
5265     for (auto &It : ToBeChangedUses) {
5266       Use *U = It.first;
5267       Value *NewV = It.second;
5268       Value *OldV = U->get();
5269       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
5270                         << " instead of " << *OldV << "\n");
5271       U->set(NewV);
5272       if (Instruction *I = dyn_cast<Instruction>(OldV))
5273         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
5274             isInstructionTriviallyDead(I)) {
5275           DeadInsts.push_back(I);
5276         }
5277       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
5278         Instruction *UserI = cast<Instruction>(U->getUser());
5279         if (isa<UndefValue>(NewV)) {
5280           UnreachablesToInsert.push_back(UserI);
5281         } else {
5282           TerminatorsToFold.push_back(UserI);
5283         }
5284       }
5285     }
5286     for (Instruction *I : UnreachablesToInsert)
5287       changeToUnreachable(I, /* UseLLVMTrap */ false);
5288     for (Instruction *I : TerminatorsToFold)
5289       ConstantFoldTerminator(I->getParent());
5290 
5291     for (Instruction *I : ToBeDeletedInsts) {
5292       I->replaceAllUsesWith(UndefValue::get(I->getType()));
5293       if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
5294         DeadInsts.push_back(I);
5295       else
5296         I->eraseFromParent();
5297     }
5298 
5299     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
5300 
5301     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
5302       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
5303       ToBeDeletedBBs.reserve(NumDeadBlocks);
5304       ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
5305       // Actually we do not delete the blocks but squash them into a single
5306       // unreachable but untangling branches that jump here is something we need
5307       // to do in a more generic way.
5308       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
5309       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
5310       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
5311     }
5312 
5313     STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
5314     for (Function *Fn : ToBeDeletedFunctions) {
5315       Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
5316       Fn->eraseFromParent();
5317       STATS_TRACK(AAIsDead, Function);
5318     }
5319 
5320     // Identify dead internal functions and delete them. This happens outside
5321     // the other fixpoint analysis as we might treat potentially dead functions
5322     // as live to lower the number of iterations. If they happen to be dead, the
5323     // below fixpoint loop will identify and eliminate them.
5324     SmallVector<Function *, 8> InternalFns;
5325     for (Function &F : M)
5326       if (F.hasLocalLinkage())
5327         InternalFns.push_back(&F);
5328 
5329     bool FoundDeadFn = true;
5330     while (FoundDeadFn) {
5331       FoundDeadFn = false;
5332       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
5333         Function *F = InternalFns[u];
5334         if (!F)
5335           continue;
5336 
5337         if (!checkForAllCallSites([](AbstractCallSite ACS) { return false; },
5338                                   *F, true, nullptr))
5339           continue;
5340 
5341         STATS_TRACK(AAIsDead, Function);
5342         ToBeDeletedFunctions.insert(F);
5343         F->deleteBody();
5344         F->replaceAllUsesWith(UndefValue::get(F->getType()));
5345         F->eraseFromParent();
5346         InternalFns[u] = nullptr;
5347         FoundDeadFn = true;
5348       }
5349     }
5350   }
5351 
5352   if (VerifyMaxFixpointIterations &&
5353       IterationCounter != MaxFixpointIterations) {
5354     errs() << "\n[Attributor] Fixpoint iteration done after: "
5355            << IterationCounter << "/" << MaxFixpointIterations
5356            << " iterations\n";
5357     llvm_unreachable("The fixpoint was not reached with exactly the number of "
5358                      "specified iterations!");
5359   }
5360 
5361   return ManifestChange;
5362 }
5363 
5364 void Attributor::initializeInformationCache(Function &F) {
5365 
5366   // Walk all instructions to find interesting instructions that might be
5367   // queried by abstract attributes during their initialization or update.
5368   // This has to happen before we create attributes.
5369   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
5370   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
5371 
5372   for (Instruction &I : instructions(&F)) {
5373     bool IsInterestingOpcode = false;
5374 
5375     // To allow easy access to all instructions in a function with a given
5376     // opcode we store them in the InfoCache. As not all opcodes are interesting
5377     // to concrete attributes we only cache the ones that are as identified in
5378     // the following switch.
5379     // Note: There are no concrete attributes now so this is initially empty.
5380     switch (I.getOpcode()) {
5381     default:
5382       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
5383              "New call site/base instruction type needs to be known int the "
5384              "Attributor.");
5385       break;
5386     case Instruction::Load:
5387       // The alignment of a pointer is interesting for loads.
5388     case Instruction::Store:
5389       // The alignment of a pointer is interesting for stores.
5390     case Instruction::Call:
5391     case Instruction::CallBr:
5392     case Instruction::Invoke:
5393     case Instruction::CleanupRet:
5394     case Instruction::CatchSwitch:
5395     case Instruction::Resume:
5396     case Instruction::Ret:
5397       IsInterestingOpcode = true;
5398     }
5399     if (IsInterestingOpcode)
5400       InstOpcodeMap[I.getOpcode()].push_back(&I);
5401     if (I.mayReadOrWriteMemory())
5402       ReadOrWriteInsts.push_back(&I);
5403   }
5404 }
5405 
5406 void Attributor::recordDependence(const AbstractAttribute &FromAA,
5407                                   const AbstractAttribute &ToAA,
5408                                   DepClassTy DepClass) {
5409   if (FromAA.getState().isAtFixpoint())
5410     return;
5411 
5412   if (DepClass == DepClassTy::REQUIRED)
5413     QueryMap[&FromAA].RequiredAAs.insert(
5414         const_cast<AbstractAttribute *>(&ToAA));
5415   else
5416     QueryMap[&FromAA].OptionalAAs.insert(
5417         const_cast<AbstractAttribute *>(&ToAA));
5418   QueriedNonFixAA = true;
5419 }
5420 
5421 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
5422   if (!VisitedFunctions.insert(&F).second)
5423     return;
5424   if (F.isDeclaration())
5425     return;
5426 
5427   IRPosition FPos = IRPosition::function(F);
5428 
5429   // Check for dead BasicBlocks in every function.
5430   // We need dead instruction detection because we do not want to deal with
5431   // broken IR in which SSA rules do not apply.
5432   getOrCreateAAFor<AAIsDead>(FPos);
5433 
5434   // Every function might be "will-return".
5435   getOrCreateAAFor<AAWillReturn>(FPos);
5436 
5437   // Every function can be nounwind.
5438   getOrCreateAAFor<AANoUnwind>(FPos);
5439 
5440   // Every function might be marked "nosync"
5441   getOrCreateAAFor<AANoSync>(FPos);
5442 
5443   // Every function might be "no-free".
5444   getOrCreateAAFor<AANoFree>(FPos);
5445 
5446   // Every function might be "no-return".
5447   getOrCreateAAFor<AANoReturn>(FPos);
5448 
5449   // Every function might be "no-recurse".
5450   getOrCreateAAFor<AANoRecurse>(FPos);
5451 
5452   // Every function might be "readnone/readonly/writeonly/...".
5453   getOrCreateAAFor<AAMemoryBehavior>(FPos);
5454 
5455   // Every function might be applicable for Heap-To-Stack conversion.
5456   if (EnableHeapToStack)
5457     getOrCreateAAFor<AAHeapToStack>(FPos);
5458 
5459   // Return attributes are only appropriate if the return type is non void.
5460   Type *ReturnType = F.getReturnType();
5461   if (!ReturnType->isVoidTy()) {
5462     // Argument attribute "returned" --- Create only one per function even
5463     // though it is an argument attribute.
5464     getOrCreateAAFor<AAReturnedValues>(FPos);
5465 
5466     IRPosition RetPos = IRPosition::returned(F);
5467 
5468     // Every returned value might be dead.
5469     getOrCreateAAFor<AAIsDead>(RetPos);
5470 
5471     // Every function might be simplified.
5472     getOrCreateAAFor<AAValueSimplify>(RetPos);
5473 
5474     if (ReturnType->isPointerTy()) {
5475 
5476       // Every function with pointer return type might be marked align.
5477       getOrCreateAAFor<AAAlign>(RetPos);
5478 
5479       // Every function with pointer return type might be marked nonnull.
5480       getOrCreateAAFor<AANonNull>(RetPos);
5481 
5482       // Every function with pointer return type might be marked noalias.
5483       getOrCreateAAFor<AANoAlias>(RetPos);
5484 
5485       // Every function with pointer return type might be marked
5486       // dereferenceable.
5487       getOrCreateAAFor<AADereferenceable>(RetPos);
5488     }
5489   }
5490 
5491   for (Argument &Arg : F.args()) {
5492     IRPosition ArgPos = IRPosition::argument(Arg);
5493 
5494     // Every argument might be simplified.
5495     getOrCreateAAFor<AAValueSimplify>(ArgPos);
5496 
5497     if (Arg.getType()->isPointerTy()) {
5498       // Every argument with pointer type might be marked nonnull.
5499       getOrCreateAAFor<AANonNull>(ArgPos);
5500 
5501       // Every argument with pointer type might be marked noalias.
5502       getOrCreateAAFor<AANoAlias>(ArgPos);
5503 
5504       // Every argument with pointer type might be marked dereferenceable.
5505       getOrCreateAAFor<AADereferenceable>(ArgPos);
5506 
5507       // Every argument with pointer type might be marked align.
5508       getOrCreateAAFor<AAAlign>(ArgPos);
5509 
5510       // Every argument with pointer type might be marked nocapture.
5511       getOrCreateAAFor<AANoCapture>(ArgPos);
5512 
5513       // Every argument with pointer type might be marked
5514       // "readnone/readonly/writeonly/..."
5515       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
5516 
5517       // Every argument with pointer type might be marked nofree.
5518       getOrCreateAAFor<AANoFree>(ArgPos);
5519     }
5520   }
5521 
5522   auto CallSitePred = [&](Instruction &I) -> bool {
5523     CallSite CS(&I);
5524     if (Function *Callee = CS.getCalledFunction()) {
5525       // Skip declerations except if annotations on their call sites were
5526       // explicitly requested.
5527       if (!AnnotateDeclarationCallSites && Callee->isDeclaration())
5528         return true;
5529 
5530       if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
5531         IRPosition CSRetPos = IRPosition::callsite_returned(CS);
5532 
5533         // Call site return values might be dead.
5534         getOrCreateAAFor<AAIsDead>(CSRetPos);
5535       }
5536 
5537       for (int i = 0, e = Callee->arg_size(); i < e; i++) {
5538 
5539         IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
5540 
5541         // Every call site argument might be dead.
5542         getOrCreateAAFor<AAIsDead>(CSArgPos);
5543 
5544         // Call site argument might be simplified.
5545         getOrCreateAAFor<AAValueSimplify>(CSArgPos);
5546 
5547         if (!CS.getArgument(i)->getType()->isPointerTy())
5548           continue;
5549 
5550         // Call site argument attribute "non-null".
5551         getOrCreateAAFor<AANonNull>(CSArgPos);
5552 
5553         // Call site argument attribute "no-alias".
5554         getOrCreateAAFor<AANoAlias>(CSArgPos);
5555 
5556         // Call site argument attribute "dereferenceable".
5557         getOrCreateAAFor<AADereferenceable>(CSArgPos);
5558 
5559         // Call site argument attribute "align".
5560         getOrCreateAAFor<AAAlign>(CSArgPos);
5561 
5562         // Call site argument attribute "nofree".
5563         getOrCreateAAFor<AANoFree>(CSArgPos);
5564       }
5565     }
5566     return true;
5567   };
5568 
5569   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
5570   bool Success, AnyDead = false;
5571   Success = checkForAllInstructionsImpl(
5572       OpcodeInstMap, CallSitePred, nullptr, AnyDead,
5573       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
5574        (unsigned)Instruction::Call});
5575   (void)Success;
5576   assert(Success && !AnyDead && "Expected the check call to be successful!");
5577 
5578   auto LoadStorePred = [&](Instruction &I) -> bool {
5579     if (isa<LoadInst>(I))
5580       getOrCreateAAFor<AAAlign>(
5581           IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
5582     else
5583       getOrCreateAAFor<AAAlign>(
5584           IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
5585     return true;
5586   };
5587   Success = checkForAllInstructionsImpl(
5588       OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
5589       {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
5590   (void)Success;
5591   assert(Success && !AnyDead && "Expected the check call to be successful!");
5592 }
5593 
5594 /// Helpers to ease debugging through output streams and print calls.
5595 ///
5596 ///{
5597 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
5598   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
5599 }
5600 
5601 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
5602   switch (AP) {
5603   case IRPosition::IRP_INVALID:
5604     return OS << "inv";
5605   case IRPosition::IRP_FLOAT:
5606     return OS << "flt";
5607   case IRPosition::IRP_RETURNED:
5608     return OS << "fn_ret";
5609   case IRPosition::IRP_CALL_SITE_RETURNED:
5610     return OS << "cs_ret";
5611   case IRPosition::IRP_FUNCTION:
5612     return OS << "fn";
5613   case IRPosition::IRP_CALL_SITE:
5614     return OS << "cs";
5615   case IRPosition::IRP_ARGUMENT:
5616     return OS << "arg";
5617   case IRPosition::IRP_CALL_SITE_ARGUMENT:
5618     return OS << "cs_arg";
5619   }
5620   llvm_unreachable("Unknown attribute position!");
5621 }
5622 
5623 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
5624   const Value &AV = Pos.getAssociatedValue();
5625   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
5626             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
5627 }
5628 
5629 template <typename base_ty, base_ty BestState, base_ty WorstState>
5630 raw_ostream &llvm::
5631 operator<<(raw_ostream &OS,
5632            const IntegerStateBase<base_ty, BestState, WorstState> &S) {
5633   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
5634             << static_cast<const AbstractState &>(S);
5635 }
5636 
5637 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
5638   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
5639 }
5640 
5641 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
5642   AA.print(OS);
5643   return OS;
5644 }
5645 
5646 void AbstractAttribute::print(raw_ostream &OS) const {
5647   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
5648      << "]";
5649 }
5650 ///}
5651 
5652 /// ----------------------------------------------------------------------------
5653 ///                       Pass (Manager) Boilerplate
5654 /// ----------------------------------------------------------------------------
5655 
5656 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
5657   if (DisableAttributor)
5658     return false;
5659 
5660   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
5661                     << " functions.\n");
5662 
5663   // Create an Attributor and initially empty information cache that is filled
5664   // while we identify default attribute opportunities.
5665   InformationCache InfoCache(M, AG);
5666   Attributor A(InfoCache, DepRecInterval);
5667 
5668   for (Function &F : M)
5669     A.initializeInformationCache(F);
5670 
5671   for (Function &F : M) {
5672     if (F.hasExactDefinition())
5673       NumFnWithExactDefinition++;
5674     else
5675       NumFnWithoutExactDefinition++;
5676 
5677     // We look at internal functions only on-demand but if any use is not a
5678     // direct call, we have to do it eagerly.
5679     if (F.hasLocalLinkage()) {
5680       if (llvm::all_of(F.uses(), [](const Use &U) {
5681             return ImmutableCallSite(U.getUser()) &&
5682                    ImmutableCallSite(U.getUser()).isCallee(&U);
5683           }))
5684         continue;
5685     }
5686 
5687     // Populate the Attributor with abstract attribute opportunities in the
5688     // function and the information cache with IR information.
5689     A.identifyDefaultAbstractAttributes(F);
5690   }
5691 
5692   return A.run(M) == ChangeStatus::CHANGED;
5693 }
5694 
5695 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
5696   AnalysisGetter AG(AM);
5697   if (runAttributorOnModule(M, AG)) {
5698     // FIXME: Think about passes we will preserve and add them here.
5699     return PreservedAnalyses::none();
5700   }
5701   return PreservedAnalyses::all();
5702 }
5703 
5704 namespace {
5705 
5706 struct AttributorLegacyPass : public ModulePass {
5707   static char ID;
5708 
5709   AttributorLegacyPass() : ModulePass(ID) {
5710     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
5711   }
5712 
5713   bool runOnModule(Module &M) override {
5714     if (skipModule(M))
5715       return false;
5716 
5717     AnalysisGetter AG;
5718     return runAttributorOnModule(M, AG);
5719   }
5720 
5721   void getAnalysisUsage(AnalysisUsage &AU) const override {
5722     // FIXME: Think about passes we will preserve and add them here.
5723     AU.addRequired<TargetLibraryInfoWrapperPass>();
5724   }
5725 };
5726 
5727 } // end anonymous namespace
5728 
5729 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
5730 
5731 char AttributorLegacyPass::ID = 0;
5732 
5733 const char AAReturnedValues::ID = 0;
5734 const char AANoUnwind::ID = 0;
5735 const char AANoSync::ID = 0;
5736 const char AANoFree::ID = 0;
5737 const char AANonNull::ID = 0;
5738 const char AANoRecurse::ID = 0;
5739 const char AAWillReturn::ID = 0;
5740 const char AANoAlias::ID = 0;
5741 const char AAReachability::ID = 0;
5742 const char AANoReturn::ID = 0;
5743 const char AAIsDead::ID = 0;
5744 const char AADereferenceable::ID = 0;
5745 const char AAAlign::ID = 0;
5746 const char AANoCapture::ID = 0;
5747 const char AAValueSimplify::ID = 0;
5748 const char AAHeapToStack::ID = 0;
5749 const char AAMemoryBehavior::ID = 0;
5750 
5751 // Macro magic to create the static generator function for attributes that
5752 // follow the naming scheme.
5753 
5754 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
5755   case IRPosition::PK:                                                         \
5756     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
5757 
5758 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
5759   case IRPosition::PK:                                                         \
5760     AA = new CLASS##SUFFIX(IRP);                                               \
5761     break;
5762 
5763 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
5764   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5765     CLASS *AA = nullptr;                                                       \
5766     switch (IRP.getPositionKind()) {                                           \
5767       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5768       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
5769       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
5770       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5771       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
5772       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
5773       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5774       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5775     }                                                                          \
5776     return *AA;                                                                \
5777   }
5778 
5779 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
5780   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5781     CLASS *AA = nullptr;                                                       \
5782     switch (IRP.getPositionKind()) {                                           \
5783       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5784       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
5785       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
5786       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5787       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5788       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
5789       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5790       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5791     }                                                                          \
5792     return *AA;                                                                \
5793   }
5794 
5795 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
5796   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5797     CLASS *AA = nullptr;                                                       \
5798     switch (IRP.getPositionKind()) {                                           \
5799       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5800       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5801       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5802       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5803       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5804       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
5805       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5806       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5807     }                                                                          \
5808     return *AA;                                                                \
5809   }
5810 
5811 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
5812   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5813     CLASS *AA = nullptr;                                                       \
5814     switch (IRP.getPositionKind()) {                                           \
5815       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5816       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
5817       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
5818       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5819       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
5820       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
5821       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
5822       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5823     }                                                                          \
5824     return *AA;                                                                \
5825   }
5826 
5827 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
5828   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5829     CLASS *AA = nullptr;                                                       \
5830     switch (IRP.getPositionKind()) {                                           \
5831       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5832       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5833       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5834       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5835       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5836       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5837       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5838       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5839     }                                                                          \
5840     return *AA;                                                                \
5841   }
5842 
5843 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
5844 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
5845 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
5846 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
5847 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
5848 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
5849 
5850 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
5851 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
5852 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
5853 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
5854 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
5855 
5856 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
5857 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
5858 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
5859 
5860 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
5861 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
5862 
5863 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
5864 
5865 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
5866 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
5867 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
5868 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
5869 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
5870 #undef SWITCH_PK_CREATE
5871 #undef SWITCH_PK_INV
5872 
5873 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
5874                       "Deduce and propagate attributes", false, false)
5875 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
5876 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
5877                     "Deduce and propagate attributes", false, false)
5878