1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CFG.h"
32 #include "llvm/IR/InstIterator.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/InitializePasses.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/raw_ostream.h"
38 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
39 #include "llvm/Transforms/Utils/Local.h"
40 
41 #include <cassert>
42 
43 using namespace llvm;
44 
45 #define DEBUG_TYPE "attributor"
46 
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
51 STATISTIC(NumAttributesTimedOut,
52           "Number of abstract attributes timed out before fixpoint");
53 STATISTIC(NumAttributesValidFixpoint,
54           "Number of abstract attributes in a valid fixpoint state");
55 STATISTIC(NumAttributesManifested,
56           "Number of abstract attributes manifested in IR");
57 STATISTIC(NumAttributesFixedDueToRequiredDependences,
58           "Number of abstract attributes fixed due to required dependences");
59 
60 // Some helper macros to deal with statistics tracking.
61 //
62 // Usage:
63 // For simple IR attribute tracking overload trackStatistics in the abstract
64 // attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.:
66 //  void trackStatistics() const override {
67 //    STATS_DECLTRACK_ARG_ATTR(returned)
68 //  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
72 //
73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
74   ("Number of " #TYPE " marked '" #NAME "'")
75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
77 #define STATS_DECL(NAME, TYPE, MSG)                                            \
78   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
80 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
81   {                                                                            \
82     STATS_DECL(NAME, TYPE, MSG)                                                \
83     STATS_TRACK(NAME, TYPE)                                                    \
84   }
85 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
86   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
87 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
88   STATS_DECLTRACK(NAME, CSArguments,                                           \
89                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
90 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
91   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
92 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
94 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
95   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
96                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
97 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, CSReturn,                                              \
99                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
101   STATS_DECLTRACK(NAME, Floating,                                              \
102                   ("Number of floating values known to be '" #NAME "'"))
103 
104 // TODO: Determine a good default value.
105 //
// In the LLVM test suite (LLVM-TS) and SPEC2006, 32 does not seem to induce
// compile time overhead (when run with the first 5 abstract attributes). The
// results also indicate that we never reach 32 iterations but always find a
// fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
112 static cl::opt<unsigned>
113     MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
114                           cl::desc("Maximal number of fixpoint iterations."),
115                           cl::init(32));
116 static cl::opt<bool> VerifyMaxFixpointIterations(
117     "attributor-max-iterations-verify", cl::Hidden,
118     cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
119     cl::init(false));
120 
121 static cl::opt<bool> DisableAttributor(
122     "attributor-disable", cl::Hidden,
123     cl::desc("Disable the attributor inter-procedural deduction pass."),
124     cl::init(true));
125 
126 static cl::opt<bool> AnnotateDeclarationCallSites(
127     "attributor-annotate-decl-cs", cl::Hidden,
128     cl::desc("Annoate call sites of function declarations."), cl::init(false));
129 
130 static cl::opt<bool> ManifestInternal(
131     "attributor-manifest-internal", cl::Hidden,
132     cl::desc("Manifest Attributor internal string attributes."),
133     cl::init(false));
134 
135 static cl::opt<unsigned> DepRecInterval(
136     "attributor-dependence-recompute-interval", cl::Hidden,
137     cl::desc("Number of iterations until dependences are recomputed."),
138     cl::init(4));
139 
140 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
141                                        cl::init(true), cl::Hidden);
142 
143 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
144                                        cl::Hidden);
145 
146 /// Logic operators for the change status enum class.
147 ///
148 ///{
149 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
150   return l == ChangeStatus::CHANGED ? l : r;
151 }
152 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
153   return l == ChangeStatus::UNCHANGED ? l : r;
154 }
155 ///}
156 
157 /// Recursively visit all values that might become \p IRP at some point. This
158 /// will be done by looking through cast instructions, selects, phis, and calls
159 /// with the "returned" attribute. Once we cannot look through the value any
160 /// further, the callback \p VisitValueCB is invoked and passed the current
161 /// value, the \p State, and a flag to indicate if we stripped anything. To
162 /// limit how much effort is invested, we will never visit more values than
163 /// specified by \p MaxValues.
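///
/// A minimal usage sketch with a hypothetical state and callback:
///
///   auto VisitValueCB = [](Value &V, MyState &S, bool Stripped) -> bool {
///     S.record(V, Stripped); // Inspect the leaf value.
///     return true;           // Return false to abort the traversal.
///   };
///   genericValueTraversal<MyAA, MyState>(A, IRP, QueryingAA, S, VisitValueCB);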
164 template <typename AAType, typename StateTy>
165 static bool genericValueTraversal(
166     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
167     const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
168     int MaxValues = 8) {
169 
170   const AAIsDead *LivenessAA = nullptr;
171   if (IRP.getAnchorScope())
172     LivenessAA = &A.getAAFor<AAIsDead>(
173         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
174         /* TrackDependence */ false);
175   bool AnyDead = false;
176 
177   // TODO: Use Positions here to allow context sensitivity in VisitValueCB
178   SmallPtrSet<Value *, 16> Visited;
179   SmallVector<Value *, 16> Worklist;
180   Worklist.push_back(&IRP.getAssociatedValue());
181 
182   int Iteration = 0;
183   do {
184     Value *V = Worklist.pop_back_val();
185 
    // Check if we should process the current value. To prevent endless
    // recursion, keep a record of the values we followed!
188     if (!Visited.insert(V).second)
189       continue;
190 
191     // Make sure we limit the compile time for complex expressions.
192     if (Iteration++ >= MaxValues)
193       return false;
194 
    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
197     Value *NewV = nullptr;
198     if (V->getType()->isPointerTy()) {
199       NewV = V->stripPointerCasts();
200     } else {
201       CallSite CS(V);
202       if (CS && CS.getCalledFunction()) {
203         for (Argument &Arg : CS.getCalledFunction()->args())
204           if (Arg.hasReturnedAttr()) {
205             NewV = CS.getArgOperand(Arg.getArgNo());
206             break;
207           }
208       }
209     }
210     if (NewV && NewV != V) {
211       Worklist.push_back(NewV);
212       continue;
213     }
214 
215     // Look through select instructions, visit both potential values.
216     if (auto *SI = dyn_cast<SelectInst>(V)) {
217       Worklist.push_back(SI->getTrueValue());
218       Worklist.push_back(SI->getFalseValue());
219       continue;
220     }
221 
222     // Look through phi nodes, visit all live operands.
223     if (auto *PHI = dyn_cast<PHINode>(V)) {
224       assert(LivenessAA &&
225              "Expected liveness in the presence of instructions!");
226       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
227         const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
228         if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
229           AnyDead = true;
230           continue;
231         }
232         Worklist.push_back(PHI->getIncomingValue(u));
233       }
234       continue;
235     }
236 
237     // Once a leaf is reached we inform the user through the callback.
238     if (!VisitValueCB(*V, State, Iteration > 1))
239       return false;
240   } while (!Worklist.empty());
241 
  // If we actually used liveness information, we have to record a dependence.
243   if (AnyDead)
244     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
245 
246   // All values have been visited.
247   return true;
248 }
249 
250 /// Return true if \p New is equal or worse than \p Old.
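/// For integer attributes the values are compared, e.g., dereferenceable(4)
/// is worse than dereferenceable(8) as it provides less information.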
251 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
252   if (!Old.isIntAttribute())
253     return true;
254 
255   return Old.getValueAsInt() >= New.getValueAsInt();
256 }
257 
/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already
/// present in \p Attrs at the position described by \p AttrIdx.
261 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
262                              AttributeList &Attrs, int AttrIdx) {
263 
264   if (Attr.isEnumAttribute()) {
265     Attribute::AttrKind Kind = Attr.getKindAsEnum();
266     if (Attrs.hasAttribute(AttrIdx, Kind))
267       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
268         return false;
269     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
270     return true;
271   }
272   if (Attr.isStringAttribute()) {
273     StringRef Kind = Attr.getKindAsString();
274     if (Attrs.hasAttribute(AttrIdx, Kind))
275       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
276         return false;
277     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
278     return true;
279   }
280   if (Attr.isIntAttribute()) {
281     Attribute::AttrKind Kind = Attr.getKindAsEnum();
282     if (Attrs.hasAttribute(AttrIdx, Kind))
283       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
284         return false;
285     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
286     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
287     return true;
288   }
289 
290   llvm_unreachable("Expected enum or string attribute!");
291 }
292 static const Value *getPointerOperand(const Instruction *I) {
293   if (auto *LI = dyn_cast<LoadInst>(I))
294     if (!LI->isVolatile())
295       return LI->getPointerOperand();
296 
297   if (auto *SI = dyn_cast<StoreInst>(I))
298     if (!SI->isVolatile())
299       return SI->getPointerOperand();
300 
301   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
302     if (!CXI->isVolatile())
303       return CXI->getPointerOperand();
304 
305   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
306     if (!RMWI->isVolatile())
307       return RMWI->getPointerOperand();
308 
309   return nullptr;
310 }
311 static const Value *getBasePointerOfAccessPointerOperand(const Instruction *I,
312                                                          int64_t &BytesOffset,
313                                                          const DataLayout &DL) {
314   const Value *Ptr = getPointerOperand(I);
315   if (!Ptr)
316     return nullptr;
317 
318   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
319                                           /*AllowNonInbounds*/ false);
320 }
321 
322 ChangeStatus AbstractAttribute::update(Attributor &A) {
323   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
324   if (getState().isAtFixpoint())
325     return HasChanged;
326 
327   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
328 
329   HasChanged = updateImpl(A);
330 
331   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
332                     << "\n");
333 
334   return HasChanged;
335 }
336 
337 ChangeStatus
338 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
339                                    const ArrayRef<Attribute> &DeducedAttrs) {
340   Function *ScopeFn = IRP.getAssociatedFunction();
341   IRPosition::Kind PK = IRP.getPositionKind();
342 
  // In the following we use generic code to manifest the attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.
346 
347   AttributeList Attrs;
348   switch (PK) {
349   case IRPosition::IRP_INVALID:
350   case IRPosition::IRP_FLOAT:
351     return ChangeStatus::UNCHANGED;
352   case IRPosition::IRP_ARGUMENT:
353   case IRPosition::IRP_FUNCTION:
354   case IRPosition::IRP_RETURNED:
355     Attrs = ScopeFn->getAttributes();
356     break;
357   case IRPosition::IRP_CALL_SITE:
358   case IRPosition::IRP_CALL_SITE_RETURNED:
359   case IRPosition::IRP_CALL_SITE_ARGUMENT:
360     Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
361     break;
362   }
363 
364   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
365   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
366   for (const Attribute &Attr : DeducedAttrs) {
367     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
368       continue;
369 
370     HasChanged = ChangeStatus::CHANGED;
371   }
372 
373   if (HasChanged == ChangeStatus::UNCHANGED)
374     return HasChanged;
375 
376   switch (PK) {
377   case IRPosition::IRP_ARGUMENT:
378   case IRPosition::IRP_FUNCTION:
379   case IRPosition::IRP_RETURNED:
380     ScopeFn->setAttributes(Attrs);
381     break;
382   case IRPosition::IRP_CALL_SITE:
383   case IRPosition::IRP_CALL_SITE_RETURNED:
384   case IRPosition::IRP_CALL_SITE_ARGUMENT:
385     CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
386     break;
387   case IRPosition::IRP_INVALID:
388   case IRPosition::IRP_FLOAT:
389     break;
390   }
391 
392   return HasChanged;
393 }
394 
395 const IRPosition IRPosition::EmptyKey(255);
396 const IRPosition IRPosition::TombstoneKey(256);
397 
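/// For example, for a call site argument the subsuming positions are, in this
/// order: the call site argument itself, the corresponding callee argument
/// and callee function (if the call has no operand bundles), and finally the
/// associated floating value (see the IRP_CALL_SITE_ARGUMENT case below).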
398 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
399   IRPositions.emplace_back(IRP);
400 
401   ImmutableCallSite ICS(&IRP.getAnchorValue());
402   switch (IRP.getPositionKind()) {
403   case IRPosition::IRP_INVALID:
404   case IRPosition::IRP_FLOAT:
405   case IRPosition::IRP_FUNCTION:
406     return;
407   case IRPosition::IRP_ARGUMENT:
408   case IRPosition::IRP_RETURNED:
409     IRPositions.emplace_back(
410         IRPosition::function(*IRP.getAssociatedFunction()));
411     return;
412   case IRPosition::IRP_CALL_SITE:
413     assert(ICS && "Expected call site!");
414     // TODO: We need to look at the operand bundles similar to the redirection
415     //       in CallBase.
416     if (!ICS.hasOperandBundles())
417       if (const Function *Callee = ICS.getCalledFunction())
418         IRPositions.emplace_back(IRPosition::function(*Callee));
419     return;
420   case IRPosition::IRP_CALL_SITE_RETURNED:
421     assert(ICS && "Expected call site!");
422     // TODO: We need to look at the operand bundles similar to the redirection
423     //       in CallBase.
424     if (!ICS.hasOperandBundles()) {
425       if (const Function *Callee = ICS.getCalledFunction()) {
426         IRPositions.emplace_back(IRPosition::returned(*Callee));
427         IRPositions.emplace_back(IRPosition::function(*Callee));
428       }
429     }
430     IRPositions.emplace_back(
431         IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
432     return;
433   case IRPosition::IRP_CALL_SITE_ARGUMENT: {
434     int ArgNo = IRP.getArgNo();
435     assert(ICS && ArgNo >= 0 && "Expected call site!");
436     // TODO: We need to look at the operand bundles similar to the redirection
437     //       in CallBase.
438     if (!ICS.hasOperandBundles()) {
439       const Function *Callee = ICS.getCalledFunction();
440       if (Callee && Callee->arg_size() > unsigned(ArgNo))
441         IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
442       if (Callee)
443         IRPositions.emplace_back(IRPosition::function(*Callee));
444     }
445     IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
446     return;
447   }
448   }
449 }
450 
451 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
452                          bool IgnoreSubsumingPositions) const {
453   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
454     for (Attribute::AttrKind AK : AKs)
455       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
456         return true;
457     // The first position returned by the SubsumingPositionIterator is
458     // always the position itself. If we ignore subsuming positions we
459     // are done after the first iteration.
460     if (IgnoreSubsumingPositions)
461       break;
462   }
463   return false;
464 }
465 
466 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
467                           SmallVectorImpl<Attribute> &Attrs) const {
468   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
469     for (Attribute::AttrKind AK : AKs) {
470       const Attribute &Attr = EquivIRP.getAttr(AK);
471       if (Attr.getKindAsEnum() == AK)
472         Attrs.push_back(Attr);
473     }
474 }
475 
476 void IRPosition::verify() {
477   switch (KindOrArgNo) {
478   default:
479     assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
480     assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
481            "Expected call base or argument for positive attribute index!");
482     if (isa<Argument>(AnchorVal)) {
483       assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
484              "Argument number mismatch!");
485       assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
486              "Associated value mismatch!");
487     } else {
488       assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
489              "Call site argument number mismatch!");
490       assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
491                  &getAssociatedValue() &&
492              "Associated value mismatch!");
493     }
494     break;
495   case IRP_INVALID:
496     assert(!AnchorVal && "Expected no value for an invalid position!");
497     break;
498   case IRP_FLOAT:
499     assert((!isa<CallBase>(&getAssociatedValue()) &&
500             !isa<Argument>(&getAssociatedValue())) &&
501            "Expected specialized kind for call base and argument values!");
502     break;
503   case IRP_RETURNED:
504     assert(isa<Function>(AnchorVal) &&
505            "Expected function for a 'returned' position!");
506     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
507     break;
508   case IRP_CALL_SITE_RETURNED:
509     assert((isa<CallBase>(AnchorVal)) &&
510            "Expected call base for 'call site returned' position!");
511     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
512     break;
513   case IRP_CALL_SITE:
514     assert((isa<CallBase>(AnchorVal)) &&
515            "Expected call base for 'call site function' position!");
516     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
517     break;
518   case IRP_FUNCTION:
519     assert(isa<Function>(AnchorVal) &&
520            "Expected function for a 'function' position!");
521     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
522     break;
523   }
524 }
525 
526 namespace {
527 /// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, the
/// update needs to be run again).
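///
/// A rough sketch of the semantics: if \p R carries weaker assumed
/// information than \p S currently holds, \p S is clamped down to it and
/// CHANGED is returned; otherwise \p S stays as it is and UNCHANGED is
/// returned.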
530 template <typename StateType>
531 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
532   auto Assumed = S.getAssumed();
533   S ^= R;
534   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
535                                    : ChangeStatus::CHANGED;
536 }
537 
538 /// Clamp the information known for all returned values of a function
539 /// (identified by \p QueryingAA) into \p S.
540 template <typename AAType, typename StateType = typename AAType::StateType>
541 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
542                                      StateType &S) {
543   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
544                     << static_cast<const AbstractAttribute &>(QueryingAA)
545                     << " into " << S << "\n");
546 
547   assert((QueryingAA.getIRPosition().getPositionKind() ==
548               IRPosition::IRP_RETURNED ||
549           QueryingAA.getIRPosition().getPositionKind() ==
550               IRPosition::IRP_CALL_SITE_RETURNED) &&
551          "Can only clamp returned value states for a function returned or call "
552          "site returned position!");
553 
  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all return values there
  // are.
556   Optional<StateType> T;
557 
558   // Callback for each possibly returned value.
559   auto CheckReturnValue = [&](Value &RV) -> bool {
560     const IRPosition &RVPos = IRPosition::value(RV);
561     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
562     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
563                       << " @ " << RVPos << "\n");
564     const StateType &AAS = static_cast<const StateType &>(AA.getState());
565     if (T.hasValue())
566       *T &= AAS;
567     else
568       T = AAS;
569     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
570                       << "\n");
571     return T->isValidState();
572   };
573 
574   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
575     S.indicatePessimisticFixpoint();
576   else if (T.hasValue())
577     S ^= *T;
578 }
579 
/// Helper class to compose two generic deductions.
581 template <typename AAType, typename Base, typename StateType,
582           template <typename...> class F, template <typename...> class G>
583 struct AAComposeTwoGenericDeduction
584     : public F<AAType, G<AAType, Base, StateType>, StateType> {
585   AAComposeTwoGenericDeduction(const IRPosition &IRP)
586       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
587 
588   /// See AbstractAttribute::updateImpl(...).
589   ChangeStatus updateImpl(Attributor &A) override {
590     ChangeStatus ChangedF =
591         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
592     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
593     return ChangedF | ChangedG;
594   }
595 };
596 
597 /// Helper class for generic deduction: return value -> returned position.
598 template <typename AAType, typename Base,
599           typename StateType = typename AAType::StateType>
600 struct AAReturnedFromReturnedValues : public Base {
601   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
602 
603   /// See AbstractAttribute::updateImpl(...).
604   ChangeStatus updateImpl(Attributor &A) override {
605     StateType S;
606     clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
609     return clampStateAndIndicateChange<StateType>(this->getState(), S);
610   }
611 };
612 
613 /// Clamp the information known at all call sites for a given argument
614 /// (identified by \p QueryingAA) into \p S.
615 template <typename AAType, typename StateType = typename AAType::StateType>
616 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
617                                         StateType &S) {
618   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
619                     << static_cast<const AbstractAttribute &>(QueryingAA)
620                     << " into " << S << "\n");
621 
622   assert(QueryingAA.getIRPosition().getPositionKind() ==
623              IRPosition::IRP_ARGUMENT &&
624          "Can only clamp call site argument states for an argument position!");
625 
  // Use an optional state as there might not be any call sites and we want to
  // join (IntegerState::operator&) the states of all call site arguments
  // there are.
628   Optional<StateType> T;
629 
630   // The argument number which is also the call site argument number.
631   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
632 
633   auto CallSiteCheck = [&](AbstractCallSite ACS) {
634     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
637     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
638       return false;
639 
640     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
641     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
642                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
643     const StateType &AAS = static_cast<const StateType &>(AA.getState());
644     if (T.hasValue())
645       *T &= AAS;
646     else
647       T = AAS;
648     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
649                       << "\n");
650     return T->isValidState();
651   };
652 
653   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
654     S.indicatePessimisticFixpoint();
655   else if (T.hasValue())
656     S ^= *T;
657 }
658 
659 /// Helper class for generic deduction: call site argument -> argument position.
660 template <typename AAType, typename Base,
661           typename StateType = typename AAType::StateType>
662 struct AAArgumentFromCallSiteArguments : public Base {
663   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
664 
665   /// See AbstractAttribute::updateImpl(...).
666   ChangeStatus updateImpl(Attributor &A) override {
667     StateType S;
668     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
671     return clampStateAndIndicateChange<StateType>(this->getState(), S);
672   }
673 };
674 
675 /// Helper class for generic replication: function returned -> cs returned.
676 template <typename AAType, typename Base,
677           typename StateType = typename AAType::StateType>
678 struct AACallSiteReturnedFromReturned : public Base {
679   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
680 
681   /// See AbstractAttribute::updateImpl(...).
682   ChangeStatus updateImpl(Attributor &A) override {
683     assert(this->getIRPosition().getPositionKind() ==
684                IRPosition::IRP_CALL_SITE_RETURNED &&
685            "Can only wrap function returned positions for call site returned "
686            "positions!");
687     auto &S = this->getState();
688 
689     const Function *AssociatedFunction =
690         this->getIRPosition().getAssociatedFunction();
691     if (!AssociatedFunction)
692       return S.indicatePessimisticFixpoint();
693 
694     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
695     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
696     return clampStateAndIndicateChange(
697         S, static_cast<const typename AAType::StateType &>(AA.getState()));
698   }
699 };
700 
/// Helper class for generic deduction using must-be-executed-context
/// exploration. The Base class is required to have a `followUse` method:
///
///   bool followUse(Attributor &A, const Use *U, const Instruction *I)
///
/// with \p U the underlying use and \p I the user of \p U. `followUse`
/// returns true if the value should be tracked transitively.
709 template <typename AAType, typename Base,
710           typename StateType = typename AAType::StateType>
711 struct AAFromMustBeExecutedContext : public Base {
712   AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
713 
714   void initialize(Attributor &A) override {
715     Base::initialize(A);
716     const IRPosition &IRP = this->getIRPosition();
717     Instruction *CtxI = IRP.getCtxI();
718 
719     if (!CtxI)
720       return;
721 
722     for (const Use &U : IRP.getAssociatedValue().uses())
723       Uses.insert(&U);
724   }
725 
726   /// See AbstractAttribute::updateImpl(...).
727   ChangeStatus updateImpl(Attributor &A) override {
728     auto BeforeState = this->getState();
729     auto &S = this->getState();
730     Instruction *CtxI = this->getIRPosition().getCtxI();
731     if (!CtxI)
732       return ChangeStatus::UNCHANGED;
733 
734     MustBeExecutedContextExplorer &Explorer =
735         A.getInfoCache().getMustBeExecutedContextExplorer();
736 
737     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
738     for (unsigned u = 0; u < Uses.size(); ++u) {
739       const Use *U = Uses[u];
740       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
741         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
742         if (Found && Base::followUse(A, U, UserI))
743           for (const Use &Us : UserI->uses())
744             Uses.insert(&Us);
745       }
746     }
747 
748     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
749   }
750 
751 private:
752   /// Container for (transitive) uses of the associated value.
753   SetVector<const Use *> Uses;
754 };
755 
756 template <typename AAType, typename Base,
757           typename StateType = typename AAType::StateType>
758 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
759     AAComposeTwoGenericDeduction<AAType, Base, StateType,
760                                  AAFromMustBeExecutedContext,
761                                  AAArgumentFromCallSiteArguments>;
762 
763 template <typename AAType, typename Base,
764           typename StateType = typename AAType::StateType>
765 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
766     AAComposeTwoGenericDeduction<AAType, Base, StateType,
767                                  AAFromMustBeExecutedContext,
768                                  AACallSiteReturnedFromReturned>;
769 
770 /// -----------------------NoUnwind Function Attribute--------------------------
771 
772 struct AANoUnwindImpl : AANoUnwind {
773   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
774 
775   const std::string getAsStr() const override {
776     return getAssumed() ? "nounwind" : "may-unwind";
777   }
778 
779   /// See AbstractAttribute::updateImpl(...).
780   ChangeStatus updateImpl(Attributor &A) override {
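    // Opcodes of the only instructions that may unwind: the call-like
    // instructions can throw, the remaining opcodes are exception-handling
    // constructs.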
781     auto Opcodes = {
782         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
783         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
784         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
785 
786     auto CheckForNoUnwind = [&](Instruction &I) {
787       if (!I.mayThrow())
788         return true;
789 
790       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
791         const auto &NoUnwindAA =
792             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
793         return NoUnwindAA.isAssumedNoUnwind();
794       }
795       return false;
796     };
797 
798     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
799       return indicatePessimisticFixpoint();
800 
801     return ChangeStatus::UNCHANGED;
802   }
803 };
804 
805 struct AANoUnwindFunction final : public AANoUnwindImpl {
806   AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
807 
808   /// See AbstractAttribute::trackStatistics()
809   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
810 };
811 
/// NoUnwind attribute deduction for a call site.
813 struct AANoUnwindCallSite final : AANoUnwindImpl {
814   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
815 
816   /// See AbstractAttribute::initialize(...).
817   void initialize(Attributor &A) override {
818     AANoUnwindImpl::initialize(A);
819     Function *F = getAssociatedFunction();
820     if (!F)
821       indicatePessimisticFixpoint();
822   }
823 
824   /// See AbstractAttribute::updateImpl(...).
825   ChangeStatus updateImpl(Attributor &A) override {
826     // TODO: Once we have call site specific value information we can provide
827     //       call site specific liveness information and then it makes
828     //       sense to specialize attributes for call sites arguments instead of
829     //       redirecting requests to the callee argument.
830     Function *F = getAssociatedFunction();
831     const IRPosition &FnPos = IRPosition::function(*F);
832     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
833     return clampStateAndIndicateChange(
834         getState(),
835         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
836   }
837 
838   /// See AbstractAttribute::trackStatistics()
839   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
840 };
841 
842 /// --------------------- Function Return Values -------------------------------
843 
844 /// "Attribute" that collects all potential returned values and the return
845 /// instructions that they arise from.
846 ///
847 /// If there is a unique returned value R, the manifest method will:
848 ///   - mark R with the "returned" attribute, if R is an argument.
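///
/// For example, in `define i32 @f(i32 %x) { ret i32 %x }` the argument `%x`
/// would be marked 'returned'.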
849 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
850 
851   /// Mapping of values potentially returned by the associated function to the
852   /// return instructions that might return them.
853   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
854 
855   /// Mapping to remember the number of returned values for a call site such
856   /// that we can avoid updates if nothing changed.
857   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
858 
859   /// Set of unresolved calls returned by the associated function.
860   SmallSetVector<CallBase *, 4> UnresolvedCalls;
861 
862   /// State flags
863   ///
864   ///{
865   bool IsFixed = false;
866   bool IsValidState = true;
867   ///}
868 
869 public:
870   AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
871 
872   /// See AbstractAttribute::initialize(...).
873   void initialize(Attributor &A) override {
874     // Reset the state.
875     IsFixed = false;
876     IsValidState = true;
877     ReturnedValues.clear();
878 
879     Function *F = getAssociatedFunction();
880     if (!F) {
881       indicatePessimisticFixpoint();
882       return;
883     }
884 
885     // The map from instruction opcodes to those instructions in the function.
886     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
887 
888     // Look through all arguments, if one is marked as returned we are done.
889     for (Argument &Arg : F->args()) {
890       if (Arg.hasReturnedAttr()) {
891         auto &ReturnInstSet = ReturnedValues[&Arg];
892         for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
893           ReturnInstSet.insert(cast<ReturnInst>(RI));
894 
895         indicateOptimisticFixpoint();
896         return;
897       }
898     }
899 
900     if (!F->hasExactDefinition())
901       indicatePessimisticFixpoint();
902   }
903 
904   /// See AbstractAttribute::manifest(...).
905   ChangeStatus manifest(Attributor &A) override;
906 
907   /// See AbstractAttribute::getState(...).
908   AbstractState &getState() override { return *this; }
909 
910   /// See AbstractAttribute::getState(...).
911   const AbstractState &getState() const override { return *this; }
912 
913   /// See AbstractAttribute::updateImpl(Attributor &A).
914   ChangeStatus updateImpl(Attributor &A) override;
915 
916   llvm::iterator_range<iterator> returned_values() override {
917     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
918   }
919 
920   llvm::iterator_range<const_iterator> returned_values() const override {
921     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
922   }
923 
924   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
925     return UnresolvedCalls;
926   }
927 
928   /// Return the number of potential return values, -1 if unknown.
929   size_t getNumReturnValues() const override {
930     return isValidState() ? ReturnedValues.size() : -1;
931   }
932 
  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return nullptr. If it is not clear yet, return
  /// None.
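  ///
  /// For example, a function whose every `ret` returns `%x` has `%x` as its
  /// assumed unique return value; one that returns either `%x` or `%y` has
  /// none (nullptr). Undef values are ignored as they are compatible with
  /// any other value.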
936   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
937 
938   /// See AbstractState::checkForAllReturnedValues(...).
939   bool checkForAllReturnedValuesAndReturnInsts(
940       const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
941           &Pred) const override;
942 
943   /// Pretty print the attribute similar to the IR representation.
944   const std::string getAsStr() const override;
945 
946   /// See AbstractState::isAtFixpoint().
947   bool isAtFixpoint() const override { return IsFixed; }
948 
949   /// See AbstractState::isValidState().
950   bool isValidState() const override { return IsValidState; }
951 
952   /// See AbstractState::indicateOptimisticFixpoint(...).
953   ChangeStatus indicateOptimisticFixpoint() override {
954     IsFixed = true;
955     return ChangeStatus::UNCHANGED;
956   }
957 
958   ChangeStatus indicatePessimisticFixpoint() override {
959     IsFixed = true;
960     IsValidState = false;
961     return ChangeStatus::CHANGED;
962   }
963 };
964 
965 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
966   ChangeStatus Changed = ChangeStatus::UNCHANGED;
967 
968   // Bookkeeping.
969   assert(isValidState());
970   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
971                   "Number of function with known return values");
972 
973   // Check if we have an assumed unique return value that we could manifest.
974   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
975 
976   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
977     return Changed;
978 
979   // Bookkeeping.
980   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
981                   "Number of function with unique return");
982 
983   // Callback to replace the uses of CB with the constant C.
984   auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
985     if (CB.getNumUses() == 0 || CB.isMustTailCall())
986       return ChangeStatus::UNCHANGED;
987     CB.replaceAllUsesWith(&C);
988     return ChangeStatus::CHANGED;
989   };
990 
991   // If the assumed unique return value is an argument, annotate it.
992   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
993     // TODO: This should be handled differently!
994     this->AnchorVal = UniqueRVArg;
995     this->KindOrArgNo = UniqueRVArg->getArgNo();
996     Changed = IRAttribute::manifest(A);
997   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
998     // We can replace the returned value with the unique returned constant.
999     Value &AnchorValue = getAnchorValue();
1000     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1001       for (const Use &U : F->uses())
1002         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1003           if (CB->isCallee(&U)) {
1004             Constant *RVCCast =
1005                 CB->getType() == RVC->getType()
1006                     ? RVC
1007                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1008             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1009           }
1010     } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
1013       Constant *RVCCast =
1014           AnchorValue.getType() == RVC->getType()
1015               ? RVC
1016               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1017       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1018     }
1019     if (Changed == ChangeStatus::CHANGED)
1020       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1021                       "Number of function returns replaced by constant return");
1022   }
1023 
1024   return Changed;
1025 }
1026 
1027 const std::string AAReturnedValuesImpl::getAsStr() const {
1028   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1029          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1030          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1031 }
1032 
1033 Optional<Value *>
1034 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1035   // If checkForAllReturnedValues provides a unique value, ignoring potential
1036   // undef values that can also be present, it is assumed to be the actual
1037   // return value and forwarded to the caller of this method. If there are
1038   // multiple, a nullptr is returned indicating there cannot be a unique
1039   // returned value.
1040   Optional<Value *> UniqueRV;
1041 
1042   auto Pred = [&](Value &RV) -> bool {
1043     // If we found a second returned value and neither the current nor the saved
1044     // one is an undef, there is no unique returned value. Undefs are special
1045     // since we can pretend they have any value.
1046     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1047         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1048       UniqueRV = nullptr;
1049       return false;
1050     }
1051 
1052     // Do not overwrite a value with an undef.
1053     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1054       UniqueRV = &RV;
1055 
1056     return true;
1057   };
1058 
1059   if (!A.checkForAllReturnedValues(Pred, *this))
1060     UniqueRV = nullptr;
1061 
1062   return UniqueRV;
1063 }
1064 
1065 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1066     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1067         &Pred) const {
1068   if (!isValidState())
1069     return false;
1070 
1071   // Check all returned values but ignore call sites as long as we have not
1072   // encountered an overdefined one during an update.
1073   for (auto &It : ReturnedValues) {
1074     Value *RV = It.first;
1075 
1076     CallBase *CB = dyn_cast<CallBase>(RV);
1077     if (CB && !UnresolvedCalls.count(CB))
1078       continue;
1079 
1080     if (!Pred(*RV, It.second))
1081       return false;
1082   }
1083 
1084   return true;
1085 }
1086 
1087 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1088   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1089   bool Changed = false;
1090 
1091   // State used in the value traversals starting in returned values.
1092   struct RVState {
1093     // The map in which we collect return values -> return instrs.
1094     decltype(ReturnedValues) &RetValsMap;
1095     // The flag to indicate a change.
1096     bool &Changed;
1097     // The return instrs we come from.
1098     SmallSetVector<ReturnInst *, 4> RetInsts;
1099   };
1100 
1101   // Callback for a leaf value returned by the associated function.
1102   auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1103     auto Size = RVS.RetValsMap[&Val].size();
1104     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1105     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1106     RVS.Changed |= Inserted;
1107     LLVM_DEBUG({
1108       if (Inserted)
1109         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1110                << " => " << RVS.RetInsts.size() << "\n";
1111     });
1112     return true;
1113   };
1114 
1115   // Helper method to invoke the generic value traversal.
1116   auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1117     IRPosition RetValPos = IRPosition::value(RV);
1118     return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1119                                                             RVS, VisitValueCB);
1120   };
1121 
1122   // Callback for all "return intructions" live in the associated function.
1123   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1124     ReturnInst &Ret = cast<ReturnInst>(I);
1125     RVState RVS({ReturnedValues, Changed, {}});
1126     RVS.RetInsts.insert(&Ret);
1127     return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1128   };
1129 
  // Start by discovering returned values from all live return instructions in
  // the associated function.
1132   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1133     return indicatePessimisticFixpoint();
1134 
1135   // Once returned values "directly" present in the code are handled we try to
1136   // resolve returned calls.
1137   decltype(ReturnedValues) NewRVsMap;
1138   for (auto &It : ReturnedValues) {
1139     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1140                       << " by #" << It.second.size() << " RIs\n");
1141     CallBase *CB = dyn_cast<CallBase>(It.first);
1142     if (!CB || UnresolvedCalls.count(CB))
1143       continue;
1144 
1145     if (!CB->getCalledFunction()) {
1146       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1147                         << "\n");
1148       UnresolvedCalls.insert(CB);
1149       continue;
1150     }
1151 
1152     // TODO: use the function scope once we have call site AAReturnedValues.
1153     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1154         *this, IRPosition::function(*CB->getCalledFunction()));
1155     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1156                       << static_cast<const AbstractAttribute &>(RetValAA)
1157                       << "\n");
1158 
    // Skip dead ends: if we do not know anything about the returned call, we
    // mark it as unresolved and it will stay that way.
1161     if (!RetValAA.getState().isValidState()) {
1162       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1163                         << "\n");
1164       UnresolvedCalls.insert(CB);
1165       continue;
1166     }
1167 
1168     // Do not try to learn partial information. If the callee has unresolved
1169     // return values we will treat the call as unresolved/opaque.
1170     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1171     if (!RetValAAUnresolvedCalls.empty()) {
1172       UnresolvedCalls.insert(CB);
1173       continue;
1174     }
1175 
    // Now check if we can track transitively returned values. If possible,
    // that is, if all returned values can be represented in the current
    // scope, do so.
1178     bool Unresolved = false;
1179     for (auto &RetValAAIt : RetValAA.returned_values()) {
1180       Value *RetVal = RetValAAIt.first;
1181       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1182           isa<Constant>(RetVal))
1183         continue;
1184       // Anything that did not fit in the above categories cannot be resolved,
1185       // mark the call as unresolved.
1186       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1187                            "cannot be translated: "
1188                         << *RetVal << "\n");
1189       UnresolvedCalls.insert(CB);
1190       Unresolved = true;
1191       break;
1192     }
1193 
1194     if (Unresolved)
1195       continue;
1196 
1197     // Now track transitively returned values.
1198     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1199     if (NumRetAA == RetValAA.getNumReturnValues()) {
1200       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1201                            "changed since it was seen last\n");
1202       continue;
1203     }
1204     NumRetAA = RetValAA.getNumReturnValues();
1205 
1206     for (auto &RetValAAIt : RetValAA.returned_values()) {
1207       Value *RetVal = RetValAAIt.first;
1208       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1209         // Arguments are mapped to call site operands and we begin the traversal
1210         // again.
1211         bool Unused = false;
1212         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1213         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1214         continue;
1215       } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, there is
        // nothing for us to do here.
1218         continue;
1219       } else if (isa<Constant>(RetVal)) {
1220         // Constants are valid everywhere, we can simply take them.
1221         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1222         continue;
1223       }
1224     }
1225   }
1226 
  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept a record of potential new entries in a copy map, NewRVsMap.
1229   for (auto &It : NewRVsMap) {
1230     assert(!It.second.empty() && "Entry does not add anything.");
1231     auto &ReturnInsts = ReturnedValues[It.first];
1232     for (ReturnInst *RI : It.second)
1233       if (ReturnInsts.insert(RI)) {
1234         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1235                           << *It.first << " => " << *RI << "\n");
1236         Changed = true;
1237       }
1238   }
1239 
1240   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1241   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1242 }
1243 
1244 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1245   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1246 
1247   /// See AbstractAttribute::trackStatistics()
1248   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1249 };
1250 
/// Returned values information for call sites.
1252 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1253   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1254 
1255   /// See AbstractAttribute::initialize(...).
1256   void initialize(Attributor &A) override {
1257     // TODO: Once we have call site specific value information we can provide
1258     //       call site specific liveness information and then it makes
1259     //       sense to specialize attributes for call sites instead of
1260     //       redirecting requests to the callee.
1261     llvm_unreachable("Abstract attributes for returned values are not "
1262                      "supported for call sites yet!");
1263   }
1264 
1265   /// See AbstractAttribute::updateImpl(...).
1266   ChangeStatus updateImpl(Attributor &A) override {
1267     return indicatePessimisticFixpoint();
1268   }
1269 
1270   /// See AbstractAttribute::trackStatistics()
1271   void trackStatistics() const override {}
1272 };
1273 
1274 /// ------------------------ NoSync Function Attribute -------------------------
1275 
1276 struct AANoSyncImpl : AANoSync {
1277   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1278 
1279   const std::string getAsStr() const override {
1280     return getAssumed() ? "nosync" : "may-sync";
1281   }
1282 
1283   /// See AbstractAttribute::updateImpl(...).
1284   ChangeStatus updateImpl(Attributor &A) override;
1285 
  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, that is, an atomic instruction that has neither unordered nor
  /// monotonic ordering.
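  ///
  /// For example, a `load atomic ... seq_cst` is non-relaxed, whereas
  /// `monotonic` and `unordered` loads are considered relaxed.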
1289   static bool isNonRelaxedAtomic(Instruction *I);
1290 
1291   /// Helper function used to determine whether an instruction is volatile.
1292   static bool isVolatile(Instruction *I);
1293 
  /// Helper function used to check whether an intrinsic is nosync (currently
  /// only the memcpy, memmove, and memset intrinsics are handled).
1296   static bool isNoSyncIntrinsic(Instruction *I);
1297 };
1298 
1299 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1300   if (!I->isAtomic())
1301     return false;
1302 
1303   AtomicOrdering Ordering;
1304   switch (I->getOpcode()) {
1305   case Instruction::AtomicRMW:
1306     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1307     break;
1308   case Instruction::Store:
1309     Ordering = cast<StoreInst>(I)->getOrdering();
1310     break;
1311   case Instruction::Load:
1312     Ordering = cast<LoadInst>(I)->getOrdering();
1313     break;
1314   case Instruction::Fence: {
1315     auto *FI = cast<FenceInst>(I);
1316     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1317       return false;
1318     Ordering = FI->getOrdering();
1319     break;
1320   }
1321   case Instruction::AtomicCmpXchg: {
1322     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1323     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the cmpxchg be treated as
    // relaxed; otherwise it is non-relaxed.
1326     if (Success != AtomicOrdering::Unordered &&
1327         Success != AtomicOrdering::Monotonic)
1328       return true;
1329     if (Failure != AtomicOrdering::Unordered &&
1330         Failure != AtomicOrdering::Monotonic)
1331       return true;
1332     return false;
1333   }
1334   default:
1335     llvm_unreachable(
1336         "New atomic operations need to be known in the attributor.");
1337   }
1338 
1339   // Relaxed.
1340   if (Ordering == AtomicOrdering::Unordered ||
1341       Ordering == AtomicOrdering::Monotonic)
1342     return false;
1343   return true;
1344 }
1345 
/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
1348 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1349   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1350     switch (II->getIntrinsicID()) {
    // Element-wise atomic memory intrinsics can only be unordered, and are
    // therefore nosync.
1353     case Intrinsic::memset_element_unordered_atomic:
1354     case Intrinsic::memmove_element_unordered_atomic:
1355     case Intrinsic::memcpy_element_unordered_atomic:
1356       return true;
1357     case Intrinsic::memset:
1358     case Intrinsic::memmove:
1359     case Intrinsic::memcpy:
1360       if (!cast<MemIntrinsic>(II)->isVolatile())
1361         return true;
1362       return false;
1363     default:
1364       return false;
1365     }
1366   }
1367   return false;
1368 }
1369 
1370 bool AANoSyncImpl::isVolatile(Instruction *I) {
1371   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1372          "Calls should not be checked here");
1373 
1374   switch (I->getOpcode()) {
1375   case Instruction::AtomicRMW:
1376     return cast<AtomicRMWInst>(I)->isVolatile();
1377   case Instruction::Store:
1378     return cast<StoreInst>(I)->isVolatile();
1379   case Instruction::Load:
1380     return cast<LoadInst>(I)->isVolatile();
1381   case Instruction::AtomicCmpXchg:
1382     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1383   default:
1384     return false;
1385   }
1386 }
1387 
1388 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1389 
1390   auto CheckRWInstForNoSync = [&](Instruction &I) {
    // We are looking for volatile instructions or non-relaxed atomics.
    // FIXME: We should improve the handling of intrinsics.
1393 
1394     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1395       return true;
1396 
1397     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1398       if (ICS.hasFnAttr(Attribute::NoSync))
1399         return true;
1400 
1401       const auto &NoSyncAA =
1402           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1403       if (NoSyncAA.isAssumedNoSync())
1404         return true;
1405       return false;
1406     }
1407 
1408     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1409       return true;
1410 
1411     return false;
1412   };
1413 
1414   auto CheckForNoSync = [&](Instruction &I) {
1415     // At this point we handled all read/write effects and they are all
1416     // nosync, so they can be skipped.
1417     if (I.mayReadOrWriteMemory())
1418       return true;
1419 
    // Non-convergent and readnone imply nosync.
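    // Convergent operations, e.g., barriers, can communicate with other
    // threads without going through memory, so they have to be treated as
    // potentially synchronizing.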
1421     return !ImmutableCallSite(&I).isConvergent();
1422   };
1423 
1424   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1425       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1426     return indicatePessimisticFixpoint();
1427 
1428   return ChangeStatus::UNCHANGED;
1429 }
1430 
1431 struct AANoSyncFunction final : public AANoSyncImpl {
1432   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1433 
1434   /// See AbstractAttribute::trackStatistics()
1435   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1436 };
1437 
/// NoSync attribute deduction for a call site.
1439 struct AANoSyncCallSite final : AANoSyncImpl {
1440   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1441 
1442   /// See AbstractAttribute::initialize(...).
1443   void initialize(Attributor &A) override {
1444     AANoSyncImpl::initialize(A);
1445     Function *F = getAssociatedFunction();
1446     if (!F)
1447       indicatePessimisticFixpoint();
1448   }
1449 
1450   /// See AbstractAttribute::updateImpl(...).
1451   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1456     Function *F = getAssociatedFunction();
1457     const IRPosition &FnPos = IRPosition::function(*F);
1458     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1459     return clampStateAndIndicateChange(
1460         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1461   }
1462 
1463   /// See AbstractAttribute::trackStatistics()
1464   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1465 };
1466 
1467 /// ------------------------ No-Free Attributes ----------------------------
1468 
1469 struct AANoFreeImpl : public AANoFree {
1470   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1471 
1472   /// See AbstractAttribute::updateImpl(...).
1473   ChangeStatus updateImpl(Attributor &A) override {
1474     auto CheckForNoFree = [&](Instruction &I) {
1475       ImmutableCallSite ICS(&I);
1476       if (ICS.hasFnAttr(Attribute::NoFree))
1477         return true;
1478 
1479       const auto &NoFreeAA =
1480           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1481       return NoFreeAA.isAssumedNoFree();
1482     };
1483 
1484     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1485       return indicatePessimisticFixpoint();
1486     return ChangeStatus::UNCHANGED;
1487   }
1488 
1489   /// See AbstractAttribute::getAsStr().
1490   const std::string getAsStr() const override {
1491     return getAssumed() ? "nofree" : "may-free";
1492   }
1493 };
1494 
1495 struct AANoFreeFunction final : public AANoFreeImpl {
1496   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1497 
1498   /// See AbstractAttribute::trackStatistics()
1499   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1500 };
1501 
/// NoFree attribute deduction for a call site.
1503 struct AANoFreeCallSite final : AANoFreeImpl {
1504   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1505 
1506   /// See AbstractAttribute::initialize(...).
1507   void initialize(Attributor &A) override {
1508     AANoFreeImpl::initialize(A);
1509     Function *F = getAssociatedFunction();
1510     if (!F)
1511       indicatePessimisticFixpoint();
1512   }
1513 
1514   /// See AbstractAttribute::updateImpl(...).
1515   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1520     Function *F = getAssociatedFunction();
1521     const IRPosition &FnPos = IRPosition::function(*F);
1522     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1523     return clampStateAndIndicateChange(
1524         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1525   }
1526 
1527   /// See AbstractAttribute::trackStatistics()
1528   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1529 };
1530 
1531 /// NoFree attribute for floating values.
1532 struct AANoFreeFloating : AANoFreeImpl {
1533   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1534 
1535   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }
1537 
  /// See AbstractAttribute::updateImpl(...).
1539   ChangeStatus updateImpl(Attributor &A) override {
1540     const IRPosition &IRP = getIRPosition();
1541     Function *F = IRP.getAnchorScope();
1542 
1543     const AAIsDead &LivenessAA =
1544         A.getAAFor<AAIsDead>(*this, IRPosition::function(*F));
1545 
1546     const auto &NoFreeAA =
1547         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1548     if (NoFreeAA.isAssumedNoFree())
1549       return ChangeStatus::UNCHANGED;
1550 
1551     SmallPtrSet<const Use *, 8> Visited;
1552     SmallVector<const Use *, 8> Worklist;
1553 
1554     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1555     for (Use &U : AssociatedValue.uses())
1556       Worklist.push_back(&U);
1557 
1558     while (!Worklist.empty()) {
1559       const Use *U = Worklist.pop_back_val();
1560       if (!Visited.insert(U).second)
1561         continue;
1562 
      // Users that are not instructions, e.g., constant expressions, are
      // handled conservatively.
      auto *UserI = dyn_cast<Instruction>(U->getUser());
      if (!UserI)
        return indicatePessimisticFixpoint();

      if (LivenessAA.isAssumedDead(UserI))
        continue;
1569 
1570       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1571         if (CB->isBundleOperand(U))
1572           return indicatePessimisticFixpoint();
1573         if (!CB->isArgOperand(U))
1574           continue;
1575 
1576         unsigned ArgNo = CB->getArgOperandNo(U);
1577 
1578         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1579             *this, IRPosition::callsite_argument(*CB, ArgNo));
1580 
1581         if (NoFreeArg.isAssumedNoFree())
1582           continue;
1583 
1584         return indicatePessimisticFixpoint();
1585       }
1586 
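      // Look through instructions that merely forward the pointer and follow
      // their uses instead.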
1587       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1588           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1589         for (Use &U : UserI->uses())
1590           Worklist.push_back(&U);
1591         continue;
1592       }
1593 
1594       // Unknown user.
1595       return indicatePessimisticFixpoint();
1596     }
1597     return ChangeStatus::UNCHANGED;
1598   }
1599 };
1600 
/// NoFree attribute for a function argument.
1602 struct AANoFreeArgument final : AANoFreeFloating {
1603   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1604 
1605   /// See AbstractAttribute::trackStatistics()
1606   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1607 };
1608 
1609 /// NoFree attribute for call site arguments.
1610 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1611   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1612 
1613   /// See AbstractAttribute::updateImpl(...).
1614   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
1619     Argument *Arg = getAssociatedArgument();
1620     if (!Arg)
1621       return indicatePessimisticFixpoint();
1622     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1623     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1624     return clampStateAndIndicateChange(
1625         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1626   }
1627 
1628   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nofree)
  }
1630 };
1631 
1632 /// NoFree attribute for function return value.
1633 struct AANoFreeReturned final : AANoFreeFloating {
1634   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1635     llvm_unreachable("NoFree is not applicable to function returns!");
1636   }
1637 
1638   /// See AbstractAttribute::initialize(...).
1639   void initialize(Attributor &A) override {
1640     llvm_unreachable("NoFree is not applicable to function returns!");
1641   }
1642 
1643   /// See AbstractAttribute::updateImpl(...).
1644   ChangeStatus updateImpl(Attributor &A) override {
1645     llvm_unreachable("NoFree is not applicable to function returns!");
1646   }
1647 
1648   /// See AbstractAttribute::trackStatistics()
1649   void trackStatistics() const override {}
1650 };
1651 
1652 /// NoFree attribute deduction for a call site return value.
1653 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1654   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1655 
1656   ChangeStatus manifest(Attributor &A) override {
1657     return ChangeStatus::UNCHANGED;
1658   }
1659   /// See AbstractAttribute::trackStatistics()
1660   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1661 };
1662 
1663 /// ------------------------ NonNull Argument Attribute ------------------------
1664 static int64_t getKnownNonNullAndDerefBytesForUse(
1665     Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
1666     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1667   TrackUse = false;
1668 
1669   const Value *UseV = U->get();
1670   if (!UseV->getType()->isPointerTy())
1671     return 0;
1672 
1673   Type *PtrTy = UseV->getType();
1674   const Function *F = I->getFunction();
1675   bool NullPointerIsDefined =
1676       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1677   const DataLayout &DL = A.getInfoCache().getDL();
1678   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1679     if (ICS.isBundleOperand(U))
1680       return 0;
1681 
1682     if (ICS.isCallee(U)) {
1683       IsNonNull |= !NullPointerIsDefined;
1684       return 0;
1685     }
1686 
1687     unsigned ArgNo = ICS.getArgumentNo(U);
1688     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1689     // As long as we only use known information there is no need to track
1690     // dependences here.
1691     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1692                                                   /* TrackDependence */ false);
1693     IsNonNull |= DerefAA.isKnownNonNull();
1694     return DerefAA.getKnownDereferenceableBytes();
1695   }
1696 
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. For now we avoid looking through operations we cannot easily
  // reason about, e.g., non-inbounds GEPs.
1700   if (isa<CastInst>(I)) {
1701     TrackUse = true;
1702     return 0;
1703   }
1704   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1705     if (GEP->hasAllZeroIndices() ||
1706         (GEP->isInBounds() && GEP->hasAllConstantIndices())) {
1707       TrackUse = true;
1708       return 0;
1709     }
1710 
1711   int64_t Offset;
1712   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1713     if (Base == &AssociatedValue && getPointerOperand(I) == UseV) {
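      // An access at a constant offset from the base pointer implies the base
      // is dereferenceable up to the end of the access, e.g., a 4-byte load
      // at offset 4 yields 4 + 4 = 8 dereferenceable bytes for the base.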
1714       int64_t DerefBytes =
1715           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1716 
1717       IsNonNull |= !NullPointerIsDefined;
1718       return std::max(int64_t(0), DerefBytes);
1719     }
1720   }
1721   if (const Value *Base =
1722           GetPointerBaseWithConstantOffset(UseV, Offset, DL,
1723                                            /*AllowNonInbounds*/ false)) {
1724     if (Base == &AssociatedValue) {
1725       // As long as we only use known information there is no need to track
1726       // dependences here.
1727       auto &DerefAA = A.getAAFor<AADereferenceable>(
1728           QueryingAA, IRPosition::value(*Base), /* TrackDependence */ false);
1729       IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
1730       IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
1731       int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
1732       return std::max(int64_t(0), DerefBytes - std::max(int64_t(0), Offset));
1733     }
1734   }
1735 
1736   return 0;
1737 }
1738 
1739 struct AANonNullImpl : AANonNull {
1740   AANonNullImpl(const IRPosition &IRP)
1741       : AANonNull(IRP),
1742         NullIsDefined(NullPointerIsDefined(
1743             getAnchorScope(),
1744             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1745 
1746   /// See AbstractAttribute::initialize(...).
1747   void initialize(Attributor &A) override {
1748     if (!NullIsDefined &&
1749         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1750       indicateOptimisticFixpoint();
1751     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1752       indicatePessimisticFixpoint();
1753     else
1754       AANonNull::initialize(A);
1755   }
1756 
1757   /// See AAFromMustBeExecutedContext
1758   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1759     bool IsNonNull = false;
1760     bool TrackUse = false;
1761     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1762                                        IsNonNull, TrackUse);
1763     setKnown(IsNonNull);
1764     return TrackUse;
1765   }
1766 
1767   /// See AbstractAttribute::getAsStr().
1768   const std::string getAsStr() const override {
1769     return getAssumed() ? "nonnull" : "may-null";
1770   }
1771 
1772   /// Flag to determine if the underlying value can be null and still allow
1773   /// valid accesses.
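  /// This is the case, e.g., in address spaces in which the null pointer is
  /// valid and addressable (see llvm::NullPointerIsDefined).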
1774   const bool NullIsDefined;
1775 };
1776 
1777 /// NonNull attribute for a floating value.
1778 struct AANonNullFloating
1779     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1780   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1781   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1782 
1783   /// See AbstractAttribute::updateImpl(...).
1784   ChangeStatus updateImpl(Attributor &A) override {
1785     ChangeStatus Change = Base::updateImpl(A);
1786     if (isKnownNonNull())
1787       return Change;
1788 
1789     if (!NullIsDefined) {
1790       const auto &DerefAA =
1791           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1792       if (DerefAA.getAssumedDereferenceableBytes())
1793         return Change;
1794     }
1795 
1796     const DataLayout &DL = A.getDataLayout();
1797 
1798     DominatorTree *DT = nullptr;
1799     InformationCache &InfoCache = A.getInfoCache();
1800     if (const Function *Fn = getAnchorScope())
1801       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1802 
1803     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1804                             bool Stripped) -> bool {
1805       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1806       if (!Stripped && this == &AA) {
1807         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1808           T.indicatePessimisticFixpoint();
1809       } else {
1810         // Use abstract attribute information.
1811         const AANonNull::StateType &NS =
1812             static_cast<const AANonNull::StateType &>(AA.getState());
1813         T ^= NS;
1814       }
1815       return T.isValidState();
1816     };
1817 
1818     StateType T;
1819     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1820                                                      T, VisitValueCB))
1821       return indicatePessimisticFixpoint();
1822 
1823     return clampStateAndIndicateChange(getState(), T);
1824   }
1825 
1826   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
1828 };
1829 
1830 /// NonNull attribute for function return value.
1831 struct AANonNullReturned final
1832     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1833   AANonNullReturned(const IRPosition &IRP)
1834       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1835 
1836   /// See AbstractAttribute::trackStatistics()
1837   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1838 };
1839 
1840 /// NonNull attribute for function argument.
1841 struct AANonNullArgument final
1842     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1843                                                               AANonNullImpl> {
1844   AANonNullArgument(const IRPosition &IRP)
1845       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1846                                                                 AANonNullImpl>(
1847             IRP) {}
1848 
1849   /// See AbstractAttribute::trackStatistics()
1850   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1851 };
1852 
1853 struct AANonNullCallSiteArgument final : AANonNullFloating {
1854   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1855 
1856   /// See AbstractAttribute::trackStatistics()
1857   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1858 };
1859 
1860 /// NonNull attribute for a call site return position.
1861 struct AANonNullCallSiteReturned final
1862     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1863                                                              AANonNullImpl> {
1864   AANonNullCallSiteReturned(const IRPosition &IRP)
1865       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1866                                                                AANonNullImpl>(
1867             IRP) {}
1868 
1869   /// See AbstractAttribute::trackStatistics()
1870   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1871 };
1872 
1873 /// ------------------------ No-Recurse Attributes ----------------------------
1874 
1875 struct AANoRecurseImpl : public AANoRecurse {
1876   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1877 
1878   /// See AbstractAttribute::getAsStr()
1879   const std::string getAsStr() const override {
1880     return getAssumed() ? "norecurse" : "may-recurse";
1881   }
1882 };
1883 
1884 struct AANoRecurseFunction final : AANoRecurseImpl {
1885   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1886 
1887   /// See AbstractAttribute::initialize(...).
1888   void initialize(Attributor &A) override {
1889     AANoRecurseImpl::initialize(A);
1890     if (const Function *F = getAnchorScope())
1891       if (A.getInfoCache().getSccSize(*F) == 1)
1892         return;
1893     indicatePessimisticFixpoint();
1894   }
1895 
1896   /// See AbstractAttribute::updateImpl(...).
1897   ChangeStatus updateImpl(Attributor &A) override {
1898 
1899     auto CheckForNoRecurse = [&](Instruction &I) {
1900       ImmutableCallSite ICS(&I);
1901       if (ICS.hasFnAttr(Attribute::NoRecurse))
1902         return true;
1903 
1904       const auto &NoRecurseAA =
1905           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1906       if (!NoRecurseAA.isAssumedNoRecurse())
1907         return false;
1908 
1909       // Recursion to the same function
1910       if (ICS.getCalledFunction() == getAnchorScope())
1911         return false;
1912 
1913       return true;
1914     };
1915 
1916     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1917       return indicatePessimisticFixpoint();
1918     return ChangeStatus::UNCHANGED;
1919   }
1920 
1921   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1922 };
1923 
/// NoRecurse attribute deduction for a call site.
1925 struct AANoRecurseCallSite final : AANoRecurseImpl {
1926   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1927 
1928   /// See AbstractAttribute::initialize(...).
1929   void initialize(Attributor &A) override {
1930     AANoRecurseImpl::initialize(A);
1931     Function *F = getAssociatedFunction();
1932     if (!F)
1933       indicatePessimisticFixpoint();
1934   }
1935 
1936   /// See AbstractAttribute::updateImpl(...).
1937   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1942     Function *F = getAssociatedFunction();
1943     const IRPosition &FnPos = IRPosition::function(*F);
1944     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1945     return clampStateAndIndicateChange(
1946         getState(),
1947         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1948   }
1949 
1950   /// See AbstractAttribute::trackStatistics()
1951   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1952 };
1953 
1954 /// ------------------------ Will-Return Attributes ----------------------------
1955 
// Helper function that checks whether a function has any cycle.
// TODO: Replace with more efficient code
1958 static bool containsCycle(Function &F) {
1959   SmallPtrSet<BasicBlock *, 32> Visited;
1960 
  // Traverse the blocks with a depth-first search and check whether a
  // successor has already been visited.
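  // Note that this over-approximates: any edge to an already-visited block is
  // counted, so an acyclic diamond CFG (A->B, A->C, B->D, C->D) is also
  // reported as containing a cycle. That is conservative and therefore safe
  // here.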
1962   for (BasicBlock *BB : depth_first(&F)) {
1963     Visited.insert(BB);
1964     for (auto *SuccBB : successors(BB)) {
1965       if (Visited.count(SuccBB))
1966         return true;
1967     }
1968   }
1969   return false;
1970 }
1971 
// Helper function that checks whether the function has a loop which might
// become an endless loop.
1974 // FIXME: Any cycle is regarded as endless loop for now.
1975 //        We have to allow some patterns.
1976 static bool containsPossiblyEndlessLoop(Function *F) {
1977   return !F || !F->hasExactDefinition() || containsCycle(*F);
1978 }
1979 
1980 struct AAWillReturnImpl : public AAWillReturn {
1981   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
1982 
1983   /// See AbstractAttribute::initialize(...).
1984   void initialize(Attributor &A) override {
1985     AAWillReturn::initialize(A);
1986 
1987     Function *F = getAssociatedFunction();
1988     if (containsPossiblyEndlessLoop(F))
1989       indicatePessimisticFixpoint();
1990   }
1991 
1992   /// See AbstractAttribute::updateImpl(...).
1993   ChangeStatus updateImpl(Attributor &A) override {
1994     auto CheckForWillReturn = [&](Instruction &I) {
1995       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
1996       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
1997       if (WillReturnAA.isKnownWillReturn())
1998         return true;
1999       if (!WillReturnAA.isAssumedWillReturn())
2000         return false;
2001       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2002       return NoRecurseAA.isAssumedNoRecurse();
2003     };
2004 
2005     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2006       return indicatePessimisticFixpoint();
2007 
2008     return ChangeStatus::UNCHANGED;
2009   }
2010 
2011   /// See AbstractAttribute::getAsStr()
2012   const std::string getAsStr() const override {
2013     return getAssumed() ? "willreturn" : "may-noreturn";
2014   }
2015 };
2016 
2017 struct AAWillReturnFunction final : AAWillReturnImpl {
2018   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2019 
2020   /// See AbstractAttribute::trackStatistics()
2021   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2022 };
2023 
/// WillReturn attribute deduction for a call site.
2025 struct AAWillReturnCallSite final : AAWillReturnImpl {
2026   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2027 
2028   /// See AbstractAttribute::initialize(...).
2029   void initialize(Attributor &A) override {
2030     AAWillReturnImpl::initialize(A);
2031     Function *F = getAssociatedFunction();
2032     if (!F)
2033       indicatePessimisticFixpoint();
2034   }
2035 
2036   /// See AbstractAttribute::updateImpl(...).
2037   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2042     Function *F = getAssociatedFunction();
2043     const IRPosition &FnPos = IRPosition::function(*F);
2044     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2045     return clampStateAndIndicateChange(
2046         getState(),
2047         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2048   }
2049 
2050   /// See AbstractAttribute::trackStatistics()
2051   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2052 };
2053 
2054 /// ------------------------ NoAlias Argument Attribute ------------------------
2055 
2056 struct AANoAliasImpl : AANoAlias {
2057   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2058 
2059   const std::string getAsStr() const override {
2060     return getAssumed() ? "noalias" : "may-alias";
2061   }
2062 };
2063 
2064 /// NoAlias attribute for a floating value.
2065 struct AANoAliasFloating final : AANoAliasImpl {
2066   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2067 
2068   /// See AbstractAttribute::initialize(...).
2069   void initialize(Attributor &A) override {
2070     AANoAliasImpl::initialize(A);
2071     Value &Val = getAssociatedValue();
2072     if (isa<AllocaInst>(Val))
2073       indicateOptimisticFixpoint();
2074     if (isa<ConstantPointerNull>(Val) &&
2075         Val.getType()->getPointerAddressSpace() == 0)
2076       indicateOptimisticFixpoint();
2077   }
2078 
2079   /// See AbstractAttribute::updateImpl(...).
2080   ChangeStatus updateImpl(Attributor &A) override {
2081     // TODO: Implement this.
2082     return indicatePessimisticFixpoint();
2083   }
2084 
2085   /// See AbstractAttribute::trackStatistics()
2086   void trackStatistics() const override {
2087     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2088   }
2089 };
2090 
2091 /// NoAlias attribute for an argument.
2092 struct AANoAliasArgument final
2093     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2094   AANoAliasArgument(const IRPosition &IRP)
2095       : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}
2096 
2097   /// See AbstractAttribute::trackStatistics()
2098   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2099 };
2100 
2101 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2102   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2103 
2104   /// See AbstractAttribute::initialize(...).
2105   void initialize(Attributor &A) override {
2106     // See callsite argument attribute and callee argument attribute.
2107     ImmutableCallSite ICS(&getAnchorValue());
2108     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2109       indicateOptimisticFixpoint();
2110   }
2111 
2112   /// See AbstractAttribute::updateImpl(...).
2113   ChangeStatus updateImpl(Attributor &A) override {
2114     // We can deduce "noalias" if the following conditions hold.
2115     // (i)   Associated value is assumed to be noalias in the definition.
2116     // (ii)  Associated value is assumed to be no-capture in all the uses
2117     //       possibly executed before this callsite.
2118     // (iii) There is no other pointer argument which could alias with the
2119     //       value.
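    // For example (illustrative IR), for `call void @f(i8* %p, i8* %q)` the
    // argument %p can only be noalias at the call site if %p is noalias at
    // its definition, is not captured before the call, and does not alias %q.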
2120 
2121     const Value &V = getAssociatedValue();
2122     const IRPosition IRP = IRPosition::value(V);
2123 
2124     // (i) Check whether noalias holds in the definition.
2125 
2126     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
2127 
2128     if (!NoAliasAA.isAssumedNoAlias())
2129       return indicatePessimisticFixpoint();
2130 
2131     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
2132                       << " is assumed NoAlias in the definition\n");
2133 
2134     // (ii) Check whether the value is captured in the scope using AANoCapture.
2135     //      FIXME: This is conservative though, it is better to look at CFG and
2136     //             check only uses possibly executed before this callsite.
2137 
2138     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2139     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2140       LLVM_DEBUG(
2141           dbgs() << "[Attributor][AANoAliasCSArg] " << V
2142                  << " cannot be noalias as it is potentially captured\n");
2143       return indicatePessimisticFixpoint();
2144     }
2145 
2146     // (iii) Check there is no other pointer argument which could alias with the
2147     // value.
2148     ImmutableCallSite ICS(&getAnchorValue());
2149     for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2150       if (getArgNo() == (int)i)
2151         continue;
2152       const Value *ArgOp = ICS.getArgOperand(i);
2153       if (!ArgOp->getType()->isPointerTy())
2154         continue;
2155 
2156       if (const Function *F = getAnchorScope()) {
2157         if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
          bool IsAliasing = !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
          LLVM_DEBUG(dbgs()
                     << "[Attributor][NoAliasCSArg] Check alias between "
                        "callsite arguments "
                     << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
                     << getAssociatedValue() << " " << *ArgOp << " => "
                     << (IsAliasing ? "" : "no-") << "alias \n");

          if (!IsAliasing)
            continue;
2168         }
2169       }
2170       return indicatePessimisticFixpoint();
2171     }
2172 
2173     return ChangeStatus::UNCHANGED;
2174   }
2175 
2176   /// See AbstractAttribute::trackStatistics()
2177   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2178 };
2179 
2180 /// NoAlias attribute for function return value.
2181 struct AANoAliasReturned final : AANoAliasImpl {
2182   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2183 
2184   /// See AbstractAttribute::updateImpl(...).
2185   virtual ChangeStatus updateImpl(Attributor &A) override {
2186 
2187     auto CheckReturnValue = [&](Value &RV) -> bool {
2188       if (Constant *C = dyn_cast<Constant>(&RV))
2189         if (C->isNullValue() || isa<UndefValue>(C))
2190           return true;
2191 
      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
2194       ImmutableCallSite ICS(&RV);
2195       if (!ICS)
2196         return false;
2197 
2198       const IRPosition &RVPos = IRPosition::value(RV);
2199       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2200       if (!NoAliasAA.isAssumedNoAlias())
2201         return false;
2202 
2203       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2204       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2205     };
2206 
2207     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2208       return indicatePessimisticFixpoint();
2209 
2210     return ChangeStatus::UNCHANGED;
2211   }
2212 
2213   /// See AbstractAttribute::trackStatistics()
2214   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2215 };
2216 
2217 /// NoAlias attribute deduction for a call site return value.
2218 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2219   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2220 
2221   /// See AbstractAttribute::initialize(...).
2222   void initialize(Attributor &A) override {
2223     AANoAliasImpl::initialize(A);
2224     Function *F = getAssociatedFunction();
2225     if (!F)
2226       indicatePessimisticFixpoint();
2227   }
2228 
2229   /// See AbstractAttribute::updateImpl(...).
2230   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2235     Function *F = getAssociatedFunction();
2236     const IRPosition &FnPos = IRPosition::returned(*F);
2237     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2238     return clampStateAndIndicateChange(
2239         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2240   }
2241 
2242   /// See AbstractAttribute::trackStatistics()
2243   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2244 };
2245 
2246 /// -------------------AAIsDead Function Attribute-----------------------
2247 
2248 struct AAIsDeadValueImpl : public AAIsDead {
2249   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2250 
2251   /// See AAIsDead::isAssumedDead().
2252   bool isAssumedDead() const override { return getAssumed(); }
2253 
2254   /// See AAIsDead::isAssumedDead(BasicBlock *).
2255   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2256 
2257   /// See AAIsDead::isKnownDead(BasicBlock *).
2258   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2259 
2260   /// See AAIsDead::isAssumedDead(Instruction *I).
2261   bool isAssumedDead(const Instruction *I) const override {
2262     return I == getCtxI() && isAssumedDead();
2263   }
2264 
2265   /// See AAIsDead::isKnownDead(Instruction *I).
2266   bool isKnownDead(const Instruction *I) const override {
2267     return I == getCtxI() && getKnown();
2268   }
2269 
2270   /// See AbstractAttribute::getAsStr().
2271   const std::string getAsStr() const override {
2272     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2273   }
2274 };
2275 
2276 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2277   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2278 
2279   /// See AbstractAttribute::initialize(...).
2280   void initialize(Attributor &A) override {
2281     if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2282       if (!wouldInstructionBeTriviallyDead(I))
2283         indicatePessimisticFixpoint();
2284     if (isa<UndefValue>(getAssociatedValue()))
2285       indicatePessimisticFixpoint();
2286   }
2287 
2288   /// See AbstractAttribute::updateImpl(...).
2289   ChangeStatus updateImpl(Attributor &A) override {
2290     auto UsePred = [&](const Use &U, bool &Follow) {
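      // A use is assumed dead if it feeds a dead call site argument, a return
      // in a function whose return value is dead, or a user instruction that
      // would be trivially dead and whose own uses are (recursively) dead.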
2291       Instruction *UserI = cast<Instruction>(U.getUser());
2292       if (CallSite CS = CallSite(UserI)) {
2293         if (!CS.isArgOperand(&U))
2294           return false;
2295         const IRPosition &CSArgPos =
2296             IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2297         const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2298         return CSArgIsDead.isAssumedDead();
2299       }
2300       if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2301         const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2302         const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2303         return RetIsDeadAA.isAssumedDead();
2304       }
2305       Follow = true;
2306       return wouldInstructionBeTriviallyDead(UserI);
2307     };
2308 
2309     if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2310       return indicatePessimisticFixpoint();
2311     return ChangeStatus::UNCHANGED;
2312   }
2313 
2314   /// See AbstractAttribute::manifest(...).
2315   ChangeStatus manifest(Attributor &A) override {
2316     Value &V = getAssociatedValue();
2317     if (auto *I = dyn_cast<Instruction>(&V))
2318       if (wouldInstructionBeTriviallyDead(I)) {
2319         A.deleteAfterManifest(*I);
2320         return ChangeStatus::CHANGED;
2321       }
2322 
2323     if (V.use_empty())
2324       return ChangeStatus::UNCHANGED;
2325 
2326     UndefValue &UV = *UndefValue::get(V.getType());
2327     bool AnyChange = false;
2328     for (Use &U : V.uses())
2329       AnyChange |= A.changeUseAfterManifest(U, UV);
2330     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2331   }
2332 
2333   /// See AbstractAttribute::trackStatistics()
2334   void trackStatistics() const override {
2335     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2336   }
2337 };
2338 
2339 struct AAIsDeadArgument : public AAIsDeadFloating {
2340   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2341 
2342   /// See AbstractAttribute::initialize(...).
2343   void initialize(Attributor &A) override {
2344     if (!getAssociatedFunction()->hasExactDefinition())
2345       indicatePessimisticFixpoint();
2346   }
2347 
2348   /// See AbstractAttribute::trackStatistics()
2349   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2350 };
2351 
2352 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2353   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2354 
2355   /// See AbstractAttribute::initialize(...).
2356   void initialize(Attributor &A) override {
2357     if (isa<UndefValue>(getAssociatedValue()))
2358       indicatePessimisticFixpoint();
2359   }
2360 
2361   /// See AbstractAttribute::updateImpl(...).
2362   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
2367     Argument *Arg = getAssociatedArgument();
2368     if (!Arg)
2369       return indicatePessimisticFixpoint();
2370     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2371     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2372     return clampStateAndIndicateChange(
2373         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2374   }
2375 
2376   /// See AbstractAttribute::manifest(...).
2377   ChangeStatus manifest(Attributor &A) override {
2378     CallBase &CB = cast<CallBase>(getAnchorValue());
2379     Use &U = CB.getArgOperandUse(getArgNo());
2380     assert(!isa<UndefValue>(U.get()) &&
2381            "Expected undef values to be filtered out!");
2382     UndefValue &UV = *UndefValue::get(U->getType());
2383     if (A.changeUseAfterManifest(U, UV))
2384       return ChangeStatus::CHANGED;
2385     return ChangeStatus::UNCHANGED;
2386   }
2387 
2388   /// See AbstractAttribute::trackStatistics()
2389   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2390 };
2391 
2392 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2393   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2394 
2395   /// See AbstractAttribute::updateImpl(...).
2396   ChangeStatus updateImpl(Attributor &A) override {
2397 
2398     auto PredForCallSite = [&](AbstractCallSite ACS) {
2399       if (ACS.isCallbackCall())
2400         return false;
2401       const IRPosition &CSRetPos =
2402           IRPosition::callsite_returned(ACS.getCallSite());
2403       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2404       return RetIsDeadAA.isAssumedDead();
2405     };
2406 
2407     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
2408       return indicatePessimisticFixpoint();
2409 
2410     return ChangeStatus::UNCHANGED;
2411   }
2412 
2413   /// See AbstractAttribute::manifest(...).
2414   ChangeStatus manifest(Attributor &A) override {
2415     // TODO: Rewrite the signature to return void?
2416     bool AnyChange = false;
2417     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2418     auto RetInstPred = [&](Instruction &I) {
2419       ReturnInst &RI = cast<ReturnInst>(I);
2420       if (!isa<UndefValue>(RI.getReturnValue()))
2421         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2422       return true;
2423     };
2424     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2425     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2426   }
2427 
2428   /// See AbstractAttribute::trackStatistics()
2429   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2430 };
2431 
2432 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2433   AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2434 
2435   /// See AbstractAttribute::initialize(...).
2436   void initialize(Attributor &A) override {}
2437 
2438   /// See AbstractAttribute::trackStatistics()
2439   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
2440 };
2441 
2442 struct AAIsDeadFunction : public AAIsDead {
2443   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2444 
2445   /// See AbstractAttribute::initialize(...).
2446   void initialize(Attributor &A) override {
2447     const Function *F = getAssociatedFunction();
2448     if (F && !F->isDeclaration()) {
2449       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2450       assumeLive(A, F->getEntryBlock());
2451     }
2452   }
2453 
2454   /// See AbstractAttribute::getAsStr().
2455   const std::string getAsStr() const override {
2456     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2457            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
2458            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2459            std::to_string(KnownDeadEnds.size()) + "]";
2460   }
2461 
2462   /// See AbstractAttribute::manifest(...).
2463   ChangeStatus manifest(Attributor &A) override {
2464     assert(getState().isValidState() &&
2465            "Attempted to manifest an invalid state!");
2466 
2467     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2468     Function &F = *getAssociatedFunction();
2469 
2470     if (AssumedLiveBlocks.empty()) {
2471       A.deleteAfterManifest(F);
2472       return ChangeStatus::CHANGED;
2473     }
2474 
    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
2478     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2479 
2480     KnownDeadEnds.set_union(ToBeExploredFrom);
2481     for (const Instruction *DeadEndI : KnownDeadEnds) {
2482       auto *CB = dyn_cast<CallBase>(DeadEndI);
2483       if (!CB)
2484         continue;
2485       const auto &NoReturnAA =
2486           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2487       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2488       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2489         continue;
2490       Instruction *I = const_cast<Instruction *>(DeadEndI);
2491       BasicBlock *BB = I->getParent();
2492       Instruction *SplitPos = I->getNextNode();
2493       // TODO: mark stuff before unreachable instructions as dead.
2494 
2495       if (auto *II = dyn_cast<InvokeInst>(I)) {
2496         // If we keep the invoke the split position is at the beginning of the
        // normal destination block (it invokes a noreturn function after all).
2498         BasicBlock *NormalDestBB = II->getNormalDest();
2499         SplitPos = &NormalDestBB->front();
2500 
        // Invoke is replaced with a call and unreachable is placed after it if
        // the callee is nounwind and noreturn. Otherwise, we keep the invoke
        // and only place an unreachable in the normal successor.
2504         if (Invoke2CallAllowed) {
2505           if (II->getCalledFunction()) {
2506             const IRPosition &IPos = IRPosition::callsite_function(*II);
2507             const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2508             if (AANoUnw.isAssumedNoUnwind()) {
2509               LLVM_DEBUG(dbgs()
2510                          << "[AAIsDead] Replace invoke with call inst\n");
2511               CallInst *CI = createCallMatchingInvoke(II);
2512               CI->insertBefore(II);
2513               CI->takeName(II);
2514               II->replaceAllUsesWith(CI);
2515 
              // If this is a nounwind + mayreturn invoke we only remove the
              // unwind edge. This is done by moving the invoke into a new and
              // dead block and connecting the normal destination of the invoke
              // with a branch that follows the call replacement we created
              // above.
2520               if (MayReturn) {
                BasicBlock *NewDeadBB =
                    SplitBlock(BB, II, nullptr, nullptr, nullptr, ".i2c");
2522                 assert(isa<BranchInst>(BB->getTerminator()) &&
2523                        BB->getTerminator()->getNumSuccessors() == 1 &&
2524                        BB->getTerminator()->getSuccessor(0) == NewDeadBB);
2525                 new UnreachableInst(I->getContext(), NewDeadBB);
2526                 BB->getTerminator()->setOperand(0, NormalDestBB);
2527                 A.deleteAfterManifest(*II);
2528                 continue;
2529               }
2530 
2531               // We do not need an invoke (II) but instead want a call followed
2532               // by an unreachable. However, we do not remove II as other
2533               // abstract attributes might have it cached as part of their
2534               // results. Given that we modify the CFG anyway, we simply keep II
2535               // around but in a new dead block. To avoid II being live through
2536               // a different edge we have to ensure the block we place it in is
2537               // only reached from the current block of II and then not reached
2538               // at all when we insert the unreachable.
2539               SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
2540               SplitPos = CI->getNextNode();
2541             }
2542           }
2543         }
2544 
2545         if (SplitPos == &NormalDestBB->front()) {
2546           // If this is an invoke of a noreturn function the edge to the normal
2547           // destination block is dead but not necessarily the block itself.
2548           // TODO: We need to move to an edge based system during deduction and
2549           //       also manifest.
2550           assert(!NormalDestBB->isLandingPad() &&
2551                  "Expected the normal destination not to be a landingpad!");
2552           if (NormalDestBB->getUniquePredecessor() == BB) {
2553             assumeLive(A, *NormalDestBB);
2554           } else {
2555             BasicBlock *SplitBB =
2556                 SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2557             // The split block is live even if it contains only an unreachable
2558             // instruction at the end.
2559             assumeLive(A, *SplitBB);
2560             SplitPos = SplitBB->getTerminator();
2561             HasChanged = ChangeStatus::CHANGED;
2562           }
2563         }
2564       }
2565 
2566       if (isa_and_nonnull<UnreachableInst>(SplitPos))
2567         continue;
2568 
2569       BB = SplitPos->getParent();
2570       SplitBlock(BB, SplitPos);
2571       changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
2572       HasChanged = ChangeStatus::CHANGED;
2573     }
2574 
2575     for (BasicBlock &BB : F)
2576       if (!AssumedLiveBlocks.count(&BB))
2577         A.deleteAfterManifest(BB);
2578 
2579     return HasChanged;
2580   }
2581 
2582   /// See AbstractAttribute::updateImpl(...).
2583   ChangeStatus updateImpl(Attributor &A) override;
2584 
2585   /// See AbstractAttribute::trackStatistics()
2586   void trackStatistics() const override {}
2587 
2588   /// Returns true if the function is assumed dead.
2589   bool isAssumedDead() const override { return false; }
2590 
2591   /// See AAIsDead::isAssumedDead(BasicBlock *).
2592   bool isAssumedDead(const BasicBlock *BB) const override {
2593     assert(BB->getParent() == getAssociatedFunction() &&
2594            "BB must be in the same anchor scope function.");
2595 
2596     if (!getAssumed())
2597       return false;
2598     return !AssumedLiveBlocks.count(BB);
2599   }
2600 
2601   /// See AAIsDead::isKnownDead(BasicBlock *).
2602   bool isKnownDead(const BasicBlock *BB) const override {
2603     return getKnown() && isAssumedDead(BB);
2604   }
2605 
2606   /// See AAIsDead::isAssumed(Instruction *I).
2607   bool isAssumedDead(const Instruction *I) const override {
2608     assert(I->getParent()->getParent() == getAssociatedFunction() &&
2609            "Instruction must be in the same anchor scope function.");
2610 
2611     if (!getAssumed())
2612       return false;
2613 
    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
2616     if (!AssumedLiveBlocks.count(I->getParent()))
2617       return true;
2618 
2619     // If it is not after a liveness barrier it is live.
2620     const Instruction *PrevI = I->getPrevNode();
2621     while (PrevI) {
2622       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2623         return true;
2624       PrevI = PrevI->getPrevNode();
2625     }
2626     return false;
2627   }
2628 
2629   /// See AAIsDead::isKnownDead(Instruction *I).
2630   bool isKnownDead(const Instruction *I) const override {
2631     return getKnown() && isAssumedDead(I);
2632   }
2633 
2634   /// Determine if \p F might catch asynchronous exceptions.
2635   static bool mayCatchAsynchronousExceptions(const Function &F) {
2636     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2637   }
2638 
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
2641   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2642     if (!AssumedLiveBlocks.insert(&BB).second)
2643       return false;
2644 
2645     // We assume that all of BB is (probably) live now and if there are calls to
2646     // internal functions we will assume that those are now live as well. This
2647     // is a performance optimization for blocks with calls to a lot of internal
2648     // functions. It can however cause dead functions to be treated as live.
2649     for (const Instruction &I : BB)
2650       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2651         if (const Function *F = ICS.getCalledFunction())
2652           if (F->hasLocalLinkage())
2653             A.markLiveInternalFunction(*F);
2654     return true;
2655   }
2656 
2657   /// Collection of instructions that need to be explored again, e.g., we
2658   /// did assume they do not transfer control to (one of their) successors.
2659   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2660 
2661   /// Collection of instructions that are known to not transfer control.
2662   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2663 
2664   /// Collection of all assumed live BasicBlocks.
2665   DenseSet<const BasicBlock *> AssumedLiveBlocks;
2666 };
2667 
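/// Identify the successor instructions of \p CB that have to be considered
/// alive. Returns true if assumed (not known) information was used and the
/// result may therefore change in a later iteration.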
2668 static bool
2669 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2670                         AbstractAttribute &AA,
2671                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2672   const IRPosition &IPos = IRPosition::callsite_function(CB);
2673 
2674   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2675   if (NoReturnAA.isAssumedNoReturn())
2676     return !NoReturnAA.isKnownNoReturn();
2677   if (CB.isTerminator())
2678     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
2679   else
2680     AliveSuccessors.push_back(CB.getNextNode());
2681   return false;
2682 }
2683 
2684 static bool
2685 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
2686                         AbstractAttribute &AA,
2687                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2688   bool UsedAssumedInformation =
2689       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
2690 
  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
2694   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
2695     AliveSuccessors.push_back(&II.getUnwindDest()->front());
2696   } else {
2697     const IRPosition &IPos = IRPosition::callsite_function(II);
2698     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
2699     if (AANoUnw.isAssumedNoUnwind()) {
2700       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
2701     } else {
2702       AliveSuccessors.push_back(&II.getUnwindDest()->front());
2703     }
2704   }
2705   return UsedAssumedInformation;
2706 }
2707 
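/// Try to simplify \p V with the help of AAValueSimplify and return the result
/// as a constant integer. llvm::None is returned if no simplified value is
/// known yet or if the value simplifies to undef; nullptr is returned if the
/// simplified value is not a constant integer. \p UsedAssumedInformation is
/// set if the simplification is based on assumed rather than known facts.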
2708 static Optional<ConstantInt *>
2709 getAssumedConstant(Attributor &A, const Value &V, AbstractAttribute &AA,
2710                    bool &UsedAssumedInformation) {
2711   const auto &ValueSimplifyAA =
2712       A.getAAFor<AAValueSimplify>(AA, IRPosition::value(V));
2713   Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
2714   UsedAssumedInformation |= !ValueSimplifyAA.isKnown();
2715   if (!SimplifiedV.hasValue())
2716     return llvm::None;
2717   if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue()))
2718     return llvm::None;
2719   return dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
2720 }
2721 
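// A simplified illustration for conditional branches: given
//
//   br i1 %c, label %t, label %f
//
// with %c assumed to simplify to true, only %t is made alive; if that rests on
// assumed, not known, information the branch is requeued for re-exploration.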
2722 static bool
2723 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
2724                         AbstractAttribute &AA,
2725                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2726   bool UsedAssumedInformation = false;
2727   if (BI.getNumSuccessors() == 1) {
2728     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2729   } else {
2730     Optional<ConstantInt *> CI =
2731         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
2732     if (!CI.hasValue()) {
2733       // No value yet, assume both edges are dead.
2734     } else if (CI.getValue()) {
2735       const BasicBlock *SuccBB =
2736           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
2737       AliveSuccessors.push_back(&SuccBB->front());
2738     } else {
2739       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2740       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
2741       UsedAssumedInformation = false;
2742     }
2743   }
2744   return UsedAssumedInformation;
2745 }
2746 
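// A simplified illustration for switches: given
//
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i32 1, label %b ]
//
// with %x assumed to simplify to the constant 1, only %b is made alive; if %x
// does not simplify to a constant integer, all successors are alive.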
2747 static bool
2748 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
2749                         AbstractAttribute &AA,
2750                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2751   bool UsedAssumedInformation = false;
2752   Optional<ConstantInt *> CI =
2753       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
2754   if (!CI.hasValue()) {
2755     // No value yet, assume all edges are dead.
2756   } else if (CI.getValue()) {
2757     for (auto &CaseIt : SI.cases()) {
2758       if (CaseIt.getCaseValue() == CI.getValue()) {
2759         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
2760         return UsedAssumedInformation;
2761       }
2762     }
2763     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
2764     return UsedAssumedInformation;
2765   } else {
2766     for (const BasicBlock *SuccBB : successors(SI.getParent()))
2767       AliveSuccessors.push_back(&SuccBB->front());
2768   }
2769   return UsedAssumedInformation;
2770 }
2771 
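// The update below performs a forward exploration: instructions are taken off
// a worklist, their alive successors are identified with the helpers above,
// newly reached blocks are registered via assumeLive, and every instruction
// whose successors were determined with assumed information is requeued so a
// later update can revisit the decision.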
2772 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
2773   ChangeStatus Change = ChangeStatus::UNCHANGED;
2774 
2775   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
2776                     << getAssociatedFunction()->size() << "] BBs and "
2777                     << ToBeExploredFrom.size() << " exploration points and "
2778                     << KnownDeadEnds.size() << " known dead ends\n");
2779 
2780   // Copy and clear the list of instructions we need to explore from. It is
2781   // refilled with instructions the next update has to look at.
2782   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
2783                                                ToBeExploredFrom.end());
2784   decltype(ToBeExploredFrom) NewToBeExploredFrom;
2785 
2786   SmallVector<const Instruction *, 8> AliveSuccessors;
2787   while (!Worklist.empty()) {
2788     const Instruction *I = Worklist.pop_back_val();
2789     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
2790 
2791     AliveSuccessors.clear();
2792 
2793     bool UsedAssumedInformation = false;
2794     switch (I->getOpcode()) {
2795     // TODO: look for (assumed) UB to backwards propagate "deadness".
2796     default:
2797       if (I->isTerminator()) {
2798         for (const BasicBlock *SuccBB : successors(I->getParent()))
2799           AliveSuccessors.push_back(&SuccBB->front());
2800       } else {
2801         AliveSuccessors.push_back(I->getNextNode());
2802       }
2803       break;
2804     case Instruction::Call:
2805       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
2806                                                        *this, AliveSuccessors);
2807       break;
2808     case Instruction::Invoke:
2809       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
2810                                                        *this, AliveSuccessors);
2811       break;
2812     case Instruction::Br:
2813       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
2814                                                        *this, AliveSuccessors);
2815       break;
2816     case Instruction::Switch:
2817       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
2818                                                        *this, AliveSuccessors);
2819       break;
2820     }
2821 
2822     if (UsedAssumedInformation) {
2823       NewToBeExploredFrom.insert(I);
2824     } else {
2825       Change = ChangeStatus::CHANGED;
2826       if (AliveSuccessors.empty() ||
2827           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
2828         KnownDeadEnds.insert(I);
2829     }
2830 
2831     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
2832                       << AliveSuccessors.size() << " UsedAssumedInformation: "
2833                       << UsedAssumedInformation << "\n");
2834 
2835     for (const Instruction *AliveSuccessor : AliveSuccessors) {
2836       if (!I->isTerminator()) {
2837         assert(AliveSuccessors.size() == 1 &&
2838                "Non-terminator expected to have a single successor!");
2839         Worklist.push_back(AliveSuccessor);
2840       } else {
2841         if (assumeLive(A, *AliveSuccessor->getParent()))
2842           Worklist.push_back(AliveSuccessor);
2843       }
2844     }
2845   }
2846 
2847   ToBeExploredFrom = std::move(NewToBeExploredFrom);
2848 
2849   // If we know everything is live there is no need to query for liveness.
2850   // Instead, indicating a pessimistic fixpoint will cause the state to be
2851   // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have discovered any non-trivial dead end, and (3) not have found any code
  // to be dead.
2855   if (ToBeExploredFrom.empty() &&
2856       getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
2857       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
2858         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
2859       }))
2860     return indicatePessimisticFixpoint();
2861   return Change;
2862 }
2863 
/// Liveness information for a call site.
2865 struct AAIsDeadCallSite final : AAIsDeadFunction {
2866   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
2867 
2868   /// See AbstractAttribute::initialize(...).
2869   void initialize(Attributor &A) override {
2870     // TODO: Once we have call site specific value information we can provide
2871     //       call site specific liveness information and then it makes
2872     //       sense to specialize attributes for call sites instead of
2873     //       redirecting requests to the callee.
2874     llvm_unreachable("Abstract attributes for liveness are not "
2875                      "supported for call sites yet!");
2876   }
2877 
2878   /// See AbstractAttribute::updateImpl(...).
2879   ChangeStatus updateImpl(Attributor &A) override {
2880     return indicatePessimisticFixpoint();
2881   }
2882 
2883   /// See AbstractAttribute::trackStatistics()
2884   void trackStatistics() const override {}
2885 };
2886 
2887 /// -------------------- Dereferenceable Argument Attribute --------------------
2888 
2889 template <>
2890 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
2891                                                      const DerefState &R) {
2892   ChangeStatus CS0 =
2893       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
2894   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
2895   return CS0 | CS1;
2896 }
2897 
2898 struct AADereferenceableImpl : AADereferenceable {
2899   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
2900   using StateType = DerefState;
2901 
2902   void initialize(Attributor &A) override {
2903     SmallVector<Attribute, 4> Attrs;
2904     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
2905              Attrs);
2906     for (const Attribute &Attr : Attrs)
2907       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
2908 
2909     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
2910 
2911     const IRPosition &IRP = this->getIRPosition();
2912     bool IsFnInterface = IRP.isFnInterfaceKind();
2913     const Function *FnScope = IRP.getAnchorScope();
2914     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
2915       indicatePessimisticFixpoint();
2916   }
2917 
2918   /// See AbstractAttribute::getState()
2919   /// {
2920   StateType &getState() override { return *this; }
2921   const StateType &getState() const override { return *this; }
2922   /// }
2923 
2924   /// See AAFromMustBeExecutedContext
2925   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
2926     bool IsNonNull = false;
2927     bool TrackUse = false;
2928     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
2929         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
2930     takeKnownDerefBytesMaximum(DerefBytes);
2931     return TrackUse;
2932   }
2933 
2934   void getDeducedAttributes(LLVMContext &Ctx,
2935                             SmallVectorImpl<Attribute> &Attrs) const override {
2936     // TODO: Add *_globally support
2937     if (isAssumedNonNull())
2938       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
2939           Ctx, getAssumedDereferenceableBytes()));
2940     else
2941       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
2942           Ctx, getAssumedDereferenceableBytes()));
2943   }
2944 
2945   /// See AbstractAttribute::getAsStr().
2946   const std::string getAsStr() const override {
2947     if (!getAssumedDereferenceableBytes())
2948       return "unknown-dereferenceable";
2949     return std::string("dereferenceable") +
2950            (isAssumedNonNull() ? "" : "_or_null") +
2951            (isAssumedGlobal() ? "_globally" : "") + "<" +
2952            std::to_string(getKnownDereferenceableBytes()) + "-" +
2953            std::to_string(getAssumedDereferenceableBytes()) + ">";
2954   }
2955 };
2956 
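// A simplified illustration of the offset handling below: stripping inbounds
// constant offsets transfers dereferenceability from the base, reduced by a
// positive offset, e.g.,
//
//   %gep = getelementptr inbounds i8, i8* %base, i64 4
//
// with dereferenceable(8) known for %base allows at most dereferenceable(4)
// to be assumed for %gep.
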
2957 /// Dereferenceable attribute for a floating value.
2958 struct AADereferenceableFloating
2959     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
2960   using Base =
2961       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
2962   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
2963 
2964   /// See AbstractAttribute::updateImpl(...).
2965   ChangeStatus updateImpl(Attributor &A) override {
2966     ChangeStatus Change = Base::updateImpl(A);
2967 
2968     const DataLayout &DL = A.getDataLayout();
2969 
2970     auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
2971       unsigned IdxWidth =
2972           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
2973       APInt Offset(IdxWidth, 0);
2974       const Value *Base =
2975           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
2976 
2977       const auto &AA =
2978           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
2979       int64_t DerefBytes = 0;
2980       if (!Stripped && this == &AA) {
2981         // Use IR information if we did not strip anything.
2982         // TODO: track globally.
2983         bool CanBeNull;
2984         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
2985         T.GlobalState.indicatePessimisticFixpoint();
2986       } else {
2987         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
2988         DerefBytes = DS.DerefBytesState.getAssumed();
2989         T.GlobalState &= DS.GlobalState;
2990       }
2991 
      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
2995       int64_t OffsetSExt = Offset.getSExtValue();
2996       if (OffsetSExt < 0)
2997         OffsetSExt = 0;
2998 
2999       T.takeAssumedDerefBytesMinimum(
3000           std::max(int64_t(0), DerefBytes - OffsetSExt));
3001 
3002       if (this == &AA) {
3003         if (!Stripped) {
3004           // If nothing was stripped IR information is all we got.
3005           T.takeKnownDerefBytesMaximum(
3006               std::max(int64_t(0), DerefBytes - OffsetSExt));
3007           T.indicatePessimisticFixpoint();
3008         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
3014           T.indicatePessimisticFixpoint();
3015         }
3016       }
3017 
3018       return T.isValidState();
3019     };
3020 
3021     DerefState T;
3022     if (!genericValueTraversal<AADereferenceable, DerefState>(
3023             A, getIRPosition(), *this, T, VisitValueCB))
3024       return indicatePessimisticFixpoint();
3025 
3026     return Change | clampStateAndIndicateChange(getState(), T);
3027   }
3028 
3029   /// See AbstractAttribute::trackStatistics()
3030   void trackStatistics() const override {
3031     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3032   }
3033 };
3034 
3035 /// Dereferenceable attribute for a return value.
3036 struct AADereferenceableReturned final
3037     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3038                                    DerefState> {
3039   AADereferenceableReturned(const IRPosition &IRP)
3040       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3041                                      DerefState>(IRP) {}
3042 
3043   /// See AbstractAttribute::trackStatistics()
3044   void trackStatistics() const override {
3045     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3046   }
3047 };
3048 
/// Dereferenceable attribute for an argument.
3050 struct AADereferenceableArgument final
3051     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3052           AADereferenceable, AADereferenceableImpl, DerefState> {
3053   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3054       AADereferenceable, AADereferenceableImpl, DerefState>;
3055   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3056 
3057   /// See AbstractAttribute::trackStatistics()
3058   void trackStatistics() const override {
3059     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3060   }
3061 };
3062 
3063 /// Dereferenceable attribute for a call site argument.
3064 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3065   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3066       : AADereferenceableFloating(IRP) {}
3067 
3068   /// See AbstractAttribute::trackStatistics()
3069   void trackStatistics() const override {
3070     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3071   }
3072 };
3073 
3074 /// Dereferenceable attribute deduction for a call site return value.
3075 struct AADereferenceableCallSiteReturned final
3076     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3077           AADereferenceable, AADereferenceableImpl> {
3078   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3079       AADereferenceable, AADereferenceableImpl>;
3080   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3081 
3082   /// See AbstractAttribute::trackStatistics()
3083   void trackStatistics() const override {
3084     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3085   }
3086 };
3087 
3088 // ------------------------ Align Argument Attribute ------------------------
3089 
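// A simplified illustration of the use-based deduction below: memory accesses
// through a pointer expose their alignment, e.g.,
//
//   store i32 0, i32* %p, align 8
//
// lets a known alignment of 8 be recorded for %p at this use.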
3090 static unsigned int getKnownAlignForUse(Attributor &A,
3091                                         AbstractAttribute &QueryingAA,
3092                                         Value &AssociatedValue, const Use *U,
3093                                         const Instruction *I, bool &TrackUse) {
3094   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3095     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3096       return 0;
3097 
3098     unsigned ArgNo = ICS.getArgumentNo(U);
3099     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3100     // As long as we only use known information there is no need to track
3101     // dependences here.
3102     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3103                                         /* TrackDependence */ false);
3104     return AlignAA.getKnownAlign();
3105   }
3106 
3107   // We need to follow common pointer manipulation uses to the accesses they
3108   // feed into.
  // TODO: Consider gep instructions.
3110   if (isa<CastInst>(I)) {
3111     TrackUse = true;
3112     return 0;
3113   }
3114 
3115   if (auto *SI = dyn_cast<StoreInst>(I))
3116     return SI->getAlignment();
3117   else if (auto *LI = dyn_cast<LoadInst>(I))
3118     return LI->getAlignment();
3119 
3120   return 0;
3121 }
3122 struct AAAlignImpl : AAAlign {
3123   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3124 
3125   /// See AbstractAttribute::initialize(...).
3126   void initialize(Attributor &A) override {
3127     SmallVector<Attribute, 4> Attrs;
3128     getAttrs({Attribute::Alignment}, Attrs);
3129     for (const Attribute &Attr : Attrs)
3130       takeKnownMaximum(Attr.getValueAsInt());
3131 
3132     if (getIRPosition().isFnInterfaceKind() &&
3133         (!getAssociatedFunction() ||
3134          !getAssociatedFunction()->hasExactDefinition()))
3135       indicatePessimisticFixpoint();
3136   }
3137 
3138   /// See AbstractAttribute::manifest(...).
3139   ChangeStatus manifest(Attributor &A) override {
3140     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3141 
3142     // Check for users that allow alignment annotations.
3143     Value &AnchorVal = getIRPosition().getAnchorValue();
3144     for (const Use &U : AnchorVal.uses()) {
3145       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3146         if (SI->getPointerOperand() == &AnchorVal)
3147           if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
3150             SI->setAlignment(Align(getAssumedAlign()));
3151             Changed = ChangeStatus::CHANGED;
3152           }
3153       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3154         if (LI->getPointerOperand() == &AnchorVal)
3155           if (LI->getAlignment() < getAssumedAlign()) {
3156             LI->setAlignment(Align(getAssumedAlign()));
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
3159             Changed = ChangeStatus::CHANGED;
3160           }
3161       }
3162     }
3163 
3164     return AAAlign::manifest(A) | Changed;
3165   }
3166 
  // TODO: Provide a helper to determine the implied ABI alignment and check
  //       that value in the existing manifest method and a new one for
  //       AAAlignImpl to avoid making the alignment explicit if it did not
  //       improve.
3170 
3171   /// See AbstractAttribute::getDeducedAttributes
3172   virtual void
3173   getDeducedAttributes(LLVMContext &Ctx,
3174                        SmallVectorImpl<Attribute> &Attrs) const override {
3175     if (getAssumedAlign() > 1)
3176       Attrs.emplace_back(
3177           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3178   }
3179   /// See AAFromMustBeExecutedContext
3180   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3181     bool TrackUse = false;
3182 
    unsigned int KnownAlign =
        getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3184     takeKnownMaximum(KnownAlign);
3185 
3186     return TrackUse;
3187   }
3188 
3189   /// See AbstractAttribute::getAsStr().
3190   const std::string getAsStr() const override {
3191     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3192                                 "-" + std::to_string(getAssumedAlign()) + ">")
3193                              : "unknown-align";
3194   }
3195 };
3196 
3197 /// Align attribute for a floating value.
3198 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3199   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3200   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3201 
3202   /// See AbstractAttribute::updateImpl(...).
3203   ChangeStatus updateImpl(Attributor &A) override {
3204     Base::updateImpl(A);
3205 
3206     const DataLayout &DL = A.getDataLayout();
3207 
3208     auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3209                             bool Stripped) -> bool {
3210       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3211       if (!Stripped && this == &AA) {
3212         // Use only IR information if we did not strip anything.
3213         const MaybeAlign PA = V.getPointerAlignment(DL);
3214         T.takeKnownMaximum(PA ? PA->value() : 0);
3215         T.indicatePessimisticFixpoint();
3216       } else {
3217         // Use abstract attribute information.
3218         const AAAlign::StateType &DS =
3219             static_cast<const AAAlign::StateType &>(AA.getState());
3220         T ^= DS;
3221       }
3222       return T.isValidState();
3223     };
3224 
3225     StateType T;
3226     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3227                                                    VisitValueCB))
3228       return indicatePessimisticFixpoint();
3229 
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
3232     return clampStateAndIndicateChange(getState(), T);
3233   }
3234 
3235   /// See AbstractAttribute::trackStatistics()
3236   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3237 };
3238 
3239 /// Align attribute for function return value.
3240 struct AAAlignReturned final
3241     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3242   AAAlignReturned(const IRPosition &IRP)
3243       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3244 
3245   /// See AbstractAttribute::trackStatistics()
3246   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3247 };
3248 
3249 /// Align attribute for function argument.
3250 struct AAAlignArgument final
3251     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3252                                                               AAAlignImpl> {
3253   AAAlignArgument(const IRPosition &IRP)
3254       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3255                                                                 AAAlignImpl>(
3256             IRP) {}
3257 
3258   /// See AbstractAttribute::trackStatistics()
3259   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3260 };
3261 
3262 struct AAAlignCallSiteArgument final : AAAlignFloating {
3263   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3264 
3265   /// See AbstractAttribute::manifest(...).
3266   ChangeStatus manifest(Attributor &A) override {
3267     return AAAlignImpl::manifest(A);
3268   }
3269 
3270   /// See AbstractAttribute::trackStatistics()
3271   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3272 };
3273 
3274 /// Align attribute deduction for a call site return value.
3275 struct AAAlignCallSiteReturned final
3276     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3277                                                              AAAlignImpl> {
3278   using Base =
3279       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3280                                                              AAAlignImpl>;
3281   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3282 
3283   /// See AbstractAttribute::initialize(...).
3284   void initialize(Attributor &A) override {
3285     Base::initialize(A);
3286     Function *F = getAssociatedFunction();
3287     if (!F)
3288       indicatePessimisticFixpoint();
3289   }
3290 
3291   /// See AbstractAttribute::trackStatistics()
3292   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3293 };
3294 
3295 /// ------------------ Function No-Return Attribute ----------------------------
3296 struct AANoReturnImpl : public AANoReturn {
3297   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3298 
3299   /// See AbstractAttribute::initialize(...).
3300   void initialize(Attributor &A) override {
3301     AANoReturn::initialize(A);
3302     Function *F = getAssociatedFunction();
3303     if (!F)
3304       indicatePessimisticFixpoint();
3305   }
3306 
3307   /// See AbstractAttribute::getAsStr().
3308   const std::string getAsStr() const override {
3309     return getAssumed() ? "noreturn" : "may-return";
3310   }
3311 
3312   /// See AbstractAttribute::updateImpl(Attributor &A).
3313   virtual ChangeStatus updateImpl(Attributor &A) override {
3314     auto CheckForNoReturn = [](Instruction &) { return false; };
3315     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3316                                    {(unsigned)Instruction::Ret}))
3317       return indicatePessimisticFixpoint();
3318     return ChangeStatus::UNCHANGED;
3319   }
3320 };
3321 
3322 struct AANoReturnFunction final : AANoReturnImpl {
3323   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3324 
3325   /// See AbstractAttribute::trackStatistics()
3326   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3327 };
3328 
/// NoReturn attribute deduction for a call site.
3330 struct AANoReturnCallSite final : AANoReturnImpl {
3331   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3332 
3333   /// See AbstractAttribute::updateImpl(...).
3334   ChangeStatus updateImpl(Attributor &A) override {
3335     // TODO: Once we have call site specific value information we can provide
3336     //       call site specific liveness information and then it makes
3337     //       sense to specialize attributes for call sites arguments instead of
3338     //       redirecting requests to the callee argument.
3339     Function *F = getAssociatedFunction();
3340     const IRPosition &FnPos = IRPosition::function(*F);
3341     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3342     return clampStateAndIndicateChange(
3343         getState(),
3344         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3345   }
3346 
3347   /// See AbstractAttribute::trackStatistics()
3348   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3349 };
3350 
3351 /// ----------------------- Variable Capturing ---------------------------------
3352 
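// A simplified illustration of the distinction tracked here: a pointer that is
// only returned escapes neither into memory nor into an integer but is
// communicated back to the caller, e.g., %p in
//
//   define i8* @id(i8* %p) { ret i8* %p }
//
// can be assumed "no-capture-maybe-returned" but not "no-capture".
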
/// A class to hold the state for no-capture attributes.
3354 struct AANoCaptureImpl : public AANoCapture {
3355   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3356 
3357   /// See AbstractAttribute::initialize(...).
3358   void initialize(Attributor &A) override {
3359     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3360       indicateOptimisticFixpoint();
3361       return;
3362     }
3363     Function *AnchorScope = getAnchorScope();
3364     if (isFnInterfaceKind() &&
3365         (!AnchorScope || !AnchorScope->hasExactDefinition())) {
3366       indicatePessimisticFixpoint();
3367       return;
3368     }
3369 
3370     // You cannot "capture" null in the default address space.
3371     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3372         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3373       indicateOptimisticFixpoint();
3374       return;
3375     }
3376 
3377     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3378 
3379     // Check what state the associated function can actually capture.
3380     if (F)
3381       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3382     else
3383       indicatePessimisticFixpoint();
3384   }
3385 
3386   /// See AbstractAttribute::updateImpl(...).
3387   ChangeStatus updateImpl(Attributor &A) override;
3388 
  /// See AbstractAttribute::getDeducedAttributes(...).
3390   virtual void
3391   getDeducedAttributes(LLVMContext &Ctx,
3392                        SmallVectorImpl<Attribute> &Attrs) const override {
3393     if (!isAssumedNoCaptureMaybeReturned())
3394       return;
3395 
3396     if (getArgNo() >= 0) {
3397       if (isAssumedNoCapture())
3398         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3399       else if (ManifestInternal)
3400         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3401     }
3402   }
3403 
  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
  /// depending on the ability of the function associated with \p IRP to
  /// capture state in memory and through "returning/throwing", respectively.
3407   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3408                                                    const Function &F,
3409                                                    BitIntegerState &State) {
3410     // TODO: Once we have memory behavior attributes we should use them here.
3411 
3412     // If we know we cannot communicate or write to memory, we do not care about
3413     // ptr2int anymore.
3414     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3415         F.getReturnType()->isVoidTy()) {
3416       State.addKnownBits(NO_CAPTURE);
3417       return;
3418     }
3419 
    // A function cannot capture state in memory if it only reads memory; it
    // can however return/throw state and that state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
3423     if (F.onlyReadsMemory())
3424       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3425 
    // A function cannot communicate state back if it does not throw exceptions
    // and does not return values.
3428     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3429       State.addKnownBits(NOT_CAPTURED_IN_RET);
3430 
3431     // Check existing "returned" attributes.
3432     int ArgNo = IRP.getArgNo();
3433     if (F.doesNotThrow() && ArgNo >= 0) {
3434       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3435         if (F.hasParamAttribute(u, Attribute::Returned)) {
3436           if (u == unsigned(ArgNo))
3437             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3438           else if (F.onlyReadsMemory())
3439             State.addKnownBits(NO_CAPTURE);
3440           else
3441             State.addKnownBits(NOT_CAPTURED_IN_RET);
3442           break;
3443         }
3444     }
3445   }
3446 
3447   /// See AbstractState::getAsStr().
3448   const std::string getAsStr() const override {
3449     if (isKnownNoCapture())
3450       return "known not-captured";
3451     if (isAssumedNoCapture())
3452       return "assumed not-captured";
3453     if (isKnownNoCaptureMaybeReturned())
3454       return "known not-captured-maybe-returned";
3455     if (isAssumedNoCaptureMaybeReturned())
3456       return "assumed not-captured-maybe-returned";
3457     return "assumed-captured";
3458   }
3459 };
3460 
3461 /// Attributor-aware capture tracker.
3462 struct AACaptureUseTracker final : public CaptureTracker {
3463 
3464   /// Create a capture tracker that can lookup in-flight abstract attributes
3465   /// through the Attributor \p A.
3466   ///
3467   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3468   /// search is stopped. If a use leads to a return instruction,
3469   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3470   /// If a use leads to a ptr2int which may capture the value,
3471   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3472   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3473   /// set. All values in \p PotentialCopies are later tracked as well. For every
3474   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3475   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3476   /// conservatively set to true.
3477   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3478                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3479                       SmallVectorImpl<const Value *> &PotentialCopies,
3480                       unsigned &RemainingUsesToExplore)
3481       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3482         PotentialCopies(PotentialCopies),
3483         RemainingUsesToExplore(RemainingUsesToExplore) {}
3484 
  /// Determine if \p V may be captured. *Also updates the state!*
3486   bool valueMayBeCaptured(const Value *V) {
3487     if (V->getType()->isPointerTy()) {
3488       PointerMayBeCaptured(V, this);
3489     } else {
3490       State.indicatePessimisticFixpoint();
3491     }
3492     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3493   }
3494 
3495   /// See CaptureTracker::tooManyUses().
3496   void tooManyUses() override {
3497     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3498   }
3499 
3500   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3501     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3502       return true;
3503     const auto &DerefAA =
3504         A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
3505     return DerefAA.getAssumedDereferenceableBytes();
3506   }
3507 
3508   /// See CaptureTracker::captured(...).
3509   bool captured(const Use *U) override {
3510     Instruction *UInst = cast<Instruction>(U->getUser());
3511     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3512                       << "\n");
3513 
3514     // Because we may reuse the tracker multiple times we keep track of the
3515     // number of explored uses ourselves as well.
3516     if (RemainingUsesToExplore-- == 0) {
3517       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3518       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3519                           /* Return */ true);
3520     }
3521 
3522     // Deal with ptr2int by following uses.
3523     if (isa<PtrToIntInst>(UInst)) {
3524       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3525       return valueMayBeCaptured(UInst);
3526     }
3527 
3528     // Explicitly catch return instructions.
3529     if (isa<ReturnInst>(UInst))
3530       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3531                           /* Return */ true);
3532 
3533     // For now we only use special logic for call sites. However, the tracker
3534     // itself knows about a lot of other non-capturing cases already.
3535     CallSite CS(UInst);
3536     if (!CS || !CS.isArgOperand(U))
3537       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3538                           /* Return */ true);
3539 
3540     unsigned ArgNo = CS.getArgumentNo(U);
3541     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
3544     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3545     if (ArgNoCaptureAA.isAssumedNoCapture())
3546       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3547                           /* Return */ false);
3548     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3549       addPotentialCopy(CS);
3550       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3551                           /* Return */ false);
3552     }
3553 
    // Lastly, we could not find a reason why no-capture can be assumed, so we
    // do not assume it.
3555     return isCapturedIn(/* Memory */ true, /* Integer */ true,
3556                         /* Return */ true);
3557   }
3558 
3559   /// Register \p CS as potential copy of the value we are checking.
3560   void addPotentialCopy(CallSite CS) {
3561     PotentialCopies.push_back(CS.getInstruction());
3562   }
3563 
3564   /// See CaptureTracker::shouldExplore(...).
3565   bool shouldExplore(const Use *U) override {
3566     // Check liveness.
3567     return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
3568   }
3569 
3570   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
3571   /// \p CapturedInRet, then return the appropriate value for use in the
3572   /// CaptureTracker::captured() interface.
3573   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
3574                     bool CapturedInRet) {
3575     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
3576                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
3577     if (CapturedInMem)
3578       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
3579     if (CapturedInInt)
3580       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
3581     if (CapturedInRet)
3582       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
3583     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3584   }
3585 
3586 private:
3587   /// The attributor providing in-flight abstract attributes.
3588   Attributor &A;
3589 
3590   /// The abstract attribute currently updated.
3591   AANoCapture &NoCaptureAA;
3592 
3593   /// The abstract liveness state.
3594   const AAIsDead &IsDeadAA;
3595 
3596   /// The state currently updated.
3597   AANoCapture::StateType &State;
3598 
3599   /// Set of potential copies of the tracked value.
3600   SmallVectorImpl<const Value *> &PotentialCopies;
3601 
3602   /// Global counter to limit the number of explored uses.
3603   unsigned &RemainingUsesToExplore;
3604 };
3605 
3606 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
3607   const IRPosition &IRP = getIRPosition();
3608   const Value *V =
3609       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
3610   if (!V)
3611     return indicatePessimisticFixpoint();
3612 
3613   const Function *F =
3614       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
3615   assert(F && "Expected a function!");
3616   const IRPosition &FnPos = IRPosition::function(*F);
3617   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos);
3618 
3619   AANoCapture::StateType T;
3620 
3621   // Readonly means we cannot capture through memory.
3622   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3623   if (FnMemAA.isAssumedReadOnly()) {
3624     T.addKnownBits(NOT_CAPTURED_IN_MEM);
3625     if (FnMemAA.isKnownReadOnly())
3626       addKnownBits(NOT_CAPTURED_IN_MEM);
3627   }
3628 
  // Make sure all returned values are different from the underlying value.
3630   // TODO: we could do this in a more sophisticated way inside
3631   //       AAReturnedValues, e.g., track all values that escape through returns
3632   //       directly somehow.
3633   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
3634     bool SeenConstant = false;
3635     for (auto &It : RVAA.returned_values()) {
3636       if (isa<Constant>(It.first)) {
3637         if (SeenConstant)
3638           return false;
3639         SeenConstant = true;
3640       } else if (!isa<Argument>(It.first) ||
3641                  It.first == getAssociatedArgument())
3642         return false;
3643     }
3644     return true;
3645   };
3646 
3647   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, FnPos);
3648   if (NoUnwindAA.isAssumedNoUnwind()) {
3649     bool IsVoidTy = F->getReturnType()->isVoidTy();
3650     const AAReturnedValues *RVAA =
3651         IsVoidTy ? nullptr : &A.getAAFor<AAReturnedValues>(*this, FnPos);
3652     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
3653       T.addKnownBits(NOT_CAPTURED_IN_RET);
3654       if (T.isKnown(NOT_CAPTURED_IN_MEM))
3655         return ChangeStatus::UNCHANGED;
3656       if (NoUnwindAA.isKnownNoUnwind() &&
3657           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
3658         addKnownBits(NOT_CAPTURED_IN_RET);
3659         if (isKnown(NOT_CAPTURED_IN_MEM))
3660           return indicateOptimisticFixpoint();
3661       }
3662     }
3663   }
3664 
3665   // Use the CaptureTracker interface and logic with the specialized tracker,
3666   // defined in AACaptureUseTracker, that can look at in-flight abstract
3667   // attributes and directly updates the assumed state.
3668   SmallVector<const Value *, 4> PotentialCopies;
3669   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
3670   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
3671                               RemainingUsesToExplore);
3672 
3673   // Check all potential copies of the associated value until we can assume
3674   // none will be captured or we have to assume at least one might be.
3675   unsigned Idx = 0;
3676   PotentialCopies.push_back(V);
3677   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
3678     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
3679 
3680   AANoCapture::StateType &S = getState();
3681   auto Assumed = S.getAssumed();
3682   S.intersectAssumedBits(T.getAssumed());
3683   if (!isAssumedNoCaptureMaybeReturned())
3684     return indicatePessimisticFixpoint();
3685   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
3686                                    : ChangeStatus::CHANGED;
3687 }
3688 
3689 /// NoCapture attribute for function arguments.
3690 struct AANoCaptureArgument final : AANoCaptureImpl {
3691   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3692 
3693   /// See AbstractAttribute::trackStatistics()
3694   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
3695 };
3696 
3697 /// NoCapture attribute for call site arguments.
3698 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
3699   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3700 
3701   /// See AbstractAttribute::updateImpl(...).
3702   ChangeStatus updateImpl(Attributor &A) override {
3703     // TODO: Once we have call site specific value information we can provide
3704     //       call site specific liveness information and then it makes
3705     //       sense to specialize attributes for call sites arguments instead of
3706     //       redirecting requests to the callee argument.
3707     Argument *Arg = getAssociatedArgument();
3708     if (!Arg)
3709       return indicatePessimisticFixpoint();
3710     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3711     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
3712     return clampStateAndIndicateChange(
3713         getState(),
3714         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3715   }
3716 
3717   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
3719 };
3720 
3721 /// NoCapture attribute for floating values.
3722 struct AANoCaptureFloating final : AANoCaptureImpl {
3723   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3724 
3725   /// See AbstractAttribute::trackStatistics()
3726   void trackStatistics() const override {
3727     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
3728   }
3729 };
3730 
3731 /// NoCapture attribute for function return value.
3732 struct AANoCaptureReturned final : AANoCaptureImpl {
3733   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
3734     llvm_unreachable("NoCapture is not applicable to function returns!");
3735   }
3736 
3737   /// See AbstractAttribute::initialize(...).
3738   void initialize(Attributor &A) override {
3739     llvm_unreachable("NoCapture is not applicable to function returns!");
3740   }
3741 
3742   /// See AbstractAttribute::updateImpl(...).
3743   ChangeStatus updateImpl(Attributor &A) override {
3744     llvm_unreachable("NoCapture is not applicable to function returns!");
3745   }
3746 
3747   /// See AbstractAttribute::trackStatistics()
3748   void trackStatistics() const override {}
3749 };
3750 
3751 /// NoCapture attribute deduction for a call site return value.
3752 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
3753   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3754 
3755   /// See AbstractAttribute::trackStatistics()
3756   void trackStatistics() const override {
3757     STATS_DECLTRACK_CSRET_ATTR(nocapture)
3758   }
3759 };
3760 
3761 /// ------------------ Value Simplify Attribute ----------------------------
3762 struct AAValueSimplifyImpl : AAValueSimplify {
3763   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
3764 
3765   /// See AbstractAttribute::getAsStr().
3766   const std::string getAsStr() const override {
3767     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
3768                         : "not-simple";
3769   }
3770 
3771   /// See AbstractAttribute::trackStatistics()
3772   void trackStatistics() const override {}
3773 
3774   /// See AAValueSimplify::getAssumedSimplifiedValue()
3775   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
3776     if (!getAssumed())
3777       return const_cast<Value *>(&getAssociatedValue());
3778     return SimplifiedAssociatedValue;
3779   }
3780   void initialize(Attributor &A) override {}
3781 
  /// Helper function for querying AAValueSimplify and updating candidate.
3783   /// \param QueryingValue Value trying to unify with SimplifiedValue
3784   /// \param AccumulatedSimplifiedValue Current simplification result.
3785   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
3786                              Value &QueryingValue,
3787                              Optional<Value *> &AccumulatedSimplifiedValue) {
    // FIXME: Add typecast support.
3789 
3790     auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
3791         QueryingAA, IRPosition::value(QueryingValue));
3792 
3793     Optional<Value *> QueryingValueSimplified =
3794         ValueSimpifyAA.getAssumedSimplifiedValue(A);
3795 
3796     if (!QueryingValueSimplified.hasValue())
3797       return true;
3798 
3799     if (!QueryingValueSimplified.getValue())
3800       return false;
3801 
3802     Value &QueryingValueSimplifiedUnwrapped =
3803         *QueryingValueSimplified.getValue();
3804 
3805     if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
3806       return true;
3807 
3808     if (AccumulatedSimplifiedValue.hasValue())
3809       return AccumulatedSimplifiedValue == QueryingValueSimplified;
3810 
3811     LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
3812                       << " is assumed to be "
3813                       << QueryingValueSimplifiedUnwrapped << "\n");
3814 
3815     AccumulatedSimplifiedValue = QueryingValueSimplified;
3816     return true;
3817   }
3818 
3819   /// See AbstractAttribute::manifest(...).
3820   ChangeStatus manifest(Attributor &A) override {
3821     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3822 
3823     if (!SimplifiedAssociatedValue.hasValue() ||
3824         !SimplifiedAssociatedValue.getValue())
3825       return Changed;
3826 
3827     if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
3828       // We can replace the AssociatedValue with the constant.
3829       Value &V = getAssociatedValue();
3830       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
3831         LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
3832                           << "\n");
3833         V.replaceAllUsesWith(C);
3834         Changed = ChangeStatus::CHANGED;
3835       }
3836     }
3837 
3838     return Changed | AAValueSimplify::manifest(A);
3839   }
3840 
3841 protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. In the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // the original associated value.
3846   Optional<Value *> SimplifiedAssociatedValue;
3847 };
3848 
3849 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
3850   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3851 
3852   void initialize(Attributor &A) override {
3853     AAValueSimplifyImpl::initialize(A);
3854     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
3855       indicatePessimisticFixpoint();
3856     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
3857                 /* IgnoreSubsumingPositions */ true))
3858       indicatePessimisticFixpoint();
3859   }
3860 
3861   /// See AbstractAttribute::updateImpl(...).
3862   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly, otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
3865     Argument *Arg = getAssociatedArgument();
3866     if (Arg->hasByValAttr()) {
3867       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
3868       if (!MemAA.isAssumedReadOnly())
3869         return indicatePessimisticFixpoint();
3870     }
3871 
3872     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3873 
3874     auto PredForCallSite = [&](AbstractCallSite ACS) {
3875       // Check if we have an associated argument or not (which can happen for
3876       // callback calls).
3877       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
3878       if (!ArgOp)
        return false;
3880       // We can only propagate thread independent values through callbacks.
3881       // This is different to direct/indirect call sites because for them we
3882       // know the thread executing the caller and callee is the same. For
3883       // callbacks this is not guaranteed, thus a thread dependent value could
3884       // be different for the caller and callee, making it invalid to propagate.
3885       if (ACS.isCallbackCall())
        if (auto *C = dyn_cast<Constant>(ArgOp))
3887           if (C->isThreadDependent())
3888             return false;
3889       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
3890     };
3891 
3892     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
3893       return indicatePessimisticFixpoint();
3894 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
3899   }
3900 
3901   /// See AbstractAttribute::trackStatistics()
3902   void trackStatistics() const override {
3903     STATS_DECLTRACK_ARG_ATTR(value_simplify)
3904   }
3905 };
3906 
3907 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
3908   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3909 
3910   /// See AbstractAttribute::updateImpl(...).
3911   ChangeStatus updateImpl(Attributor &A) override {
3912     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3913 
3914     auto PredForReturned = [&](Value &V) {
3915       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3916     };
3917 
3918     if (!A.checkForAllReturnedValues(PredForReturned, *this))
3919       return indicatePessimisticFixpoint();
3920 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
3925   }
3926   /// See AbstractAttribute::trackStatistics()
3927   void trackStatistics() const override {
3928     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
3929   }
3930 };
3931 
3932 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
3933   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3934 
3935   /// See AbstractAttribute::initialize(...).
3936   void initialize(Attributor &A) override {
3937     Value &V = getAnchorValue();
3938 
    // TODO: Add other cases.
3940     if (isa<Constant>(V) || isa<UndefValue>(V))
3941       indicatePessimisticFixpoint();
3942   }
3943 
3944   /// See AbstractAttribute::updateImpl(...).
3945   ChangeStatus updateImpl(Attributor &A) override {
3946     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3947 
3948     auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
3949       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
3950       if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.
3952         LLVM_DEBUG(
3953             dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
3954                    << V << "\n");
3955         indicatePessimisticFixpoint();
3956         return false;
3957       }
3958       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3959     };
3960 
3961     if (!genericValueTraversal<AAValueSimplify, BooleanState>(
3962             A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
3963             VisitValueCB))
3964       return indicatePessimisticFixpoint();
3965 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
3971   }
3972 
3973   /// See AbstractAttribute::trackStatistics()
3974   void trackStatistics() const override {
3975     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
3976   }
3977 };
3978 
3979 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
3980   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3981 
3982   /// See AbstractAttribute::initialize(...).
3983   void initialize(Attributor &A) override {
3984     SimplifiedAssociatedValue = &getAnchorValue();
3985     indicateOptimisticFixpoint();
3986   }
  /// See AbstractAttribute::updateImpl(...).
3988   ChangeStatus updateImpl(Attributor &A) override {
3989     llvm_unreachable(
3990         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
3991   }
3992   /// See AbstractAttribute::trackStatistics()
3993   void trackStatistics() const override {
3994     STATS_DECLTRACK_FN_ATTR(value_simplify)
3995   }
3996 };
3997 
3998 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
3999   AAValueSimplifyCallSite(const IRPosition &IRP)
4000       : AAValueSimplifyFunction(IRP) {}
4001   /// See AbstractAttribute::trackStatistics()
4002   void trackStatistics() const override {
4003     STATS_DECLTRACK_CS_ATTR(value_simplify)
4004   }
4005 };
4006 
4007 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4008   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4009       : AAValueSimplifyReturned(IRP) {}
4010 
4011   void trackStatistics() const override {
4012     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4013   }
4014 };
4015 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4016   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4017       : AAValueSimplifyFloating(IRP) {}
4018 
4019   void trackStatistics() const override {
4020     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4021   }
4022 };
4023 
4024 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4025 struct AAHeapToStackImpl : public AAHeapToStack {
4026   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4027 
4028   const std::string getAsStr() const override {
4029     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4030   }
4031 
4032   ChangeStatus manifest(Attributor &A) override {
4033     assert(getState().isValidState() &&
4034            "Attempted to manifest an invalid state!");
4035 
4036     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4037     Function *F = getAssociatedFunction();
4038     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4039 
4040     for (Instruction *MallocCall : MallocCalls) {
4041       // This malloc cannot be replaced.
4042       if (BadMallocCalls.count(MallocCall))
4043         continue;
4044 
4045       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4046         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4047         A.deleteAfterManifest(*FreeCall);
4048         HasChanged = ChangeStatus::CHANGED;
4049       }
4050 
4051       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4052                         << "\n");
4053 
4054       Constant *Size;
4055       if (isCallocLikeFn(MallocCall, TLI)) {
4056         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4057         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4058         APInt TotalSize = SizeT->getValue() * Num->getValue();
4059         Size =
4060             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4061       } else {
4062         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4063       }
4064 
4065       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4066       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4067                                        Size, "", MallocCall->getNextNode());
4068 
4069       if (AI->getType() != MallocCall->getType())
4070         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4071                              AI->getNextNode());
4072 
4073       MallocCall->replaceAllUsesWith(AI);
4074 
4075       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4076         // An invoke is a terminator, so branch to the normal destination
4077         // before the call is deleted below.
4078         BranchInst::Create(II->getNormalDest(), MallocCall->getParent());
4079       }
4080       A.deleteAfterManifest(*MallocCall);
4082 
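           // For calloc-like calls the heap memory was zero-initialized, so we
           // have to emit an equivalent memset for the new alloca;
           // schematically:
           //   call void @llvm.memset.p0i8.i64(i8* %calloc_bc, i8 0,
           //                                   i64 %size, i1 false)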
4083       if (isCallocLikeFn(MallocCall, TLI)) {
4084         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4085                                    AI->getNextNode());
4086         Value *Ops[] = {
4087             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4088             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4089 
4090         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4091         Module *M = F->getParent();
4092         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4093         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4094       }
4095       HasChanged = ChangeStatus::CHANGED;
4096     }
4097 
4098     return HasChanged;
4099   }
4100 
4101   /// Collection of all malloc calls in a function.
4102   SmallSetVector<Instruction *, 4> MallocCalls;
4103 
4104   /// Collection of malloc calls that cannot be converted.
4105   DenseSet<const Instruction *> BadMallocCalls;
4106 
4107   /// A map for each malloc call to the set of associated free calls.
4108   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4109 
4110   ChangeStatus updateImpl(Attributor &A) override;
4111 };
4112 
4113 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4114   const Function *F = getAssociatedFunction();
4115   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4116 
4117   MustBeExecutedContextExplorer &Explorer =
4118       A.getInfoCache().getMustBeExecutedContextExplorer();
4119 
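       // A malloc can also be converted if its unique free call is guaranteed
       // to execute whenever the malloc does, i.e., the free is found in the
       // must-be-executed context that follows the allocation.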
4120   auto FreeCheck = [&](Instruction &I) {
4121     const auto &Frees = FreesForMalloc.lookup(&I);
4122     if (Frees.size() != 1)
4123       return false;
4124     Instruction *UniqueFree = *Frees.begin();
4125     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4126   };
4127 
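       // Check that the allocated pointer cannot escape through its
       // (transitive) uses: only loads, stores into the memory, lifetime
       // markers, frees, known nocapture+nofree call arguments, and
       // pointer-deriving instructions (GEPs, casts, PHIs, selects) are
       // tolerated.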
4128   auto UsesCheck = [&](Instruction &I) {
4129     bool ValidUsesOnly = true;
4130     bool MustUse = true;
4131 
4132     SmallPtrSet<const Use *, 8> Visited;
4133     SmallVector<const Use *, 8> Worklist;
4134 
4135     for (Use &U : I.uses())
4136       Worklist.push_back(&U);
4137 
4138     while (!Worklist.empty()) {
4139       const Use *U = Worklist.pop_back_val();
4140       if (!Visited.insert(U).second)
4141         continue;
4142 
4143       auto *UserI = U->getUser();
4144 
4145       if (isa<LoadInst>(UserI))
4146         continue;
4147       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4148         if (SI->getValueOperand() == U->get()) {
4149           LLVM_DEBUG(dbgs()
4150                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4151           ValidUsesOnly = false;
4152         } else {
4153           // A store into the malloc'ed memory is fine.
4154         }
4155         continue;
4156       }
4157 
4158       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4159         if (!CB->isArgOperand(U))
4160           continue;
4161 
4162         if (CB->isLifetimeStartOrEnd())
4163           continue;
4164 
4165         // Record the free call for this malloc.
4166         if (isFreeCall(UserI, TLI)) {
4167           if (MustUse) {
4168             FreesForMalloc[&I].insert(
4169                 cast<Instruction>(const_cast<User *>(UserI)));
4170           } else {
4171             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4172                               << *UserI << "\n");
4173             ValidUsesOnly = false;
4174           }
4175           continue;
4176         }
4177 
4178         unsigned ArgNo = CB->getArgOperandNo(U);
4179 
4180         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4181             *this, IRPosition::callsite_argument(*CB, ArgNo));
4182 
4183         // If a callsite argument use is nofree, we are fine.
4184         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4185             *this, IRPosition::callsite_argument(*CB, ArgNo));
4186 
4187         if (!NoCaptureAA.isAssumedNoCapture() || !ArgNoFreeAA.isAssumedNoFree()) {
4188           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4189           ValidUsesOnly = false;
4190         }
4191         continue;
4192       }
4193 
4194       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4195           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
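             // Through a PHI or select the pointer might only sometimes refer
             // to the malloc'ed memory, so a free reached through it is not
             // known to free this allocation; clear MustUse in that case.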
4196         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4197         for (Use &U : UserI->uses())
4198           Worklist.push_back(&U);
4199         continue;
4200       }
4201 
4202       // Unknown user for which we cannot track uses further (in a way that
4203       // makes sense).
4204       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4205       ValidUsesOnly = false;
4206     }
4207     return ValidUsesOnly;
4208   };
4209 
4210   auto MallocCallocCheck = [&](Instruction &I) {
4211     if (BadMallocCalls.count(&I))
4212       return true;
4213 
4214     bool IsMalloc = isMallocLikeFn(&I, TLI);
4215     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4216     if (!IsMalloc && !IsCalloc) {
4217       BadMallocCalls.insert(&I);
4218       return true;
4219     }
4220 
4221     if (IsMalloc) {
4222       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4223         if (Size->getValue().sle(MaxHeapToStackSize))
4224           if (UsesCheck(I) || FreeCheck(I)) {
4225             MallocCalls.insert(&I);
4226             return true;
4227           }
4228     } else if (IsCalloc) {
4229       bool Overflow = false;
4230       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4231         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4232           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4233                   .sle(MaxHeapToStackSize))
4234             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4235               MallocCalls.insert(&I);
4236               return true;
4237             }
4238     }
4239 
4240     BadMallocCalls.insert(&I);
4241     return true;
4242   };
4243 
4244   size_t NumBadMallocs = BadMallocCalls.size();
4245 
4246   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4247 
4248   if (NumBadMallocs != BadMallocCalls.size())
4249     return ChangeStatus::CHANGED;
4250 
4251   return ChangeStatus::UNCHANGED;
4252 }
4253 
4254 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4255   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4256 
4257   /// See AbstractAttribute::trackStatistics()
4258   void trackStatistics() const override {
4259     STATS_DECL(MallocCalls, Function,
4260                "Number of malloc calls converted to allocas");
4261     for (auto *C : MallocCalls)
4262       if (!BadMallocCalls.count(C))
4263         ++BUILD_STAT_NAME(MallocCalls, Function);
4264   }
4265 };
4266 
4267 /// -------------------- Memory Behavior Attributes ----------------------------
4268 /// Includes read-none, read-only, and write-only.
4269 /// ----------------------------------------------------------------------------
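     ///
     /// The state is a bit lattice; assuming the encoding declared in
     /// AAMemoryBehavior, NO_READS and NO_WRITES are individual bits and
     /// "readnone" corresponds to both being set (NO_ACCESSES). Deduction
     /// starts from the optimistic NO_ACCESSES state and removes bits as reads
     /// or writes are observed.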
4270 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
4271   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
4272 
4273   /// See AbstractAttribute::initialize(...).
4274   void initialize(Attributor &A) override {
4275     intersectAssumedBits(BEST_STATE);
4276     getKnownStateFromValue(getIRPosition(), getState());
4277     IRAttribute::initialize(A);
4278   }
4279 
4280   /// Return the memory behavior information encoded in the IR for \p IRP.
4281   static void getKnownStateFromValue(const IRPosition &IRP,
4282                                      BitIntegerState &State) {
4283     SmallVector<Attribute, 2> Attrs;
4284     IRP.getAttrs(AttrKinds, Attrs);
4285     for (const Attribute &Attr : Attrs) {
4286       switch (Attr.getKindAsEnum()) {
4287       case Attribute::ReadNone:
4288         State.addKnownBits(NO_ACCESSES);
4289         break;
4290       case Attribute::ReadOnly:
4291         State.addKnownBits(NO_WRITES);
4292         break;
4293       case Attribute::WriteOnly:
4294         State.addKnownBits(NO_READS);
4295         break;
4296       default:
4297         llvm_unreachable("Unexpected attribute!");
4298       }
4299     }
4300 
4301     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
4302       if (!I->mayReadFromMemory())
4303         State.addKnownBits(NO_READS);
4304       if (!I->mayWriteToMemory())
4305         State.addKnownBits(NO_WRITES);
4306     }
4307   }
4308 
4309   /// See AbstractAttribute::getDeducedAttributes(...).
4310   void getDeducedAttributes(LLVMContext &Ctx,
4311                             SmallVectorImpl<Attribute> &Attrs) const override {
4312     assert(Attrs.size() == 0);
4313     if (isAssumedReadNone())
4314       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
4315     else if (isAssumedReadOnly())
4316       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
4317     else if (isAssumedWriteOnly())
4318       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
4319     assert(Attrs.size() <= 1);
4320   }
4321 
4322   /// See AbstractAttribute::manifest(...).
4323   ChangeStatus manifest(Attributor &A) override {
4324     const IRPosition &IRP = getIRPosition();
4325 
4326     // Check if we would improve the existing attributes first.
4327     SmallVector<Attribute, 4> DeducedAttrs;
4328     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
4329     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
4330           return IRP.hasAttr(Attr.getKindAsEnum(),
4331                              /* IgnoreSubsumingPositions */ true);
4332         }))
4333       return ChangeStatus::UNCHANGED;
4334 
4335     // Clear existing attributes.
4336     IRP.removeAttrs(AttrKinds);
4337 
4338     // Use the generic manifest method.
4339     return IRAttribute::manifest(A);
4340   }
4341 
4342   /// See AbstractState::getAsStr().
4343   const std::string getAsStr() const override {
4344     if (isAssumedReadNone())
4345       return "readnone";
4346     if (isAssumedReadOnly())
4347       return "readonly";
4348     if (isAssumedWriteOnly())
4349       return "writeonly";
4350     return "may-read/write";
4351   }
4352 
4353   /// The set of IR attributes AAMemoryBehavior deals with.
4354   static const Attribute::AttrKind AttrKinds[3];
4355 };
4356 
4357 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
4358     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
4359 
4360 /// Memory behavior attribute for a floating value.
4361 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
4362   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4363 
4364   /// See AbstractAttribute::initialize(...).
4365   void initialize(Attributor &A) override {
4366     AAMemoryBehaviorImpl::initialize(A);
4367     // Initialize the use vector with all direct uses of the associated value.
4368     for (const Use &U : getAssociatedValue().uses())
4369       Uses.insert(&U);
4370   }
4371 
4372   /// See AbstractAttribute::updateImpl(...).
4373   ChangeStatus updateImpl(Attributor &A) override;
4374 
4375   /// See AbstractAttribute::trackStatistics()
4376   void trackStatistics() const override {
4377     if (isAssumedReadNone())
4378       STATS_DECLTRACK_FLOATING_ATTR(readnone)
4379     else if (isAssumedReadOnly())
4380       STATS_DECLTRACK_FLOATING_ATTR(readonly)
4381     else if (isAssumedWriteOnly())
4382       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
4383   }
4384 
4385 private:
4386   /// Return true if users of \p UserI might access the underlying
4387   /// variable/location described by \p U and should therefore be analyzed.
4388   bool followUsersOfUseIn(Attributor &A, const Use *U,
4389                           const Instruction *UserI);
4390 
4391   /// Update the state according to the effect of use \p U in \p UserI.
4392   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
4393 
4394 protected:
4395   /// Container for (transitive) uses of the associated argument.
4396   SetVector<const Use *> Uses;
4397 };
4398 
4399 /// Memory behavior attribute for function argument.
4400 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
4401   AAMemoryBehaviorArgument(const IRPosition &IRP)
4402       : AAMemoryBehaviorFloating(IRP) {}
4403 
4404   /// See AbstractAttribute::initialize(...).
4405   void initialize(Attributor &A) override {
4406     AAMemoryBehaviorFloating::initialize(A);
4407 
4408     // Arguments of functions without an exact definition cannot be analyzed.
4409     Argument *Arg = getAssociatedArgument();
4410     if (!Arg || !Arg->getParent()->hasExactDefinition())
4411       indicatePessimisticFixpoint();
4412   }
4413 
4414   ChangeStatus manifest(Attributor &A) override {
4415     // TODO: From readattrs.ll: "inalloca parameters are always
4416     //                           considered written"
4417     if (hasAttr({Attribute::InAlloca})) {
4418       removeKnownBits(NO_WRITES);
4419       removeAssumedBits(NO_WRITES);
4420     }
4421     return AAMemoryBehaviorFloating::manifest(A);
4422   }
4423 
4424   /// See AbstractAttribute::trackStatistics()
4425   void trackStatistics() const override {
4426     if (isAssumedReadNone())
4427       STATS_DECLTRACK_ARG_ATTR(readnone)
4428     else if (isAssumedReadOnly())
4429       STATS_DECLTRACK_ARG_ATTR(readonly)
4430     else if (isAssumedWriteOnly())
4431       STATS_DECLTRACK_ARG_ATTR(writeonly)
4432   }
4433 };
4434 
4435 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
4436   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
4437       : AAMemoryBehaviorArgument(IRP) {}
4438 
4439   /// See AbstractAttribute::updateImpl(...).
4440   ChangeStatus updateImpl(Attributor &A) override {
4441     // TODO: Once we have call site specific value information we can provide
4442     //       call site specific liveness information and then it makes
4443     //       sense to specialize attributes for call site arguments instead of
4444     //       redirecting requests to the callee argument.
4445     Argument *Arg = getAssociatedArgument();
4446     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4447     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4448     return clampStateAndIndicateChange(
4449         getState(),
4450         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
4451   }
4452 
4453   /// See AbstractAttribute::trackStatistics()
4454   void trackStatistics() const override {
4455     if (isAssumedReadNone())
4456       STATS_DECLTRACK_CSARG_ATTR(readnone)
4457     else if (isAssumedReadOnly())
4458       STATS_DECLTRACK_CSARG_ATTR(readonly)
4459     else if (isAssumedWriteOnly())
4460       STATS_DECLTRACK_CSARG_ATTR(writeonly)
4461   }
4462 };
4463 
4464 /// Memory behavior attribute for a call site return position.
4465 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
4466   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
4467       : AAMemoryBehaviorFloating(IRP) {}
4468 
4469   /// See AbstractAttribute::manifest(...).
4470   ChangeStatus manifest(Attributor &A) override {
4471     // We do not annotate returned values.
4472     return ChangeStatus::UNCHANGED;
4473   }
4474 
4475   /// See AbstractAttribute::trackStatistics()
4476   void trackStatistics() const override {}
4477 };
4478 
4479 /// An AA to represent the memory behavior function attributes.
4480 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
4481   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4482 
4483   /// See AbstractAttribute::updateImpl(Attributor &A).
4484   virtual ChangeStatus updateImpl(Attributor &A) override;
4485 
4486   /// See AbstractAttribute::manifest(...).
4487   ChangeStatus manifest(Attributor &A) override {
4488     Function &F = cast<Function>(getAnchorValue());
4489     if (isAssumedReadNone()) {
4490       F.removeFnAttr(Attribute::ArgMemOnly);
4491       F.removeFnAttr(Attribute::InaccessibleMemOnly);
4492       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4493     }
4494     return AAMemoryBehaviorImpl::manifest(A);
4495   }
4496 
4497   /// See AbstractAttribute::trackStatistics()
4498   void trackStatistics() const override {
4499     if (isAssumedReadNone())
4500       STATS_DECLTRACK_FN_ATTR(readnone)
4501     else if (isAssumedReadOnly())
4502       STATS_DECLTRACK_FN_ATTR(readonly)
4503     else if (isAssumedWriteOnly())
4504       STATS_DECLTRACK_FN_ATTR(writeonly)
4505   }
4506 };
4507 
4508 /// AAMemoryBehavior attribute for call sites.
4509 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
4510   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4511 
4512   /// See AbstractAttribute::initialize(...).
4513   void initialize(Attributor &A) override {
4514     AAMemoryBehaviorImpl::initialize(A);
4515     Function *F = getAssociatedFunction();
4516     if (!F || !F->hasExactDefinition())
4517       indicatePessimisticFixpoint();
4518   }
4519 
4520   /// See AbstractAttribute::updateImpl(...).
4521   ChangeStatus updateImpl(Attributor &A) override {
4522     // TODO: Once we have call site specific value information we can provide
4523     //       call site specific liveness information and then it makes
4524     //       sense to specialize attributes for call sites instead of
4525     //       redirecting requests to the callee.
4526     Function *F = getAssociatedFunction();
4527     const IRPosition &FnPos = IRPosition::function(*F);
4528     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4529     return clampStateAndIndicateChange(
4530         getState(),
4531         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
4532   }
4533 
4534   /// See AbstractAttribute::trackStatistics()
4535   void trackStatistics() const override {
4536     if (isAssumedReadNone())
4537       STATS_DECLTRACK_CS_ATTR(readnone)
4538     else if (isAssumedReadOnly())
4539       STATS_DECLTRACK_CS_ATTR(readonly)
4540     else if (isAssumedWriteOnly())
4541       STATS_DECLTRACK_CS_ATTR(writeonly)
4542   }
4543 };
4544 } // namespace
4545 
4546 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
4547 
4548   // The current assumed state used to determine a change.
4549   auto AssumedState = getAssumed();
4550 
4551   auto CheckRWInst = [&](Instruction &I) {
4552     // If the instruction has its own memory behavior state, use it to
4553     // restrict the local state. No further analysis is required as the other
4554     // memory state is as optimistic as it gets.
4555     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4556       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4557           *this, IRPosition::callsite_function(ICS));
4558       intersectAssumedBits(MemBehaviorAA.getAssumed());
4559       return !isAtFixpoint();
4560     }
4561 
4562     // Remove access kind modifiers if necessary.
4563     if (I.mayReadFromMemory())
4564       removeAssumedBits(NO_READS);
4565     if (I.mayWriteToMemory())
4566       removeAssumedBits(NO_WRITES);
4567     return !isAtFixpoint();
4568   };
4569 
4570   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
4571     return indicatePessimisticFixpoint();
4572 
4573   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4574                                         : ChangeStatus::UNCHANGED;
4575 }
4576 
4577 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
4578 
4579   const IRPosition &IRP = getIRPosition();
4580   const IRPosition &FnPos = IRPosition::function_scope(IRP);
4581   AAMemoryBehavior::StateType &S = getState();
4582 
4583   // First, check the function scope. We take the known information and we avoid
4584   // work if the assumed information implies the current assumed information for
4585   // this attribute.
4586   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4587   S.addKnownBits(FnMemAA.getKnown());
4588   if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
4589     return ChangeStatus::UNCHANGED;
4590 
4591   // Make sure the value is not captured (except through "return"); if it is,
4592   // any information derived would be irrelevant anyway as we cannot check
4593   // the potential aliases introduced by the capture. However, there is no
4594   // need to fall back to anything less optimistic than the function state.
4595   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
4596   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4597     S.intersectAssumedBits(FnMemAA.getAssumed());
4598     return ChangeStatus::CHANGED;
4599   }
4600 
4601   // The current assumed state used to determine a change.
4602   auto AssumedState = S.getAssumed();
4603 
4604   // Liveness information to exclude dead users.
4605   // TODO: Take the FnPos once we have call site specific liveness information.
4606   const auto &LivenessAA = A.getAAFor<AAIsDead>(
4607       *this, IRPosition::function(*IRP.getAssociatedFunction()));
4608 
4609   // Visit and expand uses until all are analyzed or a fixpoint is reached.
4610   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
4611     const Use *U = Uses[i];
4612     Instruction *UserI = cast<Instruction>(U->getUser());
4613     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
4614                       << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
4615                       << "]\n");
4616     if (LivenessAA.isAssumedDead(UserI))
4617       continue;
4618 
4619     // Check if the users of UserI should also be visited.
4620     if (followUsersOfUseIn(A, U, UserI))
4621       for (const Use &UserIUse : UserI->uses())
4622         Uses.insert(&UserIUse);
4623 
4624     // If UserI might touch memory we analyze the use in detail.
4625     if (UserI->mayReadOrWriteMemory())
4626       analyzeUseIn(A, U, UserI);
4627   }
4628 
4629   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4630                                         : ChangeStatus::UNCHANGED;
4631 }
4632 
4633 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
4634                                                   const Instruction *UserI) {
4635   // The loaded value is unrelated to the pointer argument, no need to
4636   // follow the users of the load.
4637   if (isa<LoadInst>(UserI))
4638     return false;
4639 
4640   // By default we follow all uses assuming UserI might leak information on U,
4641   // we have special handling for call sites operands though.
4642   ImmutableCallSite ICS(UserI);
4643   if (!ICS || !ICS.isArgOperand(U))
4644     return true;
4645 
4646   // If the use is a call argument known not to be captured, the users of
4647   // the call do not need to be visited because they have to be unrelated to
4648   // the input. Note that this check is not trivial even though we disallow
4649   // general capturing of the underlying argument. The reason is that the
4650   // call might capture the argument "through return", which we allow and for
4651   // which we need to check call users.
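       // For example (schematic IR): given
       //   call void @g(i8* nocapture %p)
       // the users of the call cannot be related to %p and need not be
       // visited, whereas for an argument that is only "nocapture maybe
       // returned" the call result might alias %p, so call users must be
       // followed.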
4652   unsigned ArgNo = ICS.getArgumentNo(U);
4653   const auto &ArgNoCaptureAA =
4654       A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
4655   return !ArgNoCaptureAA.isAssumedNoCapture();
4656 }
4657 
4658 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
4659                                             const Instruction *UserI) {
4660   assert(UserI->mayReadOrWriteMemory());
4661 
4662   switch (UserI->getOpcode()) {
4663   default:
4664     // TODO: Handle all atomics and other side-effect operations we know of.
4665     break;
4666   case Instruction::Load:
4667     // Loads cause the NO_READS property to disappear.
4668     removeAssumedBits(NO_READS);
4669     return;
4670 
4671   case Instruction::Store:
4672     // Stores cause the NO_WRITES property to disappear if the use is the
4673     // pointer operand. Note that we do assume that capturing was taken care of
4674     // somewhere else.
4675     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
4676       removeAssumedBits(NO_WRITES);
4677     return;
4678 
4679   case Instruction::Call:
4680   case Instruction::CallBr:
4681   case Instruction::Invoke: {
4682     // For call sites we look at the argument memory behavior attribute (this
4683     // could be recursive!) in order to restrict our own state.
4684     ImmutableCallSite ICS(UserI);
4685 
4686     // Give up on operand bundles.
4687     if (ICS.isBundleOperand(U)) {
4688       indicatePessimisticFixpoint();
4689       return;
4690     }
4691 
4692     // Calling a function does read the function pointer; it might also
4693     // write it if the function is self-modifying.
4694     if (ICS.isCallee(U)) {
4695       removeAssumedBits(NO_READS);
4696       break;
4697     }
4698 
4699     // Adjust the possible access behavior based on the information on the
4700     // argument.
4701     unsigned ArgNo = ICS.getArgumentNo(U);
4702     const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
4703     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4704     // "assumed" has at most the same bits as the MemBehaviorAA assumed
4705     // and at least "known".
4706     intersectAssumedBits(MemBehaviorAA.getAssumed());
4707     return;
4708   }
4709   }
4710 
4711   // Generally, look at the "may-properties" and adjust the assumed state if we
4712   // did not trigger special handling before.
4713   if (UserI->mayReadFromMemory())
4714     removeAssumedBits(NO_READS);
4715   if (UserI->mayWriteToMemory())
4716     removeAssumedBits(NO_WRITES);
4717 }
4718 
4719 /// ----------------------------------------------------------------------------
4720 ///                               Attributor
4721 /// ----------------------------------------------------------------------------
4722 
4723 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
4724                                const AAIsDead *LivenessAA) {
4725   const Instruction *CtxI = AA.getIRPosition().getCtxI();
4726   if (!CtxI)
4727     return false;
4728 
4729   // TODO: Find a good way to utilize fine and coarse grained liveness
4730   // information.
4731   if (!LivenessAA)
4732     LivenessAA =
4733         &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
4734                             /* TrackDependence */ false);
4735 
4736   // Don't check liveness for AAIsDead.
4737   if (&AA == LivenessAA)
4738     return false;
4739 
4740   if (!LivenessAA->isAssumedDead(CtxI))
4741     return false;
4742 
4743   // We actually used liveness information so we have to record a dependence.
4744   recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
4745 
4746   return true;
4747 }
4748 
4749 bool Attributor::checkForAllUses(
4750     const function_ref<bool(const Use &, bool &)> &Pred,
4751     const AbstractAttribute &QueryingAA, const Value &V) {
4752   const IRPosition &IRP = QueryingAA.getIRPosition();
4753   SmallVector<const Use *, 16> Worklist;
4754   SmallPtrSet<const Use *, 16> Visited;
4755 
4756   for (const Use &U : V.uses())
4757     Worklist.push_back(&U);
4758 
4759   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
4760                     << " initial uses to check\n");
4761 
4762   if (Worklist.empty())
4763     return true;
4764 
4765   bool AnyDead = false;
4766   const Function *ScopeFn = IRP.getAnchorScope();
4767   const auto *LivenessAA =
4768       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
4769                                     /* TrackDependence */ false)
4770               : nullptr;
4771 
4772   while (!Worklist.empty()) {
4773     const Use *U = Worklist.pop_back_val();
4774     if (!Visited.insert(U).second)
4775       continue;
4776     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
4777     if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
4778       if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
4779         LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
4780                           << static_cast<const AbstractAttribute &>(*LivenessAA)
4781                           << "\n");
4782         AnyDead = true;
4783         continue;
4784       }
4785 
4786     bool Follow = false;
4787     if (!Pred(*U, Follow))
4788       return false;
4789     if (!Follow)
4790       continue;
4791     for (const Use &UU : U->getUser()->uses())
4792       Worklist.push_back(&UU);
4793   }
4794 
4795   if (AnyDead)
4796     recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4797 
4798   return true;
4799 }
4800 
4801 bool Attributor::checkForAllCallSites(
4802     const function_ref<bool(AbstractCallSite)> &Pred,
4803     const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
4804   // We can try to determine information from the call sites. However, this
4805   // is only possible if all call sites are known, hence the function must
4806   // have internal linkage.
4807   const IRPosition &IRP = QueryingAA.getIRPosition();
4808   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4809   if (!AssociatedFunction) {
4810     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
4811                       << "\n");
4812     return false;
4813   }
4814 
4815   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
4816                               &QueryingAA);
4817 }
4818 
4819 bool Attributor::checkForAllCallSites(
4820     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
4821     bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
4822   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
4823     LLVM_DEBUG(
4824         dbgs()
4825         << "[Attributor] Function " << Fn.getName()
4826         << " has no internal linkage, hence not all call sites are known\n");
4827     return false;
4828   }
4829 
4830   for (const Use &U : Fn.uses()) {
4831     AbstractCallSite ACS(&U);
4832     if (!ACS) {
4833       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
4834                         << " has non call site use " << *U.get() << " in "
4835                         << *U.getUser() << "\n");
4836       // BlockAddress users are allowed.
4837       if (isa<BlockAddress>(U.getUser()))
4838         continue;
4839       return false;
4840     }
4841 
4842     Instruction *I = ACS.getInstruction();
4843     Function *Caller = I->getFunction();
4844 
4845     const auto *LivenessAA =
4846         lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
4847                               /* TrackDependence */ false);
4848 
4849     // Skip dead calls.
4850     if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4851       // We actually used liveness information so we have to record a
4852       // dependence.
4853       if (QueryingAA)
4854         recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
4855       continue;
4856     }
4857 
4858     const Use *EffectiveUse =
4859         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
4860     if (!ACS.isCallee(EffectiveUse)) {
4861       if (!RequireAllCallSites)
4862         continue;
4863       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
4864                         << " is an invalid use of " << Fn.getName() << "\n");
4865       return false;
4866     }
4867 
4868     if (Pred(ACS))
4869       continue;
4870 
4871     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
4872                       << *ACS.getInstruction() << "\n");
4873     return false;
4874   }
4875 
4876   return true;
4877 }
4878 
4879 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
4880     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
4881         &Pred,
4882     const AbstractAttribute &QueryingAA) {
4883 
4884   const IRPosition &IRP = QueryingAA.getIRPosition();
4885   // Since we need to provide return instructions we have to have an exact
4886   // definition.
4887   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4888   if (!AssociatedFunction)
4889     return false;
4890 
4891   // If this is a call site query we use the call site specific return values
4892   // and liveness information.
4893   // TODO: use the function scope once we have call site AAReturnedValues.
4894   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4895   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4896   if (!AARetVal.getState().isValidState())
4897     return false;
4898 
4899   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
4900 }
4901 
4902 bool Attributor::checkForAllReturnedValues(
4903     const function_ref<bool(Value &)> &Pred,
4904     const AbstractAttribute &QueryingAA) {
4905 
4906   const IRPosition &IRP = QueryingAA.getIRPosition();
4907   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4908   if (!AssociatedFunction)
4909     return false;
4910 
4911   // TODO: use the function scope once we have call site AAReturnedValues.
4912   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4913   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4914   if (!AARetVal.getState().isValidState())
4915     return false;
4916 
4917   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
4918       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
4919         return Pred(RV);
4920       });
4921 }
4922 
4923 static bool
4924 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
4925                             const function_ref<bool(Instruction &)> &Pred,
4926                             const AAIsDead *LivenessAA, bool &AnyDead,
4927                             const ArrayRef<unsigned> &Opcodes) {
4928   for (unsigned Opcode : Opcodes) {
4929     for (Instruction *I : OpcodeInstMap[Opcode]) {
4930       // Skip dead instructions.
4931       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4932         AnyDead = true;
4933         continue;
4934       }
4935 
4936       if (!Pred(*I))
4937         return false;
4938     }
4939   }
4940   return true;
4941 }
4942 
4943 bool Attributor::checkForAllInstructions(
4944     const llvm::function_ref<bool(Instruction &)> &Pred,
4945     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
4946 
4947   const IRPosition &IRP = QueryingAA.getIRPosition();
4948   // Since we need to provide instructions we have to have an exact definition.
4949   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4950   if (!AssociatedFunction)
4951     return false;
4952 
4953   // TODO: use the function scope once we have call site AAReturnedValues.
4954   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4955   const auto &LivenessAA =
4956       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4957   bool AnyDead = false;
4958 
4959   auto &OpcodeInstMap =
4960       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
4961   if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
4962                                    Opcodes))
4963     return false;
4964 
4965   // If we actually used liveness information, we have to record a dependence.
4966   if (AnyDead)
4967     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4968 
4969   return true;
4970 }
4971 
4972 bool Attributor::checkForAllReadWriteInstructions(
4973     const llvm::function_ref<bool(Instruction &)> &Pred,
4974     AbstractAttribute &QueryingAA) {
4975 
4976   const Function *AssociatedFunction =
4977       QueryingAA.getIRPosition().getAssociatedFunction();
4978   if (!AssociatedFunction)
4979     return false;
4980 
4981   // TODO: use the function scope once we have call site AAReturnedValues.
4982   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4983   const auto &LivenessAA =
4984       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4985   bool AnyDead = false;
4986 
4987   for (Instruction *I :
4988        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
4989     // Skip dead instructions.
4990     if (LivenessAA.isAssumedDead(I)) {
4991       AnyDead = true;
4992       continue;
4993     }
4994 
4995     if (!Pred(*I))
4996       return false;
4997   }
4998 
4999   // If we actually used liveness information, we have to record a dependence.
5000   if (AnyDead)
5001     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5002 
5003   return true;
5004 }
5005 
5006 ChangeStatus Attributor::run(Module &M) {
5007   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
5008                     << AllAbstractAttributes.size()
5009                     << " abstract attributes.\n");
5010 
5011   // Now that all abstract attributes are collected and initialized we start
5012   // the abstract analysis.
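       //
       // Schematically, each iteration of the loop below performs:
       //   for (AbstractAttribute *AA : Worklist)
       //     Changed |= (AA->update(*this) == ChangeStatus::CHANGED);
       //   Worklist = attributes depending on a changed one (via QueryMap);
       // until the work list is empty or the iteration limit is hit; anything
       // not at a fixpoint afterwards is forced to a pessimistic state.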
5013 
5014   unsigned IterationCounter = 1;
5015 
5016   SmallVector<AbstractAttribute *, 64> ChangedAAs;
5017   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
5018   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
5019 
5020   bool RecomputeDependences = false;
5021 
5022   do {
5023     // Remember the size to determine new attributes.
5024     size_t NumAAs = AllAbstractAttributes.size();
5025     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
5026                       << ", Worklist size: " << Worklist.size() << "\n");
5027 
5028     // For invalid AAs we can fix dependent AAs that have a required dependence,
5029     // thereby folding long dependence chains in a single step without the need
5030     // to run updates.
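         // E.g., if AA2 had a required dependence on AA1 and AA1 became
         // invalid, AA2 is moved to a pessimistic fixpoint immediately instead
         // of rediscovering this through a chain of update calls.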
5031     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
5032       AbstractAttribute *InvalidAA = InvalidAAs[u];
5033       auto &QuerriedAAs = QueryMap[InvalidAA];
5034       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
5035                         << QuerriedAAs.RequiredAAs.size() << "/"
5036                         << QuerriedAAs.OptionalAAs.size()
5037                         << " required/optional dependences\n");
5038       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
5039         AbstractState &DOIAAState = DepOnInvalidAA->getState();
5040         DOIAAState.indicatePessimisticFixpoint();
5041         ++NumAttributesFixedDueToRequiredDependences;
5042         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
5043         if (!DOIAAState.isValidState())
5044           InvalidAAs.insert(DepOnInvalidAA);
5045       }
5046       if (!RecomputeDependences)
5047         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5048                         QuerriedAAs.OptionalAAs.end());
5049     }
5050 
5051     // If dependences (=QueryMap) are recomputed we have to look at all abstract
5052     // attributes again, regardless of what changed in the last iteration.
5053     if (RecomputeDependences) {
5054       LLVM_DEBUG(
5055           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
5056       QueryMap.clear();
5057       ChangedAAs.clear();
5058       Worklist.insert(AllAbstractAttributes.begin(),
5059                       AllAbstractAttributes.end());
5060     }
5061 
5062     // Add all abstract attributes that are potentially dependent on one that
5063     // changed to the work list.
5064     for (AbstractAttribute *ChangedAA : ChangedAAs) {
5065       auto &QuerriedAAs = QueryMap[ChangedAA];
5066       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5067                       QuerriedAAs.OptionalAAs.end());
5068       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
5069                       QuerriedAAs.RequiredAAs.end());
5070     }
5071 
5072     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
5073                       << ", Worklist+Dependent size: " << Worklist.size()
5074                       << "\n");
5075 
5076     // Reset the changed and invalid set.
5077     ChangedAAs.clear();
5078     InvalidAAs.clear();
5079 
5080     // Update all abstract attributes in the work list and record the ones
5081     // that changed.
5082     for (AbstractAttribute *AA : Worklist)
5083       if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
5084         QueriedNonFixAA = false;
5085         if (AA->update(*this) == ChangeStatus::CHANGED) {
5086           ChangedAAs.push_back(AA);
5087           if (!AA->getState().isValidState())
5088             InvalidAAs.insert(AA);
5089         } else if (!QueriedNonFixAA) {
5090           // If the attribute did not query any non-fix information, the state
5091           // will not change and we can indicate that right away.
5092           AA->getState().indicateOptimisticFixpoint();
5093         }
5094       }
5095 
5096     // Check whether we recompute the dependences in the next iteration.
5097     RecomputeDependences = (DepRecomputeInterval > 0 &&
5098                             IterationCounter % DepRecomputeInterval == 0);
5099 
5100     // Add attributes to the changed set if they have been created in the last
5101     // iteration.
5102     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
5103                       AllAbstractAttributes.end());
5104 
5105     // Reset the work list and repopulate with the changed abstract attributes.
5106     // Note that dependent ones are added above.
5107     Worklist.clear();
5108     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
5109 
5110   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
5111                                  VerifyMaxFixpointIterations));
5112 
5113   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
5114                     << IterationCounter << "/" << MaxFixpointIterations
5115                     << " iterations\n");
5116 
5117   size_t NumFinalAAs = AllAbstractAttributes.size();
5118 
5119   // Reset abstract attributes not settled in a sound fixpoint by now. This
5120   // happens when we stopped the fixpoint iteration early. Note that only the
5121   // ones marked as "changed" *and* the ones transitively depending on them
5122   // need to be reverted to a pessimistic state. Others might not be in a
5123   // fixpoint state but we can use the optimistic results for them anyway.
5124   SmallPtrSet<AbstractAttribute *, 32> Visited;
5125   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
5126     AbstractAttribute *ChangedAA = ChangedAAs[u];
5127     if (!Visited.insert(ChangedAA).second)
5128       continue;
5129 
5130     AbstractState &State = ChangedAA->getState();
5131     if (!State.isAtFixpoint()) {
5132       State.indicatePessimisticFixpoint();
5133 
5134       NumAttributesTimedOut++;
5135     }
5136 
5137     auto &QuerriedAAs = QueryMap[ChangedAA];
5138     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
5139                       QuerriedAAs.OptionalAAs.end());
5140     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
5141                       QuerriedAAs.RequiredAAs.end());
5142   }
5143 
5144   LLVM_DEBUG({
5145     if (!Visited.empty())
5146       dbgs() << "\n[Attributor] Finalized " << Visited.size()
5147              << " abstract attributes.\n";
5148   });
5149 
5150   unsigned NumManifested = 0;
5151   unsigned NumAtFixpoint = 0;
5152   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
5153   for (AbstractAttribute *AA : AllAbstractAttributes) {
5154     AbstractState &State = AA->getState();
5155 
5156     // If a fixpoint was not reached yet, we can take the optimistic state
5157     // now. This is correct because we enforced a pessimistic one on abstract
5158     // attributes that were transitively dependent on a changed one already
5159     // above.
5160     if (!State.isAtFixpoint())
5161       State.indicateOptimisticFixpoint();
5162 
5163     // If the state is invalid, we do not try to manifest it.
5164     if (!State.isValidState())
5165       continue;
5166 
5167     // Skip dead code.
5168     if (isAssumedDead(*AA, nullptr))
5169       continue;
5170     // Manifest the state and record if we changed the IR.
5171     ChangeStatus LocalChange = AA->manifest(*this);
5172     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
5173       AA->trackStatistics();
5174 
5175     ManifestChange = ManifestChange | LocalChange;
5176 
5177     NumAtFixpoint++;
5178     NumManifested += (LocalChange == ChangeStatus::CHANGED);
5179   }
5180 
5181   (void)NumManifested;
5182   (void)NumAtFixpoint;
5183   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
5184                     << " attributes while " << NumAtFixpoint
5185                     << " were in a valid fixpoint state\n");
5186 
5187   NumAttributesManifested += NumManifested;
5188   NumAttributesValidFixpoint += NumAtFixpoint;
5189 
5190   (void)NumFinalAAs;
5191   assert(
5192       NumFinalAAs == AllAbstractAttributes.size() &&
5193       "Expected the final number of abstract attributes to remain unchanged!");
5194 
5195   // Perform deletions last to avoid invalid references and for a sane order.
5196   {
5197     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
5198                       << ToBeDeletedFunctions.size() << " functions and "
5199                       << ToBeDeletedBlocks.size() << " blocks and "
5200                       << ToBeDeletedInsts.size() << " instructions and "
5201                       << ToBeChangedUses.size() << " uses\n");
5202 
5203     SmallVector<Instruction *, 32> DeadInsts;
5204     SmallVector<Instruction *, 32> TerminatorsToFold;
5205     SmallVector<Instruction *, 32> UnreachablesToInsert;
5206 
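         // Replace the uses first. If a branch condition becomes a non-undef
         // constant, the terminator can be folded; if it becomes undef, the
         // branch is unreachable, e.g. (schematically):
         //   br i1 undef, label %a, label %b   ==>   unreachable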
5207     for (auto &It : ToBeChangedUses) {
5208       Use *U = It.first;
5209       Value *NewV = It.second;
5210       Value *OldV = U->get();
5211       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
5212                         << " instead of " << *OldV << "\n");
5213       U->set(NewV);
5214       if (Instruction *I = dyn_cast<Instruction>(OldV))
5215         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
5216             isInstructionTriviallyDead(I)) {
5217           DeadInsts.push_back(I);
5218         }
5219       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
5220         Instruction *UserI = cast<Instruction>(U->getUser());
5221         if (isa<UndefValue>(NewV)) {
5222           UnreachablesToInsert.push_back(UserI);
5223         } else {
5224           TerminatorsToFold.push_back(UserI);
5225         }
5226       }
5227     }
5228     for (Instruction *I : UnreachablesToInsert)
5229       changeToUnreachable(I, /* UseLLVMTrap */ false);
5230     for (Instruction *I : TerminatorsToFold)
5231       ConstantFoldTerminator(I->getParent());
5232 
5233     for (Instruction *I : ToBeDeletedInsts) {
5234       I->replaceAllUsesWith(UndefValue::get(I->getType()));
5235       if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
5236         DeadInsts.push_back(I);
5237       else
5238         I->eraseFromParent();
5239     }
5240 
5241     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
5242 
5243     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
5244       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
5245       ToBeDeletedBBs.reserve(NumDeadBlocks);
5246       ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
5247       // We do not actually delete the blocks but squash them into a single
5248       // unreachable block. Untangling branches that jump here is something
5249       // we need to do in a more generic way.
5250       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
5251       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
5252       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
5253     }
5254 
5255     STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
5256     for (Function *Fn : ToBeDeletedFunctions) {
5257       Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
5258       Fn->eraseFromParent();
5259       STATS_TRACK(AAIsDead, Function);
5260     }
5261 
5262     // Identify dead internal functions and delete them. This happens outside
5263     // the other fixpoint analysis as we might treat potentially dead functions
5264     // as live to lower the number of iterations. If they happen to be dead, the
5265     // below fixpoint loop will identify and eliminate them.
5266     SmallVector<Function *, 8> InternalFns;
5267     for (Function &F : M)
5268       if (F.hasLocalLinkage())
5269         InternalFns.push_back(&F);
5270 
5271     bool FoundDeadFn = true;
5272     while (FoundDeadFn) {
5273       FoundDeadFn = false;
5274       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
5275         Function *F = InternalFns[u];
5276         if (!F)
5277           continue;
5278 
5279         if (!checkForAllCallSites([](AbstractCallSite ACS) { return false; },
5280                                   *F, true, nullptr))
5281           continue;
5282 
5283         STATS_TRACK(AAIsDead, Function);
5284         ToBeDeletedFunctions.insert(F);
5285         F->deleteBody();
5286         F->replaceAllUsesWith(UndefValue::get(F->getType()));
5287         F->eraseFromParent();
5288         InternalFns[u] = nullptr;
5289         FoundDeadFn = true;
5290       }
5291     }
5292   }
5293 
5294   if (VerifyMaxFixpointIterations &&
5295       IterationCounter != MaxFixpointIterations) {
5296     errs() << "\n[Attributor] Fixpoint iteration done after: "
5297            << IterationCounter << "/" << MaxFixpointIterations
5298            << " iterations\n";
5299     llvm_unreachable("The fixpoint was not reached with exactly the number of "
5300                      "specified iterations!");
5301   }
5302 
5303   return ManifestChange;
5304 }
5305 
5306 void Attributor::initializeInformationCache(Function &F) {
5307 
5308   // Walk all instructions to find interesting instructions that might be
5309   // queried by abstract attributes during their initialization or update.
5310   // This has to happen before we create attributes.
5311   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
5312   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
5313 
5314   for (Instruction &I : instructions(&F)) {
5315     bool IsInterestingOpcode = false;
5316 
5317     // To allow easy access to all instructions in a function with a given
5318     // opcode we store them in the InfoCache. As not all opcodes are
5319     // interesting to concrete attributes, we only cache the ones that are,
5320     // as identified in the following switch.
5322     switch (I.getOpcode()) {
5323     default:
5324       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
5325              "New call site/base instruction type needs to be known in the "
5326              "Attributor.");
5327       break;
5328     case Instruction::Load:
5329       // The alignment of a pointer is interesting for loads.
5330     case Instruction::Store:
5331       // The alignment of a pointer is interesting for stores.
5332     case Instruction::Call:
5333     case Instruction::CallBr:
5334     case Instruction::Invoke:
5335     case Instruction::CleanupRet:
5336     case Instruction::CatchSwitch:
5337     case Instruction::Resume:
5338     case Instruction::Ret:
5339       IsInterestingOpcode = true;
5340     }
5341     if (IsInterestingOpcode)
5342       InstOpcodeMap[I.getOpcode()].push_back(&I);
5343     if (I.mayReadOrWriteMemory())
5344       ReadOrWriteInsts.push_back(&I);
5345   }
5346 }
5347 
5348 void Attributor::recordDependence(const AbstractAttribute &FromAA,
5349                                   const AbstractAttribute &ToAA,
5350                                   DepClassTy DepClass) {
5351   if (FromAA.getState().isAtFixpoint())
5352     return;
5353 
5354   if (DepClass == DepClassTy::REQUIRED)
5355     QueryMap[&FromAA].RequiredAAs.insert(
5356         const_cast<AbstractAttribute *>(&ToAA));
5357   else
5358     QueryMap[&FromAA].OptionalAAs.insert(
5359         const_cast<AbstractAttribute *>(&ToAA));
5360   QueriedNonFixAA = true;
5361 }
5362 
5363 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
5364   if (!VisitedFunctions.insert(&F).second)
5365     return;
5366   if (F.isDeclaration())
5367     return;
5368 
5369   IRPosition FPos = IRPosition::function(F);
5370 
5371   // Check for dead BasicBlocks in every function.
5372   // We need dead instruction detection because we do not want to deal with
5373   // broken IR in which SSA rules do not apply.
5374   getOrCreateAAFor<AAIsDead>(FPos);
5375 
5376   // Every function might be "will-return".
5377   getOrCreateAAFor<AAWillReturn>(FPos);
5378 
5379   // Every function can be nounwind.
5380   getOrCreateAAFor<AANoUnwind>(FPos);
5381 
5382   // Every function might be marked "nosync"
5383   getOrCreateAAFor<AANoSync>(FPos);
5384 
5385   // Every function might be "no-free".
5386   getOrCreateAAFor<AANoFree>(FPos);
5387 
5388   // Every function might be "no-return".
5389   getOrCreateAAFor<AANoReturn>(FPos);
5390 
5391   // Every function might be "no-recurse".
5392   getOrCreateAAFor<AANoRecurse>(FPos);
5393 
5394   // Every function might be "readnone/readonly/writeonly/...".
5395   getOrCreateAAFor<AAMemoryBehavior>(FPos);
5396 
5397   // Every function might be applicable for Heap-To-Stack conversion.
5398   if (EnableHeapToStack)
5399     getOrCreateAAFor<AAHeapToStack>(FPos);
5400 
5401   // Return attributes are only appropriate if the return type is non void.
5402   Type *ReturnType = F.getReturnType();
5403   if (!ReturnType->isVoidTy()) {
5404     // Argument attribute "returned" --- Create only one per function even
5405     // though it is an argument attribute.
5406     getOrCreateAAFor<AAReturnedValues>(FPos);
5407 
5408     IRPosition RetPos = IRPosition::returned(F);
5409 
5410     // Every returned value might be dead.
5411     getOrCreateAAFor<AAIsDead>(RetPos);
5412 
    // Every returned value might be simplified.
5414     getOrCreateAAFor<AAValueSimplify>(RetPos);
5415 
5416     if (ReturnType->isPointerTy()) {
5417 
5418       // Every function with pointer return type might be marked align.
5419       getOrCreateAAFor<AAAlign>(RetPos);
5420 
5421       // Every function with pointer return type might be marked nonnull.
5422       getOrCreateAAFor<AANonNull>(RetPos);
5423 
5424       // Every function with pointer return type might be marked noalias.
5425       getOrCreateAAFor<AANoAlias>(RetPos);
5426 
5427       // Every function with pointer return type might be marked
5428       // dereferenceable.
5429       getOrCreateAAFor<AADereferenceable>(RetPos);
5430     }
5431   }
5432 
5433   for (Argument &Arg : F.args()) {
5434     IRPosition ArgPos = IRPosition::argument(Arg);
5435 
5436     // Every argument might be simplified.
5437     getOrCreateAAFor<AAValueSimplify>(ArgPos);
5438 
5439     if (Arg.getType()->isPointerTy()) {
5440       // Every argument with pointer type might be marked nonnull.
5441       getOrCreateAAFor<AANonNull>(ArgPos);
5442 
5443       // Every argument with pointer type might be marked noalias.
5444       getOrCreateAAFor<AANoAlias>(ArgPos);
5445 
5446       // Every argument with pointer type might be marked dereferenceable.
5447       getOrCreateAAFor<AADereferenceable>(ArgPos);
5448 
5449       // Every argument with pointer type might be marked align.
5450       getOrCreateAAFor<AAAlign>(ArgPos);
5451 
5452       // Every argument with pointer type might be marked nocapture.
5453       getOrCreateAAFor<AANoCapture>(ArgPos);
5454 
5455       // Every argument with pointer type might be marked
5456       // "readnone/readonly/writeonly/..."
5457       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
5458 
5459       // Every argument with pointer type might be marked nofree.
5460       getOrCreateAAFor<AANoFree>(ArgPos);
5461     }
5462   }
5463 
5464   auto CallSitePred = [&](Instruction &I) -> bool {
5465     CallSite CS(&I);
5466     if (Function *Callee = CS.getCalledFunction()) {
      // Skip declarations except if annotations on their call sites were
      // explicitly requested.
5469       if (!AnnotateDeclarationCallSites && Callee->isDeclaration())
5470         return true;
5471 
5472       if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
5473         IRPosition CSRetPos = IRPosition::callsite_returned(CS);
5474 
5475         // Call site return values might be dead.
5476         getOrCreateAAFor<AAIsDead>(CSRetPos);
5477       }
5478 
      for (unsigned i = 0, e = Callee->arg_size(); i < e; ++i) {
5480 
5481         IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
5482 
5483         // Every call site argument might be dead.
5484         getOrCreateAAFor<AAIsDead>(CSArgPos);
5485 
        // Every call site argument might be simplified.
5487         getOrCreateAAFor<AAValueSimplify>(CSArgPos);
5488 
5489         if (!CS.getArgument(i)->getType()->isPointerTy())
5490           continue;
5491 
5492         // Call site argument attribute "non-null".
5493         getOrCreateAAFor<AANonNull>(CSArgPos);
5494 
5495         // Call site argument attribute "no-alias".
5496         getOrCreateAAFor<AANoAlias>(CSArgPos);
5497 
5498         // Call site argument attribute "dereferenceable".
5499         getOrCreateAAFor<AADereferenceable>(CSArgPos);
5500 
5501         // Call site argument attribute "align".
5502         getOrCreateAAFor<AAAlign>(CSArgPos);
5503 
        // Call site argument attribute "nofree".
        getOrCreateAAFor<AANoFree>(CSArgPos);
5506       }
5507     }
5508     return true;
5509   };
5510 
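  // Seed call site specific attributes by visiting every call-like
  // instruction (Call, CallBr, Invoke) cached in the opcode map. No liveness
  // information is passed in (nullptr), so AnyDead is expected to stay false.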
5511   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  bool Success;
  bool AnyDead = false;
5513   Success = checkForAllInstructionsImpl(
5514       OpcodeInstMap, CallSitePred, nullptr, AnyDead,
5515       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
5516        (unsigned)Instruction::Call});
5517   (void)Success;
5518   assert(Success && !AnyDead && "Expected the check call to be successful!");
5519 
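  // Loads and stores can benefit from known alignment of their pointer
  // operand, so seed an AAAlign attribute at that operand's value position.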
5520   auto LoadStorePred = [&](Instruction &I) -> bool {
5521     if (isa<LoadInst>(I))
5522       getOrCreateAAFor<AAAlign>(
5523           IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
5524     else
5525       getOrCreateAAFor<AAAlign>(
5526           IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
5527     return true;
5528   };
5529   Success = checkForAllInstructionsImpl(
5530       OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
5531       {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
5532   (void)Success;
5533   assert(Success && !AnyDead && "Expected the check call to be successful!");
5534 }
5535 
5536 /// Helpers to ease debugging through output streams and print calls.
5537 ///
5538 ///{
5539 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
5540   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
5541 }
5542 
5543 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
5544   switch (AP) {
5545   case IRPosition::IRP_INVALID:
5546     return OS << "inv";
5547   case IRPosition::IRP_FLOAT:
5548     return OS << "flt";
5549   case IRPosition::IRP_RETURNED:
5550     return OS << "fn_ret";
5551   case IRPosition::IRP_CALL_SITE_RETURNED:
5552     return OS << "cs_ret";
5553   case IRPosition::IRP_FUNCTION:
5554     return OS << "fn";
5555   case IRPosition::IRP_CALL_SITE:
5556     return OS << "cs";
5557   case IRPosition::IRP_ARGUMENT:
5558     return OS << "arg";
5559   case IRPosition::IRP_CALL_SITE_ARGUMENT:
5560     return OS << "cs_arg";
5561   }
5562   llvm_unreachable("Unknown attribute position!");
5563 }
5564 
5565 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
5566   const Value &AV = Pos.getAssociatedValue();
5567   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
5568             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
5569 }
5570 
5571 template <typename base_ty, base_ty BestState, base_ty WorstState>
5572 raw_ostream &llvm::
5573 operator<<(raw_ostream &OS,
5574            const IntegerStateBase<base_ty, BestState, WorstState> &S) {
5575   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
5576             << static_cast<const AbstractState &>(S);
5577 }
5578 
5579 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
5580   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
5581 }
5582 
5583 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
5584   AA.print(OS);
5585   return OS;
5586 }
5587 
5588 void AbstractAttribute::print(raw_ostream &OS) const {
5589   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
5590      << "]";
5591 }
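
// For illustration, print produces output of the form
//   [P: {<kind>:<value> [<anchor>@<argno>]}][<getAsStr()>][S: fix]
// where the state part is empty while the attribute is valid but not yet at a
// fixpoint, "fix" once it is fixed, and "top" if the state is invalid.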
5592 ///}
5593 
5594 /// ----------------------------------------------------------------------------
5595 ///                       Pass (Manager) Boilerplate
5596 /// ----------------------------------------------------------------------------
5597 
5598 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
5599   if (DisableAttributor)
5600     return false;
5601 
5602   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
5603                     << " functions.\n");
5604 
  // Create an Attributor and an initially empty information cache that is
  // filled while we identify default attribute opportunities.
5607   InformationCache InfoCache(M, AG);
5608   Attributor A(InfoCache, DepRecInterval);
5609 
5610   for (Function &F : M)
5611     A.initializeInformationCache(F);
5612 
5613   for (Function &F : M) {
5614     if (F.hasExactDefinition())
5615       NumFnWithExactDefinition++;
5616     else
5617       NumFnWithoutExactDefinition++;
5618 
5619     // We look at internal functions only on-demand but if any use is not a
5620     // direct call, we have to do it eagerly.
5621     if (F.hasLocalLinkage()) {
5622       if (llvm::all_of(F.uses(), [](const Use &U) {
5623             return ImmutableCallSite(U.getUser()) &&
5624                    ImmutableCallSite(U.getUser()).isCallee(&U);
5625           }))
5626         continue;
5627     }
5628 
5629     // Populate the Attributor with abstract attribute opportunities in the
5630     // function and the information cache with IR information.
5631     A.identifyDefaultAbstractAttributes(F);
5632   }
5633 
5634   return A.run(M) == ChangeStatus::CHANGED;
5635 }
5636 
5637 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
5638   AnalysisGetter AG(AM);
5639   if (runAttributorOnModule(M, AG)) {
5640     // FIXME: Think about passes we will preserve and add them here.
5641     return PreservedAnalyses::none();
5642   }
5643   return PreservedAnalyses::all();
5644 }
5645 
5646 namespace {
5647 
5648 struct AttributorLegacyPass : public ModulePass {
5649   static char ID;
5650 
5651   AttributorLegacyPass() : ModulePass(ID) {
5652     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
5653   }
5654 
5655   bool runOnModule(Module &M) override {
5656     if (skipModule(M))
5657       return false;
5658 
5659     AnalysisGetter AG;
5660     return runAttributorOnModule(M, AG);
5661   }
5662 
5663   void getAnalysisUsage(AnalysisUsage &AU) const override {
5664     // FIXME: Think about passes we will preserve and add them here.
5665     AU.addRequired<TargetLibraryInfoWrapperPass>();
5666   }
5667 };
5668 
5669 } // end anonymous namespace
5670 
5671 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
5672 
5673 char AttributorLegacyPass::ID = 0;
5674 
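// The unique addresses of the following ID members are used to identify the
// abstract attribute classes at runtime; their values are irrelevant.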
5675 const char AAReturnedValues::ID = 0;
5676 const char AANoUnwind::ID = 0;
5677 const char AANoSync::ID = 0;
5678 const char AANoFree::ID = 0;
5679 const char AANonNull::ID = 0;
5680 const char AANoRecurse::ID = 0;
5681 const char AAWillReturn::ID = 0;
5682 const char AANoAlias::ID = 0;
5683 const char AANoReturn::ID = 0;
5684 const char AAIsDead::ID = 0;
5685 const char AADereferenceable::ID = 0;
5686 const char AAAlign::ID = 0;
5687 const char AANoCapture::ID = 0;
5688 const char AAValueSimplify::ID = 0;
5689 const char AAHeapToStack::ID = 0;
5690 const char AAMemoryBehavior::ID = 0;
5691 
5692 // Macro magic to create the static generator function for attributes that
5693 // follow the naming scheme.
5694 
5695 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
5696   case IRPosition::PK:                                                         \
5697     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
5698 
5699 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
5700   case IRPosition::PK:                                                         \
5701     AA = new CLASS##SUFFIX(IRP);                                               \
5702     break;
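
// For example, SWITCH_PK_CREATE(AANoUnwind, IRP, IRP_FUNCTION, Function)
// expands to:
//   case IRPosition::IRP_FUNCTION:
//     AA = new AANoUnwindFunction(IRP);
//     break;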
5703 
5704 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
5705   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5706     CLASS *AA = nullptr;                                                       \
5707     switch (IRP.getPositionKind()) {                                           \
5708       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5709       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
5710       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
5711       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5712       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
5713       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
5714       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5715       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5716     }                                                                          \
5717     return *AA;                                                                \
5718   }
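
// With the macro above, e.g., AANoUnwind::createForPosition (instantiated
// below) allocates an AANoUnwindFunction or AANoUnwindCallSite for function
// and call site positions, respectively, and reports all other positions as
// unreachable.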
5719 
5720 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
5721   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5722     CLASS *AA = nullptr;                                                       \
5723     switch (IRP.getPositionKind()) {                                           \
5724       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5725       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
5726       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
5727       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5728       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5729       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
5730       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5731       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5732     }                                                                          \
5733     return *AA;                                                                \
5734   }
5735 
5736 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
5737   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5738     CLASS *AA = nullptr;                                                       \
5739     switch (IRP.getPositionKind()) {                                           \
5740       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5741       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5742       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5743       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5744       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5745       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
5746       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5747       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5748     }                                                                          \
5749     return *AA;                                                                \
5750   }
5751 
5752 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
5753   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5754     CLASS *AA = nullptr;                                                       \
5755     switch (IRP.getPositionKind()) {                                           \
5756       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5757       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
5758       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
5759       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5760       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
5761       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
5762       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
5763       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5764     }                                                                          \
5765     return *AA;                                                                \
5766   }
5767 
5768 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
5769   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5770     CLASS *AA = nullptr;                                                       \
5771     switch (IRP.getPositionKind()) {                                           \
5772       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5773       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5774       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5775       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5776       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5777       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5778       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5779       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5780     }                                                                          \
5781     return *AA;                                                                \
5782   }
5783 
5784 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
5785 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
5786 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
5787 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
5788 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
5789 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
5790 
5791 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
5792 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
5793 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
5794 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
5795 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
5796 
5797 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
5798 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
5799 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
5800 
5801 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
5802 
5803 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
5804 
5805 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
5806 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
5807 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
5808 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
5809 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
5810 #undef SWITCH_PK_CREATE
5811 #undef SWITCH_PK_INV
5812 
5813 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
5814                       "Deduce and propagate attributes", false, false)
5815 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
5816 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
5817                     "Deduce and propagate attributes", false, false)
5818