1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an inter procedural pass that deduces and/or propagating
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CFG.h"
32 #include "llvm/IR/InstIterator.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/InitializePasses.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/raw_ostream.h"
38 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
39 #include "llvm/Transforms/Utils/Local.h"
40 
41 #include <cassert>
42 
43 using namespace llvm;
44 
45 #define DEBUG_TYPE "attributor"
46 
47 STATISTIC(NumFnWithExactDefinition,
48           "Number of function with exact definitions");
49 STATISTIC(NumFnWithoutExactDefinition,
50           "Number of function without exact definitions");
51 STATISTIC(NumAttributesTimedOut,
52           "Number of abstract attributes timed out before fixpoint");
53 STATISTIC(NumAttributesValidFixpoint,
54           "Number of abstract attributes in a valid fixpoint state");
55 STATISTIC(NumAttributesManifested,
56           "Number of abstract attributes manifested in IR");
57 STATISTIC(NumAttributesFixedDueToRequiredDependences,
58           "Number of abstract attributes fixed due to required dependences");
59 
60 // Some helper macros to deal with statistics tracking.
61 //
62 // Usage:
63 // For simple IR attribute tracking overload trackStatistics in the abstract
64 // attribute and choose the right STATS_DECLTRACK_********* macro,
65 // e.g.,:
66 //  void trackStatistics() const override {
67 //    STATS_DECLTRACK_ARG_ATTR(returned)
68 //  }
69 // If there is a single "increment" side one can use the macro
70 // STATS_DECLTRACK with a custom message. If there are multiple increment
71 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
72 //
73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
74   ("Number of " #TYPE " marked '" #NAME "'")
75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
77 #define STATS_DECL(NAME, TYPE, MSG)                                            \
78   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
80 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
81   {                                                                            \
82     STATS_DECL(NAME, TYPE, MSG)                                                \
83     STATS_TRACK(NAME, TYPE)                                                    \
84   }
85 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
86   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
87 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
88   STATS_DECLTRACK(NAME, CSArguments,                                           \
89                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
90 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
91   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
92 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
94 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
95   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
96                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
97 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, CSReturn,                                              \
99                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
101   STATS_DECLTRACK(NAME, Floating,                                              \
102                   ("Number of floating values known to be '" #NAME "'"))
103 
104 // TODO: Determine a good default value.
105 //
106 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
107 // (when run with the first 5 abstract attributes). The results also indicate
108 // that we never reach 32 iterations but always find a fixpoint sooner.
109 //
110 // This will become more evolved once we perform two interleaved fixpoint
111 // iterations: bottom-up and top-down.
112 static cl::opt<unsigned>
113     MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
114                           cl::desc("Maximal number of fixpoint iterations."),
115                           cl::init(32));
116 static cl::opt<bool> VerifyMaxFixpointIterations(
117     "attributor-max-iterations-verify", cl::Hidden,
118     cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
119     cl::init(false));
120 
121 static cl::opt<bool> DisableAttributor(
122     "attributor-disable", cl::Hidden,
123     cl::desc("Disable the attributor inter-procedural deduction pass."),
124     cl::init(true));
125 
126 static cl::opt<bool> AnnotateDeclarationCallSites(
127     "attributor-annotate-decl-cs", cl::Hidden,
128     cl::desc("Annoate call sites of function declarations."), cl::init(false));
129 
130 static cl::opt<bool> ManifestInternal(
131     "attributor-manifest-internal", cl::Hidden,
132     cl::desc("Manifest Attributor internal string attributes."),
133     cl::init(false));
134 
135 static cl::opt<unsigned> DepRecInterval(
136     "attributor-dependence-recompute-interval", cl::Hidden,
137     cl::desc("Number of iterations until dependences are recomputed."),
138     cl::init(4));
139 
140 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
141                                        cl::init(true), cl::Hidden);
142 
143 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
144                                        cl::Hidden);
145 
146 /// Logic operators for the change status enum class.
147 ///
148 ///{
149 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
150   return l == ChangeStatus::CHANGED ? l : r;
151 }
152 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
153   return l == ChangeStatus::UNCHANGED ? l : r;
154 }
155 ///}
156 
157 /// Recursively visit all values that might become \p IRP at some point. This
158 /// will be done by looking through cast instructions, selects, phis, and calls
159 /// with the "returned" attribute. Once we cannot look through the value any
160 /// further, the callback \p VisitValueCB is invoked and passed the current
161 /// value, the \p State, and a flag to indicate if we stripped anything. To
162 /// limit how much effort is invested, we will never visit more values than
163 /// specified by \p MaxValues.
164 template <typename AAType, typename StateTy>
165 static bool genericValueTraversal(
166     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
167     const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
168     int MaxValues = 8) {
169 
170   const AAIsDead *LivenessAA = nullptr;
171   if (IRP.getAnchorScope())
172     LivenessAA = &A.getAAFor<AAIsDead>(
173         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
174         /* TrackDependence */ false);
175   bool AnyDead = false;
176 
177   // TODO: Use Positions here to allow context sensitivity in VisitValueCB
178   SmallPtrSet<Value *, 16> Visited;
179   SmallVector<Value *, 16> Worklist;
180   Worklist.push_back(&IRP.getAssociatedValue());
181 
182   int Iteration = 0;
183   do {
184     Value *V = Worklist.pop_back_val();
185 
186     // Check if we should process the current value. To prevent endless
187     // recursion keep a record of the values we followed!
188     if (!Visited.insert(V).second)
189       continue;
190 
191     // Make sure we limit the compile time for complex expressions.
192     if (Iteration++ >= MaxValues)
193       return false;
194 
195     // Explicitly look through calls with a "returned" attribute if we do
196     // not have a pointer as stripPointerCasts only works on them.
197     Value *NewV = nullptr;
198     if (V->getType()->isPointerTy()) {
199       NewV = V->stripPointerCasts();
200     } else {
201       CallSite CS(V);
202       if (CS && CS.getCalledFunction()) {
203         for (Argument &Arg : CS.getCalledFunction()->args())
204           if (Arg.hasReturnedAttr()) {
205             NewV = CS.getArgOperand(Arg.getArgNo());
206             break;
207           }
208       }
209     }
210     if (NewV && NewV != V) {
211       Worklist.push_back(NewV);
212       continue;
213     }
214 
215     // Look through select instructions, visit both potential values.
216     if (auto *SI = dyn_cast<SelectInst>(V)) {
217       Worklist.push_back(SI->getTrueValue());
218       Worklist.push_back(SI->getFalseValue());
219       continue;
220     }
221 
222     // Look through phi nodes, visit all live operands.
223     if (auto *PHI = dyn_cast<PHINode>(V)) {
224       assert(LivenessAA &&
225              "Expected liveness in the presence of instructions!");
226       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
227         const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
228         if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
229           AnyDead = true;
230           continue;
231         }
232         Worklist.push_back(PHI->getIncomingValue(u));
233       }
234       continue;
235     }
236 
237     // Once a leaf is reached we inform the user through the callback.
238     if (!VisitValueCB(*V, State, Iteration > 1))
239       return false;
240   } while (!Worklist.empty());
241 
242   // If we actually used liveness information so we have to record a dependence.
243   if (AnyDead)
244     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
245 
246   // All values have been visited.
247   return true;
248 }
249 
250 /// Return true if \p New is equal or worse than \p Old.
251 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
252   if (!Old.isIntAttribute())
253     return true;
254 
255   return Old.getValueAsInt() >= New.getValueAsInt();
256 }
257 
258 /// Return true if the information provided by \p Attr was added to the
259 /// attribute list \p Attrs. This is only the case if it was not already present
260 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
261 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
262                              AttributeList &Attrs, int AttrIdx) {
263 
264   if (Attr.isEnumAttribute()) {
265     Attribute::AttrKind Kind = Attr.getKindAsEnum();
266     if (Attrs.hasAttribute(AttrIdx, Kind))
267       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
268         return false;
269     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
270     return true;
271   }
272   if (Attr.isStringAttribute()) {
273     StringRef Kind = Attr.getKindAsString();
274     if (Attrs.hasAttribute(AttrIdx, Kind))
275       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
276         return false;
277     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
278     return true;
279   }
280   if (Attr.isIntAttribute()) {
281     Attribute::AttrKind Kind = Attr.getKindAsEnum();
282     if (Attrs.hasAttribute(AttrIdx, Kind))
283       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
284         return false;
285     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
286     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
287     return true;
288   }
289 
290   llvm_unreachable("Expected enum or string attribute!");
291 }
292 static const Value *getPointerOperand(const Instruction *I) {
293   if (auto *LI = dyn_cast<LoadInst>(I))
294     if (!LI->isVolatile())
295       return LI->getPointerOperand();
296 
297   if (auto *SI = dyn_cast<StoreInst>(I))
298     if (!SI->isVolatile())
299       return SI->getPointerOperand();
300 
301   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
302     if (!CXI->isVolatile())
303       return CXI->getPointerOperand();
304 
305   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
306     if (!RMWI->isVolatile())
307       return RMWI->getPointerOperand();
308 
309   return nullptr;
310 }
311 static const Value *getBasePointerOfAccessPointerOperand(const Instruction *I,
312                                                          int64_t &BytesOffset,
313                                                          const DataLayout &DL) {
314   const Value *Ptr = getPointerOperand(I);
315   if (!Ptr)
316     return nullptr;
317 
318   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
319                                           /*AllowNonInbounds*/ false);
320 }
321 
322 ChangeStatus AbstractAttribute::update(Attributor &A) {
323   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
324   if (getState().isAtFixpoint())
325     return HasChanged;
326 
327   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
328 
329   HasChanged = updateImpl(A);
330 
331   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
332                     << "\n");
333 
334   return HasChanged;
335 }
336 
337 ChangeStatus
338 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
339                                    const ArrayRef<Attribute> &DeducedAttrs) {
340   Function *ScopeFn = IRP.getAssociatedFunction();
341   IRPosition::Kind PK = IRP.getPositionKind();
342 
343   // In the following some generic code that will manifest attributes in
344   // DeducedAttrs if they improve the current IR. Due to the different
345   // annotation positions we use the underlying AttributeList interface.
346 
347   AttributeList Attrs;
348   switch (PK) {
349   case IRPosition::IRP_INVALID:
350   case IRPosition::IRP_FLOAT:
351     return ChangeStatus::UNCHANGED;
352   case IRPosition::IRP_ARGUMENT:
353   case IRPosition::IRP_FUNCTION:
354   case IRPosition::IRP_RETURNED:
355     Attrs = ScopeFn->getAttributes();
356     break;
357   case IRPosition::IRP_CALL_SITE:
358   case IRPosition::IRP_CALL_SITE_RETURNED:
359   case IRPosition::IRP_CALL_SITE_ARGUMENT:
360     Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
361     break;
362   }
363 
364   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
365   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
366   for (const Attribute &Attr : DeducedAttrs) {
367     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
368       continue;
369 
370     HasChanged = ChangeStatus::CHANGED;
371   }
372 
373   if (HasChanged == ChangeStatus::UNCHANGED)
374     return HasChanged;
375 
376   switch (PK) {
377   case IRPosition::IRP_ARGUMENT:
378   case IRPosition::IRP_FUNCTION:
379   case IRPosition::IRP_RETURNED:
380     ScopeFn->setAttributes(Attrs);
381     break;
382   case IRPosition::IRP_CALL_SITE:
383   case IRPosition::IRP_CALL_SITE_RETURNED:
384   case IRPosition::IRP_CALL_SITE_ARGUMENT:
385     CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
386     break;
387   case IRPosition::IRP_INVALID:
388   case IRPosition::IRP_FLOAT:
389     break;
390   }
391 
392   return HasChanged;
393 }
394 
395 const IRPosition IRPosition::EmptyKey(255);
396 const IRPosition IRPosition::TombstoneKey(256);
397 
398 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
399   IRPositions.emplace_back(IRP);
400 
401   ImmutableCallSite ICS(&IRP.getAnchorValue());
402   switch (IRP.getPositionKind()) {
403   case IRPosition::IRP_INVALID:
404   case IRPosition::IRP_FLOAT:
405   case IRPosition::IRP_FUNCTION:
406     return;
407   case IRPosition::IRP_ARGUMENT:
408   case IRPosition::IRP_RETURNED:
409     IRPositions.emplace_back(
410         IRPosition::function(*IRP.getAssociatedFunction()));
411     return;
412   case IRPosition::IRP_CALL_SITE:
413     assert(ICS && "Expected call site!");
414     // TODO: We need to look at the operand bundles similar to the redirection
415     //       in CallBase.
416     if (!ICS.hasOperandBundles())
417       if (const Function *Callee = ICS.getCalledFunction())
418         IRPositions.emplace_back(IRPosition::function(*Callee));
419     return;
420   case IRPosition::IRP_CALL_SITE_RETURNED:
421     assert(ICS && "Expected call site!");
422     // TODO: We need to look at the operand bundles similar to the redirection
423     //       in CallBase.
424     if (!ICS.hasOperandBundles()) {
425       if (const Function *Callee = ICS.getCalledFunction()) {
426         IRPositions.emplace_back(IRPosition::returned(*Callee));
427         IRPositions.emplace_back(IRPosition::function(*Callee));
428       }
429     }
430     IRPositions.emplace_back(
431         IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
432     return;
433   case IRPosition::IRP_CALL_SITE_ARGUMENT: {
434     int ArgNo = IRP.getArgNo();
435     assert(ICS && ArgNo >= 0 && "Expected call site!");
436     // TODO: We need to look at the operand bundles similar to the redirection
437     //       in CallBase.
438     if (!ICS.hasOperandBundles()) {
439       const Function *Callee = ICS.getCalledFunction();
440       if (Callee && Callee->arg_size() > unsigned(ArgNo))
441         IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
442       if (Callee)
443         IRPositions.emplace_back(IRPosition::function(*Callee));
444     }
445     IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
446     return;
447   }
448   }
449 }
450 
451 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
452                          bool IgnoreSubsumingPositions) const {
453   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
454     for (Attribute::AttrKind AK : AKs)
455       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
456         return true;
457     // The first position returned by the SubsumingPositionIterator is
458     // always the position itself. If we ignore subsuming positions we
459     // are done after the first iteration.
460     if (IgnoreSubsumingPositions)
461       break;
462   }
463   return false;
464 }
465 
466 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
467                           SmallVectorImpl<Attribute> &Attrs) const {
468   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
469     for (Attribute::AttrKind AK : AKs) {
470       const Attribute &Attr = EquivIRP.getAttr(AK);
471       if (Attr.getKindAsEnum() == AK)
472         Attrs.push_back(Attr);
473     }
474 }
475 
476 void IRPosition::verify() {
477   switch (KindOrArgNo) {
478   default:
479     assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
480     assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
481            "Expected call base or argument for positive attribute index!");
482     if (isa<Argument>(AnchorVal)) {
483       assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
484              "Argument number mismatch!");
485       assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
486              "Associated value mismatch!");
487     } else {
488       assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
489              "Call site argument number mismatch!");
490       assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
491                  &getAssociatedValue() &&
492              "Associated value mismatch!");
493     }
494     break;
495   case IRP_INVALID:
496     assert(!AnchorVal && "Expected no value for an invalid position!");
497     break;
498   case IRP_FLOAT:
499     assert((!isa<CallBase>(&getAssociatedValue()) &&
500             !isa<Argument>(&getAssociatedValue())) &&
501            "Expected specialized kind for call base and argument values!");
502     break;
503   case IRP_RETURNED:
504     assert(isa<Function>(AnchorVal) &&
505            "Expected function for a 'returned' position!");
506     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
507     break;
508   case IRP_CALL_SITE_RETURNED:
509     assert((isa<CallBase>(AnchorVal)) &&
510            "Expected call base for 'call site returned' position!");
511     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
512     break;
513   case IRP_CALL_SITE:
514     assert((isa<CallBase>(AnchorVal)) &&
515            "Expected call base for 'call site function' position!");
516     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
517     break;
518   case IRP_FUNCTION:
519     assert(isa<Function>(AnchorVal) &&
520            "Expected function for a 'function' position!");
521     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
522     break;
523   }
524 }
525 
526 namespace {
527 /// Helper function to clamp a state \p S of type \p StateType with the
528 /// information in \p R and indicate/return if \p S did change (as-in update is
529 /// required to be run again).
530 template <typename StateType>
531 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
532   auto Assumed = S.getAssumed();
533   S ^= R;
534   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
535                                    : ChangeStatus::CHANGED;
536 }
537 
538 /// Clamp the information known for all returned values of a function
539 /// (identified by \p QueryingAA) into \p S.
540 template <typename AAType, typename StateType = typename AAType::StateType>
541 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
542                                      StateType &S) {
543   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
544                     << static_cast<const AbstractAttribute &>(QueryingAA)
545                     << " into " << S << "\n");
546 
547   assert((QueryingAA.getIRPosition().getPositionKind() ==
548               IRPosition::IRP_RETURNED ||
549           QueryingAA.getIRPosition().getPositionKind() ==
550               IRPosition::IRP_CALL_SITE_RETURNED) &&
551          "Can only clamp returned value states for a function returned or call "
552          "site returned position!");
553 
554   // Use an optional state as there might not be any return values and we want
555   // to join (IntegerState::operator&) the state of all there are.
556   Optional<StateType> T;
557 
558   // Callback for each possibly returned value.
559   auto CheckReturnValue = [&](Value &RV) -> bool {
560     const IRPosition &RVPos = IRPosition::value(RV);
561     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
562     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
563                       << " @ " << RVPos << "\n");
564     const StateType &AAS = static_cast<const StateType &>(AA.getState());
565     if (T.hasValue())
566       *T &= AAS;
567     else
568       T = AAS;
569     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
570                       << "\n");
571     return T->isValidState();
572   };
573 
574   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
575     S.indicatePessimisticFixpoint();
576   else if (T.hasValue())
577     S ^= *T;
578 }
579 
580 /// Helper class to compose two generic deduction
581 template <typename AAType, typename Base, typename StateType,
582           template <typename...> class F, template <typename...> class G>
583 struct AAComposeTwoGenericDeduction
584     : public F<AAType, G<AAType, Base, StateType>, StateType> {
585   AAComposeTwoGenericDeduction(const IRPosition &IRP)
586       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
587 
588   /// See AbstractAttribute::updateImpl(...).
589   ChangeStatus updateImpl(Attributor &A) override {
590     ChangeStatus ChangedF =
591         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
592     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
593     return ChangedF | ChangedG;
594   }
595 };
596 
597 /// Helper class for generic deduction: return value -> returned position.
598 template <typename AAType, typename Base,
599           typename StateType = typename AAType::StateType>
600 struct AAReturnedFromReturnedValues : public Base {
601   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
602 
603   /// See AbstractAttribute::updateImpl(...).
604   ChangeStatus updateImpl(Attributor &A) override {
605     StateType S;
606     clampReturnedValueStates<AAType, StateType>(A, *this, S);
607     // TODO: If we know we visited all returned values, thus no are assumed
608     // dead, we can take the known information from the state T.
609     return clampStateAndIndicateChange<StateType>(this->getState(), S);
610   }
611 };
612 
613 /// Clamp the information known at all call sites for a given argument
614 /// (identified by \p QueryingAA) into \p S.
615 template <typename AAType, typename StateType = typename AAType::StateType>
616 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
617                                         StateType &S) {
618   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
619                     << static_cast<const AbstractAttribute &>(QueryingAA)
620                     << " into " << S << "\n");
621 
622   assert(QueryingAA.getIRPosition().getPositionKind() ==
623              IRPosition::IRP_ARGUMENT &&
624          "Can only clamp call site argument states for an argument position!");
625 
626   // Use an optional state as there might not be any return values and we want
627   // to join (IntegerState::operator&) the state of all there are.
628   Optional<StateType> T;
629 
630   // The argument number which is also the call site argument number.
631   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
632 
633   auto CallSiteCheck = [&](AbstractCallSite ACS) {
634     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
635     // Check if a coresponding argument was found or if it is on not associated
636     // (which can happen for callback calls).
637     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
638       return false;
639 
640     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
641     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
642                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
643     const StateType &AAS = static_cast<const StateType &>(AA.getState());
644     if (T.hasValue())
645       *T &= AAS;
646     else
647       T = AAS;
648     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
649                       << "\n");
650     return T->isValidState();
651   };
652 
653   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
654     S.indicatePessimisticFixpoint();
655   else if (T.hasValue())
656     S ^= *T;
657 }
658 
659 /// Helper class for generic deduction: call site argument -> argument position.
660 template <typename AAType, typename Base,
661           typename StateType = typename AAType::StateType>
662 struct AAArgumentFromCallSiteArguments : public Base {
663   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
664 
665   /// See AbstractAttribute::updateImpl(...).
666   ChangeStatus updateImpl(Attributor &A) override {
667     StateType S;
668     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
669     // TODO: If we know we visited all incoming values, thus no are assumed
670     // dead, we can take the known information from the state T.
671     return clampStateAndIndicateChange<StateType>(this->getState(), S);
672   }
673 };
674 
675 /// Helper class for generic replication: function returned -> cs returned.
676 template <typename AAType, typename Base,
677           typename StateType = typename AAType::StateType>
678 struct AACallSiteReturnedFromReturned : public Base {
679   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
680 
681   /// See AbstractAttribute::updateImpl(...).
682   ChangeStatus updateImpl(Attributor &A) override {
683     assert(this->getIRPosition().getPositionKind() ==
684                IRPosition::IRP_CALL_SITE_RETURNED &&
685            "Can only wrap function returned positions for call site returned "
686            "positions!");
687     auto &S = this->getState();
688 
689     const Function *AssociatedFunction =
690         this->getIRPosition().getAssociatedFunction();
691     if (!AssociatedFunction)
692       return S.indicatePessimisticFixpoint();
693 
694     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
695     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
696     return clampStateAndIndicateChange(
697         S, static_cast<const typename AAType::StateType &>(AA.getState()));
698   }
699 };
700 
701 /// Helper class for generic deduction using must-be-executed-context
702 /// Base class is required to have `followUse` method.
703 
704 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
705 /// U - Underlying use.
706 /// I - The user of the \p U.
707 /// `followUse` returns true if the value should be tracked transitively.
708 
709 template <typename AAType, typename Base,
710           typename StateType = typename AAType::StateType>
711 struct AAFromMustBeExecutedContext : public Base {
712   AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
713 
714   void initialize(Attributor &A) override {
715     Base::initialize(A);
716     const IRPosition &IRP = this->getIRPosition();
717     Instruction *CtxI = IRP.getCtxI();
718 
719     if (!CtxI)
720       return;
721 
722     for (const Use &U : IRP.getAssociatedValue().uses())
723       Uses.insert(&U);
724   }
725 
726   /// See AbstractAttribute::updateImpl(...).
727   ChangeStatus updateImpl(Attributor &A) override {
728     auto BeforeState = this->getState();
729     auto &S = this->getState();
730     Instruction *CtxI = this->getIRPosition().getCtxI();
731     if (!CtxI)
732       return ChangeStatus::UNCHANGED;
733 
734     MustBeExecutedContextExplorer &Explorer =
735         A.getInfoCache().getMustBeExecutedContextExplorer();
736 
737     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
738     for (unsigned u = 0; u < Uses.size(); ++u) {
739       const Use *U = Uses[u];
740       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
741         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
742         if (Found && Base::followUse(A, U, UserI))
743           for (const Use &Us : UserI->uses())
744             Uses.insert(&Us);
745       }
746     }
747 
748     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
749   }
750 
751 private:
752   /// Container for (transitive) uses of the associated value.
753   SetVector<const Use *> Uses;
754 };
755 
756 template <typename AAType, typename Base,
757           typename StateType = typename AAType::StateType>
758 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
759     AAComposeTwoGenericDeduction<AAType, Base, StateType,
760                                  AAFromMustBeExecutedContext,
761                                  AAArgumentFromCallSiteArguments>;
762 
763 template <typename AAType, typename Base,
764           typename StateType = typename AAType::StateType>
765 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
766     AAComposeTwoGenericDeduction<AAType, Base, StateType,
767                                  AAFromMustBeExecutedContext,
768                                  AACallSiteReturnedFromReturned>;
769 
770 /// -----------------------NoUnwind Function Attribute--------------------------
771 
772 struct AANoUnwindImpl : AANoUnwind {
773   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
774 
775   const std::string getAsStr() const override {
776     return getAssumed() ? "nounwind" : "may-unwind";
777   }
778 
779   /// See AbstractAttribute::updateImpl(...).
780   ChangeStatus updateImpl(Attributor &A) override {
781     auto Opcodes = {
782         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
783         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
784         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
785 
786     auto CheckForNoUnwind = [&](Instruction &I) {
787       if (!I.mayThrow())
788         return true;
789 
790       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
791         const auto &NoUnwindAA =
792             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
793         return NoUnwindAA.isAssumedNoUnwind();
794       }
795       return false;
796     };
797 
798     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
799       return indicatePessimisticFixpoint();
800 
801     return ChangeStatus::UNCHANGED;
802   }
803 };
804 
805 struct AANoUnwindFunction final : public AANoUnwindImpl {
806   AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
807 
808   /// See AbstractAttribute::trackStatistics()
809   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
810 };
811 
812 /// NoUnwind attribute deduction for a call sites.
813 struct AANoUnwindCallSite final : AANoUnwindImpl {
814   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
815 
816   /// See AbstractAttribute::initialize(...).
817   void initialize(Attributor &A) override {
818     AANoUnwindImpl::initialize(A);
819     Function *F = getAssociatedFunction();
820     if (!F)
821       indicatePessimisticFixpoint();
822   }
823 
824   /// See AbstractAttribute::updateImpl(...).
825   ChangeStatus updateImpl(Attributor &A) override {
826     // TODO: Once we have call site specific value information we can provide
827     //       call site specific liveness information and then it makes
828     //       sense to specialize attributes for call sites arguments instead of
829     //       redirecting requests to the callee argument.
830     Function *F = getAssociatedFunction();
831     const IRPosition &FnPos = IRPosition::function(*F);
832     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
833     return clampStateAndIndicateChange(
834         getState(),
835         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
836   }
837 
838   /// See AbstractAttribute::trackStatistics()
839   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
840 };
841 
842 /// --------------------- Function Return Values -------------------------------
843 
844 /// "Attribute" that collects all potential returned values and the return
845 /// instructions that they arise from.
846 ///
847 /// If there is a unique returned value R, the manifest method will:
848 ///   - mark R with the "returned" attribute, if R is an argument.
849 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
850 
851   /// Mapping of values potentially returned by the associated function to the
852   /// return instructions that might return them.
853   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
854 
855   /// Mapping to remember the number of returned values for a call site such
856   /// that we can avoid updates if nothing changed.
857   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
858 
859   /// Set of unresolved calls returned by the associated function.
860   SmallSetVector<CallBase *, 4> UnresolvedCalls;
861 
862   /// State flags
863   ///
864   ///{
865   bool IsFixed = false;
866   bool IsValidState = true;
867   ///}
868 
869 public:
870   AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
871 
872   /// See AbstractAttribute::initialize(...).
873   void initialize(Attributor &A) override {
874     // Reset the state.
875     IsFixed = false;
876     IsValidState = true;
877     ReturnedValues.clear();
878 
879     Function *F = getAssociatedFunction();
880     if (!F) {
881       indicatePessimisticFixpoint();
882       return;
883     }
884 
885     // The map from instruction opcodes to those instructions in the function.
886     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
887 
888     // Look through all arguments, if one is marked as returned we are done.
889     for (Argument &Arg : F->args()) {
890       if (Arg.hasReturnedAttr()) {
891         auto &ReturnInstSet = ReturnedValues[&Arg];
892         for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
893           ReturnInstSet.insert(cast<ReturnInst>(RI));
894 
895         indicateOptimisticFixpoint();
896         return;
897       }
898     }
899 
900     if (!F->hasExactDefinition())
901       indicatePessimisticFixpoint();
902   }
903 
904   /// See AbstractAttribute::manifest(...).
905   ChangeStatus manifest(Attributor &A) override;
906 
907   /// See AbstractAttribute::getState(...).
908   AbstractState &getState() override { return *this; }
909 
910   /// See AbstractAttribute::getState(...).
911   const AbstractState &getState() const override { return *this; }
912 
913   /// See AbstractAttribute::updateImpl(Attributor &A).
914   ChangeStatus updateImpl(Attributor &A) override;
915 
916   llvm::iterator_range<iterator> returned_values() override {
917     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
918   }
919 
920   llvm::iterator_range<const_iterator> returned_values() const override {
921     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
922   }
923 
924   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
925     return UnresolvedCalls;
926   }
927 
928   /// Return the number of potential return values, -1 if unknown.
929   size_t getNumReturnValues() const override {
930     return isValidState() ? ReturnedValues.size() : -1;
931   }
932 
933   /// Return an assumed unique return value if a single candidate is found. If
934   /// there cannot be one, return a nullptr. If it is not clear yet, return the
935   /// Optional::NoneType.
936   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
937 
938   /// See AbstractState::checkForAllReturnedValues(...).
939   bool checkForAllReturnedValuesAndReturnInsts(
940       const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
941           &Pred) const override;
942 
943   /// Pretty print the attribute similar to the IR representation.
944   const std::string getAsStr() const override;
945 
946   /// See AbstractState::isAtFixpoint().
947   bool isAtFixpoint() const override { return IsFixed; }
948 
949   /// See AbstractState::isValidState().
950   bool isValidState() const override { return IsValidState; }
951 
952   /// See AbstractState::indicateOptimisticFixpoint(...).
953   ChangeStatus indicateOptimisticFixpoint() override {
954     IsFixed = true;
955     return ChangeStatus::UNCHANGED;
956   }
957 
958   ChangeStatus indicatePessimisticFixpoint() override {
959     IsFixed = true;
960     IsValidState = false;
961     return ChangeStatus::CHANGED;
962   }
963 };
964 
965 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
966   ChangeStatus Changed = ChangeStatus::UNCHANGED;
967 
968   // Bookkeeping.
969   assert(isValidState());
970   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
971                   "Number of function with known return values");
972 
973   // Check if we have an assumed unique return value that we could manifest.
974   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
975 
976   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
977     return Changed;
978 
979   // Bookkeeping.
980   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
981                   "Number of function with unique return");
982 
983   // Callback to replace the uses of CB with the constant C.
984   auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
985     if (CB.getNumUses() == 0 || CB.isMustTailCall())
986       return ChangeStatus::UNCHANGED;
987     CB.replaceAllUsesWith(&C);
988     return ChangeStatus::CHANGED;
989   };
990 
991   // If the assumed unique return value is an argument, annotate it.
992   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
993     // TODO: This should be handled differently!
994     this->AnchorVal = UniqueRVArg;
995     this->KindOrArgNo = UniqueRVArg->getArgNo();
996     Changed = IRAttribute::manifest(A);
997   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
998     // We can replace the returned value with the unique returned constant.
999     Value &AnchorValue = getAnchorValue();
1000     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1001       for (const Use &U : F->uses())
1002         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1003           if (CB->isCallee(&U)) {
1004             Constant *RVCCast =
1005                 CB->getType() == RVC->getType()
1006                     ? RVC
1007                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1008             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1009           }
1010     } else {
1011       assert(isa<CallBase>(AnchorValue) &&
1012              "Expcected a function or call base anchor!");
1013       Constant *RVCCast =
1014           AnchorValue.getType() == RVC->getType()
1015               ? RVC
1016               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1017       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1018     }
1019     if (Changed == ChangeStatus::CHANGED)
1020       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1021                       "Number of function returns replaced by constant return");
1022   }
1023 
1024   return Changed;
1025 }
1026 
1027 const std::string AAReturnedValuesImpl::getAsStr() const {
1028   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1029          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1030          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1031 }
1032 
1033 Optional<Value *>
1034 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1035   // If checkForAllReturnedValues provides a unique value, ignoring potential
1036   // undef values that can also be present, it is assumed to be the actual
1037   // return value and forwarded to the caller of this method. If there are
1038   // multiple, a nullptr is returned indicating there cannot be a unique
1039   // returned value.
1040   Optional<Value *> UniqueRV;
1041 
1042   auto Pred = [&](Value &RV) -> bool {
1043     // If we found a second returned value and neither the current nor the saved
1044     // one is an undef, there is no unique returned value. Undefs are special
1045     // since we can pretend they have any value.
1046     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1047         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1048       UniqueRV = nullptr;
1049       return false;
1050     }
1051 
1052     // Do not overwrite a value with an undef.
1053     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1054       UniqueRV = &RV;
1055 
1056     return true;
1057   };
1058 
1059   if (!A.checkForAllReturnedValues(Pred, *this))
1060     UniqueRV = nullptr;
1061 
1062   return UniqueRV;
1063 }
1064 
1065 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1066     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1067         &Pred) const {
1068   if (!isValidState())
1069     return false;
1070 
1071   // Check all returned values but ignore call sites as long as we have not
1072   // encountered an overdefined one during an update.
1073   for (auto &It : ReturnedValues) {
1074     Value *RV = It.first;
1075 
1076     CallBase *CB = dyn_cast<CallBase>(RV);
1077     if (CB && !UnresolvedCalls.count(CB))
1078       continue;
1079 
1080     if (!Pred(*RV, It.second))
1081       return false;
1082   }
1083 
1084   return true;
1085 }
1086 
1087 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1088   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1089   bool Changed = false;
1090 
1091   // State used in the value traversals starting in returned values.
1092   struct RVState {
1093     // The map in which we collect return values -> return instrs.
1094     decltype(ReturnedValues) &RetValsMap;
1095     // The flag to indicate a change.
1096     bool &Changed;
1097     // The return instrs we come from.
1098     SmallSetVector<ReturnInst *, 4> RetInsts;
1099   };
1100 
1101   // Callback for a leaf value returned by the associated function.
1102   auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1103     auto Size = RVS.RetValsMap[&Val].size();
1104     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1105     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1106     RVS.Changed |= Inserted;
1107     LLVM_DEBUG({
1108       if (Inserted)
1109         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1110                << " => " << RVS.RetInsts.size() << "\n";
1111     });
1112     return true;
1113   };
1114 
1115   // Helper method to invoke the generic value traversal.
1116   auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1117     IRPosition RetValPos = IRPosition::value(RV);
1118     return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1119                                                             RVS, VisitValueCB);
1120   };
1121 
1122   // Callback for all "return intructions" live in the associated function.
1123   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1124     ReturnInst &Ret = cast<ReturnInst>(I);
1125     RVState RVS({ReturnedValues, Changed, {}});
1126     RVS.RetInsts.insert(&Ret);
1127     return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1128   };
1129 
1130   // Start by discovering returned values from all live returned instructions in
1131   // the associated function.
1132   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1133     return indicatePessimisticFixpoint();
1134 
1135   // Once returned values "directly" present in the code are handled we try to
1136   // resolve returned calls.
1137   decltype(ReturnedValues) NewRVsMap;
1138   for (auto &It : ReturnedValues) {
1139     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1140                       << " by #" << It.second.size() << " RIs\n");
1141     CallBase *CB = dyn_cast<CallBase>(It.first);
1142     if (!CB || UnresolvedCalls.count(CB))
1143       continue;
1144 
1145     if (!CB->getCalledFunction()) {
1146       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1147                         << "\n");
1148       UnresolvedCalls.insert(CB);
1149       continue;
1150     }
1151 
1152     // TODO: use the function scope once we have call site AAReturnedValues.
1153     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1154         *this, IRPosition::function(*CB->getCalledFunction()));
1155     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1156                       << static_cast<const AbstractAttribute &>(RetValAA)
1157                       << "\n");
1158 
1159     // Skip dead ends, thus if we do not know anything about the returned
1160     // call we mark it as unresolved and it will stay that way.
1161     if (!RetValAA.getState().isValidState()) {
1162       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1163                         << "\n");
1164       UnresolvedCalls.insert(CB);
1165       continue;
1166     }
1167 
1168     // Do not try to learn partial information. If the callee has unresolved
1169     // return values we will treat the call as unresolved/opaque.
1170     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1171     if (!RetValAAUnresolvedCalls.empty()) {
1172       UnresolvedCalls.insert(CB);
1173       continue;
1174     }
1175 
1176     // Now check if we can track transitively returned values. If possible, thus
1177     // if all return value can be represented in the current scope, do so.
1178     bool Unresolved = false;
1179     for (auto &RetValAAIt : RetValAA.returned_values()) {
1180       Value *RetVal = RetValAAIt.first;
1181       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1182           isa<Constant>(RetVal))
1183         continue;
1184       // Anything that did not fit in the above categories cannot be resolved,
1185       // mark the call as unresolved.
1186       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1187                            "cannot be translated: "
1188                         << *RetVal << "\n");
1189       UnresolvedCalls.insert(CB);
1190       Unresolved = true;
1191       break;
1192     }
1193 
1194     if (Unresolved)
1195       continue;
1196 
1197     // Now track transitively returned values.
1198     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1199     if (NumRetAA == RetValAA.getNumReturnValues()) {
1200       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1201                            "changed since it was seen last\n");
1202       continue;
1203     }
1204     NumRetAA = RetValAA.getNumReturnValues();
1205 
1206     for (auto &RetValAAIt : RetValAA.returned_values()) {
1207       Value *RetVal = RetValAAIt.first;
1208       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1209         // Arguments are mapped to call site operands and we begin the traversal
1210         // again.
1211         bool Unused = false;
1212         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1213         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1214         continue;
1215       } else if (isa<CallBase>(RetVal)) {
1216         // Call sites are resolved by the callee attribute over time, no need to
1217         // do anything for us.
1218         continue;
1219       } else if (isa<Constant>(RetVal)) {
1220         // Constants are valid everywhere, we can simply take them.
1221         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1222         continue;
1223       }
1224     }
1225   }
1226 
1227   // To avoid modifications to the ReturnedValues map while we iterate over it
1228   // we kept record of potential new entries in a copy map, NewRVsMap.
1229   for (auto &It : NewRVsMap) {
1230     assert(!It.second.empty() && "Entry does not add anything.");
1231     auto &ReturnInsts = ReturnedValues[It.first];
1232     for (ReturnInst *RI : It.second)
1233       if (ReturnInsts.insert(RI)) {
1234         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1235                           << *It.first << " => " << *RI << "\n");
1236         Changed = true;
1237       }
1238   }
1239 
1240   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1241   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1242 }
1243 
1244 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1245   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1246 
1247   /// See AbstractAttribute::trackStatistics()
1248   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1249 };
1250 
1251 /// Returned values information for a call sites.
1252 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1253   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1254 
1255   /// See AbstractAttribute::initialize(...).
1256   void initialize(Attributor &A) override {
1257     // TODO: Once we have call site specific value information we can provide
1258     //       call site specific liveness information and then it makes
1259     //       sense to specialize attributes for call sites instead of
1260     //       redirecting requests to the callee.
1261     llvm_unreachable("Abstract attributes for returned values are not "
1262                      "supported for call sites yet!");
1263   }
1264 
1265   /// See AbstractAttribute::updateImpl(...).
1266   ChangeStatus updateImpl(Attributor &A) override {
1267     return indicatePessimisticFixpoint();
1268   }
1269 
1270   /// See AbstractAttribute::trackStatistics()
1271   void trackStatistics() const override {}
1272 };
1273 
1274 /// ------------------------ NoSync Function Attribute -------------------------
1275 
1276 struct AANoSyncImpl : AANoSync {
1277   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1278 
1279   const std::string getAsStr() const override {
1280     return getAssumed() ? "nosync" : "may-sync";
1281   }
1282 
1283   /// See AbstractAttribute::updateImpl(...).
1284   ChangeStatus updateImpl(Attributor &A) override;
1285 
1286   /// Helper function used to determine whether an instruction is non-relaxed
1287   /// atomic. In other words, if an atomic instruction does not have unordered
1288   /// or monotonic ordering
1289   static bool isNonRelaxedAtomic(Instruction *I);
1290 
1291   /// Helper function used to determine whether an instruction is volatile.
1292   static bool isVolatile(Instruction *I);
1293 
1294   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1295   /// memset).
1296   static bool isNoSyncIntrinsic(Instruction *I);
1297 };
1298 
1299 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1300   if (!I->isAtomic())
1301     return false;
1302 
1303   AtomicOrdering Ordering;
1304   switch (I->getOpcode()) {
1305   case Instruction::AtomicRMW:
1306     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1307     break;
1308   case Instruction::Store:
1309     Ordering = cast<StoreInst>(I)->getOrdering();
1310     break;
1311   case Instruction::Load:
1312     Ordering = cast<LoadInst>(I)->getOrdering();
1313     break;
1314   case Instruction::Fence: {
1315     auto *FI = cast<FenceInst>(I);
1316     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1317       return false;
1318     Ordering = FI->getOrdering();
1319     break;
1320   }
1321   case Instruction::AtomicCmpXchg: {
1322     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1323     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1324     // Only if both are relaxed, than it can be treated as relaxed.
1325     // Otherwise it is non-relaxed.
1326     if (Success != AtomicOrdering::Unordered &&
1327         Success != AtomicOrdering::Monotonic)
1328       return true;
1329     if (Failure != AtomicOrdering::Unordered &&
1330         Failure != AtomicOrdering::Monotonic)
1331       return true;
1332     return false;
1333   }
1334   default:
1335     llvm_unreachable(
1336         "New atomic operations need to be known in the attributor.");
1337   }
1338 
1339   // Relaxed.
1340   if (Ordering == AtomicOrdering::Unordered ||
1341       Ordering == AtomicOrdering::Monotonic)
1342     return false;
1343   return true;
1344 }
1345 
1346 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1347 /// FIXME: We should ipmrove the handling of intrinsics.
1348 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1349   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1350     switch (II->getIntrinsicID()) {
1351     /// Element wise atomic memory intrinsics are can only be unordered,
1352     /// therefore nosync.
1353     case Intrinsic::memset_element_unordered_atomic:
1354     case Intrinsic::memmove_element_unordered_atomic:
1355     case Intrinsic::memcpy_element_unordered_atomic:
1356       return true;
1357     case Intrinsic::memset:
1358     case Intrinsic::memmove:
1359     case Intrinsic::memcpy:
1360       if (!cast<MemIntrinsic>(II)->isVolatile())
1361         return true;
1362       return false;
1363     default:
1364       return false;
1365     }
1366   }
1367   return false;
1368 }
1369 
1370 bool AANoSyncImpl::isVolatile(Instruction *I) {
1371   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1372          "Calls should not be checked here");
1373 
1374   switch (I->getOpcode()) {
1375   case Instruction::AtomicRMW:
1376     return cast<AtomicRMWInst>(I)->isVolatile();
1377   case Instruction::Store:
1378     return cast<StoreInst>(I)->isVolatile();
1379   case Instruction::Load:
1380     return cast<LoadInst>(I)->isVolatile();
1381   case Instruction::AtomicCmpXchg:
1382     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1383   default:
1384     return false;
1385   }
1386 }
1387 
1388 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1389 
1390   auto CheckRWInstForNoSync = [&](Instruction &I) {
1391     /// We are looking for volatile instructions or Non-Relaxed atomics.
1392     /// FIXME: We should improve the handling of intrinsics.
1393 
1394     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1395       return true;
1396 
1397     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1398       if (ICS.hasFnAttr(Attribute::NoSync))
1399         return true;
1400 
1401       const auto &NoSyncAA =
1402           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1403       if (NoSyncAA.isAssumedNoSync())
1404         return true;
1405       return false;
1406     }
1407 
1408     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1409       return true;
1410 
1411     return false;
1412   };
1413 
1414   auto CheckForNoSync = [&](Instruction &I) {
1415     // At this point we handled all read/write effects and they are all
1416     // nosync, so they can be skipped.
1417     if (I.mayReadOrWriteMemory())
1418       return true;
1419 
1420     // non-convergent and readnone imply nosync.
1421     return !ImmutableCallSite(&I).isConvergent();
1422   };
1423 
1424   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1425       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1426     return indicatePessimisticFixpoint();
1427 
1428   return ChangeStatus::UNCHANGED;
1429 }
1430 
1431 struct AANoSyncFunction final : public AANoSyncImpl {
1432   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1433 
1434   /// See AbstractAttribute::trackStatistics()
1435   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1436 };
1437 
1438 /// NoSync attribute deduction for a call sites.
1439 struct AANoSyncCallSite final : AANoSyncImpl {
1440   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1441 
1442   /// See AbstractAttribute::initialize(...).
1443   void initialize(Attributor &A) override {
1444     AANoSyncImpl::initialize(A);
1445     Function *F = getAssociatedFunction();
1446     if (!F)
1447       indicatePessimisticFixpoint();
1448   }
1449 
1450   /// See AbstractAttribute::updateImpl(...).
1451   ChangeStatus updateImpl(Attributor &A) override {
1452     // TODO: Once we have call site specific value information we can provide
1453     //       call site specific liveness information and then it makes
1454     //       sense to specialize attributes for call sites arguments instead of
1455     //       redirecting requests to the callee argument.
1456     Function *F = getAssociatedFunction();
1457     const IRPosition &FnPos = IRPosition::function(*F);
1458     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1459     return clampStateAndIndicateChange(
1460         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1461   }
1462 
1463   /// See AbstractAttribute::trackStatistics()
1464   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1465 };
1466 
1467 /// ------------------------ No-Free Attributes ----------------------------
1468 
1469 struct AANoFreeImpl : public AANoFree {
1470   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1471 
1472   /// See AbstractAttribute::updateImpl(...).
1473   ChangeStatus updateImpl(Attributor &A) override {
1474     auto CheckForNoFree = [&](Instruction &I) {
1475       ImmutableCallSite ICS(&I);
1476       if (ICS.hasFnAttr(Attribute::NoFree))
1477         return true;
1478 
1479       const auto &NoFreeAA =
1480           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1481       return NoFreeAA.isAssumedNoFree();
1482     };
1483 
1484     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1485       return indicatePessimisticFixpoint();
1486     return ChangeStatus::UNCHANGED;
1487   }
1488 
1489   /// See AbstractAttribute::getAsStr().
1490   const std::string getAsStr() const override {
1491     return getAssumed() ? "nofree" : "may-free";
1492   }
1493 };
1494 
1495 struct AANoFreeFunction final : public AANoFreeImpl {
1496   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1497 
1498   /// See AbstractAttribute::trackStatistics()
1499   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1500 };
1501 
1502 /// NoFree attribute deduction for a call sites.
1503 struct AANoFreeCallSite final : AANoFreeImpl {
1504   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1505 
1506   /// See AbstractAttribute::initialize(...).
1507   void initialize(Attributor &A) override {
1508     AANoFreeImpl::initialize(A);
1509     Function *F = getAssociatedFunction();
1510     if (!F)
1511       indicatePessimisticFixpoint();
1512   }
1513 
1514   /// See AbstractAttribute::updateImpl(...).
1515   ChangeStatus updateImpl(Attributor &A) override {
1516     // TODO: Once we have call site specific value information we can provide
1517     //       call site specific liveness information and then it makes
1518     //       sense to specialize attributes for call sites arguments instead of
1519     //       redirecting requests to the callee argument.
1520     Function *F = getAssociatedFunction();
1521     const IRPosition &FnPos = IRPosition::function(*F);
1522     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1523     return clampStateAndIndicateChange(
1524         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1525   }
1526 
1527   /// See AbstractAttribute::trackStatistics()
1528   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1529 };
1530 
1531 /// NoFree attribute for floating values.
1532 struct AANoFreeFloating : AANoFreeImpl {
1533   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1534 
1535   /// See AbstractAttribute::trackStatistics()
1536   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1537 
1538   /// See Abstract Attribute::updateImpl(...).
1539   ChangeStatus updateImpl(Attributor &A) override {
1540     const IRPosition &IRP = getIRPosition();
1541     Function *F = IRP.getAnchorScope();
1542 
1543     const AAIsDead &LivenessAA =
1544         A.getAAFor<AAIsDead>(*this, IRPosition::function(*F));
1545 
1546     const auto &NoFreeAA =
1547         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1548     if (NoFreeAA.isAssumedNoFree())
1549       return ChangeStatus::UNCHANGED;
1550 
1551     SmallPtrSet<const Use *, 8> Visited;
1552     SmallVector<const Use *, 8> Worklist;
1553 
1554     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1555     for (Use &U : AssociatedValue.uses())
1556       Worklist.push_back(&U);
1557 
1558     while (!Worklist.empty()) {
1559       const Use *U = Worklist.pop_back_val();
1560       if (!Visited.insert(U).second)
1561         continue;
1562 
1563       auto *UserI = U->getUser();
1564       if (!UserI)
1565         continue;
1566 
1567       if (LivenessAA.isAssumedDead(cast<Instruction>(UserI)))
1568         continue;
1569 
1570       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1571         if (CB->isBundleOperand(U))
1572           return indicatePessimisticFixpoint();
1573         if (!CB->isArgOperand(U))
1574           continue;
1575 
1576         unsigned ArgNo = CB->getArgOperandNo(U);
1577 
1578         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1579             *this, IRPosition::callsite_argument(*CB, ArgNo));
1580 
1581         if (NoFreeArg.isAssumedNoFree())
1582           continue;
1583 
1584         return indicatePessimisticFixpoint();
1585       }
1586 
1587       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1588           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1589         for (Use &U : UserI->uses())
1590           Worklist.push_back(&U);
1591         continue;
1592       }
1593 
1594       // Unknown user.
1595       return indicatePessimisticFixpoint();
1596     }
1597     return ChangeStatus::UNCHANGED;
1598   }
1599 };
1600 
1601 /// NoFree attribute for a call site argument.
1602 struct AANoFreeArgument final : AANoFreeFloating {
1603   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1604 
1605   /// See AbstractAttribute::trackStatistics()
1606   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1607 };
1608 
1609 /// NoFree attribute for call site arguments.
1610 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1611   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1612 
1613   /// See AbstractAttribute::updateImpl(...).
1614   ChangeStatus updateImpl(Attributor &A) override {
1615     // TODO: Once we have call site specific value information we can provide
1616     //       call site specific liveness information and then it makes
1617     //       sense to specialize attributes for call sites arguments instead of
1618     //       redirecting requests to the callee argument.
1619     Argument *Arg = getAssociatedArgument();
1620     if (!Arg)
1621       return indicatePessimisticFixpoint();
1622     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1623     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1624     return clampStateAndIndicateChange(
1625         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1626   }
1627 
1628   /// See AbstractAttribute::trackStatistics()
1629   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1630 };
1631 
1632 /// NoFree attribute for function return value.
1633 struct AANoFreeReturned final : AANoFreeFloating {
1634   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1635     llvm_unreachable("NoFree is not applicable to function returns!");
1636   }
1637 
1638   /// See AbstractAttribute::initialize(...).
1639   void initialize(Attributor &A) override {
1640     llvm_unreachable("NoFree is not applicable to function returns!");
1641   }
1642 
1643   /// See AbstractAttribute::updateImpl(...).
1644   ChangeStatus updateImpl(Attributor &A) override {
1645     llvm_unreachable("NoFree is not applicable to function returns!");
1646   }
1647 
1648   /// See AbstractAttribute::trackStatistics()
1649   void trackStatistics() const override {}
1650 };
1651 
1652 /// NoFree attribute deduction for a call site return value.
1653 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1654   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1655 
1656   ChangeStatus manifest(Attributor &A) override {
1657     return ChangeStatus::UNCHANGED;
1658   }
1659   /// See AbstractAttribute::trackStatistics()
1660   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1661 };
1662 
1663 /// ------------------------ NonNull Argument Attribute ------------------------
1664 static int64_t getKnownNonNullAndDerefBytesForUse(
1665     Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
1666     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1667   TrackUse = false;
1668 
1669   const Value *UseV = U->get();
1670   if (!UseV->getType()->isPointerTy())
1671     return 0;
1672 
1673   Type *PtrTy = UseV->getType();
1674   const Function *F = I->getFunction();
1675   bool NullPointerIsDefined =
1676       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1677   const DataLayout &DL = A.getInfoCache().getDL();
1678   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1679     if (ICS.isBundleOperand(U))
1680       return 0;
1681 
1682     if (ICS.isCallee(U)) {
1683       IsNonNull |= !NullPointerIsDefined;
1684       return 0;
1685     }
1686 
1687     unsigned ArgNo = ICS.getArgumentNo(U);
1688     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1689     // As long as we only use known information there is no need to track
1690     // dependences here.
1691     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1692                                                   /* TrackDependence */ false);
1693     IsNonNull |= DerefAA.isKnownNonNull();
1694     return DerefAA.getKnownDereferenceableBytes();
1695   }
1696 
1697   // We need to follow common pointer manipulation uses to the accesses they
1698   // feed into. We can try to be smart to avoid looking through things we do not
1699   // like for now, e.g., non-inbounds GEPs.
1700   if (isa<CastInst>(I)) {
1701     TrackUse = true;
1702     return 0;
1703   }
1704   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1705     if (GEP->hasAllZeroIndices() ||
1706         (GEP->isInBounds() && GEP->hasAllConstantIndices())) {
1707       TrackUse = true;
1708       return 0;
1709     }
1710 
1711   int64_t Offset;
1712   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1713     if (Base == &AssociatedValue && getPointerOperand(I) == UseV) {
1714       int64_t DerefBytes =
1715           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1716 
1717       IsNonNull |= !NullPointerIsDefined;
1718       return std::max(int64_t(0), DerefBytes);
1719     }
1720   }
1721   if (const Value *Base =
1722           GetPointerBaseWithConstantOffset(UseV, Offset, DL,
1723                                            /*AllowNonInbounds*/ false)) {
1724     if (Base == &AssociatedValue) {
1725       // As long as we only use known information there is no need to track
1726       // dependences here.
1727       auto &DerefAA = A.getAAFor<AADereferenceable>(
1728           QueryingAA, IRPosition::value(*Base), /* TrackDependence */ false);
1729       IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
1730       IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
1731       int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
1732       return std::max(int64_t(0), DerefBytes - std::max(int64_t(0), Offset));
1733     }
1734   }
1735 
1736   return 0;
1737 }
1738 
1739 struct AANonNullImpl : AANonNull {
1740   AANonNullImpl(const IRPosition &IRP)
1741       : AANonNull(IRP),
1742         NullIsDefined(NullPointerIsDefined(
1743             getAnchorScope(),
1744             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1745 
1746   /// See AbstractAttribute::initialize(...).
1747   void initialize(Attributor &A) override {
1748     if (!NullIsDefined &&
1749         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1750       indicateOptimisticFixpoint();
1751     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1752       indicatePessimisticFixpoint();
1753     else
1754       AANonNull::initialize(A);
1755   }
1756 
1757   /// See AAFromMustBeExecutedContext
1758   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1759     bool IsNonNull = false;
1760     bool TrackUse = false;
1761     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1762                                        IsNonNull, TrackUse);
1763     setKnown(IsNonNull);
1764     return TrackUse;
1765   }
1766 
1767   /// See AbstractAttribute::getAsStr().
1768   const std::string getAsStr() const override {
1769     return getAssumed() ? "nonnull" : "may-null";
1770   }
1771 
1772   /// Flag to determine if the underlying value can be null and still allow
1773   /// valid accesses.
1774   const bool NullIsDefined;
1775 };
1776 
1777 /// NonNull attribute for a floating value.
1778 struct AANonNullFloating
1779     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1780   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1781   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1782 
1783   /// See AbstractAttribute::updateImpl(...).
1784   ChangeStatus updateImpl(Attributor &A) override {
1785     ChangeStatus Change = Base::updateImpl(A);
1786     if (isKnownNonNull())
1787       return Change;
1788 
1789     if (!NullIsDefined) {
1790       const auto &DerefAA =
1791           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1792       if (DerefAA.getAssumedDereferenceableBytes())
1793         return Change;
1794     }
1795 
1796     const DataLayout &DL = A.getDataLayout();
1797 
1798     DominatorTree *DT = nullptr;
1799     InformationCache &InfoCache = A.getInfoCache();
1800     if (const Function *Fn = getAnchorScope())
1801       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1802 
1803     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1804                             bool Stripped) -> bool {
1805       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1806       if (!Stripped && this == &AA) {
1807         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1808           T.indicatePessimisticFixpoint();
1809       } else {
1810         // Use abstract attribute information.
1811         const AANonNull::StateType &NS =
1812             static_cast<const AANonNull::StateType &>(AA.getState());
1813         T ^= NS;
1814       }
1815       return T.isValidState();
1816     };
1817 
1818     StateType T;
1819     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1820                                                      T, VisitValueCB))
1821       return indicatePessimisticFixpoint();
1822 
1823     return clampStateAndIndicateChange(getState(), T);
1824   }
1825 
1826   /// See AbstractAttribute::trackStatistics()
1827   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1828 };
1829 
1830 /// NonNull attribute for function return value.
1831 struct AANonNullReturned final
1832     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1833   AANonNullReturned(const IRPosition &IRP)
1834       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1835 
1836   /// See AbstractAttribute::trackStatistics()
1837   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1838 };
1839 
1840 /// NonNull attribute for function argument.
1841 struct AANonNullArgument final
1842     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1843                                                               AANonNullImpl> {
1844   AANonNullArgument(const IRPosition &IRP)
1845       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1846                                                                 AANonNullImpl>(
1847             IRP) {}
1848 
1849   /// See AbstractAttribute::trackStatistics()
1850   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1851 };
1852 
1853 struct AANonNullCallSiteArgument final : AANonNullFloating {
1854   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1855 
1856   /// See AbstractAttribute::trackStatistics()
1857   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1858 };
1859 
1860 /// NonNull attribute for a call site return position.
1861 struct AANonNullCallSiteReturned final
1862     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1863                                                              AANonNullImpl> {
1864   AANonNullCallSiteReturned(const IRPosition &IRP)
1865       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1866                                                                AANonNullImpl>(
1867             IRP) {}
1868 
1869   /// See AbstractAttribute::trackStatistics()
1870   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1871 };
1872 
1873 /// ------------------------ No-Recurse Attributes ----------------------------
1874 
1875 struct AANoRecurseImpl : public AANoRecurse {
1876   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1877 
1878   /// See AbstractAttribute::getAsStr()
1879   const std::string getAsStr() const override {
1880     return getAssumed() ? "norecurse" : "may-recurse";
1881   }
1882 };
1883 
1884 struct AANoRecurseFunction final : AANoRecurseImpl {
1885   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1886 
1887   /// See AbstractAttribute::initialize(...).
1888   void initialize(Attributor &A) override {
1889     AANoRecurseImpl::initialize(A);
1890     if (const Function *F = getAnchorScope())
1891       if (A.getInfoCache().getSccSize(*F) == 1)
1892         return;
1893     indicatePessimisticFixpoint();
1894   }
1895 
1896   /// See AbstractAttribute::updateImpl(...).
1897   ChangeStatus updateImpl(Attributor &A) override {
1898 
1899     auto CheckForNoRecurse = [&](Instruction &I) {
1900       ImmutableCallSite ICS(&I);
1901       if (ICS.hasFnAttr(Attribute::NoRecurse))
1902         return true;
1903 
1904       const auto &NoRecurseAA =
1905           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1906       if (!NoRecurseAA.isAssumedNoRecurse())
1907         return false;
1908 
1909       // Recursion to the same function
1910       if (ICS.getCalledFunction() == getAnchorScope())
1911         return false;
1912 
1913       return true;
1914     };
1915 
1916     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1917       return indicatePessimisticFixpoint();
1918     return ChangeStatus::UNCHANGED;
1919   }
1920 
1921   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1922 };
1923 
1924 /// NoRecurse attribute deduction for a call sites.
1925 struct AANoRecurseCallSite final : AANoRecurseImpl {
1926   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1927 
1928   /// See AbstractAttribute::initialize(...).
1929   void initialize(Attributor &A) override {
1930     AANoRecurseImpl::initialize(A);
1931     Function *F = getAssociatedFunction();
1932     if (!F)
1933       indicatePessimisticFixpoint();
1934   }
1935 
1936   /// See AbstractAttribute::updateImpl(...).
1937   ChangeStatus updateImpl(Attributor &A) override {
1938     // TODO: Once we have call site specific value information we can provide
1939     //       call site specific liveness information and then it makes
1940     //       sense to specialize attributes for call sites arguments instead of
1941     //       redirecting requests to the callee argument.
1942     Function *F = getAssociatedFunction();
1943     const IRPosition &FnPos = IRPosition::function(*F);
1944     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1945     return clampStateAndIndicateChange(
1946         getState(),
1947         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1948   }
1949 
1950   /// See AbstractAttribute::trackStatistics()
1951   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1952 };
1953 
1954 /// ------------------------ Will-Return Attributes ----------------------------
1955 
1956 // Helper function that checks whether a function has any cycle.
1957 // TODO: Replace with more efficent code
1958 static bool containsCycle(Function &F) {
1959   SmallPtrSet<BasicBlock *, 32> Visited;
1960 
1961   // Traverse BB by dfs and check whether successor is already visited.
1962   for (BasicBlock *BB : depth_first(&F)) {
1963     Visited.insert(BB);
1964     for (auto *SuccBB : successors(BB)) {
1965       if (Visited.count(SuccBB))
1966         return true;
1967     }
1968   }
1969   return false;
1970 }
1971 
1972 // Helper function that checks the function have a loop which might become an
1973 // endless loop
1974 // FIXME: Any cycle is regarded as endless loop for now.
1975 //        We have to allow some patterns.
1976 static bool containsPossiblyEndlessLoop(Function *F) {
1977   return !F || !F->hasExactDefinition() || containsCycle(*F);
1978 }
1979 
1980 struct AAWillReturnImpl : public AAWillReturn {
1981   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
1982 
1983   /// See AbstractAttribute::initialize(...).
1984   void initialize(Attributor &A) override {
1985     AAWillReturn::initialize(A);
1986 
1987     Function *F = getAssociatedFunction();
1988     if (containsPossiblyEndlessLoop(F))
1989       indicatePessimisticFixpoint();
1990   }
1991 
1992   /// See AbstractAttribute::updateImpl(...).
1993   ChangeStatus updateImpl(Attributor &A) override {
1994     auto CheckForWillReturn = [&](Instruction &I) {
1995       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
1996       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
1997       if (WillReturnAA.isKnownWillReturn())
1998         return true;
1999       if (!WillReturnAA.isAssumedWillReturn())
2000         return false;
2001       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2002       return NoRecurseAA.isAssumedNoRecurse();
2003     };
2004 
2005     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2006       return indicatePessimisticFixpoint();
2007 
2008     return ChangeStatus::UNCHANGED;
2009   }
2010 
2011   /// See AbstractAttribute::getAsStr()
2012   const std::string getAsStr() const override {
2013     return getAssumed() ? "willreturn" : "may-noreturn";
2014   }
2015 };
2016 
2017 struct AAWillReturnFunction final : AAWillReturnImpl {
2018   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2019 
2020   /// See AbstractAttribute::trackStatistics()
2021   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2022 };
2023 
2024 /// WillReturn attribute deduction for a call sites.
2025 struct AAWillReturnCallSite final : AAWillReturnImpl {
2026   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2027 
2028   /// See AbstractAttribute::initialize(...).
2029   void initialize(Attributor &A) override {
2030     AAWillReturnImpl::initialize(A);
2031     Function *F = getAssociatedFunction();
2032     if (!F)
2033       indicatePessimisticFixpoint();
2034   }
2035 
2036   /// See AbstractAttribute::updateImpl(...).
2037   ChangeStatus updateImpl(Attributor &A) override {
2038     // TODO: Once we have call site specific value information we can provide
2039     //       call site specific liveness information and then it makes
2040     //       sense to specialize attributes for call sites arguments instead of
2041     //       redirecting requests to the callee argument.
2042     Function *F = getAssociatedFunction();
2043     const IRPosition &FnPos = IRPosition::function(*F);
2044     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2045     return clampStateAndIndicateChange(
2046         getState(),
2047         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2048   }
2049 
2050   /// See AbstractAttribute::trackStatistics()
2051   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2052 };
2053 
2054 /// -------------------AAReachability Attribute--------------------------
2055 
2056 struct AAReachabilityImpl : AAReachability {
2057   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2058 
2059   const std::string getAsStr() const override {
2060     // TODO: Return the number of reachable queries.
2061     return "reachable";
2062   }
2063 
2064   /// See AbstractAttribute::initialize(...).
2065   void initialize(Attributor &A) override {
2066     indicatePessimisticFixpoint();
2067   }
2068 
2069   /// See AbstractAttribute::updateImpl(...).
2070   ChangeStatus updateImpl(Attributor &A) override {
2071     return indicatePessimisticFixpoint();
2072   }
2073 };
2074 
2075 struct AAReachabilityFunction final : public AAReachabilityImpl {
2076   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2077 
2078   /// See AbstractAttribute::trackStatistics()
2079   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2080 };
2081 
2082 /// ------------------------ NoAlias Argument Attribute ------------------------
2083 
2084 struct AANoAliasImpl : AANoAlias {
2085   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2086 
2087   const std::string getAsStr() const override {
2088     return getAssumed() ? "noalias" : "may-alias";
2089   }
2090 };
2091 
2092 /// NoAlias attribute for a floating value.
2093 struct AANoAliasFloating final : AANoAliasImpl {
2094   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2095 
2096   /// See AbstractAttribute::initialize(...).
2097   void initialize(Attributor &A) override {
2098     AANoAliasImpl::initialize(A);
2099     Value &Val = getAssociatedValue();
2100     if (isa<AllocaInst>(Val))
2101       indicateOptimisticFixpoint();
2102     if (isa<ConstantPointerNull>(Val) &&
2103         Val.getType()->getPointerAddressSpace() == 0)
2104       indicateOptimisticFixpoint();
2105   }
2106 
2107   /// See AbstractAttribute::updateImpl(...).
2108   ChangeStatus updateImpl(Attributor &A) override {
2109     // TODO: Implement this.
2110     return indicatePessimisticFixpoint();
2111   }
2112 
2113   /// See AbstractAttribute::trackStatistics()
2114   void trackStatistics() const override {
2115     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2116   }
2117 };
2118 
2119 /// NoAlias attribute for an argument.
2120 struct AANoAliasArgument final
2121     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2122   AANoAliasArgument(const IRPosition &IRP)
2123       : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}
2124 
2125   /// See AbstractAttribute::trackStatistics()
2126   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2127 };
2128 
2129 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2130   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2131 
2132   /// See AbstractAttribute::initialize(...).
2133   void initialize(Attributor &A) override {
2134     // See callsite argument attribute and callee argument attribute.
2135     ImmutableCallSite ICS(&getAnchorValue());
2136     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2137       indicateOptimisticFixpoint();
2138   }
2139 
2140   /// See AbstractAttribute::updateImpl(...).
2141   ChangeStatus updateImpl(Attributor &A) override {
2142     // We can deduce "noalias" if the following conditions hold.
2143     // (i)   Associated value is assumed to be noalias in the definition.
2144     // (ii)  Associated value is assumed to be no-capture in all the uses
2145     //       possibly executed before this callsite.
2146     // (iii) There is no other pointer argument which could alias with the
2147     //       value.
2148 
2149     const Value &V = getAssociatedValue();
2150     const IRPosition IRP = IRPosition::value(V);
2151 
2152     // (i) Check whether noalias holds in the definition.
2153 
2154     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
2155 
2156     if (!NoAliasAA.isAssumedNoAlias())
2157       return indicatePessimisticFixpoint();
2158 
2159     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
2160                       << " is assumed NoAlias in the definition\n");
2161 
2162     // (ii) Check whether the value is captured in the scope using AANoCapture.
2163     //      FIXME: This is conservative though, it is better to look at CFG and
2164     //             check only uses possibly executed before this callsite.
2165 
2166     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2167     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2168       LLVM_DEBUG(
2169           dbgs() << "[Attributor][AANoAliasCSArg] " << V
2170                  << " cannot be noalias as it is potentially captured\n");
2171       return indicatePessimisticFixpoint();
2172     }
2173 
2174     // (iii) Check there is no other pointer argument which could alias with the
2175     // value.
2176     ImmutableCallSite ICS(&getAnchorValue());
2177     for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2178       if (getArgNo() == (int)i)
2179         continue;
2180       const Value *ArgOp = ICS.getArgOperand(i);
2181       if (!ArgOp->getType()->isPointerTy())
2182         continue;
2183 
2184       if (const Function *F = getAnchorScope()) {
2185         if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2186           bool IsAliasing = AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2187           LLVM_DEBUG(dbgs()
2188                      << "[Attributor][NoAliasCSArg] Check alias between "
2189                         "callsite arguments "
2190                      << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
2191                      << getAssociatedValue() << " " << *ArgOp << " => "
2192                      << (IsAliasing ? "" : "no-") << "alias \n");
2193 
2194           if (IsAliasing)
2195             continue;
2196         }
2197       }
2198       return indicatePessimisticFixpoint();
2199     }
2200 
2201     return ChangeStatus::UNCHANGED;
2202   }
2203 
2204   /// See AbstractAttribute::trackStatistics()
2205   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2206 };
2207 
2208 /// NoAlias attribute for function return value.
2209 struct AANoAliasReturned final : AANoAliasImpl {
2210   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2211 
2212   /// See AbstractAttribute::updateImpl(...).
2213   virtual ChangeStatus updateImpl(Attributor &A) override {
2214 
2215     auto CheckReturnValue = [&](Value &RV) -> bool {
2216       if (Constant *C = dyn_cast<Constant>(&RV))
2217         if (C->isNullValue() || isa<UndefValue>(C))
2218           return true;
2219 
2220       /// For now, we can only deduce noalias if we have call sites.
2221       /// FIXME: add more support.
2222       ImmutableCallSite ICS(&RV);
2223       if (!ICS)
2224         return false;
2225 
2226       const IRPosition &RVPos = IRPosition::value(RV);
2227       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2228       if (!NoAliasAA.isAssumedNoAlias())
2229         return false;
2230 
2231       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2232       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2233     };
2234 
2235     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2236       return indicatePessimisticFixpoint();
2237 
2238     return ChangeStatus::UNCHANGED;
2239   }
2240 
2241   /// See AbstractAttribute::trackStatistics()
2242   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2243 };
2244 
2245 /// NoAlias attribute deduction for a call site return value.
2246 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2247   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2248 
2249   /// See AbstractAttribute::initialize(...).
2250   void initialize(Attributor &A) override {
2251     AANoAliasImpl::initialize(A);
2252     Function *F = getAssociatedFunction();
2253     if (!F)
2254       indicatePessimisticFixpoint();
2255   }
2256 
2257   /// See AbstractAttribute::updateImpl(...).
2258   ChangeStatus updateImpl(Attributor &A) override {
2259     // TODO: Once we have call site specific value information we can provide
2260     //       call site specific liveness information and then it makes
2261     //       sense to specialize attributes for call sites arguments instead of
2262     //       redirecting requests to the callee argument.
2263     Function *F = getAssociatedFunction();
2264     const IRPosition &FnPos = IRPosition::returned(*F);
2265     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2266     return clampStateAndIndicateChange(
2267         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2268   }
2269 
2270   /// See AbstractAttribute::trackStatistics()
2271   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2272 };
2273 
2274 /// -------------------AAIsDead Function Attribute-----------------------
2275 
2276 struct AAIsDeadValueImpl : public AAIsDead {
2277   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2278 
2279   /// See AAIsDead::isAssumedDead().
2280   bool isAssumedDead() const override { return getAssumed(); }
2281 
2282   /// See AAIsDead::isAssumedDead(BasicBlock *).
2283   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2284 
2285   /// See AAIsDead::isKnownDead(BasicBlock *).
2286   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2287 
2288   /// See AAIsDead::isAssumedDead(Instruction *I).
2289   bool isAssumedDead(const Instruction *I) const override {
2290     return I == getCtxI() && isAssumedDead();
2291   }
2292 
2293   /// See AAIsDead::isKnownDead(Instruction *I).
2294   bool isKnownDead(const Instruction *I) const override {
2295     return I == getCtxI() && getKnown();
2296   }
2297 
2298   /// See AbstractAttribute::getAsStr().
2299   const std::string getAsStr() const override {
2300     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2301   }
2302 };
2303 
2304 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2305   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2306 
2307   /// See AbstractAttribute::initialize(...).
2308   void initialize(Attributor &A) override {
2309     if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2310       if (!wouldInstructionBeTriviallyDead(I))
2311         indicatePessimisticFixpoint();
2312     if (isa<UndefValue>(getAssociatedValue()))
2313       indicatePessimisticFixpoint();
2314   }
2315 
2316   /// See AbstractAttribute::updateImpl(...).
2317   ChangeStatus updateImpl(Attributor &A) override {
2318     auto UsePred = [&](const Use &U, bool &Follow) {
2319       Instruction *UserI = cast<Instruction>(U.getUser());
2320       if (CallSite CS = CallSite(UserI)) {
2321         if (!CS.isArgOperand(&U))
2322           return false;
2323         const IRPosition &CSArgPos =
2324             IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2325         const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2326         return CSArgIsDead.isAssumedDead();
2327       }
2328       if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2329         const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2330         const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2331         return RetIsDeadAA.isAssumedDead();
2332       }
2333       Follow = true;
2334       return wouldInstructionBeTriviallyDead(UserI);
2335     };
2336 
2337     if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2338       return indicatePessimisticFixpoint();
2339     return ChangeStatus::UNCHANGED;
2340   }
2341 
2342   /// See AbstractAttribute::manifest(...).
2343   ChangeStatus manifest(Attributor &A) override {
2344     Value &V = getAssociatedValue();
2345     if (auto *I = dyn_cast<Instruction>(&V))
2346       if (wouldInstructionBeTriviallyDead(I)) {
2347         A.deleteAfterManifest(*I);
2348         return ChangeStatus::CHANGED;
2349       }
2350 
2351     if (V.use_empty())
2352       return ChangeStatus::UNCHANGED;
2353 
2354     UndefValue &UV = *UndefValue::get(V.getType());
2355     bool AnyChange = false;
2356     for (Use &U : V.uses())
2357       AnyChange |= A.changeUseAfterManifest(U, UV);
2358     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2359   }
2360 
2361   /// See AbstractAttribute::trackStatistics()
2362   void trackStatistics() const override {
2363     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2364   }
2365 };
2366 
2367 struct AAIsDeadArgument : public AAIsDeadFloating {
2368   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2369 
2370   /// See AbstractAttribute::initialize(...).
2371   void initialize(Attributor &A) override {
2372     if (!getAssociatedFunction()->hasExactDefinition())
2373       indicatePessimisticFixpoint();
2374   }
2375 
2376   /// See AbstractAttribute::trackStatistics()
2377   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2378 };
2379 
2380 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2381   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2382 
2383   /// See AbstractAttribute::initialize(...).
2384   void initialize(Attributor &A) override {
2385     if (isa<UndefValue>(getAssociatedValue()))
2386       indicatePessimisticFixpoint();
2387   }
2388 
2389   /// See AbstractAttribute::updateImpl(...).
2390   ChangeStatus updateImpl(Attributor &A) override {
2391     // TODO: Once we have call site specific value information we can provide
2392     //       call site specific liveness information and then it makes
2393     //       sense to specialize attributes for call sites arguments instead of
2394     //       redirecting requests to the callee argument.
2395     Argument *Arg = getAssociatedArgument();
2396     if (!Arg)
2397       return indicatePessimisticFixpoint();
2398     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2399     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2400     return clampStateAndIndicateChange(
2401         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2402   }
2403 
2404   /// See AbstractAttribute::manifest(...).
2405   ChangeStatus manifest(Attributor &A) override {
2406     CallBase &CB = cast<CallBase>(getAnchorValue());
2407     Use &U = CB.getArgOperandUse(getArgNo());
2408     assert(!isa<UndefValue>(U.get()) &&
2409            "Expected undef values to be filtered out!");
2410     UndefValue &UV = *UndefValue::get(U->getType());
2411     if (A.changeUseAfterManifest(U, UV))
2412       return ChangeStatus::CHANGED;
2413     return ChangeStatus::UNCHANGED;
2414   }
2415 
2416   /// See AbstractAttribute::trackStatistics()
2417   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2418 };
2419 
2420 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2421   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2422 
2423   /// See AbstractAttribute::updateImpl(...).
2424   ChangeStatus updateImpl(Attributor &A) override {
2425 
2426     auto PredForCallSite = [&](AbstractCallSite ACS) {
2427       if (ACS.isCallbackCall())
2428         return false;
2429       const IRPosition &CSRetPos =
2430           IRPosition::callsite_returned(ACS.getCallSite());
2431       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2432       return RetIsDeadAA.isAssumedDead();
2433     };
2434 
2435     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
2436       return indicatePessimisticFixpoint();
2437 
2438     return ChangeStatus::UNCHANGED;
2439   }
2440 
2441   /// See AbstractAttribute::manifest(...).
2442   ChangeStatus manifest(Attributor &A) override {
2443     // TODO: Rewrite the signature to return void?
2444     bool AnyChange = false;
2445     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2446     auto RetInstPred = [&](Instruction &I) {
2447       ReturnInst &RI = cast<ReturnInst>(I);
2448       if (!isa<UndefValue>(RI.getReturnValue()))
2449         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2450       return true;
2451     };
2452     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2453     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2454   }
2455 
2456   /// See AbstractAttribute::trackStatistics()
2457   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2458 };
2459 
2460 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2461   AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2462 
2463   /// See AbstractAttribute::initialize(...).
2464   void initialize(Attributor &A) override {}
2465 
2466   /// See AbstractAttribute::trackStatistics()
2467   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
2468 };
2469 
2470 struct AAIsDeadFunction : public AAIsDead {
2471   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2472 
2473   /// See AbstractAttribute::initialize(...).
2474   void initialize(Attributor &A) override {
2475     const Function *F = getAssociatedFunction();
2476     if (F && !F->isDeclaration()) {
2477       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2478       assumeLive(A, F->getEntryBlock());
2479     }
2480   }
2481 
2482   /// See AbstractAttribute::getAsStr().
2483   const std::string getAsStr() const override {
2484     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2485            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
2486            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2487            std::to_string(KnownDeadEnds.size()) + "]";
2488   }
2489 
2490   /// See AbstractAttribute::manifest(...).
2491   ChangeStatus manifest(Attributor &A) override {
2492     assert(getState().isValidState() &&
2493            "Attempted to manifest an invalid state!");
2494 
2495     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2496     Function &F = *getAssociatedFunction();
2497 
2498     if (AssumedLiveBlocks.empty()) {
2499       A.deleteAfterManifest(F);
2500       return ChangeStatus::CHANGED;
2501     }
2502 
2503     // Flag to determine if we can change an invoke to a call assuming the
2504     // callee is nounwind. This is not possible if the personality of the
2505     // function allows to catch asynchronous exceptions.
2506     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2507 
2508     KnownDeadEnds.set_union(ToBeExploredFrom);
2509     for (const Instruction *DeadEndI : KnownDeadEnds) {
2510       auto *CB = dyn_cast<CallBase>(DeadEndI);
2511       if (!CB)
2512         continue;
2513       const auto &NoReturnAA =
2514           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2515       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2516       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2517         continue;
2518       Instruction *I = const_cast<Instruction *>(DeadEndI);
2519       BasicBlock *BB = I->getParent();
2520       Instruction *SplitPos = I->getNextNode();
2521       // TODO: mark stuff before unreachable instructions as dead.
2522 
2523       if (auto *II = dyn_cast<InvokeInst>(I)) {
2524         // If we keep the invoke the split position is at the beginning of the
2525         // normal desitination block (it invokes a noreturn function after all).
2526         BasicBlock *NormalDestBB = II->getNormalDest();
2527         SplitPos = &NormalDestBB->front();
2528 
2529         /// Invoke is replaced with a call and unreachable is placed after it if
2530         /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
2531         /// and only place an unreachable in the normal successor.
2532         if (Invoke2CallAllowed) {
2533           if (II->getCalledFunction()) {
2534             const IRPosition &IPos = IRPosition::callsite_function(*II);
2535             const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2536             if (AANoUnw.isAssumedNoUnwind()) {
2537               LLVM_DEBUG(dbgs()
2538                          << "[AAIsDead] Replace invoke with call inst\n");
2539               CallInst *CI = createCallMatchingInvoke(II);
2540               CI->insertBefore(II);
2541               CI->takeName(II);
2542               II->replaceAllUsesWith(CI);
2543 
2544               // If this is a nounwind + mayreturn invoke we only remove the unwind edge.
2545               // This is done by moving the invoke into a new and dead block and connecting
2546               // the normal destination of the invoke with a branch that follows the call
2547               // replacement we created above.
2548               if (MayReturn) {
2549                 BasicBlock *NewDeadBB = SplitBlock(BB, II, nullptr, nullptr, nullptr, ".i2c");
2550                 assert(isa<BranchInst>(BB->getTerminator()) &&
2551                        BB->getTerminator()->getNumSuccessors() == 1 &&
2552                        BB->getTerminator()->getSuccessor(0) == NewDeadBB);
2553                 new UnreachableInst(I->getContext(), NewDeadBB);
2554                 BB->getTerminator()->setOperand(0, NormalDestBB);
2555                 A.deleteAfterManifest(*II);
2556                 continue;
2557               }
2558 
2559               // We do not need an invoke (II) but instead want a call followed
2560               // by an unreachable. However, we do not remove II as other
2561               // abstract attributes might have it cached as part of their
2562               // results. Given that we modify the CFG anyway, we simply keep II
2563               // around but in a new dead block. To avoid II being live through
2564               // a different edge we have to ensure the block we place it in is
2565               // only reached from the current block of II and then not reached
2566               // at all when we insert the unreachable.
2567               SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
2568               SplitPos = CI->getNextNode();
2569             }
2570           }
2571         }
2572 
2573         if (SplitPos == &NormalDestBB->front()) {
2574           // If this is an invoke of a noreturn function the edge to the normal
2575           // destination block is dead but not necessarily the block itself.
2576           // TODO: We need to move to an edge based system during deduction and
2577           //       also manifest.
2578           assert(!NormalDestBB->isLandingPad() &&
2579                  "Expected the normal destination not to be a landingpad!");
2580           if (NormalDestBB->getUniquePredecessor() == BB) {
2581             assumeLive(A, *NormalDestBB);
2582           } else {
2583             BasicBlock *SplitBB =
2584                 SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2585             // The split block is live even if it contains only an unreachable
2586             // instruction at the end.
2587             assumeLive(A, *SplitBB);
2588             SplitPos = SplitBB->getTerminator();
2589             HasChanged = ChangeStatus::CHANGED;
2590           }
2591         }
2592       }
2593 
2594       if (isa_and_nonnull<UnreachableInst>(SplitPos))
2595         continue;
2596 
2597       BB = SplitPos->getParent();
2598       SplitBlock(BB, SplitPos);
2599       changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
2600       HasChanged = ChangeStatus::CHANGED;
2601     }
2602 
2603     for (BasicBlock &BB : F)
2604       if (!AssumedLiveBlocks.count(&BB))
2605         A.deleteAfterManifest(BB);
2606 
2607     return HasChanged;
2608   }
2609 
2610   /// See AbstractAttribute::updateImpl(...).
2611   ChangeStatus updateImpl(Attributor &A) override;
2612 
2613   /// See AbstractAttribute::trackStatistics()
2614   void trackStatistics() const override {}
2615 
2616   /// Returns true if the function is assumed dead.
2617   bool isAssumedDead() const override { return false; }
2618 
2619   /// See AAIsDead::isAssumedDead(BasicBlock *).
2620   bool isAssumedDead(const BasicBlock *BB) const override {
2621     assert(BB->getParent() == getAssociatedFunction() &&
2622            "BB must be in the same anchor scope function.");
2623 
2624     if (!getAssumed())
2625       return false;
2626     return !AssumedLiveBlocks.count(BB);
2627   }
2628 
2629   /// See AAIsDead::isKnownDead(BasicBlock *).
2630   bool isKnownDead(const BasicBlock *BB) const override {
2631     return getKnown() && isAssumedDead(BB);
2632   }
2633 
2634   /// See AAIsDead::isAssumed(Instruction *I).
2635   bool isAssumedDead(const Instruction *I) const override {
2636     assert(I->getParent()->getParent() == getAssociatedFunction() &&
2637            "Instruction must be in the same anchor scope function.");
2638 
2639     if (!getAssumed())
2640       return false;
2641 
2642     // If it is not in AssumedLiveBlocks then it for sure dead.
2643     // Otherwise, it can still be after noreturn call in a live block.
2644     if (!AssumedLiveBlocks.count(I->getParent()))
2645       return true;
2646 
2647     // If it is not after a liveness barrier it is live.
2648     const Instruction *PrevI = I->getPrevNode();
2649     while (PrevI) {
2650       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2651         return true;
2652       PrevI = PrevI->getPrevNode();
2653     }
2654     return false;
2655   }
2656 
2657   /// See AAIsDead::isKnownDead(Instruction *I).
2658   bool isKnownDead(const Instruction *I) const override {
2659     return getKnown() && isAssumedDead(I);
2660   }
2661 
2662   /// Determine if \p F might catch asynchronous exceptions.
2663   static bool mayCatchAsynchronousExceptions(const Function &F) {
2664     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2665   }
2666 
2667   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2668   /// that internal function called from \p BB should now be looked at.
2669   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2670     if (!AssumedLiveBlocks.insert(&BB).second)
2671       return false;
2672 
2673     // We assume that all of BB is (probably) live now and if there are calls to
2674     // internal functions we will assume that those are now live as well. This
2675     // is a performance optimization for blocks with calls to a lot of internal
2676     // functions. It can however cause dead functions to be treated as live.
2677     for (const Instruction &I : BB)
2678       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2679         if (const Function *F = ICS.getCalledFunction())
2680           if (F->hasLocalLinkage())
2681             A.markLiveInternalFunction(*F);
2682     return true;
2683   }
2684 
2685   /// Collection of instructions that need to be explored again, e.g., we
2686   /// did assume they do not transfer control to (one of their) successors.
2687   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2688 
2689   /// Collection of instructions that are known to not transfer control.
2690   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2691 
2692   /// Collection of all assumed live BasicBlocks.
2693   DenseSet<const BasicBlock *> AssumedLiveBlocks;
2694 };
2695 
2696 static bool
2697 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2698                         AbstractAttribute &AA,
2699                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2700   const IRPosition &IPos = IRPosition::callsite_function(CB);
2701 
2702   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2703   if (NoReturnAA.isAssumedNoReturn())
2704     return !NoReturnAA.isKnownNoReturn();
2705   if (CB.isTerminator())
2706     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
2707   else
2708     AliveSuccessors.push_back(CB.getNextNode());
2709   return false;
2710 }
2711 
2712 static bool
2713 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
2714                         AbstractAttribute &AA,
2715                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2716   bool UsedAssumedInformation =
2717       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
2718 
2719   // First, determine if we can change an invoke to a call assuming the
2720   // callee is nounwind. This is not possible if the personality of the
2721   // function allows to catch asynchronous exceptions.
2722   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
2723     AliveSuccessors.push_back(&II.getUnwindDest()->front());
2724   } else {
2725     const IRPosition &IPos = IRPosition::callsite_function(II);
2726     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
2727     if (AANoUnw.isAssumedNoUnwind()) {
2728       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
2729     } else {
2730       AliveSuccessors.push_back(&II.getUnwindDest()->front());
2731     }
2732   }
2733   return UsedAssumedInformation;
2734 }
2735 
2736 static Optional<ConstantInt *>
2737 getAssumedConstant(Attributor &A, const Value &V, AbstractAttribute &AA,
2738                    bool &UsedAssumedInformation) {
2739   const auto &ValueSimplifyAA =
2740       A.getAAFor<AAValueSimplify>(AA, IRPosition::value(V));
2741   Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
2742   UsedAssumedInformation |= !ValueSimplifyAA.isKnown();
2743   if (!SimplifiedV.hasValue())
2744     return llvm::None;
2745   if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue()))
2746     return llvm::None;
2747   return dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
2748 }
2749 
2750 static bool
2751 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
2752                         AbstractAttribute &AA,
2753                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2754   bool UsedAssumedInformation = false;
2755   if (BI.getNumSuccessors() == 1) {
2756     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2757   } else {
2758     Optional<ConstantInt *> CI =
2759         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
2760     if (!CI.hasValue()) {
2761       // No value yet, assume both edges are dead.
2762     } else if (CI.getValue()) {
2763       const BasicBlock *SuccBB =
2764           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
2765       AliveSuccessors.push_back(&SuccBB->front());
2766     } else {
2767       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2768       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
2769       UsedAssumedInformation = false;
2770     }
2771   }
2772   return UsedAssumedInformation;
2773 }
2774 
2775 static bool
2776 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
2777                         AbstractAttribute &AA,
2778                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2779   bool UsedAssumedInformation = false;
2780   Optional<ConstantInt *> CI =
2781       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
2782   if (!CI.hasValue()) {
2783     // No value yet, assume all edges are dead.
2784   } else if (CI.getValue()) {
2785     for (auto &CaseIt : SI.cases()) {
2786       if (CaseIt.getCaseValue() == CI.getValue()) {
2787         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
2788         return UsedAssumedInformation;
2789       }
2790     }
2791     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
2792     return UsedAssumedInformation;
2793   } else {
2794     for (const BasicBlock *SuccBB : successors(SI.getParent()))
2795       AliveSuccessors.push_back(&SuccBB->front());
2796   }
2797   return UsedAssumedInformation;
2798 }
2799 
2800 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
2801   ChangeStatus Change = ChangeStatus::UNCHANGED;
2802 
2803   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
2804                     << getAssociatedFunction()->size() << "] BBs and "
2805                     << ToBeExploredFrom.size() << " exploration points and "
2806                     << KnownDeadEnds.size() << " known dead ends\n");
2807 
2808   // Copy and clear the list of instructions we need to explore from. It is
2809   // refilled with instructions the next update has to look at.
2810   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
2811                                                ToBeExploredFrom.end());
2812   decltype(ToBeExploredFrom) NewToBeExploredFrom;
2813 
2814   SmallVector<const Instruction *, 8> AliveSuccessors;
2815   while (!Worklist.empty()) {
2816     const Instruction *I = Worklist.pop_back_val();
2817     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
2818 
2819     AliveSuccessors.clear();
2820 
2821     bool UsedAssumedInformation = false;
2822     switch (I->getOpcode()) {
2823     // TODO: look for (assumed) UB to backwards propagate "deadness".
2824     default:
2825       if (I->isTerminator()) {
2826         for (const BasicBlock *SuccBB : successors(I->getParent()))
2827           AliveSuccessors.push_back(&SuccBB->front());
2828       } else {
2829         AliveSuccessors.push_back(I->getNextNode());
2830       }
2831       break;
2832     case Instruction::Call:
2833       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
2834                                                        *this, AliveSuccessors);
2835       break;
2836     case Instruction::Invoke:
2837       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
2838                                                        *this, AliveSuccessors);
2839       break;
2840     case Instruction::Br:
2841       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
2842                                                        *this, AliveSuccessors);
2843       break;
2844     case Instruction::Switch:
2845       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
2846                                                        *this, AliveSuccessors);
2847       break;
2848     }
2849 
2850     if (UsedAssumedInformation) {
2851       NewToBeExploredFrom.insert(I);
2852     } else {
2853       Change = ChangeStatus::CHANGED;
2854       if (AliveSuccessors.empty() ||
2855           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
2856         KnownDeadEnds.insert(I);
2857     }
2858 
2859     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
2860                       << AliveSuccessors.size() << " UsedAssumedInformation: "
2861                       << UsedAssumedInformation << "\n");
2862 
2863     for (const Instruction *AliveSuccessor : AliveSuccessors) {
2864       if (!I->isTerminator()) {
2865         assert(AliveSuccessors.size() == 1 &&
2866                "Non-terminator expected to have a single successor!");
2867         Worklist.push_back(AliveSuccessor);
2868       } else {
2869         if (assumeLive(A, *AliveSuccessor->getParent()))
2870           Worklist.push_back(AliveSuccessor);
2871       }
2872     }
2873   }
2874 
2875   ToBeExploredFrom = std::move(NewToBeExploredFrom);
2876 
2877   // If we know everything is live there is no need to query for liveness.
2878   // Instead, indicating a pessimistic fixpoint will cause the state to be
2879   // "invalid" and all queries to be answered conservatively without lookups.
2880   // To be in this state we have to (1) finished the exploration and (3) not
2881   // discovered any non-trivial dead end and (2) not ruled unreachable code
2882   // dead.
2883   if (ToBeExploredFrom.empty() &&
2884       getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
2885       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
2886         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
2887       }))
2888     return indicatePessimisticFixpoint();
2889   return Change;
2890 }
2891 
2892 /// Liveness information for a call sites.
2893 struct AAIsDeadCallSite final : AAIsDeadFunction {
2894   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
2895 
2896   /// See AbstractAttribute::initialize(...).
2897   void initialize(Attributor &A) override {
2898     // TODO: Once we have call site specific value information we can provide
2899     //       call site specific liveness information and then it makes
2900     //       sense to specialize attributes for call sites instead of
2901     //       redirecting requests to the callee.
2902     llvm_unreachable("Abstract attributes for liveness are not "
2903                      "supported for call sites yet!");
2904   }
2905 
2906   /// See AbstractAttribute::updateImpl(...).
2907   ChangeStatus updateImpl(Attributor &A) override {
2908     return indicatePessimisticFixpoint();
2909   }
2910 
2911   /// See AbstractAttribute::trackStatistics()
2912   void trackStatistics() const override {}
2913 };
2914 
2915 /// -------------------- Dereferenceable Argument Attribute --------------------
2916 
2917 template <>
2918 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
2919                                                      const DerefState &R) {
2920   ChangeStatus CS0 =
2921       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
2922   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
2923   return CS0 | CS1;
2924 }
2925 
2926 struct AADereferenceableImpl : AADereferenceable {
2927   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
2928   using StateType = DerefState;
2929 
2930   void initialize(Attributor &A) override {
2931     SmallVector<Attribute, 4> Attrs;
2932     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
2933              Attrs);
2934     for (const Attribute &Attr : Attrs)
2935       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
2936 
2937     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
2938 
2939     const IRPosition &IRP = this->getIRPosition();
2940     bool IsFnInterface = IRP.isFnInterfaceKind();
2941     const Function *FnScope = IRP.getAnchorScope();
2942     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
2943       indicatePessimisticFixpoint();
2944   }
2945 
2946   /// See AbstractAttribute::getState()
2947   /// {
2948   StateType &getState() override { return *this; }
2949   const StateType &getState() const override { return *this; }
2950   /// }
2951 
2952   /// See AAFromMustBeExecutedContext
2953   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
2954     bool IsNonNull = false;
2955     bool TrackUse = false;
2956     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
2957         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
2958     takeKnownDerefBytesMaximum(DerefBytes);
2959     return TrackUse;
2960   }
2961 
2962   void getDeducedAttributes(LLVMContext &Ctx,
2963                             SmallVectorImpl<Attribute> &Attrs) const override {
2964     // TODO: Add *_globally support
2965     if (isAssumedNonNull())
2966       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
2967           Ctx, getAssumedDereferenceableBytes()));
2968     else
2969       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
2970           Ctx, getAssumedDereferenceableBytes()));
2971   }
2972 
2973   /// See AbstractAttribute::getAsStr().
2974   const std::string getAsStr() const override {
2975     if (!getAssumedDereferenceableBytes())
2976       return "unknown-dereferenceable";
2977     return std::string("dereferenceable") +
2978            (isAssumedNonNull() ? "" : "_or_null") +
2979            (isAssumedGlobal() ? "_globally" : "") + "<" +
2980            std::to_string(getKnownDereferenceableBytes()) + "-" +
2981            std::to_string(getAssumedDereferenceableBytes()) + ">";
2982   }
2983 };
2984 
2985 /// Dereferenceable attribute for a floating value.
2986 struct AADereferenceableFloating
2987     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
2988   using Base =
2989       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
2990   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
2991 
2992   /// See AbstractAttribute::updateImpl(...).
2993   ChangeStatus updateImpl(Attributor &A) override {
2994     ChangeStatus Change = Base::updateImpl(A);
2995 
2996     const DataLayout &DL = A.getDataLayout();
2997 
2998     auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
2999       unsigned IdxWidth =
3000           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3001       APInt Offset(IdxWidth, 0);
3002       const Value *Base =
3003           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3004 
3005       const auto &AA =
3006           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3007       int64_t DerefBytes = 0;
3008       if (!Stripped && this == &AA) {
3009         // Use IR information if we did not strip anything.
3010         // TODO: track globally.
3011         bool CanBeNull;
3012         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3013         T.GlobalState.indicatePessimisticFixpoint();
3014       } else {
3015         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3016         DerefBytes = DS.DerefBytesState.getAssumed();
3017         T.GlobalState &= DS.GlobalState;
3018       }
3019 
3020       // For now we do not try to "increase" dereferenceability due to negative
3021       // indices as we first have to come up with code to deal with loops and
3022       // for overflows of the dereferenceable bytes.
3023       int64_t OffsetSExt = Offset.getSExtValue();
3024       if (OffsetSExt < 0)
3025         OffsetSExt = 0;
3026 
3027       T.takeAssumedDerefBytesMinimum(
3028           std::max(int64_t(0), DerefBytes - OffsetSExt));
3029 
3030       if (this == &AA) {
3031         if (!Stripped) {
3032           // If nothing was stripped IR information is all we got.
3033           T.takeKnownDerefBytesMaximum(
3034               std::max(int64_t(0), DerefBytes - OffsetSExt));
3035           T.indicatePessimisticFixpoint();
3036         } else if (OffsetSExt > 0) {
3037           // If something was stripped but there is circular reasoning we look
3038           // for the offset. If it is positive we basically decrease the
3039           // dereferenceable bytes in a circluar loop now, which will simply
3040           // drive them down to the known value in a very slow way which we
3041           // can accelerate.
3042           T.indicatePessimisticFixpoint();
3043         }
3044       }
3045 
3046       return T.isValidState();
3047     };
3048 
3049     DerefState T;
3050     if (!genericValueTraversal<AADereferenceable, DerefState>(
3051             A, getIRPosition(), *this, T, VisitValueCB))
3052       return indicatePessimisticFixpoint();
3053 
3054     return Change | clampStateAndIndicateChange(getState(), T);
3055   }
3056 
3057   /// See AbstractAttribute::trackStatistics()
3058   void trackStatistics() const override {
3059     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3060   }
3061 };
3062 
3063 /// Dereferenceable attribute for a return value.
3064 struct AADereferenceableReturned final
3065     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3066                                    DerefState> {
3067   AADereferenceableReturned(const IRPosition &IRP)
3068       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3069                                      DerefState>(IRP) {}
3070 
3071   /// See AbstractAttribute::trackStatistics()
3072   void trackStatistics() const override {
3073     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3074   }
3075 };
3076 
3077 /// Dereferenceable attribute for an argument
3078 struct AADereferenceableArgument final
3079     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3080           AADereferenceable, AADereferenceableImpl, DerefState> {
3081   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3082       AADereferenceable, AADereferenceableImpl, DerefState>;
3083   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3084 
3085   /// See AbstractAttribute::trackStatistics()
3086   void trackStatistics() const override {
3087     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3088   }
3089 };
3090 
3091 /// Dereferenceable attribute for a call site argument.
3092 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3093   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3094       : AADereferenceableFloating(IRP) {}
3095 
3096   /// See AbstractAttribute::trackStatistics()
3097   void trackStatistics() const override {
3098     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3099   }
3100 };
3101 
3102 /// Dereferenceable attribute deduction for a call site return value.
3103 struct AADereferenceableCallSiteReturned final
3104     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3105           AADereferenceable, AADereferenceableImpl> {
3106   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3107       AADereferenceable, AADereferenceableImpl>;
3108   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3109 
3110   /// See AbstractAttribute::trackStatistics()
3111   void trackStatistics() const override {
3112     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3113   }
3114 };
3115 
3116 // ------------------------ Align Argument Attribute ------------------------
3117 
3118 static unsigned int getKnownAlignForUse(Attributor &A,
3119                                         AbstractAttribute &QueryingAA,
3120                                         Value &AssociatedValue, const Use *U,
3121                                         const Instruction *I, bool &TrackUse) {
3122   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3123     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3124       return 0;
3125 
3126     unsigned ArgNo = ICS.getArgumentNo(U);
3127     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3128     // As long as we only use known information there is no need to track
3129     // dependences here.
3130     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3131                                         /* TrackDependence */ false);
3132     return AlignAA.getKnownAlign();
3133   }
3134 
3135   // We need to follow common pointer manipulation uses to the accesses they
3136   // feed into.
3137   // TODO: Consider gep instruction
3138   if (isa<CastInst>(I)) {
3139     TrackUse = true;
3140     return 0;
3141   }
3142 
3143   if (auto *SI = dyn_cast<StoreInst>(I))
3144     return SI->getAlignment();
3145   else if (auto *LI = dyn_cast<LoadInst>(I))
3146     return LI->getAlignment();
3147 
3148   return 0;
3149 }
3150 struct AAAlignImpl : AAAlign {
3151   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3152 
3153   /// See AbstractAttribute::initialize(...).
3154   void initialize(Attributor &A) override {
3155     SmallVector<Attribute, 4> Attrs;
3156     getAttrs({Attribute::Alignment}, Attrs);
3157     for (const Attribute &Attr : Attrs)
3158       takeKnownMaximum(Attr.getValueAsInt());
3159 
3160     if (getIRPosition().isFnInterfaceKind() &&
3161         (!getAssociatedFunction() ||
3162          !getAssociatedFunction()->hasExactDefinition()))
3163       indicatePessimisticFixpoint();
3164   }
3165 
3166   /// See AbstractAttribute::manifest(...).
3167   ChangeStatus manifest(Attributor &A) override {
3168     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3169 
3170     // Check for users that allow alignment annotations.
3171     Value &AnchorVal = getIRPosition().getAnchorValue();
3172     for (const Use &U : AnchorVal.uses()) {
3173       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3174         if (SI->getPointerOperand() == &AnchorVal)
3175           if (SI->getAlignment() < getAssumedAlign()) {
3176             STATS_DECLTRACK(AAAlign, Store,
3177                             "Number of times alignemnt added to a store");
3178             SI->setAlignment(Align(getAssumedAlign()));
3179             Changed = ChangeStatus::CHANGED;
3180           }
3181       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3182         if (LI->getPointerOperand() == &AnchorVal)
3183           if (LI->getAlignment() < getAssumedAlign()) {
3184             LI->setAlignment(Align(getAssumedAlign()));
3185             STATS_DECLTRACK(AAAlign, Load,
3186                             "Number of times alignemnt added to a load");
3187             Changed = ChangeStatus::CHANGED;
3188           }
3189       }
3190     }
3191 
3192     return AAAlign::manifest(A) | Changed;
3193   }
3194 
3195   // TODO: Provide a helper to determine the implied ABI alignment and check in
3196   //       the existing manifest method and a new one for AAAlignImpl that value
3197   //       to avoid making the alignment explicit if it did not improve.
3198 
3199   /// See AbstractAttribute::getDeducedAttributes
3200   virtual void
3201   getDeducedAttributes(LLVMContext &Ctx,
3202                        SmallVectorImpl<Attribute> &Attrs) const override {
3203     if (getAssumedAlign() > 1)
3204       Attrs.emplace_back(
3205           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3206   }
3207   /// See AAFromMustBeExecutedContext
3208   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3209     bool TrackUse = false;
3210 
3211     unsigned int KnownAlign = getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3212     takeKnownMaximum(KnownAlign);
3213 
3214     return TrackUse;
3215   }
3216 
3217   /// See AbstractAttribute::getAsStr().
3218   const std::string getAsStr() const override {
3219     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3220                                 "-" + std::to_string(getAssumedAlign()) + ">")
3221                              : "unknown-align";
3222   }
3223 };
3224 
3225 /// Align attribute for a floating value.
3226 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3227   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3228   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3229 
3230   /// See AbstractAttribute::updateImpl(...).
3231   ChangeStatus updateImpl(Attributor &A) override {
3232     Base::updateImpl(A);
3233 
3234     const DataLayout &DL = A.getDataLayout();
3235 
3236     auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3237                             bool Stripped) -> bool {
3238       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3239       if (!Stripped && this == &AA) {
3240         // Use only IR information if we did not strip anything.
3241         const MaybeAlign PA = V.getPointerAlignment(DL);
3242         T.takeKnownMaximum(PA ? PA->value() : 0);
3243         T.indicatePessimisticFixpoint();
3244       } else {
3245         // Use abstract attribute information.
3246         const AAAlign::StateType &DS =
3247             static_cast<const AAAlign::StateType &>(AA.getState());
3248         T ^= DS;
3249       }
3250       return T.isValidState();
3251     };
3252 
3253     StateType T;
3254     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3255                                                    VisitValueCB))
3256       return indicatePessimisticFixpoint();
3257 
3258     // TODO: If we know we visited all incoming values, thus no are assumed
3259     // dead, we can take the known information from the state T.
3260     return clampStateAndIndicateChange(getState(), T);
3261   }
3262 
3263   /// See AbstractAttribute::trackStatistics()
3264   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3265 };
3266 
3267 /// Align attribute for function return value.
3268 struct AAAlignReturned final
3269     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3270   AAAlignReturned(const IRPosition &IRP)
3271       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3272 
3273   /// See AbstractAttribute::trackStatistics()
3274   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3275 };
3276 
3277 /// Align attribute for function argument.
3278 struct AAAlignArgument final
3279     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3280                                                               AAAlignImpl> {
3281   AAAlignArgument(const IRPosition &IRP)
3282       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3283                                                                 AAAlignImpl>(
3284             IRP) {}
3285 
3286   /// See AbstractAttribute::trackStatistics()
3287   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3288 };
3289 
3290 struct AAAlignCallSiteArgument final : AAAlignFloating {
3291   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3292 
3293   /// See AbstractAttribute::manifest(...).
3294   ChangeStatus manifest(Attributor &A) override {
3295     return AAAlignImpl::manifest(A);
3296   }
3297 
3298   /// See AbstractAttribute::trackStatistics()
3299   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3300 };
3301 
3302 /// Align attribute deduction for a call site return value.
3303 struct AAAlignCallSiteReturned final
3304     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3305                                                              AAAlignImpl> {
3306   using Base =
3307       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3308                                                              AAAlignImpl>;
3309   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3310 
3311   /// See AbstractAttribute::initialize(...).
3312   void initialize(Attributor &A) override {
3313     Base::initialize(A);
3314     Function *F = getAssociatedFunction();
3315     if (!F)
3316       indicatePessimisticFixpoint();
3317   }
3318 
3319   /// See AbstractAttribute::trackStatistics()
3320   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3321 };
3322 
3323 /// ------------------ Function No-Return Attribute ----------------------------
3324 struct AANoReturnImpl : public AANoReturn {
3325   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3326 
3327   /// See AbstractAttribute::initialize(...).
3328   void initialize(Attributor &A) override {
3329     AANoReturn::initialize(A);
3330     Function *F = getAssociatedFunction();
3331     if (!F)
3332       indicatePessimisticFixpoint();
3333   }
3334 
3335   /// See AbstractAttribute::getAsStr().
3336   const std::string getAsStr() const override {
3337     return getAssumed() ? "noreturn" : "may-return";
3338   }
3339 
3340   /// See AbstractAttribute::updateImpl(Attributor &A).
3341   virtual ChangeStatus updateImpl(Attributor &A) override {
3342     auto CheckForNoReturn = [](Instruction &) { return false; };
3343     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3344                                    {(unsigned)Instruction::Ret}))
3345       return indicatePessimisticFixpoint();
3346     return ChangeStatus::UNCHANGED;
3347   }
3348 };
3349 
3350 struct AANoReturnFunction final : AANoReturnImpl {
3351   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3352 
3353   /// See AbstractAttribute::trackStatistics()
3354   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3355 };
3356 
3357 /// NoReturn attribute deduction for a call sites.
3358 struct AANoReturnCallSite final : AANoReturnImpl {
3359   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3360 
3361   /// See AbstractAttribute::updateImpl(...).
3362   ChangeStatus updateImpl(Attributor &A) override {
3363     // TODO: Once we have call site specific value information we can provide
3364     //       call site specific liveness information and then it makes
3365     //       sense to specialize attributes for call sites arguments instead of
3366     //       redirecting requests to the callee argument.
3367     Function *F = getAssociatedFunction();
3368     const IRPosition &FnPos = IRPosition::function(*F);
3369     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3370     return clampStateAndIndicateChange(
3371         getState(),
3372         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3373   }
3374 
3375   /// See AbstractAttribute::trackStatistics()
3376   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3377 };
3378 
3379 /// ----------------------- Variable Capturing ---------------------------------
3380 
3381 /// A class to hold the state of for no-capture attributes.
3382 struct AANoCaptureImpl : public AANoCapture {
3383   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3384 
3385   /// See AbstractAttribute::initialize(...).
3386   void initialize(Attributor &A) override {
3387     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3388       indicateOptimisticFixpoint();
3389       return;
3390     }
3391     Function *AnchorScope = getAnchorScope();
3392     if (isFnInterfaceKind() &&
3393         (!AnchorScope || !AnchorScope->hasExactDefinition())) {
3394       indicatePessimisticFixpoint();
3395       return;
3396     }
3397 
3398     // You cannot "capture" null in the default address space.
3399     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3400         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3401       indicateOptimisticFixpoint();
3402       return;
3403     }
3404 
3405     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3406 
3407     // Check what state the associated function can actually capture.
3408     if (F)
3409       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3410     else
3411       indicatePessimisticFixpoint();
3412   }
3413 
3414   /// See AbstractAttribute::updateImpl(...).
3415   ChangeStatus updateImpl(Attributor &A) override;
3416 
3417   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3418   virtual void
3419   getDeducedAttributes(LLVMContext &Ctx,
3420                        SmallVectorImpl<Attribute> &Attrs) const override {
3421     if (!isAssumedNoCaptureMaybeReturned())
3422       return;
3423 
3424     if (getArgNo() >= 0) {
3425       if (isAssumedNoCapture())
3426         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3427       else if (ManifestInternal)
3428         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3429     }
3430   }
3431 
3432   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3433   /// depending on the ability of the function associated with \p IRP to capture
3434   /// state in memory and through "returning/throwing", respectively.
3435   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3436                                                    const Function &F,
3437                                                    BitIntegerState &State) {
3438     // TODO: Once we have memory behavior attributes we should use them here.
3439 
3440     // If we know we cannot communicate or write to memory, we do not care about
3441     // ptr2int anymore.
3442     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3443         F.getReturnType()->isVoidTy()) {
3444       State.addKnownBits(NO_CAPTURE);
3445       return;
3446     }
3447 
3448     // A function cannot capture state in memory if it only reads memory, it can
3449     // however return/throw state and the state might be influenced by the
3450     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3451     if (F.onlyReadsMemory())
3452       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3453 
3454     // A function cannot communicate state back if it does not through
3455     // exceptions and doesn not return values.
3456     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3457       State.addKnownBits(NOT_CAPTURED_IN_RET);
3458 
3459     // Check existing "returned" attributes.
3460     int ArgNo = IRP.getArgNo();
3461     if (F.doesNotThrow() && ArgNo >= 0) {
3462       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3463         if (F.hasParamAttribute(u, Attribute::Returned)) {
3464           if (u == unsigned(ArgNo))
3465             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3466           else if (F.onlyReadsMemory())
3467             State.addKnownBits(NO_CAPTURE);
3468           else
3469             State.addKnownBits(NOT_CAPTURED_IN_RET);
3470           break;
3471         }
3472     }
3473   }
3474 
3475   /// See AbstractState::getAsStr().
3476   const std::string getAsStr() const override {
3477     if (isKnownNoCapture())
3478       return "known not-captured";
3479     if (isAssumedNoCapture())
3480       return "assumed not-captured";
3481     if (isKnownNoCaptureMaybeReturned())
3482       return "known not-captured-maybe-returned";
3483     if (isAssumedNoCaptureMaybeReturned())
3484       return "assumed not-captured-maybe-returned";
3485     return "assumed-captured";
3486   }
3487 };
3488 
3489 /// Attributor-aware capture tracker.
3490 struct AACaptureUseTracker final : public CaptureTracker {
3491 
3492   /// Create a capture tracker that can lookup in-flight abstract attributes
3493   /// through the Attributor \p A.
3494   ///
3495   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3496   /// search is stopped. If a use leads to a return instruction,
3497   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3498   /// If a use leads to a ptr2int which may capture the value,
3499   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3500   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3501   /// set. All values in \p PotentialCopies are later tracked as well. For every
3502   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3503   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3504   /// conservatively set to true.
3505   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3506                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3507                       SmallVectorImpl<const Value *> &PotentialCopies,
3508                       unsigned &RemainingUsesToExplore)
3509       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3510         PotentialCopies(PotentialCopies),
3511         RemainingUsesToExplore(RemainingUsesToExplore) {}
3512 
3513   /// Determine if \p V maybe captured. *Also updates the state!*
3514   bool valueMayBeCaptured(const Value *V) {
3515     if (V->getType()->isPointerTy()) {
3516       PointerMayBeCaptured(V, this);
3517     } else {
3518       State.indicatePessimisticFixpoint();
3519     }
3520     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3521   }
3522 
3523   /// See CaptureTracker::tooManyUses().
3524   void tooManyUses() override {
3525     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3526   }
3527 
3528   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3529     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3530       return true;
3531     const auto &DerefAA =
3532         A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
3533     return DerefAA.getAssumedDereferenceableBytes();
3534   }
3535 
3536   /// See CaptureTracker::captured(...).
3537   bool captured(const Use *U) override {
3538     Instruction *UInst = cast<Instruction>(U->getUser());
3539     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3540                       << "\n");
3541 
3542     // Because we may reuse the tracker multiple times we keep track of the
3543     // number of explored uses ourselves as well.
3544     if (RemainingUsesToExplore-- == 0) {
3545       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3546       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3547                           /* Return */ true);
3548     }
3549 
3550     // Deal with ptr2int by following uses.
3551     if (isa<PtrToIntInst>(UInst)) {
3552       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3553       return valueMayBeCaptured(UInst);
3554     }
3555 
3556     // Explicitly catch return instructions.
3557     if (isa<ReturnInst>(UInst))
3558       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3559                           /* Return */ true);
3560 
3561     // For now we only use special logic for call sites. However, the tracker
3562     // itself knows about a lot of other non-capturing cases already.
3563     CallSite CS(UInst);
3564     if (!CS || !CS.isArgOperand(U))
3565       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3566                           /* Return */ true);
3567 
3568     unsigned ArgNo = CS.getArgumentNo(U);
3569     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3570     // If we have a abstract no-capture attribute for the argument we can use
3571     // it to justify a non-capture attribute here. This allows recursion!
3572     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3573     if (ArgNoCaptureAA.isAssumedNoCapture())
3574       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3575                           /* Return */ false);
3576     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3577       addPotentialCopy(CS);
3578       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3579                           /* Return */ false);
3580     }
3581 
3582     // Lastly, we could not find a reason no-capture can be assumed so we don't.
3583     return isCapturedIn(/* Memory */ true, /* Integer */ true,
3584                         /* Return */ true);
3585   }
3586 
3587   /// Register \p CS as potential copy of the value we are checking.
3588   void addPotentialCopy(CallSite CS) {
3589     PotentialCopies.push_back(CS.getInstruction());
3590   }
3591 
3592   /// See CaptureTracker::shouldExplore(...).
3593   bool shouldExplore(const Use *U) override {
3594     // Check liveness.
3595     return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
3596   }
3597 
3598   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
3599   /// \p CapturedInRet, then return the appropriate value for use in the
3600   /// CaptureTracker::captured() interface.
3601   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
3602                     bool CapturedInRet) {
3603     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
3604                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
3605     if (CapturedInMem)
3606       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
3607     if (CapturedInInt)
3608       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
3609     if (CapturedInRet)
3610       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
3611     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3612   }
3613 
3614 private:
3615   /// The attributor providing in-flight abstract attributes.
3616   Attributor &A;
3617 
3618   /// The abstract attribute currently updated.
3619   AANoCapture &NoCaptureAA;
3620 
3621   /// The abstract liveness state.
3622   const AAIsDead &IsDeadAA;
3623 
3624   /// The state currently updated.
3625   AANoCapture::StateType &State;
3626 
3627   /// Set of potential copies of the tracked value.
3628   SmallVectorImpl<const Value *> &PotentialCopies;
3629 
3630   /// Global counter to limit the number of explored uses.
3631   unsigned &RemainingUsesToExplore;
3632 };
3633 
3634 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
3635   const IRPosition &IRP = getIRPosition();
3636   const Value *V =
3637       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
3638   if (!V)
3639     return indicatePessimisticFixpoint();
3640 
3641   const Function *F =
3642       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
3643   assert(F && "Expected a function!");
3644   const IRPosition &FnPos = IRPosition::function(*F);
3645   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos);
3646 
3647   AANoCapture::StateType T;
3648 
3649   // Readonly means we cannot capture through memory.
3650   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
3651   if (FnMemAA.isAssumedReadOnly()) {
3652     T.addKnownBits(NOT_CAPTURED_IN_MEM);
3653     if (FnMemAA.isKnownReadOnly())
3654       addKnownBits(NOT_CAPTURED_IN_MEM);
3655   }
3656 
3657   // Make sure all returned values are different than the underlying value.
3658   // TODO: we could do this in a more sophisticated way inside
3659   //       AAReturnedValues, e.g., track all values that escape through returns
3660   //       directly somehow.
3661   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
3662     bool SeenConstant = false;
3663     for (auto &It : RVAA.returned_values()) {
3664       if (isa<Constant>(It.first)) {
3665         if (SeenConstant)
3666           return false;
3667         SeenConstant = true;
3668       } else if (!isa<Argument>(It.first) ||
3669                  It.first == getAssociatedArgument())
3670         return false;
3671     }
3672     return true;
3673   };
3674 
3675   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, FnPos);
3676   if (NoUnwindAA.isAssumedNoUnwind()) {
3677     bool IsVoidTy = F->getReturnType()->isVoidTy();
3678     const AAReturnedValues *RVAA =
3679         IsVoidTy ? nullptr : &A.getAAFor<AAReturnedValues>(*this, FnPos);
3680     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
3681       T.addKnownBits(NOT_CAPTURED_IN_RET);
3682       if (T.isKnown(NOT_CAPTURED_IN_MEM))
3683         return ChangeStatus::UNCHANGED;
3684       if (NoUnwindAA.isKnownNoUnwind() &&
3685           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
3686         addKnownBits(NOT_CAPTURED_IN_RET);
3687         if (isKnown(NOT_CAPTURED_IN_MEM))
3688           return indicateOptimisticFixpoint();
3689       }
3690     }
3691   }
3692 
3693   // Use the CaptureTracker interface and logic with the specialized tracker,
3694   // defined in AACaptureUseTracker, that can look at in-flight abstract
3695   // attributes and directly updates the assumed state.
3696   SmallVector<const Value *, 4> PotentialCopies;
3697   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
3698   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
3699                               RemainingUsesToExplore);
3700 
3701   // Check all potential copies of the associated value until we can assume
3702   // none will be captured or we have to assume at least one might be.
3703   unsigned Idx = 0;
3704   PotentialCopies.push_back(V);
3705   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
3706     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
3707 
3708   AANoCapture::StateType &S = getState();
3709   auto Assumed = S.getAssumed();
3710   S.intersectAssumedBits(T.getAssumed());
3711   if (!isAssumedNoCaptureMaybeReturned())
3712     return indicatePessimisticFixpoint();
3713   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
3714                                    : ChangeStatus::CHANGED;
3715 }
3716 
3717 /// NoCapture attribute for function arguments.
3718 struct AANoCaptureArgument final : AANoCaptureImpl {
3719   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3720 
3721   /// See AbstractAttribute::trackStatistics()
3722   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
3723 };
3724 
3725 /// NoCapture attribute for call site arguments.
3726 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
3727   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3728 
3729   /// See AbstractAttribute::updateImpl(...).
3730   ChangeStatus updateImpl(Attributor &A) override {
3731     // TODO: Once we have call site specific value information we can provide
3732     //       call site specific liveness information and then it makes
3733     //       sense to specialize attributes for call sites arguments instead of
3734     //       redirecting requests to the callee argument.
3735     Argument *Arg = getAssociatedArgument();
3736     if (!Arg)
3737       return indicatePessimisticFixpoint();
3738     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3739     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
3740     return clampStateAndIndicateChange(
3741         getState(),
3742         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
3743   }
3744 
3745   /// See AbstractAttribute::trackStatistics()
3746   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
3747 };
3748 
3749 /// NoCapture attribute for floating values.
3750 struct AANoCaptureFloating final : AANoCaptureImpl {
3751   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3752 
3753   /// See AbstractAttribute::trackStatistics()
3754   void trackStatistics() const override {
3755     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
3756   }
3757 };
3758 
3759 /// NoCapture attribute for function return value.
3760 struct AANoCaptureReturned final : AANoCaptureImpl {
3761   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
3762     llvm_unreachable("NoCapture is not applicable to function returns!");
3763   }
3764 
3765   /// See AbstractAttribute::initialize(...).
3766   void initialize(Attributor &A) override {
3767     llvm_unreachable("NoCapture is not applicable to function returns!");
3768   }
3769 
3770   /// See AbstractAttribute::updateImpl(...).
3771   ChangeStatus updateImpl(Attributor &A) override {
3772     llvm_unreachable("NoCapture is not applicable to function returns!");
3773   }
3774 
3775   /// See AbstractAttribute::trackStatistics()
3776   void trackStatistics() const override {}
3777 };
3778 
3779 /// NoCapture attribute deduction for a call site return value.
3780 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
3781   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
3782 
3783   /// See AbstractAttribute::trackStatistics()
3784   void trackStatistics() const override {
3785     STATS_DECLTRACK_CSRET_ATTR(nocapture)
3786   }
3787 };
3788 
3789 /// ------------------ Value Simplify Attribute ----------------------------
3790 struct AAValueSimplifyImpl : AAValueSimplify {
3791   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
3792 
3793   /// See AbstractAttribute::getAsStr().
3794   const std::string getAsStr() const override {
3795     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
3796                         : "not-simple";
3797   }
3798 
3799   /// See AbstractAttribute::trackStatistics()
3800   void trackStatistics() const override {}
3801 
3802   /// See AAValueSimplify::getAssumedSimplifiedValue()
3803   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
3804     if (!getAssumed())
3805       return const_cast<Value *>(&getAssociatedValue());
3806     return SimplifiedAssociatedValue;
3807   }
3808   void initialize(Attributor &A) override {}
3809 
3810   /// Helper function for querying AAValueSimplify and updating candicate.
3811   /// \param QueryingValue Value trying to unify with SimplifiedValue
3812   /// \param AccumulatedSimplifiedValue Current simplification result.
3813   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
3814                              Value &QueryingValue,
3815                              Optional<Value *> &AccumulatedSimplifiedValue) {
3816     // FIXME: Add a typecast support.
3817 
3818     auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
3819         QueryingAA, IRPosition::value(QueryingValue));
3820 
3821     Optional<Value *> QueryingValueSimplified =
3822         ValueSimpifyAA.getAssumedSimplifiedValue(A);
3823 
3824     if (!QueryingValueSimplified.hasValue())
3825       return true;
3826 
3827     if (!QueryingValueSimplified.getValue())
3828       return false;
3829 
3830     Value &QueryingValueSimplifiedUnwrapped =
3831         *QueryingValueSimplified.getValue();
3832 
3833     if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
3834       return true;
3835 
3836     if (AccumulatedSimplifiedValue.hasValue())
3837       return AccumulatedSimplifiedValue == QueryingValueSimplified;
3838 
3839     LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
3840                       << " is assumed to be "
3841                       << QueryingValueSimplifiedUnwrapped << "\n");
3842 
3843     AccumulatedSimplifiedValue = QueryingValueSimplified;
3844     return true;
3845   }
3846 
3847   /// See AbstractAttribute::manifest(...).
3848   ChangeStatus manifest(Attributor &A) override {
3849     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3850 
3851     if (!SimplifiedAssociatedValue.hasValue() ||
3852         !SimplifiedAssociatedValue.getValue())
3853       return Changed;
3854 
3855     if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
3856       // We can replace the AssociatedValue with the constant.
3857       Value &V = getAssociatedValue();
3858       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
3859         LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
3860                           << "\n");
3861         V.replaceAllUsesWith(C);
3862         Changed = ChangeStatus::CHANGED;
3863       }
3864     }
3865 
3866     return Changed | AAValueSimplify::manifest(A);
3867   }
3868 
3869 protected:
3870   // An assumed simplified value. Initially, it is set to Optional::None, which
3871   // means that the value is not clear under current assumption. If in the
3872   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
3873   // returns orignal associated value.
3874   Optional<Value *> SimplifiedAssociatedValue;
3875 };
3876 
3877 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
3878   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3879 
3880   void initialize(Attributor &A) override {
3881     AAValueSimplifyImpl::initialize(A);
3882     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
3883       indicatePessimisticFixpoint();
3884     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
3885                 /* IgnoreSubsumingPositions */ true))
3886       indicatePessimisticFixpoint();
3887   }
3888 
3889   /// See AbstractAttribute::updateImpl(...).
3890   ChangeStatus updateImpl(Attributor &A) override {
3891     // Byval is only replacable if it is readonly otherwise we would write into
3892     // the replaced value and not the copy that byval creates implicitly.
3893     Argument *Arg = getAssociatedArgument();
3894     if (Arg->hasByValAttr()) {
3895       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
3896       if (!MemAA.isAssumedReadOnly())
3897         return indicatePessimisticFixpoint();
3898     }
3899 
3900     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3901 
3902     auto PredForCallSite = [&](AbstractCallSite ACS) {
3903       // Check if we have an associated argument or not (which can happen for
3904       // callback calls).
3905       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
3906       if (!ArgOp)
3907        return false;
3908       // We can only propagate thread independent values through callbacks.
3909       // This is different to direct/indirect call sites because for them we
3910       // know the thread executing the caller and callee is the same. For
3911       // callbacks this is not guaranteed, thus a thread dependent value could
3912       // be different for the caller and callee, making it invalid to propagate.
3913       if (ACS.isCallbackCall())
3914         if (auto *C =dyn_cast<Constant>(ArgOp))
3915           if (C->isThreadDependent())
3916             return false;
3917       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
3918     };
3919 
3920     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
3921       return indicatePessimisticFixpoint();
3922 
3923     // If a candicate was found in this update, return CHANGED.
3924     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3925                ? ChangeStatus::UNCHANGED
3926                : ChangeStatus ::CHANGED;
3927   }
3928 
3929   /// See AbstractAttribute::trackStatistics()
3930   void trackStatistics() const override {
3931     STATS_DECLTRACK_ARG_ATTR(value_simplify)
3932   }
3933 };
3934 
3935 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
3936   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3937 
3938   /// See AbstractAttribute::updateImpl(...).
3939   ChangeStatus updateImpl(Attributor &A) override {
3940     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3941 
3942     auto PredForReturned = [&](Value &V) {
3943       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3944     };
3945 
3946     if (!A.checkForAllReturnedValues(PredForReturned, *this))
3947       return indicatePessimisticFixpoint();
3948 
3949     // If a candicate was found in this update, return CHANGED.
3950     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3951                ? ChangeStatus::UNCHANGED
3952                : ChangeStatus ::CHANGED;
3953   }
3954   /// See AbstractAttribute::trackStatistics()
3955   void trackStatistics() const override {
3956     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
3957   }
3958 };
3959 
3960 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
3961   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
3962 
3963   /// See AbstractAttribute::initialize(...).
3964   void initialize(Attributor &A) override {
3965     Value &V = getAnchorValue();
3966 
3967     // TODO: add other stuffs
3968     if (isa<Constant>(V) || isa<UndefValue>(V))
3969       indicatePessimisticFixpoint();
3970   }
3971 
3972   /// See AbstractAttribute::updateImpl(...).
3973   ChangeStatus updateImpl(Attributor &A) override {
3974     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
3975 
3976     auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
3977       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
3978       if (!Stripped && this == &AA) {
3979         // TODO: Look the instruction and check recursively.
3980         LLVM_DEBUG(
3981             dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
3982                    << V << "\n");
3983         indicatePessimisticFixpoint();
3984         return false;
3985       }
3986       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
3987     };
3988 
3989     if (!genericValueTraversal<AAValueSimplify, BooleanState>(
3990             A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
3991             VisitValueCB))
3992       return indicatePessimisticFixpoint();
3993 
3994     // If a candicate was found in this update, return CHANGED.
3995 
3996     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
3997                ? ChangeStatus::UNCHANGED
3998                : ChangeStatus ::CHANGED;
3999   }
4000 
4001   /// See AbstractAttribute::trackStatistics()
4002   void trackStatistics() const override {
4003     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4004   }
4005 };
4006 
4007 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4008   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4009 
4010   /// See AbstractAttribute::initialize(...).
4011   void initialize(Attributor &A) override {
4012     SimplifiedAssociatedValue = &getAnchorValue();
4013     indicateOptimisticFixpoint();
4014   }
4015   /// See AbstractAttribute::initialize(...).
4016   ChangeStatus updateImpl(Attributor &A) override {
4017     llvm_unreachable(
4018         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4019   }
4020   /// See AbstractAttribute::trackStatistics()
4021   void trackStatistics() const override {
4022     STATS_DECLTRACK_FN_ATTR(value_simplify)
4023   }
4024 };
4025 
4026 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4027   AAValueSimplifyCallSite(const IRPosition &IRP)
4028       : AAValueSimplifyFunction(IRP) {}
4029   /// See AbstractAttribute::trackStatistics()
4030   void trackStatistics() const override {
4031     STATS_DECLTRACK_CS_ATTR(value_simplify)
4032   }
4033 };
4034 
4035 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4036   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4037       : AAValueSimplifyReturned(IRP) {}
4038 
4039   void trackStatistics() const override {
4040     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4041   }
4042 };
4043 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4044   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4045       : AAValueSimplifyFloating(IRP) {}
4046 
4047   void trackStatistics() const override {
4048     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4049   }
4050 };
4051 
4052 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4053 struct AAHeapToStackImpl : public AAHeapToStack {
4054   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4055 
4056   const std::string getAsStr() const override {
4057     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4058   }
4059 
4060   ChangeStatus manifest(Attributor &A) override {
4061     assert(getState().isValidState() &&
4062            "Attempted to manifest an invalid state!");
4063 
4064     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4065     Function *F = getAssociatedFunction();
4066     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4067 
4068     for (Instruction *MallocCall : MallocCalls) {
4069       // This malloc cannot be replaced.
4070       if (BadMallocCalls.count(MallocCall))
4071         continue;
4072 
4073       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4074         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4075         A.deleteAfterManifest(*FreeCall);
4076         HasChanged = ChangeStatus::CHANGED;
4077       }
4078 
4079       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4080                         << "\n");
4081 
4082       Constant *Size;
4083       if (isCallocLikeFn(MallocCall, TLI)) {
4084         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4085         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4086         APInt TotalSize = SizeT->getValue() * Num->getValue();
4087         Size =
4088             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4089       } else {
4090         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4091       }
4092 
4093       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4094       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4095                                        Size, "", MallocCall->getNextNode());
4096 
4097       if (AI->getType() != MallocCall->getType())
4098         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4099                              AI->getNextNode());
4100 
4101       MallocCall->replaceAllUsesWith(AI);
4102 
4103       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4104         auto *NBB = II->getNormalDest();
4105         BranchInst::Create(NBB, MallocCall->getParent());
4106         A.deleteAfterManifest(*MallocCall);
4107       } else {
4108         A.deleteAfterManifest(*MallocCall);
4109       }
4110 
4111       if (isCallocLikeFn(MallocCall, TLI)) {
4112         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4113                                    AI->getNextNode());
4114         Value *Ops[] = {
4115             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4116             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4117 
4118         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4119         Module *M = F->getParent();
4120         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4121         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4122       }
4123       HasChanged = ChangeStatus::CHANGED;
4124     }
4125 
4126     return HasChanged;
4127   }
4128 
4129   /// Collection of all malloc calls in a function.
4130   SmallSetVector<Instruction *, 4> MallocCalls;
4131 
4132   /// Collection of malloc calls that cannot be converted.
4133   DenseSet<const Instruction *> BadMallocCalls;
4134 
4135   /// A map for each malloc call to the set of associated free calls.
4136   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4137 
4138   ChangeStatus updateImpl(Attributor &A) override;
4139 };
4140 
4141 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4142   const Function *F = getAssociatedFunction();
4143   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4144 
4145   MustBeExecutedContextExplorer &Explorer =
4146       A.getInfoCache().getMustBeExecutedContextExplorer();
4147 
4148   auto FreeCheck = [&](Instruction &I) {
4149     const auto &Frees = FreesForMalloc.lookup(&I);
4150     if (Frees.size() != 1)
4151       return false;
4152     Instruction *UniqueFree = *Frees.begin();
4153     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4154   };
4155 
4156   auto UsesCheck = [&](Instruction &I) {
4157     bool ValidUsesOnly = true;
4158     bool MustUse = true;
4159 
4160     SmallPtrSet<const Use *, 8> Visited;
4161     SmallVector<const Use *, 8> Worklist;
4162 
4163     for (Use &U : I.uses())
4164       Worklist.push_back(&U);
4165 
4166     while (!Worklist.empty()) {
4167       const Use *U = Worklist.pop_back_val();
4168       if (!Visited.insert(U).second)
4169         continue;
4170 
4171       auto *UserI = U->getUser();
4172 
4173       if (isa<LoadInst>(UserI))
4174         continue;
4175       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4176         if (SI->getValueOperand() == U->get()) {
4177           LLVM_DEBUG(dbgs()
4178                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4179           ValidUsesOnly = false;
4180         } else {
4181           // A store into the malloc'ed memory is fine.
4182         }
4183         continue;
4184       }
4185 
4186       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4187         if (!CB->isArgOperand(U))
4188           continue;
4189 
4190         if (CB->isLifetimeStartOrEnd())
4191           continue;
4192 
4193         // Record malloc.
4194         if (isFreeCall(UserI, TLI)) {
4195           if (MustUse) {
4196             FreesForMalloc[&I].insert(
4197                 cast<Instruction>(const_cast<User *>(UserI)));
4198           } else {
4199             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4200                               << *UserI << "\n");
4201             ValidUsesOnly = false;
4202           }
4203           continue;
4204         }
4205 
4206         unsigned ArgNo = CB->getArgOperandNo(U);
4207 
4208         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4209             *this, IRPosition::callsite_argument(*CB, ArgNo));
4210 
4211         // If a callsite argument use is nofree, we are fine.
4212         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4213             *this, IRPosition::callsite_argument(*CB, ArgNo));
4214 
4215         if (!NoCaptureAA.isAssumedNoCapture() || !ArgNoFreeAA.isAssumedNoFree()) {
4216           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4217           ValidUsesOnly = false;
4218         }
4219         continue;
4220       }
4221 
4222       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4223           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4224         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4225         for (Use &U : UserI->uses())
4226           Worklist.push_back(&U);
4227         continue;
4228       }
4229 
4230       // Unknown user for which we can not track uses further (in a way that
4231       // makes sense).
4232       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4233       ValidUsesOnly = false;
4234     }
4235     return ValidUsesOnly;
4236   };
4237 
4238   auto MallocCallocCheck = [&](Instruction &I) {
4239     if (BadMallocCalls.count(&I))
4240       return true;
4241 
4242     bool IsMalloc = isMallocLikeFn(&I, TLI);
4243     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4244     if (!IsMalloc && !IsCalloc) {
4245       BadMallocCalls.insert(&I);
4246       return true;
4247     }
4248 
4249     if (IsMalloc) {
4250       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4251         if (Size->getValue().sle(MaxHeapToStackSize))
4252           if (UsesCheck(I) || FreeCheck(I)) {
4253             MallocCalls.insert(&I);
4254             return true;
4255           }
4256     } else if (IsCalloc) {
4257       bool Overflow = false;
4258       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4259         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4260           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4261                   .sle(MaxHeapToStackSize))
4262             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4263               MallocCalls.insert(&I);
4264               return true;
4265             }
4266     }
4267 
4268     BadMallocCalls.insert(&I);
4269     return true;
4270   };
4271 
4272   size_t NumBadMallocs = BadMallocCalls.size();
4273 
4274   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4275 
4276   if (NumBadMallocs != BadMallocCalls.size())
4277     return ChangeStatus::CHANGED;
4278 
4279   return ChangeStatus::UNCHANGED;
4280 }
4281 
4282 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4283   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4284 
4285   /// See AbstractAttribute::trackStatistics()
4286   void trackStatistics() const override {
4287     STATS_DECL(MallocCalls, Function,
4288                "Number of malloc calls converted to allocas");
4289     for (auto *C : MallocCalls)
4290       if (!BadMallocCalls.count(C))
4291         ++BUILD_STAT_NAME(MallocCalls, Function);
4292   }
4293 };
4294 
4295 /// -------------------- Memory Behavior Attributes ----------------------------
4296 /// Includes read-none, read-only, and write-only.
4297 /// ----------------------------------------------------------------------------
4298 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
4299   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
4300 
4301   /// See AbstractAttribute::initialize(...).
4302   void initialize(Attributor &A) override {
4303     intersectAssumedBits(BEST_STATE);
4304     getKnownStateFromValue(getIRPosition(), getState());
4305     IRAttribute::initialize(A);
4306   }
4307 
4308   /// Return the memory behavior information encoded in the IR for \p IRP.
4309   static void getKnownStateFromValue(const IRPosition &IRP,
4310                                      BitIntegerState &State) {
4311     SmallVector<Attribute, 2> Attrs;
4312     IRP.getAttrs(AttrKinds, Attrs);
4313     for (const Attribute &Attr : Attrs) {
4314       switch (Attr.getKindAsEnum()) {
4315       case Attribute::ReadNone:
4316         State.addKnownBits(NO_ACCESSES);
4317         break;
4318       case Attribute::ReadOnly:
4319         State.addKnownBits(NO_WRITES);
4320         break;
4321       case Attribute::WriteOnly:
4322         State.addKnownBits(NO_READS);
4323         break;
4324       default:
4325         llvm_unreachable("Unexpcted attribute!");
4326       }
4327     }
4328 
4329     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
4330       if (!I->mayReadFromMemory())
4331         State.addKnownBits(NO_READS);
4332       if (!I->mayWriteToMemory())
4333         State.addKnownBits(NO_WRITES);
4334     }
4335   }
4336 
4337   /// See AbstractAttribute::getDeducedAttributes(...).
4338   void getDeducedAttributes(LLVMContext &Ctx,
4339                             SmallVectorImpl<Attribute> &Attrs) const override {
4340     assert(Attrs.size() == 0);
4341     if (isAssumedReadNone())
4342       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
4343     else if (isAssumedReadOnly())
4344       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
4345     else if (isAssumedWriteOnly())
4346       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
4347     assert(Attrs.size() <= 1);
4348   }
4349 
4350   /// See AbstractAttribute::manifest(...).
4351   ChangeStatus manifest(Attributor &A) override {
4352     const IRPosition &IRP = getIRPosition();
4353 
4354     // Check if we would improve the existing attributes first.
4355     SmallVector<Attribute, 4> DeducedAttrs;
4356     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
4357     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
4358           return IRP.hasAttr(Attr.getKindAsEnum(),
4359                              /* IgnoreSubsumingPositions */ true);
4360         }))
4361       return ChangeStatus::UNCHANGED;
4362 
4363     // Clear existing attributes.
4364     IRP.removeAttrs(AttrKinds);
4365 
4366     // Use the generic manifest method.
4367     return IRAttribute::manifest(A);
4368   }
4369 
4370   /// See AbstractState::getAsStr().
4371   const std::string getAsStr() const override {
4372     if (isAssumedReadNone())
4373       return "readnone";
4374     if (isAssumedReadOnly())
4375       return "readonly";
4376     if (isAssumedWriteOnly())
4377       return "writeonly";
4378     return "may-read/write";
4379   }
4380 
4381   /// The set of IR attributes AAMemoryBehavior deals with.
4382   static const Attribute::AttrKind AttrKinds[3];
4383 };
4384 
4385 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
4386     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
4387 
4388 /// Memory behavior attribute for a floating value.
4389 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
4390   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4391 
4392   /// See AbstractAttribute::initialize(...).
4393   void initialize(Attributor &A) override {
4394     AAMemoryBehaviorImpl::initialize(A);
4395     // Initialize the use vector with all direct uses of the associated value.
4396     for (const Use &U : getAssociatedValue().uses())
4397       Uses.insert(&U);
4398   }
4399 
4400   /// See AbstractAttribute::updateImpl(...).
4401   ChangeStatus updateImpl(Attributor &A) override;
4402 
4403   /// See AbstractAttribute::trackStatistics()
4404   void trackStatistics() const override {
4405     if (isAssumedReadNone())
4406       STATS_DECLTRACK_FLOATING_ATTR(readnone)
4407     else if (isAssumedReadOnly())
4408       STATS_DECLTRACK_FLOATING_ATTR(readonly)
4409     else if (isAssumedWriteOnly())
4410       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
4411   }
4412 
4413 private:
4414   /// Return true if users of \p UserI might access the underlying
4415   /// variable/location described by \p U and should therefore be analyzed.
4416   bool followUsersOfUseIn(Attributor &A, const Use *U,
4417                           const Instruction *UserI);
4418 
4419   /// Update the state according to the effect of use \p U in \p UserI.
4420   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
4421 
4422 protected:
4423   /// Container for (transitive) uses of the associated argument.
4424   SetVector<const Use *> Uses;
4425 };
4426 
4427 /// Memory behavior attribute for function argument.
4428 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
4429   AAMemoryBehaviorArgument(const IRPosition &IRP)
4430       : AAMemoryBehaviorFloating(IRP) {}
4431 
4432   /// See AbstractAttribute::initialize(...).
4433   void initialize(Attributor &A) override {
4434     AAMemoryBehaviorFloating::initialize(A);
4435 
4436     // Initialize the use vector with all direct uses of the associated value.
4437     Argument *Arg = getAssociatedArgument();
4438     if (!Arg || !Arg->getParent()->hasExactDefinition())
4439       indicatePessimisticFixpoint();
4440   }
4441 
4442   ChangeStatus manifest(Attributor &A) override {
4443     // TODO: From readattrs.ll: "inalloca parameters are always
4444     //                           considered written"
4445     if (hasAttr({Attribute::InAlloca})) {
4446       removeKnownBits(NO_WRITES);
4447       removeAssumedBits(NO_WRITES);
4448     }
4449     return AAMemoryBehaviorFloating::manifest(A);
4450   }
4451 
4452   /// See AbstractAttribute::trackStatistics()
4453   void trackStatistics() const override {
4454     if (isAssumedReadNone())
4455       STATS_DECLTRACK_ARG_ATTR(readnone)
4456     else if (isAssumedReadOnly())
4457       STATS_DECLTRACK_ARG_ATTR(readonly)
4458     else if (isAssumedWriteOnly())
4459       STATS_DECLTRACK_ARG_ATTR(writeonly)
4460   }
4461 };
4462 
4463 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
4464   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
4465       : AAMemoryBehaviorArgument(IRP) {}
4466 
4467   /// See AbstractAttribute::updateImpl(...).
4468   ChangeStatus updateImpl(Attributor &A) override {
4469     // TODO: Once we have call site specific value information we can provide
4470     //       call site specific liveness liveness information and then it makes
4471     //       sense to specialize attributes for call sites arguments instead of
4472     //       redirecting requests to the callee argument.
4473     Argument *Arg = getAssociatedArgument();
4474     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4475     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4476     return clampStateAndIndicateChange(
4477         getState(),
4478         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
4479   }
4480 
4481   /// See AbstractAttribute::trackStatistics()
4482   void trackStatistics() const override {
4483     if (isAssumedReadNone())
4484       STATS_DECLTRACK_CSARG_ATTR(readnone)
4485     else if (isAssumedReadOnly())
4486       STATS_DECLTRACK_CSARG_ATTR(readonly)
4487     else if (isAssumedWriteOnly())
4488       STATS_DECLTRACK_CSARG_ATTR(writeonly)
4489   }
4490 };
4491 
4492 /// Memory behavior attribute for a call site return position.
4493 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
4494   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
4495       : AAMemoryBehaviorFloating(IRP) {}
4496 
4497   /// See AbstractAttribute::manifest(...).
4498   ChangeStatus manifest(Attributor &A) override {
4499     // We do not annotate returned values.
4500     return ChangeStatus::UNCHANGED;
4501   }
4502 
4503   /// See AbstractAttribute::trackStatistics()
4504   void trackStatistics() const override {}
4505 };
4506 
4507 /// An AA to represent the memory behavior function attributes.
4508 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
4509   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4510 
4511   /// See AbstractAttribute::updateImpl(Attributor &A).
4512   virtual ChangeStatus updateImpl(Attributor &A) override;
4513 
4514   /// See AbstractAttribute::manifest(...).
4515   ChangeStatus manifest(Attributor &A) override {
4516     Function &F = cast<Function>(getAnchorValue());
4517     if (isAssumedReadNone()) {
4518       F.removeFnAttr(Attribute::ArgMemOnly);
4519       F.removeFnAttr(Attribute::InaccessibleMemOnly);
4520       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4521     }
4522     return AAMemoryBehaviorImpl::manifest(A);
4523   }
4524 
4525   /// See AbstractAttribute::trackStatistics()
4526   void trackStatistics() const override {
4527     if (isAssumedReadNone())
4528       STATS_DECLTRACK_FN_ATTR(readnone)
4529     else if (isAssumedReadOnly())
4530       STATS_DECLTRACK_FN_ATTR(readonly)
4531     else if (isAssumedWriteOnly())
4532       STATS_DECLTRACK_FN_ATTR(writeonly)
4533   }
4534 };
4535 
4536 /// AAMemoryBehavior attribute for call sites.
4537 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
4538   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4539 
4540   /// See AbstractAttribute::initialize(...).
4541   void initialize(Attributor &A) override {
4542     AAMemoryBehaviorImpl::initialize(A);
4543     Function *F = getAssociatedFunction();
4544     if (!F || !F->hasExactDefinition())
4545       indicatePessimisticFixpoint();
4546   }
4547 
4548   /// See AbstractAttribute::updateImpl(...).
4549   ChangeStatus updateImpl(Attributor &A) override {
4550     // TODO: Once we have call site specific value information we can provide
4551     //       call site specific liveness liveness information and then it makes
4552     //       sense to specialize attributes for call sites arguments instead of
4553     //       redirecting requests to the callee argument.
4554     Function *F = getAssociatedFunction();
4555     const IRPosition &FnPos = IRPosition::function(*F);
4556     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4557     return clampStateAndIndicateChange(
4558         getState(),
4559         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
4560   }
4561 
4562   /// See AbstractAttribute::trackStatistics()
4563   void trackStatistics() const override {
4564     if (isAssumedReadNone())
4565       STATS_DECLTRACK_CS_ATTR(readnone)
4566     else if (isAssumedReadOnly())
4567       STATS_DECLTRACK_CS_ATTR(readonly)
4568     else if (isAssumedWriteOnly())
4569       STATS_DECLTRACK_CS_ATTR(writeonly)
4570   }
4571 };
4572 } // namespace
4573 
4574 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
4575 
4576   // The current assumed state used to determine a change.
4577   auto AssumedState = getAssumed();
4578 
4579   auto CheckRWInst = [&](Instruction &I) {
4580     // If the instruction has an own memory behavior state, use it to restrict
4581     // the local state. No further analysis is required as the other memory
4582     // state is as optimistic as it gets.
4583     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4584       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4585           *this, IRPosition::callsite_function(ICS));
4586       intersectAssumedBits(MemBehaviorAA.getAssumed());
4587       return !isAtFixpoint();
4588     }
4589 
4590     // Remove access kind modifiers if necessary.
4591     if (I.mayReadFromMemory())
4592       removeAssumedBits(NO_READS);
4593     if (I.mayWriteToMemory())
4594       removeAssumedBits(NO_WRITES);
4595     return !isAtFixpoint();
4596   };
4597 
4598   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
4599     return indicatePessimisticFixpoint();
4600 
4601   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4602                                         : ChangeStatus::UNCHANGED;
4603 }
4604 
4605 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
4606 
4607   const IRPosition &IRP = getIRPosition();
4608   const IRPosition &FnPos = IRPosition::function_scope(IRP);
4609   AAMemoryBehavior::StateType &S = getState();
4610 
4611   // First, check the function scope. We take the known information and we avoid
4612   // work if the assumed information implies the current assumed information for
4613   // this attribute.
4614   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4615   S.addKnownBits(FnMemAA.getKnown());
4616   if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
4617     return ChangeStatus::UNCHANGED;
4618 
4619   // Make sure the value is not captured (except through "return"), if
4620   // it is, any information derived would be irrelevant anyway as we cannot
4621   // check the potential aliases introduced by the capture. However, no need
4622   // to fall back to anythign less optimistic than the function state.
4623   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
4624   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4625     S.intersectAssumedBits(FnMemAA.getAssumed());
4626     return ChangeStatus::CHANGED;
4627   }
4628 
4629   // The current assumed state used to determine a change.
4630   auto AssumedState = S.getAssumed();
4631 
4632   // Liveness information to exclude dead users.
4633   // TODO: Take the FnPos once we have call site specific liveness information.
4634   const auto &LivenessAA = A.getAAFor<AAIsDead>(
4635       *this, IRPosition::function(*IRP.getAssociatedFunction()));
4636 
4637   // Visit and expand uses until all are analyzed or a fixpoint is reached.
4638   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
4639     const Use *U = Uses[i];
4640     Instruction *UserI = cast<Instruction>(U->getUser());
4641     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
4642                       << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
4643                       << "]\n");
4644     if (LivenessAA.isAssumedDead(UserI))
4645       continue;
4646 
4647     // Check if the users of UserI should also be visited.
4648     if (followUsersOfUseIn(A, U, UserI))
4649       for (const Use &UserIUse : UserI->uses())
4650         Uses.insert(&UserIUse);
4651 
4652     // If UserI might touch memory we analyze the use in detail.
4653     if (UserI->mayReadOrWriteMemory())
4654       analyzeUseIn(A, U, UserI);
4655   }
4656 
4657   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4658                                         : ChangeStatus::UNCHANGED;
4659 }
4660 
4661 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
4662                                                   const Instruction *UserI) {
4663   // The loaded value is unrelated to the pointer argument, no need to
4664   // follow the users of the load.
4665   if (isa<LoadInst>(UserI))
4666     return false;
4667 
4668   // By default we follow all uses assuming UserI might leak information on U,
4669   // we have special handling for call sites operands though.
4670   ImmutableCallSite ICS(UserI);
4671   if (!ICS || !ICS.isArgOperand(U))
4672     return true;
4673 
4674   // If the use is a call argument known not to be captured, the users of
4675   // the call do not need to be visited because they have to be unrelated to
4676   // the input. Note that this check is not trivial even though we disallow
4677   // general capturing of the underlying argument. The reason is that the
4678   // call might the argument "through return", which we allow and for which we
4679   // need to check call users.
4680   unsigned ArgNo = ICS.getArgumentNo(U);
4681   const auto &ArgNoCaptureAA =
4682       A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
4683   return !ArgNoCaptureAA.isAssumedNoCapture();
4684 }
4685 
4686 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
4687                                             const Instruction *UserI) {
4688   assert(UserI->mayReadOrWriteMemory());
4689 
4690   switch (UserI->getOpcode()) {
4691   default:
4692     // TODO: Handle all atomics and other side-effect operations we know of.
4693     break;
4694   case Instruction::Load:
4695     // Loads cause the NO_READS property to disappear.
4696     removeAssumedBits(NO_READS);
4697     return;
4698 
4699   case Instruction::Store:
4700     // Stores cause the NO_WRITES property to disappear if the use is the
4701     // pointer operand. Note that we do assume that capturing was taken care of
4702     // somewhere else.
4703     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
4704       removeAssumedBits(NO_WRITES);
4705     return;
4706 
4707   case Instruction::Call:
4708   case Instruction::CallBr:
4709   case Instruction::Invoke: {
4710     // For call sites we look at the argument memory behavior attribute (this
4711     // could be recursive!) in order to restrict our own state.
4712     ImmutableCallSite ICS(UserI);
4713 
4714     // Give up on operand bundles.
4715     if (ICS.isBundleOperand(U)) {
4716       indicatePessimisticFixpoint();
4717       return;
4718     }
4719 
4720     // Calling a function does read the function pointer, maybe write it if the
4721     // function is self-modifying.
4722     if (ICS.isCallee(U)) {
4723       removeAssumedBits(NO_READS);
4724       break;
4725     }
4726 
4727     // Adjust the possible access behavior based on the information on the
4728     // argument.
4729     unsigned ArgNo = ICS.getArgumentNo(U);
4730     const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
4731     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4732     // "assumed" has at most the same bits as the MemBehaviorAA assumed
4733     // and at least "known".
4734     intersectAssumedBits(MemBehaviorAA.getAssumed());
4735     return;
4736   }
4737   };
4738 
4739   // Generally, look at the "may-properties" and adjust the assumed state if we
4740   // did not trigger special handling before.
4741   if (UserI->mayReadFromMemory())
4742     removeAssumedBits(NO_READS);
4743   if (UserI->mayWriteToMemory())
4744     removeAssumedBits(NO_WRITES);
4745 }
4746 
4747 /// ----------------------------------------------------------------------------
4748 ///                               Attributor
4749 /// ----------------------------------------------------------------------------
4750 
4751 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
4752                                const AAIsDead *LivenessAA) {
4753   const Instruction *CtxI = AA.getIRPosition().getCtxI();
4754   if (!CtxI)
4755     return false;
4756 
4757   // TODO: Find a good way to utilize fine and coarse grained liveness
4758   // information.
4759   if (!LivenessAA)
4760     LivenessAA =
4761         &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
4762                             /* TrackDependence */ false);
4763 
4764   // Don't check liveness for AAIsDead.
4765   if (&AA == LivenessAA)
4766     return false;
4767 
4768   if (!LivenessAA->isAssumedDead(CtxI))
4769     return false;
4770 
4771   // We actually used liveness information so we have to record a dependence.
4772   recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
4773 
4774   return true;
4775 }
4776 
4777 bool Attributor::checkForAllUses(
4778     const function_ref<bool(const Use &, bool &)> &Pred,
4779     const AbstractAttribute &QueryingAA, const Value &V) {
4780   const IRPosition &IRP = QueryingAA.getIRPosition();
4781   SmallVector<const Use *, 16> Worklist;
4782   SmallPtrSet<const Use *, 16> Visited;
4783 
4784   for (const Use &U : V.uses())
4785     Worklist.push_back(&U);
4786 
4787   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
4788                     << " initial uses to check\n");
4789 
4790   if (Worklist.empty())
4791     return true;
4792 
4793   bool AnyDead = false;
4794   const Function *ScopeFn = IRP.getAnchorScope();
4795   const auto *LivenessAA =
4796       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
4797                                     /* TrackDependence */ false)
4798               : nullptr;
4799 
4800   while (!Worklist.empty()) {
4801     const Use *U = Worklist.pop_back_val();
4802     if (!Visited.insert(U).second)
4803       continue;
4804     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
4805     if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
4806       if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
4807         LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
4808                           << static_cast<const AbstractAttribute &>(*LivenessAA)
4809                           << "\n");
4810         AnyDead = true;
4811         continue;
4812       }
4813 
4814     bool Follow = false;
4815     if (!Pred(*U, Follow))
4816       return false;
4817     if (!Follow)
4818       continue;
4819     for (const Use &UU : U->getUser()->uses())
4820       Worklist.push_back(&UU);
4821   }
4822 
4823   if (AnyDead)
4824     recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4825 
4826   return true;
4827 }
4828 
4829 bool Attributor::checkForAllCallSites(
4830     const function_ref<bool(AbstractCallSite)> &Pred,
4831     const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
4832   // We can try to determine information from
4833   // the call sites. However, this is only possible all call sites are known,
4834   // hence the function has internal linkage.
4835   const IRPosition &IRP = QueryingAA.getIRPosition();
4836   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4837   if (!AssociatedFunction) {
4838     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
4839                       << "\n");
4840     return false;
4841   }
4842 
4843   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
4844                               &QueryingAA);
4845 }
4846 
4847 bool Attributor::checkForAllCallSites(
4848     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
4849     bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
4850   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
4851     LLVM_DEBUG(
4852         dbgs()
4853         << "[Attributor] Function " << Fn.getName()
4854         << " has no internal linkage, hence not all call sites are known\n");
4855     return false;
4856   }
4857 
4858   for (const Use &U : Fn.uses()) {
4859     AbstractCallSite ACS(&U);
4860     if (!ACS) {
4861       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
4862                         << " has non call site use " << *U.get() << " in "
4863                         << *U.getUser() << "\n");
4864       // BlockAddress users are allowed.
4865       if (isa<BlockAddress>(U.getUser()))
4866         continue;
4867       return false;
4868     }
4869 
4870     Instruction *I = ACS.getInstruction();
4871     Function *Caller = I->getFunction();
4872 
4873     const auto *LivenessAA =
4874         lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
4875                               /* TrackDependence */ false);
4876 
4877     // Skip dead calls.
4878     if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4879       // We actually used liveness information so we have to record a
4880       // dependence.
4881       if (QueryingAA)
4882         recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
4883       continue;
4884     }
4885 
4886     const Use *EffectiveUse =
4887         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
4888     if (!ACS.isCallee(EffectiveUse)) {
4889       if (!RequireAllCallSites)
4890         continue;
4891       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
4892                         << " is an invalid use of " << Fn.getName() << "\n");
4893       return false;
4894     }
4895 
4896     if (Pred(ACS))
4897       continue;
4898 
4899     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
4900                       << *ACS.getInstruction() << "\n");
4901     return false;
4902   }
4903 
4904   return true;
4905 }
4906 
4907 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
4908     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
4909         &Pred,
4910     const AbstractAttribute &QueryingAA) {
4911 
4912   const IRPosition &IRP = QueryingAA.getIRPosition();
4913   // Since we need to provide return instructions we have to have an exact
4914   // definition.
4915   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4916   if (!AssociatedFunction)
4917     return false;
4918 
4919   // If this is a call site query we use the call site specific return values
4920   // and liveness information.
4921   // TODO: use the function scope once we have call site AAReturnedValues.
4922   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4923   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4924   if (!AARetVal.getState().isValidState())
4925     return false;
4926 
4927   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
4928 }
4929 
4930 bool Attributor::checkForAllReturnedValues(
4931     const function_ref<bool(Value &)> &Pred,
4932     const AbstractAttribute &QueryingAA) {
4933 
4934   const IRPosition &IRP = QueryingAA.getIRPosition();
4935   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4936   if (!AssociatedFunction)
4937     return false;
4938 
4939   // TODO: use the function scope once we have call site AAReturnedValues.
4940   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4941   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
4942   if (!AARetVal.getState().isValidState())
4943     return false;
4944 
4945   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
4946       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
4947         return Pred(RV);
4948       });
4949 }
4950 
4951 static bool
4952 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
4953                             const function_ref<bool(Instruction &)> &Pred,
4954                             const AAIsDead *LivenessAA, bool &AnyDead,
4955                             const ArrayRef<unsigned> &Opcodes) {
4956   for (unsigned Opcode : Opcodes) {
4957     for (Instruction *I : OpcodeInstMap[Opcode]) {
4958       // Skip dead instructions.
4959       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
4960         AnyDead = true;
4961         continue;
4962       }
4963 
4964       if (!Pred(*I))
4965         return false;
4966     }
4967   }
4968   return true;
4969 }
4970 
4971 bool Attributor::checkForAllInstructions(
4972     const llvm::function_ref<bool(Instruction &)> &Pred,
4973     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
4974 
4975   const IRPosition &IRP = QueryingAA.getIRPosition();
4976   // Since we need to provide instructions we have to have an exact definition.
4977   const Function *AssociatedFunction = IRP.getAssociatedFunction();
4978   if (!AssociatedFunction)
4979     return false;
4980 
4981   // TODO: use the function scope once we have call site AAReturnedValues.
4982   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
4983   const auto &LivenessAA =
4984       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
4985   bool AnyDead = false;
4986 
4987   auto &OpcodeInstMap =
4988       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
4989   if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
4990                                    Opcodes))
4991     return false;
4992 
4993   // If we actually used liveness information so we have to record a dependence.
4994   if (AnyDead)
4995     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
4996 
4997   return true;
4998 }
4999 
5000 bool Attributor::checkForAllReadWriteInstructions(
5001     const llvm::function_ref<bool(Instruction &)> &Pred,
5002     AbstractAttribute &QueryingAA) {
5003 
5004   const Function *AssociatedFunction =
5005       QueryingAA.getIRPosition().getAssociatedFunction();
5006   if (!AssociatedFunction)
5007     return false;
5008 
5009   // TODO: use the function scope once we have call site AAReturnedValues.
5010   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5011   const auto &LivenessAA =
5012       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5013   bool AnyDead = false;
5014 
5015   for (Instruction *I :
5016        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
5017     // Skip dead instructions.
5018     if (LivenessAA.isAssumedDead(I)) {
5019       AnyDead = true;
5020       continue;
5021     }
5022 
5023     if (!Pred(*I))
5024       return false;
5025   }
5026 
5027   // If we actually used liveness information so we have to record a dependence.
5028   if (AnyDead)
5029     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5030 
5031   return true;
5032 }
5033 
5034 ChangeStatus Attributor::run(Module &M) {
5035   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
5036                     << AllAbstractAttributes.size()
5037                     << " abstract attributes.\n");
5038 
5039   // Now that all abstract attributes are collected and initialized we start
5040   // the abstract analysis.
5041 
5042   unsigned IterationCounter = 1;
5043 
5044   SmallVector<AbstractAttribute *, 64> ChangedAAs;
5045   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
5046   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
5047 
5048   bool RecomputeDependences = false;
5049 
5050   do {
5051     // Remember the size to determine new attributes.
5052     size_t NumAAs = AllAbstractAttributes.size();
5053     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
5054                       << ", Worklist size: " << Worklist.size() << "\n");
5055 
5056     // For invalid AAs we can fix dependent AAs that have a required dependence,
5057     // thereby folding long dependence chains in a single step without the need
5058     // to run updates.
5059     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
5060       AbstractAttribute *InvalidAA = InvalidAAs[u];
5061       auto &QuerriedAAs = QueryMap[InvalidAA];
5062       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
5063                         << QuerriedAAs.RequiredAAs.size() << "/"
5064                         << QuerriedAAs.OptionalAAs.size()
5065                         << " required/optional dependences\n");
5066       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
5067         AbstractState &DOIAAState = DepOnInvalidAA->getState();
5068         DOIAAState.indicatePessimisticFixpoint();
5069         ++NumAttributesFixedDueToRequiredDependences;
5070         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
5071         if (!DOIAAState.isValidState())
5072           InvalidAAs.insert(DepOnInvalidAA);
5073       }
5074       if (!RecomputeDependences)
5075         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5076                         QuerriedAAs.OptionalAAs.end());
5077     }
5078 
5079     // If dependences (=QueryMap) are recomputed we have to look at all abstract
5080     // attributes again, regardless of what changed in the last iteration.
5081     if (RecomputeDependences) {
5082       LLVM_DEBUG(
5083           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
5084       QueryMap.clear();
5085       ChangedAAs.clear();
5086       Worklist.insert(AllAbstractAttributes.begin(),
5087                       AllAbstractAttributes.end());
5088     }
5089 
5090     // Add all abstract attributes that are potentially dependent on one that
5091     // changed to the work list.
5092     for (AbstractAttribute *ChangedAA : ChangedAAs) {
5093       auto &QuerriedAAs = QueryMap[ChangedAA];
5094       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5095                       QuerriedAAs.OptionalAAs.end());
5096       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
5097                       QuerriedAAs.RequiredAAs.end());
5098     }
5099 
5100     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
5101                       << ", Worklist+Dependent size: " << Worklist.size()
5102                       << "\n");
5103 
5104     // Reset the changed and invalid set.
5105     ChangedAAs.clear();
5106     InvalidAAs.clear();
5107 
5108     // Update all abstract attribute in the work list and record the ones that
5109     // changed.
5110     for (AbstractAttribute *AA : Worklist)
5111       if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
5112         QueriedNonFixAA = false;
5113         if (AA->update(*this) == ChangeStatus::CHANGED) {
5114           ChangedAAs.push_back(AA);
5115           if (!AA->getState().isValidState())
5116             InvalidAAs.insert(AA);
5117         } else if (!QueriedNonFixAA) {
5118           // If the attribute did not query any non-fix information, the state
5119           // will not change and we can indicate that right away.
5120           AA->getState().indicateOptimisticFixpoint();
5121         }
5122       }
5123 
5124     // Check if we recompute the dependences in the next iteration.
5125     RecomputeDependences = (DepRecomputeInterval > 0 &&
5126                             IterationCounter % DepRecomputeInterval == 0);
5127 
5128     // Add attributes to the changed set if they have been created in the last
5129     // iteration.
5130     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
5131                       AllAbstractAttributes.end());
5132 
5133     // Reset the work list and repopulate with the changed abstract attributes.
5134     // Note that dependent ones are added above.
5135     Worklist.clear();
5136     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
5137 
5138   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
5139                                  VerifyMaxFixpointIterations));
5140 
5141   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
5142                     << IterationCounter << "/" << MaxFixpointIterations
5143                     << " iterations\n");
5144 
5145   size_t NumFinalAAs = AllAbstractAttributes.size();
5146 
5147   // Reset abstract arguments not settled in a sound fixpoint by now. This
5148   // happens when we stopped the fixpoint iteration early. Note that only the
5149   // ones marked as "changed" *and* the ones transitively depending on them
5150   // need to be reverted to a pessimistic state. Others might not be in a
5151   // fixpoint state but we can use the optimistic results for them anyway.
5152   SmallPtrSet<AbstractAttribute *, 32> Visited;
5153   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
5154     AbstractAttribute *ChangedAA = ChangedAAs[u];
5155     if (!Visited.insert(ChangedAA).second)
5156       continue;
5157 
5158     AbstractState &State = ChangedAA->getState();
5159     if (!State.isAtFixpoint()) {
5160       State.indicatePessimisticFixpoint();
5161 
5162       NumAttributesTimedOut++;
5163     }
5164 
5165     auto &QuerriedAAs = QueryMap[ChangedAA];
5166     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
5167                       QuerriedAAs.OptionalAAs.end());
5168     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
5169                       QuerriedAAs.RequiredAAs.end());
5170   }
5171 
5172   LLVM_DEBUG({
5173     if (!Visited.empty())
5174       dbgs() << "\n[Attributor] Finalized " << Visited.size()
5175              << " abstract attributes.\n";
5176   });
5177 
5178   unsigned NumManifested = 0;
5179   unsigned NumAtFixpoint = 0;
5180   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
5181   for (AbstractAttribute *AA : AllAbstractAttributes) {
5182     AbstractState &State = AA->getState();
5183 
5184     // If there is not already a fixpoint reached, we can now take the
5185     // optimistic state. This is correct because we enforced a pessimistic one
5186     // on abstract attributes that were transitively dependent on a changed one
5187     // already above.
5188     if (!State.isAtFixpoint())
5189       State.indicateOptimisticFixpoint();
5190 
5191     // If the state is invalid, we do not try to manifest it.
5192     if (!State.isValidState())
5193       continue;
5194 
5195     // Skip dead code.
5196     if (isAssumedDead(*AA, nullptr))
5197       continue;
5198     // Manifest the state and record if we changed the IR.
5199     ChangeStatus LocalChange = AA->manifest(*this);
5200     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
5201       AA->trackStatistics();
5202 
5203     ManifestChange = ManifestChange | LocalChange;
5204 
5205     NumAtFixpoint++;
5206     NumManifested += (LocalChange == ChangeStatus::CHANGED);
5207   }
5208 
5209   (void)NumManifested;
5210   (void)NumAtFixpoint;
5211   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
5212                     << " arguments while " << NumAtFixpoint
5213                     << " were in a valid fixpoint state\n");
5214 
5215   NumAttributesManifested += NumManifested;
5216   NumAttributesValidFixpoint += NumAtFixpoint;
5217 
5218   (void)NumFinalAAs;
5219   assert(
5220       NumFinalAAs == AllAbstractAttributes.size() &&
5221       "Expected the final number of abstract attributes to remain unchanged!");
5222 
5223   // Delete stuff at the end to avoid invalid references and a nice order.
5224   {
5225     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
5226                       << ToBeDeletedFunctions.size() << " functions and "
5227                       << ToBeDeletedBlocks.size() << " blocks and "
5228                       << ToBeDeletedInsts.size() << " instructions and "
5229                       << ToBeChangedUses.size() << " uses\n");
5230 
5231     SmallVector<Instruction *, 32> DeadInsts;
5232     SmallVector<Instruction *, 32> TerminatorsToFold;
5233     SmallVector<Instruction *, 32> UnreachablesToInsert;
5234 
5235     for (auto &It : ToBeChangedUses) {
5236       Use *U = It.first;
5237       Value *NewV = It.second;
5238       Value *OldV = U->get();
5239       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
5240                         << " instead of " << *OldV << "\n");
5241       U->set(NewV);
5242       if (Instruction *I = dyn_cast<Instruction>(OldV))
5243         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
5244             isInstructionTriviallyDead(I)) {
5245           DeadInsts.push_back(I);
5246         }
5247       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
5248         Instruction *UserI = cast<Instruction>(U->getUser());
5249         if (isa<UndefValue>(NewV)) {
5250           UnreachablesToInsert.push_back(UserI);
5251         } else {
5252           TerminatorsToFold.push_back(UserI);
5253         }
5254       }
5255     }
5256     for (Instruction *I : UnreachablesToInsert)
5257       changeToUnreachable(I, /* UseLLVMTrap */ false);
5258     for (Instruction *I : TerminatorsToFold)
5259       ConstantFoldTerminator(I->getParent());
5260 
5261     for (Instruction *I : ToBeDeletedInsts) {
5262       I->replaceAllUsesWith(UndefValue::get(I->getType()));
5263       if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
5264         DeadInsts.push_back(I);
5265       else
5266         I->eraseFromParent();
5267     }
5268 
5269     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
5270 
5271     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
5272       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
5273       ToBeDeletedBBs.reserve(NumDeadBlocks);
5274       ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
5275       // Actually we do not delete the blocks but squash them into a single
5276       // unreachable but untangling branches that jump here is something we need
5277       // to do in a more generic way.
5278       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
5279       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
5280       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
5281     }
5282 
5283     STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
5284     for (Function *Fn : ToBeDeletedFunctions) {
5285       Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
5286       Fn->eraseFromParent();
5287       STATS_TRACK(AAIsDead, Function);
5288     }
5289 
5290     // Identify dead internal functions and delete them. This happens outside
5291     // the other fixpoint analysis as we might treat potentially dead functions
5292     // as live to lower the number of iterations. If they happen to be dead, the
5293     // below fixpoint loop will identify and eliminate them.
5294     SmallVector<Function *, 8> InternalFns;
5295     for (Function &F : M)
5296       if (F.hasLocalLinkage())
5297         InternalFns.push_back(&F);
5298 
5299     bool FoundDeadFn = true;
5300     while (FoundDeadFn) {
5301       FoundDeadFn = false;
5302       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
5303         Function *F = InternalFns[u];
5304         if (!F)
5305           continue;
5306 
5307         if (!checkForAllCallSites([](AbstractCallSite ACS) { return false; },
5308                                   *F, true, nullptr))
5309           continue;
5310 
5311         STATS_TRACK(AAIsDead, Function);
5312         ToBeDeletedFunctions.insert(F);
5313         F->deleteBody();
5314         F->replaceAllUsesWith(UndefValue::get(F->getType()));
5315         F->eraseFromParent();
5316         InternalFns[u] = nullptr;
5317         FoundDeadFn = true;
5318       }
5319     }
5320   }
5321 
5322   if (VerifyMaxFixpointIterations &&
5323       IterationCounter != MaxFixpointIterations) {
5324     errs() << "\n[Attributor] Fixpoint iteration done after: "
5325            << IterationCounter << "/" << MaxFixpointIterations
5326            << " iterations\n";
5327     llvm_unreachable("The fixpoint was not reached with exactly the number of "
5328                      "specified iterations!");
5329   }
5330 
5331   return ManifestChange;
5332 }
5333 
5334 void Attributor::initializeInformationCache(Function &F) {
5335 
5336   // Walk all instructions to find interesting instructions that might be
5337   // queried by abstract attributes during their initialization or update.
5338   // This has to happen before we create attributes.
5339   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
5340   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
5341 
5342   for (Instruction &I : instructions(&F)) {
5343     bool IsInterestingOpcode = false;
5344 
5345     // To allow easy access to all instructions in a function with a given
5346     // opcode we store them in the InfoCache. As not all opcodes are interesting
5347     // to concrete attributes we only cache the ones that are as identified in
5348     // the following switch.
5349     // Note: There are no concrete attributes now so this is initially empty.
5350     switch (I.getOpcode()) {
5351     default:
5352       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
5353              "New call site/base instruction type needs to be known int the "
5354              "Attributor.");
5355       break;
5356     case Instruction::Load:
5357       // The alignment of a pointer is interesting for loads.
5358     case Instruction::Store:
5359       // The alignment of a pointer is interesting for stores.
5360     case Instruction::Call:
5361     case Instruction::CallBr:
5362     case Instruction::Invoke:
5363     case Instruction::CleanupRet:
5364     case Instruction::CatchSwitch:
5365     case Instruction::Resume:
5366     case Instruction::Ret:
5367       IsInterestingOpcode = true;
5368     }
5369     if (IsInterestingOpcode)
5370       InstOpcodeMap[I.getOpcode()].push_back(&I);
5371     if (I.mayReadOrWriteMemory())
5372       ReadOrWriteInsts.push_back(&I);
5373   }
5374 }
5375 
5376 void Attributor::recordDependence(const AbstractAttribute &FromAA,
5377                                   const AbstractAttribute &ToAA,
5378                                   DepClassTy DepClass) {
5379   if (FromAA.getState().isAtFixpoint())
5380     return;
5381 
5382   if (DepClass == DepClassTy::REQUIRED)
5383     QueryMap[&FromAA].RequiredAAs.insert(
5384         const_cast<AbstractAttribute *>(&ToAA));
5385   else
5386     QueryMap[&FromAA].OptionalAAs.insert(
5387         const_cast<AbstractAttribute *>(&ToAA));
5388   QueriedNonFixAA = true;
5389 }
5390 
5391 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
5392   if (!VisitedFunctions.insert(&F).second)
5393     return;
5394   if (F.isDeclaration())
5395     return;
5396 
5397   IRPosition FPos = IRPosition::function(F);
5398 
5399   // Check for dead BasicBlocks in every function.
5400   // We need dead instruction detection because we do not want to deal with
5401   // broken IR in which SSA rules do not apply.
5402   getOrCreateAAFor<AAIsDead>(FPos);
5403 
5404   // Every function might be "will-return".
5405   getOrCreateAAFor<AAWillReturn>(FPos);
5406 
5407   // Every function can be nounwind.
5408   getOrCreateAAFor<AANoUnwind>(FPos);
5409 
5410   // Every function might be marked "nosync"
5411   getOrCreateAAFor<AANoSync>(FPos);
5412 
5413   // Every function might be "no-free".
5414   getOrCreateAAFor<AANoFree>(FPos);
5415 
5416   // Every function might be "no-return".
5417   getOrCreateAAFor<AANoReturn>(FPos);
5418 
5419   // Every function might be "no-recurse".
5420   getOrCreateAAFor<AANoRecurse>(FPos);
5421 
5422   // Every function might be "readnone/readonly/writeonly/...".
5423   getOrCreateAAFor<AAMemoryBehavior>(FPos);
5424 
5425   // Every function might be applicable for Heap-To-Stack conversion.
5426   if (EnableHeapToStack)
5427     getOrCreateAAFor<AAHeapToStack>(FPos);
5428 
5429   // Return attributes are only appropriate if the return type is non void.
5430   Type *ReturnType = F.getReturnType();
5431   if (!ReturnType->isVoidTy()) {
5432     // Argument attribute "returned" --- Create only one per function even
5433     // though it is an argument attribute.
5434     getOrCreateAAFor<AAReturnedValues>(FPos);
5435 
5436     IRPosition RetPos = IRPosition::returned(F);
5437 
5438     // Every returned value might be dead.
5439     getOrCreateAAFor<AAIsDead>(RetPos);
5440 
5441     // Every function might be simplified.
5442     getOrCreateAAFor<AAValueSimplify>(RetPos);
5443 
5444     if (ReturnType->isPointerTy()) {
5445 
5446       // Every function with pointer return type might be marked align.
5447       getOrCreateAAFor<AAAlign>(RetPos);
5448 
5449       // Every function with pointer return type might be marked nonnull.
5450       getOrCreateAAFor<AANonNull>(RetPos);
5451 
5452       // Every function with pointer return type might be marked noalias.
5453       getOrCreateAAFor<AANoAlias>(RetPos);
5454 
5455       // Every function with pointer return type might be marked
5456       // dereferenceable.
5457       getOrCreateAAFor<AADereferenceable>(RetPos);
5458     }
5459   }
5460 
5461   for (Argument &Arg : F.args()) {
5462     IRPosition ArgPos = IRPosition::argument(Arg);
5463 
5464     // Every argument might be simplified.
5465     getOrCreateAAFor<AAValueSimplify>(ArgPos);
5466 
5467     if (Arg.getType()->isPointerTy()) {
5468       // Every argument with pointer type might be marked nonnull.
5469       getOrCreateAAFor<AANonNull>(ArgPos);
5470 
5471       // Every argument with pointer type might be marked noalias.
5472       getOrCreateAAFor<AANoAlias>(ArgPos);
5473 
5474       // Every argument with pointer type might be marked dereferenceable.
5475       getOrCreateAAFor<AADereferenceable>(ArgPos);
5476 
5477       // Every argument with pointer type might be marked align.
5478       getOrCreateAAFor<AAAlign>(ArgPos);
5479 
5480       // Every argument with pointer type might be marked nocapture.
5481       getOrCreateAAFor<AANoCapture>(ArgPos);
5482 
5483       // Every argument with pointer type might be marked
5484       // "readnone/readonly/writeonly/..."
5485       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
5486 
5487       // Every argument with pointer type might be marked nofree.
5488       getOrCreateAAFor<AANoFree>(ArgPos);
5489     }
5490   }
5491 
5492   auto CallSitePred = [&](Instruction &I) -> bool {
5493     CallSite CS(&I);
5494     if (Function *Callee = CS.getCalledFunction()) {
5495       // Skip declerations except if annotations on their call sites were
5496       // explicitly requested.
5497       if (!AnnotateDeclarationCallSites && Callee->isDeclaration())
5498         return true;
5499 
5500       if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
5501         IRPosition CSRetPos = IRPosition::callsite_returned(CS);
5502 
5503         // Call site return values might be dead.
5504         getOrCreateAAFor<AAIsDead>(CSRetPos);
5505       }
5506 
5507       for (int i = 0, e = Callee->arg_size(); i < e; i++) {
5508 
5509         IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
5510 
5511         // Every call site argument might be dead.
5512         getOrCreateAAFor<AAIsDead>(CSArgPos);
5513 
5514         // Call site argument might be simplified.
5515         getOrCreateAAFor<AAValueSimplify>(CSArgPos);
5516 
5517         if (!CS.getArgument(i)->getType()->isPointerTy())
5518           continue;
5519 
5520         // Call site argument attribute "non-null".
5521         getOrCreateAAFor<AANonNull>(CSArgPos);
5522 
5523         // Call site argument attribute "no-alias".
5524         getOrCreateAAFor<AANoAlias>(CSArgPos);
5525 
5526         // Call site argument attribute "dereferenceable".
5527         getOrCreateAAFor<AADereferenceable>(CSArgPos);
5528 
5529         // Call site argument attribute "align".
5530         getOrCreateAAFor<AAAlign>(CSArgPos);
5531 
5532 	// Call site argument attribute "nofree".
5533 	getOrCreateAAFor<AANoFree>(CSArgPos);
5534       }
5535     }
5536     return true;
5537   };
5538 
5539   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
5540   bool Success, AnyDead = false;
5541   Success = checkForAllInstructionsImpl(
5542       OpcodeInstMap, CallSitePred, nullptr, AnyDead,
5543       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
5544        (unsigned)Instruction::Call});
5545   (void)Success;
5546   assert(Success && !AnyDead && "Expected the check call to be successful!");
5547 
5548   auto LoadStorePred = [&](Instruction &I) -> bool {
5549     if (isa<LoadInst>(I))
5550       getOrCreateAAFor<AAAlign>(
5551           IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
5552     else
5553       getOrCreateAAFor<AAAlign>(
5554           IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
5555     return true;
5556   };
5557   Success = checkForAllInstructionsImpl(
5558       OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
5559       {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
5560   (void)Success;
5561   assert(Success && !AnyDead && "Expected the check call to be successful!");
5562 }
5563 
5564 /// Helpers to ease debugging through output streams and print calls.
5565 ///
5566 ///{
5567 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
5568   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
5569 }
5570 
5571 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
5572   switch (AP) {
5573   case IRPosition::IRP_INVALID:
5574     return OS << "inv";
5575   case IRPosition::IRP_FLOAT:
5576     return OS << "flt";
5577   case IRPosition::IRP_RETURNED:
5578     return OS << "fn_ret";
5579   case IRPosition::IRP_CALL_SITE_RETURNED:
5580     return OS << "cs_ret";
5581   case IRPosition::IRP_FUNCTION:
5582     return OS << "fn";
5583   case IRPosition::IRP_CALL_SITE:
5584     return OS << "cs";
5585   case IRPosition::IRP_ARGUMENT:
5586     return OS << "arg";
5587   case IRPosition::IRP_CALL_SITE_ARGUMENT:
5588     return OS << "cs_arg";
5589   }
5590   llvm_unreachable("Unknown attribute position!");
5591 }
5592 
5593 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
5594   const Value &AV = Pos.getAssociatedValue();
5595   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
5596             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
5597 }
5598 
5599 template <typename base_ty, base_ty BestState, base_ty WorstState>
5600 raw_ostream &llvm::
5601 operator<<(raw_ostream &OS,
5602            const IntegerStateBase<base_ty, BestState, WorstState> &S) {
5603   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
5604             << static_cast<const AbstractState &>(S);
5605 }
5606 
5607 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
5608   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
5609 }
5610 
5611 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
5612   AA.print(OS);
5613   return OS;
5614 }
5615 
5616 void AbstractAttribute::print(raw_ostream &OS) const {
5617   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
5618      << "]";
5619 }
5620 ///}
5621 
5622 /// ----------------------------------------------------------------------------
5623 ///                       Pass (Manager) Boilerplate
5624 /// ----------------------------------------------------------------------------
5625 
5626 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
5627   if (DisableAttributor)
5628     return false;
5629 
5630   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
5631                     << " functions.\n");
5632 
5633   // Create an Attributor and initially empty information cache that is filled
5634   // while we identify default attribute opportunities.
5635   InformationCache InfoCache(M, AG);
5636   Attributor A(InfoCache, DepRecInterval);
5637 
5638   for (Function &F : M)
5639     A.initializeInformationCache(F);
5640 
5641   for (Function &F : M) {
5642     if (F.hasExactDefinition())
5643       NumFnWithExactDefinition++;
5644     else
5645       NumFnWithoutExactDefinition++;
5646 
5647     // We look at internal functions only on-demand but if any use is not a
5648     // direct call, we have to do it eagerly.
5649     if (F.hasLocalLinkage()) {
5650       if (llvm::all_of(F.uses(), [](const Use &U) {
5651             return ImmutableCallSite(U.getUser()) &&
5652                    ImmutableCallSite(U.getUser()).isCallee(&U);
5653           }))
5654         continue;
5655     }
5656 
5657     // Populate the Attributor with abstract attribute opportunities in the
5658     // function and the information cache with IR information.
5659     A.identifyDefaultAbstractAttributes(F);
5660   }
5661 
5662   return A.run(M) == ChangeStatus::CHANGED;
5663 }
5664 
5665 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
5666   AnalysisGetter AG(AM);
5667   if (runAttributorOnModule(M, AG)) {
5668     // FIXME: Think about passes we will preserve and add them here.
5669     return PreservedAnalyses::none();
5670   }
5671   return PreservedAnalyses::all();
5672 }
5673 
5674 namespace {
5675 
5676 struct AttributorLegacyPass : public ModulePass {
5677   static char ID;
5678 
5679   AttributorLegacyPass() : ModulePass(ID) {
5680     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
5681   }
5682 
5683   bool runOnModule(Module &M) override {
5684     if (skipModule(M))
5685       return false;
5686 
5687     AnalysisGetter AG;
5688     return runAttributorOnModule(M, AG);
5689   }
5690 
5691   void getAnalysisUsage(AnalysisUsage &AU) const override {
5692     // FIXME: Think about passes we will preserve and add them here.
5693     AU.addRequired<TargetLibraryInfoWrapperPass>();
5694   }
5695 };
5696 
5697 } // end anonymous namespace
5698 
5699 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
5700 
5701 char AttributorLegacyPass::ID = 0;
5702 
5703 const char AAReturnedValues::ID = 0;
5704 const char AANoUnwind::ID = 0;
5705 const char AANoSync::ID = 0;
5706 const char AANoFree::ID = 0;
5707 const char AANonNull::ID = 0;
5708 const char AANoRecurse::ID = 0;
5709 const char AAWillReturn::ID = 0;
5710 const char AANoAlias::ID = 0;
5711 const char AAReachability::ID = 0;
5712 const char AANoReturn::ID = 0;
5713 const char AAIsDead::ID = 0;
5714 const char AADereferenceable::ID = 0;
5715 const char AAAlign::ID = 0;
5716 const char AANoCapture::ID = 0;
5717 const char AAValueSimplify::ID = 0;
5718 const char AAHeapToStack::ID = 0;
5719 const char AAMemoryBehavior::ID = 0;
5720 
5721 // Macro magic to create the static generator function for attributes that
5722 // follow the naming scheme.
5723 
5724 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
5725   case IRPosition::PK:                                                         \
5726     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
5727 
5728 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
5729   case IRPosition::PK:                                                         \
5730     AA = new CLASS##SUFFIX(IRP);                                               \
5731     break;
5732 
5733 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
5734   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5735     CLASS *AA = nullptr;                                                       \
5736     switch (IRP.getPositionKind()) {                                           \
5737       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5738       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
5739       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
5740       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5741       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
5742       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
5743       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5744       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5745     }                                                                          \
5746     return *AA;                                                                \
5747   }
5748 
5749 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
5750   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5751     CLASS *AA = nullptr;                                                       \
5752     switch (IRP.getPositionKind()) {                                           \
5753       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5754       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
5755       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
5756       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5757       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5758       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
5759       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5760       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5761     }                                                                          \
5762     return *AA;                                                                \
5763   }
5764 
5765 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
5766   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5767     CLASS *AA = nullptr;                                                       \
5768     switch (IRP.getPositionKind()) {                                           \
5769       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5770       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5771       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5772       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5773       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5774       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
5775       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5776       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5777     }                                                                          \
5778     return *AA;                                                                \
5779   }
5780 
5781 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
5782   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5783     CLASS *AA = nullptr;                                                       \
5784     switch (IRP.getPositionKind()) {                                           \
5785       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5786       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
5787       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
5788       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5789       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
5790       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
5791       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
5792       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5793     }                                                                          \
5794     return *AA;                                                                \
5795   }
5796 
5797 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
5798   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
5799     CLASS *AA = nullptr;                                                       \
5800     switch (IRP.getPositionKind()) {                                           \
5801       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
5802       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
5803       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
5804       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
5805       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
5806       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
5807       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
5808       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
5809     }                                                                          \
5810     return *AA;                                                                \
5811   }
5812 
5813 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
5814 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
5815 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
5816 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
5817 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
5818 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
5819 
5820 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
5821 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
5822 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
5823 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
5824 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
5825 
5826 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
5827 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
5828 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
5829 
5830 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
5831 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
5832 
5833 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
5834 
5835 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
5836 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
5837 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
5838 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
5839 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
5840 #undef SWITCH_PK_CREATE
5841 #undef SWITCH_PK_INV
5842 
5843 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
5844                       "Deduce and propagate attributes", false, false)
5845 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
5846 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
5847                     "Deduce and propagate attributes", false, false)
5848