1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an inter procedural pass that deduces and/or propagating
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/LazyValueInfo.h"
27 #include "llvm/Analysis/Loads.h"
28 #include "llvm/Analysis/MemoryBuiltins.h"
29 #include "llvm/Analysis/ScalarEvolution.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/CFG.h"
34 #include "llvm/IR/InstIterator.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/InitializePasses.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
41 #include "llvm/Transforms/Utils/Local.h"
42 
43 #include <cassert>
44 
45 using namespace llvm;
46 
47 #define DEBUG_TYPE "attributor"
48 
49 STATISTIC(NumFnWithExactDefinition,
50           "Number of function with exact definitions");
51 STATISTIC(NumFnWithoutExactDefinition,
52           "Number of function without exact definitions");
53 STATISTIC(NumAttributesTimedOut,
54           "Number of abstract attributes timed out before fixpoint");
55 STATISTIC(NumAttributesValidFixpoint,
56           "Number of abstract attributes in a valid fixpoint state");
57 STATISTIC(NumAttributesManifested,
58           "Number of abstract attributes manifested in IR");
59 STATISTIC(NumAttributesFixedDueToRequiredDependences,
60           "Number of abstract attributes fixed due to required dependences");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
71 // If there is a single "increment" side one can use the macro
72 // STATS_DECLTRACK with a custom message. If there are multiple increment
73 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
106 // Specialization of the operator<< for abstract attributes subclasses. This
107 // disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAValueConstantRange)
132 
133 #undef PIPE_OPERATOR
134 } // namespace llvm
135 
136 // TODO: Determine a good default value.
137 //
138 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
139 // (when run with the first 5 abstract attributes). The results also indicate
140 // that we never reach 32 iterations but always find a fixpoint sooner.
141 //
142 // This will become more evolved once we perform two interleaved fixpoint
143 // iterations: bottom-up and top-down.
144 static cl::opt<unsigned>
145     MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
146                           cl::desc("Maximal number of fixpoint iterations."),
147                           cl::init(32));
148 static cl::opt<bool> VerifyMaxFixpointIterations(
149     "attributor-max-iterations-verify", cl::Hidden,
150     cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
151     cl::init(false));
152 
153 static cl::opt<bool> DisableAttributor(
154     "attributor-disable", cl::Hidden,
155     cl::desc("Disable the attributor inter-procedural deduction pass."),
156     cl::init(true));
157 
158 static cl::opt<bool> AnnotateDeclarationCallSites(
159     "attributor-annotate-decl-cs", cl::Hidden,
160     cl::desc("Annoate call sites of function declarations."), cl::init(false));
161 
162 static cl::opt<bool> ManifestInternal(
163     "attributor-manifest-internal", cl::Hidden,
164     cl::desc("Manifest Attributor internal string attributes."),
165     cl::init(false));
166 
167 static cl::opt<unsigned> DepRecInterval(
168     "attributor-dependence-recompute-interval", cl::Hidden,
169     cl::desc("Number of iterations until dependences are recomputed."),
170     cl::init(4));
171 
172 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
173                                        cl::init(true), cl::Hidden);
174 
175 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
176                                        cl::Hidden);
177 
178 /// Logic operators for the change status enum class.
179 ///
180 ///{
181 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
182   return l == ChangeStatus::CHANGED ? l : r;
183 }
184 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
185   return l == ChangeStatus::UNCHANGED ? l : r;
186 }
187 ///}
188 
189 Argument *IRPosition::getAssociatedArgument() const {
190   if (getPositionKind() == IRP_ARGUMENT)
191     return cast<Argument>(&getAnchorValue());
192 
193   // Not an Argument and no argument number means this is not a call site
194   // argument, thus we cannot find a callback argument to return.
195   int ArgNo = getArgNo();
196   if (ArgNo < 0)
197     return nullptr;
198 
199   // Use abstract call sites to make the connection between the call site
200   // values and the ones in callbacks. If a callback was found that makes use
201   // of the underlying call site operand, we want the corresponding callback
202   // callee argument and not the direct callee argument.
203   Optional<Argument *> CBCandidateArg;
204   SmallVector<const Use *, 4> CBUses;
205   ImmutableCallSite ICS(&getAnchorValue());
206   AbstractCallSite::getCallbackUses(ICS, CBUses);
207   for (const Use *U : CBUses) {
208     AbstractCallSite ACS(U);
209     assert(ACS && ACS.isCallbackCall());
210     if (!ACS.getCalledFunction())
211       continue;
212 
213     for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
214 
215       // Test if the underlying call site operand is argument number u of the
216       // callback callee.
217       if (ACS.getCallArgOperandNo(u) != ArgNo)
218         continue;
219 
220       assert(ACS.getCalledFunction()->arg_size() > u &&
221              "ACS mapped into var-args arguments!");
222       if (CBCandidateArg.hasValue()) {
223         CBCandidateArg = nullptr;
224         break;
225       }
226       CBCandidateArg = ACS.getCalledFunction()->getArg(u);
227     }
228   }
229 
230   // If we found a unique callback candidate argument, return it.
231   if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
232     return CBCandidateArg.getValue();
233 
234   // If no callbacks were found, or none used the underlying call site operand
235   // exclusively, use the direct callee argument if available.
236   const Function *Callee = ICS.getCalledFunction();
237   if (Callee && Callee->arg_size() > unsigned(ArgNo))
238     return Callee->getArg(ArgNo);
239 
240   return nullptr;
241 }
242 
243 /// For calls (and invokes) we will only replace instruction uses to not disturb
244 /// the old style call graph.
245 /// TODO: Remove this once we get rid of the old PM.
246 static void replaceAllInstructionUsesWith(Value &Old, Value &New) {
247   if (!isa<CallBase>(Old))
248     return Old.replaceAllUsesWith(&New);
249   SmallVector<Use *, 8> Uses;
250   for (Use &U : Old.uses())
251     if (isa<Instruction>(U.getUser()))
252       Uses.push_back(&U);
253   for (Use *U : Uses)
254     U->set(&New);
255 }
256 
257 /// Recursively visit all values that might become \p IRP at some point. This
258 /// will be done by looking through cast instructions, selects, phis, and calls
259 /// with the "returned" attribute. Once we cannot look through the value any
260 /// further, the callback \p VisitValueCB is invoked and passed the current
261 /// value, the \p State, and a flag to indicate if we stripped anything. To
262 /// limit how much effort is invested, we will never visit more values than
263 /// specified by \p MaxValues.
264 template <typename AAType, typename StateTy>
265 static bool genericValueTraversal(
266     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
267     const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
268     int MaxValues = 8) {
269 
270   const AAIsDead *LivenessAA = nullptr;
271   if (IRP.getAnchorScope())
272     LivenessAA = &A.getAAFor<AAIsDead>(
273         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
274         /* TrackDependence */ false);
275   bool AnyDead = false;
276 
277   // TODO: Use Positions here to allow context sensitivity in VisitValueCB
278   SmallPtrSet<Value *, 16> Visited;
279   SmallVector<Value *, 16> Worklist;
280   Worklist.push_back(&IRP.getAssociatedValue());
281 
282   int Iteration = 0;
283   do {
284     Value *V = Worklist.pop_back_val();
285 
286     // Check if we should process the current value. To prevent endless
287     // recursion keep a record of the values we followed!
288     if (!Visited.insert(V).second)
289       continue;
290 
291     // Make sure we limit the compile time for complex expressions.
292     if (Iteration++ >= MaxValues)
293       return false;
294 
295     // Explicitly look through calls with a "returned" attribute if we do
296     // not have a pointer as stripPointerCasts only works on them.
297     Value *NewV = nullptr;
298     if (V->getType()->isPointerTy()) {
299       NewV = V->stripPointerCasts();
300     } else {
301       CallSite CS(V);
302       if (CS && CS.getCalledFunction()) {
303         for (Argument &Arg : CS.getCalledFunction()->args())
304           if (Arg.hasReturnedAttr()) {
305             NewV = CS.getArgOperand(Arg.getArgNo());
306             break;
307           }
308       }
309     }
310     if (NewV && NewV != V) {
311       Worklist.push_back(NewV);
312       continue;
313     }
314 
315     // Look through select instructions, visit both potential values.
316     if (auto *SI = dyn_cast<SelectInst>(V)) {
317       Worklist.push_back(SI->getTrueValue());
318       Worklist.push_back(SI->getFalseValue());
319       continue;
320     }
321 
322     // Look through phi nodes, visit all live operands.
323     if (auto *PHI = dyn_cast<PHINode>(V)) {
324       assert(LivenessAA &&
325              "Expected liveness in the presence of instructions!");
326       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
327         const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
328         if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
329           AnyDead = true;
330           continue;
331         }
332         Worklist.push_back(PHI->getIncomingValue(u));
333       }
334       continue;
335     }
336 
337     // Once a leaf is reached we inform the user through the callback.
338     if (!VisitValueCB(*V, State, Iteration > 1))
339       return false;
340   } while (!Worklist.empty());
341 
342   // If we actually used liveness information so we have to record a dependence.
343   if (AnyDead)
344     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
345 
346   // All values have been visited.
347   return true;
348 }
349 
350 /// Return true if \p New is equal or worse than \p Old.
351 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
352   if (!Old.isIntAttribute())
353     return true;
354 
355   return Old.getValueAsInt() >= New.getValueAsInt();
356 }
357 
358 /// Return true if the information provided by \p Attr was added to the
359 /// attribute list \p Attrs. This is only the case if it was not already present
360 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
361 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
362                              AttributeList &Attrs, int AttrIdx) {
363 
364   if (Attr.isEnumAttribute()) {
365     Attribute::AttrKind Kind = Attr.getKindAsEnum();
366     if (Attrs.hasAttribute(AttrIdx, Kind))
367       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
368         return false;
369     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
370     return true;
371   }
372   if (Attr.isStringAttribute()) {
373     StringRef Kind = Attr.getKindAsString();
374     if (Attrs.hasAttribute(AttrIdx, Kind))
375       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
376         return false;
377     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
378     return true;
379   }
380   if (Attr.isIntAttribute()) {
381     Attribute::AttrKind Kind = Attr.getKindAsEnum();
382     if (Attrs.hasAttribute(AttrIdx, Kind))
383       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
384         return false;
385     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
386     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
387     return true;
388   }
389 
390   llvm_unreachable("Expected enum or string attribute!");
391 }
392 
393 static const Value *
394 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
395                                      const DataLayout &DL,
396                                      bool AllowNonInbounds = false) {
397   const Value *Ptr =
398       Attributor::getPointerOperand(I, /* AllowVolatile */ false);
399   if (!Ptr)
400     return nullptr;
401 
402   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
403                                           AllowNonInbounds);
404 }
405 
406 ChangeStatus AbstractAttribute::update(Attributor &A) {
407   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
408   if (getState().isAtFixpoint())
409     return HasChanged;
410 
411   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
412 
413   HasChanged = updateImpl(A);
414 
415   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
416                     << "\n");
417 
418   return HasChanged;
419 }
420 
421 ChangeStatus
422 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
423                                    const ArrayRef<Attribute> &DeducedAttrs) {
424   Function *ScopeFn = IRP.getAssociatedFunction();
425   IRPosition::Kind PK = IRP.getPositionKind();
426 
427   // In the following some generic code that will manifest attributes in
428   // DeducedAttrs if they improve the current IR. Due to the different
429   // annotation positions we use the underlying AttributeList interface.
430 
431   AttributeList Attrs;
432   switch (PK) {
433   case IRPosition::IRP_INVALID:
434   case IRPosition::IRP_FLOAT:
435     return ChangeStatus::UNCHANGED;
436   case IRPosition::IRP_ARGUMENT:
437   case IRPosition::IRP_FUNCTION:
438   case IRPosition::IRP_RETURNED:
439     Attrs = ScopeFn->getAttributes();
440     break;
441   case IRPosition::IRP_CALL_SITE:
442   case IRPosition::IRP_CALL_SITE_RETURNED:
443   case IRPosition::IRP_CALL_SITE_ARGUMENT:
444     Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
445     break;
446   }
447 
448   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
449   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
450   for (const Attribute &Attr : DeducedAttrs) {
451     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
452       continue;
453 
454     HasChanged = ChangeStatus::CHANGED;
455   }
456 
457   if (HasChanged == ChangeStatus::UNCHANGED)
458     return HasChanged;
459 
460   switch (PK) {
461   case IRPosition::IRP_ARGUMENT:
462   case IRPosition::IRP_FUNCTION:
463   case IRPosition::IRP_RETURNED:
464     ScopeFn->setAttributes(Attrs);
465     break;
466   case IRPosition::IRP_CALL_SITE:
467   case IRPosition::IRP_CALL_SITE_RETURNED:
468   case IRPosition::IRP_CALL_SITE_ARGUMENT:
469     CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
470     break;
471   case IRPosition::IRP_INVALID:
472   case IRPosition::IRP_FLOAT:
473     break;
474   }
475 
476   return HasChanged;
477 }
478 
479 const IRPosition IRPosition::EmptyKey(255);
480 const IRPosition IRPosition::TombstoneKey(256);
481 
482 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
483   IRPositions.emplace_back(IRP);
484 
485   ImmutableCallSite ICS(&IRP.getAnchorValue());
486   switch (IRP.getPositionKind()) {
487   case IRPosition::IRP_INVALID:
488   case IRPosition::IRP_FLOAT:
489   case IRPosition::IRP_FUNCTION:
490     return;
491   case IRPosition::IRP_ARGUMENT:
492   case IRPosition::IRP_RETURNED:
493     IRPositions.emplace_back(
494         IRPosition::function(*IRP.getAssociatedFunction()));
495     return;
496   case IRPosition::IRP_CALL_SITE:
497     assert(ICS && "Expected call site!");
498     // TODO: We need to look at the operand bundles similar to the redirection
499     //       in CallBase.
500     if (!ICS.hasOperandBundles())
501       if (const Function *Callee = ICS.getCalledFunction())
502         IRPositions.emplace_back(IRPosition::function(*Callee));
503     return;
504   case IRPosition::IRP_CALL_SITE_RETURNED:
505     assert(ICS && "Expected call site!");
506     // TODO: We need to look at the operand bundles similar to the redirection
507     //       in CallBase.
508     if (!ICS.hasOperandBundles()) {
509       if (const Function *Callee = ICS.getCalledFunction()) {
510         IRPositions.emplace_back(IRPosition::returned(*Callee));
511         IRPositions.emplace_back(IRPosition::function(*Callee));
512       }
513     }
514     IRPositions.emplace_back(
515         IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
516     return;
517   case IRPosition::IRP_CALL_SITE_ARGUMENT: {
518     int ArgNo = IRP.getArgNo();
519     assert(ICS && ArgNo >= 0 && "Expected call site!");
520     // TODO: We need to look at the operand bundles similar to the redirection
521     //       in CallBase.
522     if (!ICS.hasOperandBundles()) {
523       const Function *Callee = ICS.getCalledFunction();
524       if (Callee && Callee->arg_size() > unsigned(ArgNo))
525         IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
526       if (Callee)
527         IRPositions.emplace_back(IRPosition::function(*Callee));
528     }
529     IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
530     return;
531   }
532   }
533 }
534 
535 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
536                          bool IgnoreSubsumingPositions) const {
537   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
538     for (Attribute::AttrKind AK : AKs)
539       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
540         return true;
541     // The first position returned by the SubsumingPositionIterator is
542     // always the position itself. If we ignore subsuming positions we
543     // are done after the first iteration.
544     if (IgnoreSubsumingPositions)
545       break;
546   }
547   return false;
548 }
549 
550 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
551                           SmallVectorImpl<Attribute> &Attrs,
552                           bool IgnoreSubsumingPositions) const {
553   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
554     for (Attribute::AttrKind AK : AKs) {
555       const Attribute &Attr = EquivIRP.getAttr(AK);
556       if (Attr.getKindAsEnum() == AK)
557         Attrs.push_back(Attr);
558     }
559     // The first position returned by the SubsumingPositionIterator is
560     // always the position itself. If we ignore subsuming positions we
561     // are done after the first iteration.
562     if (IgnoreSubsumingPositions)
563       break;
564   }
565 }
566 
567 void IRPosition::verify() {
568   switch (KindOrArgNo) {
569   default:
570     assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
571     assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
572            "Expected call base or argument for positive attribute index!");
573     if (isa<Argument>(AnchorVal)) {
574       assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
575              "Argument number mismatch!");
576       assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
577              "Associated value mismatch!");
578     } else {
579       assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
580              "Call site argument number mismatch!");
581       assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
582                  &getAssociatedValue() &&
583              "Associated value mismatch!");
584     }
585     break;
586   case IRP_INVALID:
587     assert(!AnchorVal && "Expected no value for an invalid position!");
588     break;
589   case IRP_FLOAT:
590     assert((!isa<CallBase>(&getAssociatedValue()) &&
591             !isa<Argument>(&getAssociatedValue())) &&
592            "Expected specialized kind for call base and argument values!");
593     break;
594   case IRP_RETURNED:
595     assert(isa<Function>(AnchorVal) &&
596            "Expected function for a 'returned' position!");
597     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
598     break;
599   case IRP_CALL_SITE_RETURNED:
600     assert((isa<CallBase>(AnchorVal)) &&
601            "Expected call base for 'call site returned' position!");
602     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
603     break;
604   case IRP_CALL_SITE:
605     assert((isa<CallBase>(AnchorVal)) &&
606            "Expected call base for 'call site function' position!");
607     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
608     break;
609   case IRP_FUNCTION:
610     assert(isa<Function>(AnchorVal) &&
611            "Expected function for a 'function' position!");
612     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
613     break;
614   }
615 }
616 
617 namespace {
618 /// Helper function to clamp a state \p S of type \p StateType with the
619 /// information in \p R and indicate/return if \p S did change (as-in update is
620 /// required to be run again).
621 template <typename StateType>
622 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
623   auto Assumed = S.getAssumed();
624   S ^= R;
625   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
626                                    : ChangeStatus::CHANGED;
627 }
628 
629 /// Clamp the information known for all returned values of a function
630 /// (identified by \p QueryingAA) into \p S.
631 template <typename AAType, typename StateType = typename AAType::StateType>
632 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
633                                      StateType &S) {
634   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
635                     << QueryingAA << " into " << S << "\n");
636 
637   assert((QueryingAA.getIRPosition().getPositionKind() ==
638               IRPosition::IRP_RETURNED ||
639           QueryingAA.getIRPosition().getPositionKind() ==
640               IRPosition::IRP_CALL_SITE_RETURNED) &&
641          "Can only clamp returned value states for a function returned or call "
642          "site returned position!");
643 
644   // Use an optional state as there might not be any return values and we want
645   // to join (IntegerState::operator&) the state of all there are.
646   Optional<StateType> T;
647 
648   // Callback for each possibly returned value.
649   auto CheckReturnValue = [&](Value &RV) -> bool {
650     const IRPosition &RVPos = IRPosition::value(RV);
651     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
652     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
653                       << " @ " << RVPos << "\n");
654     const StateType &AAS = static_cast<const StateType &>(AA.getState());
655     if (T.hasValue())
656       *T &= AAS;
657     else
658       T = AAS;
659     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
660                       << "\n");
661     return T->isValidState();
662   };
663 
664   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
665     S.indicatePessimisticFixpoint();
666   else if (T.hasValue())
667     S ^= *T;
668 }
669 
670 /// Helper class to compose two generic deduction
671 template <typename AAType, typename Base, typename StateType,
672           template <typename...> class F, template <typename...> class G>
673 struct AAComposeTwoGenericDeduction
674     : public F<AAType, G<AAType, Base, StateType>, StateType> {
675   AAComposeTwoGenericDeduction(const IRPosition &IRP)
676       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
677 
678   /// See AbstractAttribute::updateImpl(...).
679   ChangeStatus updateImpl(Attributor &A) override {
680     ChangeStatus ChangedF =
681         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
682     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
683     return ChangedF | ChangedG;
684   }
685 };
686 
687 /// Helper class for generic deduction: return value -> returned position.
688 template <typename AAType, typename Base,
689           typename StateType = typename AAType::StateType>
690 struct AAReturnedFromReturnedValues : public Base {
691   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
692 
693   /// See AbstractAttribute::updateImpl(...).
694   ChangeStatus updateImpl(Attributor &A) override {
695     StateType S;
696     clampReturnedValueStates<AAType, StateType>(A, *this, S);
697     // TODO: If we know we visited all returned values, thus no are assumed
698     // dead, we can take the known information from the state T.
699     return clampStateAndIndicateChange<StateType>(this->getState(), S);
700   }
701 };
702 
703 /// Clamp the information known at all call sites for a given argument
704 /// (identified by \p QueryingAA) into \p S.
705 template <typename AAType, typename StateType = typename AAType::StateType>
706 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
707                                         StateType &S) {
708   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
709                     << QueryingAA << " into " << S << "\n");
710 
711   assert(QueryingAA.getIRPosition().getPositionKind() ==
712              IRPosition::IRP_ARGUMENT &&
713          "Can only clamp call site argument states for an argument position!");
714 
715   // Use an optional state as there might not be any return values and we want
716   // to join (IntegerState::operator&) the state of all there are.
717   Optional<StateType> T;
718 
719   // The argument number which is also the call site argument number.
720   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
721 
722   auto CallSiteCheck = [&](AbstractCallSite ACS) {
723     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
724     // Check if a coresponding argument was found or if it is on not associated
725     // (which can happen for callback calls).
726     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
727       return false;
728 
729     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
730     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
731                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
732     const StateType &AAS = static_cast<const StateType &>(AA.getState());
733     if (T.hasValue())
734       *T &= AAS;
735     else
736       T = AAS;
737     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
738                       << "\n");
739     return T->isValidState();
740   };
741 
742   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
743     S.indicatePessimisticFixpoint();
744   else if (T.hasValue())
745     S ^= *T;
746 }
747 
748 /// Helper class for generic deduction: call site argument -> argument position.
749 template <typename AAType, typename Base,
750           typename StateType = typename AAType::StateType>
751 struct AAArgumentFromCallSiteArguments : public Base {
752   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
753 
754   /// See AbstractAttribute::updateImpl(...).
755   ChangeStatus updateImpl(Attributor &A) override {
756     StateType S;
757     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
758     // TODO: If we know we visited all incoming values, thus no are assumed
759     // dead, we can take the known information from the state T.
760     return clampStateAndIndicateChange<StateType>(this->getState(), S);
761   }
762 };
763 
764 /// Helper class for generic replication: function returned -> cs returned.
765 template <typename AAType, typename Base,
766           typename StateType = typename AAType::StateType>
767 struct AACallSiteReturnedFromReturned : public Base {
768   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
769 
770   /// See AbstractAttribute::updateImpl(...).
771   ChangeStatus updateImpl(Attributor &A) override {
772     assert(this->getIRPosition().getPositionKind() ==
773                IRPosition::IRP_CALL_SITE_RETURNED &&
774            "Can only wrap function returned positions for call site returned "
775            "positions!");
776     auto &S = this->getState();
777 
778     const Function *AssociatedFunction =
779         this->getIRPosition().getAssociatedFunction();
780     if (!AssociatedFunction)
781       return S.indicatePessimisticFixpoint();
782 
783     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
784     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
785     return clampStateAndIndicateChange(
786         S, static_cast<const typename AAType::StateType &>(AA.getState()));
787   }
788 };
789 
790 /// Helper class for generic deduction using must-be-executed-context
791 /// Base class is required to have `followUse` method.
792 
793 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
794 /// U - Underlying use.
795 /// I - The user of the \p U.
796 /// `followUse` returns true if the value should be tracked transitively.
797 
798 template <typename AAType, typename Base,
799           typename StateType = typename AAType::StateType>
800 struct AAFromMustBeExecutedContext : public Base {
801   AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
802 
803   void initialize(Attributor &A) override {
804     Base::initialize(A);
805     const IRPosition &IRP = this->getIRPosition();
806     Instruction *CtxI = IRP.getCtxI();
807 
808     if (!CtxI)
809       return;
810 
811     for (const Use &U : IRP.getAssociatedValue().uses())
812       Uses.insert(&U);
813   }
814 
815   /// See AbstractAttribute::updateImpl(...).
816   ChangeStatus updateImpl(Attributor &A) override {
817     auto BeforeState = this->getState();
818     auto &S = this->getState();
819     Instruction *CtxI = this->getIRPosition().getCtxI();
820     if (!CtxI)
821       return ChangeStatus::UNCHANGED;
822 
823     MustBeExecutedContextExplorer &Explorer =
824         A.getInfoCache().getMustBeExecutedContextExplorer();
825 
826     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
827     for (unsigned u = 0; u < Uses.size(); ++u) {
828       const Use *U = Uses[u];
829       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
830         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
831         if (Found && Base::followUse(A, U, UserI))
832           for (const Use &Us : UserI->uses())
833             Uses.insert(&Us);
834       }
835     }
836 
837     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
838   }
839 
840 private:
841   /// Container for (transitive) uses of the associated value.
842   SetVector<const Use *> Uses;
843 };
844 
845 template <typename AAType, typename Base,
846           typename StateType = typename AAType::StateType>
847 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
848     AAComposeTwoGenericDeduction<AAType, Base, StateType,
849                                  AAFromMustBeExecutedContext,
850                                  AAArgumentFromCallSiteArguments>;
851 
852 template <typename AAType, typename Base,
853           typename StateType = typename AAType::StateType>
854 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
855     AAComposeTwoGenericDeduction<AAType, Base, StateType,
856                                  AAFromMustBeExecutedContext,
857                                  AACallSiteReturnedFromReturned>;
858 
859 /// -----------------------NoUnwind Function Attribute--------------------------
860 
861 struct AANoUnwindImpl : AANoUnwind {
862   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
863 
864   const std::string getAsStr() const override {
865     return getAssumed() ? "nounwind" : "may-unwind";
866   }
867 
868   /// See AbstractAttribute::updateImpl(...).
869   ChangeStatus updateImpl(Attributor &A) override {
870     auto Opcodes = {
871         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
872         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
873         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
874 
875     auto CheckForNoUnwind = [&](Instruction &I) {
876       if (!I.mayThrow())
877         return true;
878 
879       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
880         const auto &NoUnwindAA =
881             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
882         return NoUnwindAA.isAssumedNoUnwind();
883       }
884       return false;
885     };
886 
887     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
888       return indicatePessimisticFixpoint();
889 
890     return ChangeStatus::UNCHANGED;
891   }
892 };
893 
894 struct AANoUnwindFunction final : public AANoUnwindImpl {
895   AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
896 
897   /// See AbstractAttribute::trackStatistics()
898   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
899 };
900 
901 /// NoUnwind attribute deduction for a call sites.
902 struct AANoUnwindCallSite final : AANoUnwindImpl {
903   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
904 
905   /// See AbstractAttribute::initialize(...).
906   void initialize(Attributor &A) override {
907     AANoUnwindImpl::initialize(A);
908     Function *F = getAssociatedFunction();
909     if (!F)
910       indicatePessimisticFixpoint();
911   }
912 
913   /// See AbstractAttribute::updateImpl(...).
914   ChangeStatus updateImpl(Attributor &A) override {
915     // TODO: Once we have call site specific value information we can provide
916     //       call site specific liveness information and then it makes
917     //       sense to specialize attributes for call sites arguments instead of
918     //       redirecting requests to the callee argument.
919     Function *F = getAssociatedFunction();
920     const IRPosition &FnPos = IRPosition::function(*F);
921     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
922     return clampStateAndIndicateChange(
923         getState(),
924         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
925   }
926 
927   /// See AbstractAttribute::trackStatistics()
928   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
929 };
930 
931 /// --------------------- Function Return Values -------------------------------
932 
933 /// "Attribute" that collects all potential returned values and the return
934 /// instructions that they arise from.
935 ///
936 /// If there is a unique returned value R, the manifest method will:
937 ///   - mark R with the "returned" attribute, if R is an argument.
938 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
939 
940   /// Mapping of values potentially returned by the associated function to the
941   /// return instructions that might return them.
942   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
943 
944   /// Mapping to remember the number of returned values for a call site such
945   /// that we can avoid updates if nothing changed.
946   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
947 
948   /// Set of unresolved calls returned by the associated function.
949   SmallSetVector<CallBase *, 4> UnresolvedCalls;
950 
951   /// State flags
952   ///
953   ///{
954   bool IsFixed = false;
955   bool IsValidState = true;
956   ///}
957 
958 public:
959   AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
960 
961   /// See AbstractAttribute::initialize(...).
962   void initialize(Attributor &A) override {
963     // Reset the state.
964     IsFixed = false;
965     IsValidState = true;
966     ReturnedValues.clear();
967 
968     Function *F = getAssociatedFunction();
969     if (!F) {
970       indicatePessimisticFixpoint();
971       return;
972     }
973 
974     // The map from instruction opcodes to those instructions in the function.
975     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
976 
977     // Look through all arguments, if one is marked as returned we are done.
978     for (Argument &Arg : F->args()) {
979       if (Arg.hasReturnedAttr()) {
980         auto &ReturnInstSet = ReturnedValues[&Arg];
981         for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
982           ReturnInstSet.insert(cast<ReturnInst>(RI));
983 
984         indicateOptimisticFixpoint();
985         return;
986       }
987     }
988 
989     if (!F->hasExactDefinition())
990       indicatePessimisticFixpoint();
991   }
992 
993   /// See AbstractAttribute::manifest(...).
994   ChangeStatus manifest(Attributor &A) override;
995 
996   /// See AbstractAttribute::getState(...).
997   AbstractState &getState() override { return *this; }
998 
999   /// See AbstractAttribute::getState(...).
1000   const AbstractState &getState() const override { return *this; }
1001 
1002   /// See AbstractAttribute::updateImpl(Attributor &A).
1003   ChangeStatus updateImpl(Attributor &A) override;
1004 
1005   llvm::iterator_range<iterator> returned_values() override {
1006     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1007   }
1008 
1009   llvm::iterator_range<const_iterator> returned_values() const override {
1010     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1011   }
1012 
1013   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
1014     return UnresolvedCalls;
1015   }
1016 
1017   /// Return the number of potential return values, -1 if unknown.
1018   size_t getNumReturnValues() const override {
1019     return isValidState() ? ReturnedValues.size() : -1;
1020   }
1021 
1022   /// Return an assumed unique return value if a single candidate is found. If
1023   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1024   /// Optional::NoneType.
1025   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1026 
1027   /// See AbstractState::checkForAllReturnedValues(...).
1028   bool checkForAllReturnedValuesAndReturnInsts(
1029       const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1030           &Pred) const override;
1031 
1032   /// Pretty print the attribute similar to the IR representation.
1033   const std::string getAsStr() const override;
1034 
1035   /// See AbstractState::isAtFixpoint().
1036   bool isAtFixpoint() const override { return IsFixed; }
1037 
1038   /// See AbstractState::isValidState().
1039   bool isValidState() const override { return IsValidState; }
1040 
1041   /// See AbstractState::indicateOptimisticFixpoint(...).
1042   ChangeStatus indicateOptimisticFixpoint() override {
1043     IsFixed = true;
1044     return ChangeStatus::UNCHANGED;
1045   }
1046 
1047   ChangeStatus indicatePessimisticFixpoint() override {
1048     IsFixed = true;
1049     IsValidState = false;
1050     return ChangeStatus::CHANGED;
1051   }
1052 };
1053 
1054 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1055   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1056 
1057   // Bookkeeping.
1058   assert(isValidState());
1059   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1060                   "Number of function with known return values");
1061 
1062   // Check if we have an assumed unique return value that we could manifest.
1063   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1064 
1065   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1066     return Changed;
1067 
1068   // Bookkeeping.
1069   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1070                   "Number of function with unique return");
1071 
1072   // Callback to replace the uses of CB with the constant C.
1073   auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
1074     if (CB.getNumUses() == 0 || CB.isMustTailCall())
1075       return ChangeStatus::UNCHANGED;
1076     replaceAllInstructionUsesWith(CB, C);
1077     return ChangeStatus::CHANGED;
1078   };
1079 
1080   // If the assumed unique return value is an argument, annotate it.
1081   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1082     // TODO: This should be handled differently!
1083     this->AnchorVal = UniqueRVArg;
1084     this->KindOrArgNo = UniqueRVArg->getArgNo();
1085     Changed = IRAttribute::manifest(A);
1086   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
1087     // We can replace the returned value with the unique returned constant.
1088     Value &AnchorValue = getAnchorValue();
1089     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1090       for (const Use &U : F->uses())
1091         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1092           if (CB->isCallee(&U)) {
1093             Constant *RVCCast =
1094                 CB->getType() == RVC->getType()
1095                     ? RVC
1096                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1097             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1098           }
1099     } else {
1100       assert(isa<CallBase>(AnchorValue) &&
1101              "Expcected a function or call base anchor!");
1102       Constant *RVCCast =
1103           AnchorValue.getType() == RVC->getType()
1104               ? RVC
1105               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1106       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1107     }
1108     if (Changed == ChangeStatus::CHANGED)
1109       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1110                       "Number of function returns replaced by constant return");
1111   }
1112 
1113   return Changed;
1114 }
1115 
1116 const std::string AAReturnedValuesImpl::getAsStr() const {
1117   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1118          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1119          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1120 }
1121 
1122 Optional<Value *>
1123 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1124   // If checkForAllReturnedValues provides a unique value, ignoring potential
1125   // undef values that can also be present, it is assumed to be the actual
1126   // return value and forwarded to the caller of this method. If there are
1127   // multiple, a nullptr is returned indicating there cannot be a unique
1128   // returned value.
1129   Optional<Value *> UniqueRV;
1130 
1131   auto Pred = [&](Value &RV) -> bool {
1132     // If we found a second returned value and neither the current nor the saved
1133     // one is an undef, there is no unique returned value. Undefs are special
1134     // since we can pretend they have any value.
1135     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1136         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1137       UniqueRV = nullptr;
1138       return false;
1139     }
1140 
1141     // Do not overwrite a value with an undef.
1142     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1143       UniqueRV = &RV;
1144 
1145     return true;
1146   };
1147 
1148   if (!A.checkForAllReturnedValues(Pred, *this))
1149     UniqueRV = nullptr;
1150 
1151   return UniqueRV;
1152 }
1153 
1154 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1155     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1156         &Pred) const {
1157   if (!isValidState())
1158     return false;
1159 
1160   // Check all returned values but ignore call sites as long as we have not
1161   // encountered an overdefined one during an update.
1162   for (auto &It : ReturnedValues) {
1163     Value *RV = It.first;
1164 
1165     CallBase *CB = dyn_cast<CallBase>(RV);
1166     if (CB && !UnresolvedCalls.count(CB))
1167       continue;
1168 
1169     if (!Pred(*RV, It.second))
1170       return false;
1171   }
1172 
1173   return true;
1174 }
1175 
1176 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1177   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1178   bool Changed = false;
1179 
1180   // State used in the value traversals starting in returned values.
1181   struct RVState {
1182     // The map in which we collect return values -> return instrs.
1183     decltype(ReturnedValues) &RetValsMap;
1184     // The flag to indicate a change.
1185     bool &Changed;
1186     // The return instrs we come from.
1187     SmallSetVector<ReturnInst *, 4> RetInsts;
1188   };
1189 
1190   // Callback for a leaf value returned by the associated function.
1191   auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1192     auto Size = RVS.RetValsMap[&Val].size();
1193     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1194     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1195     RVS.Changed |= Inserted;
1196     LLVM_DEBUG({
1197       if (Inserted)
1198         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1199                << " => " << RVS.RetInsts.size() << "\n";
1200     });
1201     return true;
1202   };
1203 
1204   // Helper method to invoke the generic value traversal.
1205   auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1206     IRPosition RetValPos = IRPosition::value(RV);
1207     return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1208                                                             RVS, VisitValueCB);
1209   };
1210 
1211   // Callback for all "return intructions" live in the associated function.
1212   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1213     ReturnInst &Ret = cast<ReturnInst>(I);
1214     RVState RVS({ReturnedValues, Changed, {}});
1215     RVS.RetInsts.insert(&Ret);
1216     return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1217   };
1218 
1219   // Start by discovering returned values from all live returned instructions in
1220   // the associated function.
1221   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1222     return indicatePessimisticFixpoint();
1223 
1224   // Once returned values "directly" present in the code are handled we try to
1225   // resolve returned calls.
1226   decltype(ReturnedValues) NewRVsMap;
1227   for (auto &It : ReturnedValues) {
1228     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1229                       << " by #" << It.second.size() << " RIs\n");
1230     CallBase *CB = dyn_cast<CallBase>(It.first);
1231     if (!CB || UnresolvedCalls.count(CB))
1232       continue;
1233 
1234     if (!CB->getCalledFunction()) {
1235       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1236                         << "\n");
1237       UnresolvedCalls.insert(CB);
1238       continue;
1239     }
1240 
1241     // TODO: use the function scope once we have call site AAReturnedValues.
1242     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1243         *this, IRPosition::function(*CB->getCalledFunction()));
1244     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1245                       << RetValAA << "\n");
1246 
1247     // Skip dead ends, thus if we do not know anything about the returned
1248     // call we mark it as unresolved and it will stay that way.
1249     if (!RetValAA.getState().isValidState()) {
1250       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1251                         << "\n");
1252       UnresolvedCalls.insert(CB);
1253       continue;
1254     }
1255 
1256     // Do not try to learn partial information. If the callee has unresolved
1257     // return values we will treat the call as unresolved/opaque.
1258     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1259     if (!RetValAAUnresolvedCalls.empty()) {
1260       UnresolvedCalls.insert(CB);
1261       continue;
1262     }
1263 
1264     // Now check if we can track transitively returned values. If possible, thus
1265     // if all return value can be represented in the current scope, do so.
1266     bool Unresolved = false;
1267     for (auto &RetValAAIt : RetValAA.returned_values()) {
1268       Value *RetVal = RetValAAIt.first;
1269       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1270           isa<Constant>(RetVal))
1271         continue;
1272       // Anything that did not fit in the above categories cannot be resolved,
1273       // mark the call as unresolved.
1274       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1275                            "cannot be translated: "
1276                         << *RetVal << "\n");
1277       UnresolvedCalls.insert(CB);
1278       Unresolved = true;
1279       break;
1280     }
1281 
1282     if (Unresolved)
1283       continue;
1284 
1285     // Now track transitively returned values.
1286     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1287     if (NumRetAA == RetValAA.getNumReturnValues()) {
1288       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1289                            "changed since it was seen last\n");
1290       continue;
1291     }
1292     NumRetAA = RetValAA.getNumReturnValues();
1293 
1294     for (auto &RetValAAIt : RetValAA.returned_values()) {
1295       Value *RetVal = RetValAAIt.first;
1296       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1297         // Arguments are mapped to call site operands and we begin the traversal
1298         // again.
1299         bool Unused = false;
1300         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1301         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1302         continue;
1303       } else if (isa<CallBase>(RetVal)) {
1304         // Call sites are resolved by the callee attribute over time, no need to
1305         // do anything for us.
1306         continue;
1307       } else if (isa<Constant>(RetVal)) {
1308         // Constants are valid everywhere, we can simply take them.
1309         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1310         continue;
1311       }
1312     }
1313   }
1314 
1315   // To avoid modifications to the ReturnedValues map while we iterate over it
1316   // we kept record of potential new entries in a copy map, NewRVsMap.
1317   for (auto &It : NewRVsMap) {
1318     assert(!It.second.empty() && "Entry does not add anything.");
1319     auto &ReturnInsts = ReturnedValues[It.first];
1320     for (ReturnInst *RI : It.second)
1321       if (ReturnInsts.insert(RI)) {
1322         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1323                           << *It.first << " => " << *RI << "\n");
1324         Changed = true;
1325       }
1326   }
1327 
1328   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1329   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1330 }
1331 
1332 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1333   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1334 
1335   /// See AbstractAttribute::trackStatistics()
1336   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1337 };
1338 
1339 /// Returned values information for a call sites.
1340 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1341   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1342 
1343   /// See AbstractAttribute::initialize(...).
1344   void initialize(Attributor &A) override {
1345     // TODO: Once we have call site specific value information we can provide
1346     //       call site specific liveness information and then it makes
1347     //       sense to specialize attributes for call sites instead of
1348     //       redirecting requests to the callee.
1349     llvm_unreachable("Abstract attributes for returned values are not "
1350                      "supported for call sites yet!");
1351   }
1352 
1353   /// See AbstractAttribute::updateImpl(...).
1354   ChangeStatus updateImpl(Attributor &A) override {
1355     return indicatePessimisticFixpoint();
1356   }
1357 
1358   /// See AbstractAttribute::trackStatistics()
1359   void trackStatistics() const override {}
1360 };
1361 
1362 /// ------------------------ NoSync Function Attribute -------------------------
1363 
1364 struct AANoSyncImpl : AANoSync {
1365   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1366 
1367   const std::string getAsStr() const override {
1368     return getAssumed() ? "nosync" : "may-sync";
1369   }
1370 
1371   /// See AbstractAttribute::updateImpl(...).
1372   ChangeStatus updateImpl(Attributor &A) override;
1373 
1374   /// Helper function used to determine whether an instruction is non-relaxed
1375   /// atomic. In other words, if an atomic instruction does not have unordered
1376   /// or monotonic ordering
1377   static bool isNonRelaxedAtomic(Instruction *I);
1378 
1379   /// Helper function used to determine whether an instruction is volatile.
1380   static bool isVolatile(Instruction *I);
1381 
1382   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1383   /// memset).
1384   static bool isNoSyncIntrinsic(Instruction *I);
1385 };
1386 
1387 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1388   if (!I->isAtomic())
1389     return false;
1390 
1391   AtomicOrdering Ordering;
1392   switch (I->getOpcode()) {
1393   case Instruction::AtomicRMW:
1394     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1395     break;
1396   case Instruction::Store:
1397     Ordering = cast<StoreInst>(I)->getOrdering();
1398     break;
1399   case Instruction::Load:
1400     Ordering = cast<LoadInst>(I)->getOrdering();
1401     break;
1402   case Instruction::Fence: {
1403     auto *FI = cast<FenceInst>(I);
1404     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1405       return false;
1406     Ordering = FI->getOrdering();
1407     break;
1408   }
1409   case Instruction::AtomicCmpXchg: {
1410     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1411     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1412     // Only if both are relaxed, than it can be treated as relaxed.
1413     // Otherwise it is non-relaxed.
1414     if (Success != AtomicOrdering::Unordered &&
1415         Success != AtomicOrdering::Monotonic)
1416       return true;
1417     if (Failure != AtomicOrdering::Unordered &&
1418         Failure != AtomicOrdering::Monotonic)
1419       return true;
1420     return false;
1421   }
1422   default:
1423     llvm_unreachable(
1424         "New atomic operations need to be known in the attributor.");
1425   }
1426 
1427   // Relaxed.
1428   if (Ordering == AtomicOrdering::Unordered ||
1429       Ordering == AtomicOrdering::Monotonic)
1430     return false;
1431   return true;
1432 }
1433 
1434 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1435 /// FIXME: We should ipmrove the handling of intrinsics.
1436 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1437   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1438     switch (II->getIntrinsicID()) {
1439     /// Element wise atomic memory intrinsics are can only be unordered,
1440     /// therefore nosync.
1441     case Intrinsic::memset_element_unordered_atomic:
1442     case Intrinsic::memmove_element_unordered_atomic:
1443     case Intrinsic::memcpy_element_unordered_atomic:
1444       return true;
1445     case Intrinsic::memset:
1446     case Intrinsic::memmove:
1447     case Intrinsic::memcpy:
1448       if (!cast<MemIntrinsic>(II)->isVolatile())
1449         return true;
1450       return false;
1451     default:
1452       return false;
1453     }
1454   }
1455   return false;
1456 }
1457 
1458 bool AANoSyncImpl::isVolatile(Instruction *I) {
1459   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1460          "Calls should not be checked here");
1461 
1462   switch (I->getOpcode()) {
1463   case Instruction::AtomicRMW:
1464     return cast<AtomicRMWInst>(I)->isVolatile();
1465   case Instruction::Store:
1466     return cast<StoreInst>(I)->isVolatile();
1467   case Instruction::Load:
1468     return cast<LoadInst>(I)->isVolatile();
1469   case Instruction::AtomicCmpXchg:
1470     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1471   default:
1472     return false;
1473   }
1474 }
1475 
1476 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1477 
1478   auto CheckRWInstForNoSync = [&](Instruction &I) {
1479     /// We are looking for volatile instructions or Non-Relaxed atomics.
1480     /// FIXME: We should improve the handling of intrinsics.
1481 
1482     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1483       return true;
1484 
1485     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1486       if (ICS.hasFnAttr(Attribute::NoSync))
1487         return true;
1488 
1489       const auto &NoSyncAA =
1490           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1491       if (NoSyncAA.isAssumedNoSync())
1492         return true;
1493       return false;
1494     }
1495 
1496     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1497       return true;
1498 
1499     return false;
1500   };
1501 
1502   auto CheckForNoSync = [&](Instruction &I) {
1503     // At this point we handled all read/write effects and they are all
1504     // nosync, so they can be skipped.
1505     if (I.mayReadOrWriteMemory())
1506       return true;
1507 
1508     // non-convergent and readnone imply nosync.
1509     return !ImmutableCallSite(&I).isConvergent();
1510   };
1511 
1512   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1513       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1514     return indicatePessimisticFixpoint();
1515 
1516   return ChangeStatus::UNCHANGED;
1517 }
1518 
1519 struct AANoSyncFunction final : public AANoSyncImpl {
1520   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1521 
1522   /// See AbstractAttribute::trackStatistics()
1523   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1524 };
1525 
1526 /// NoSync attribute deduction for a call sites.
1527 struct AANoSyncCallSite final : AANoSyncImpl {
1528   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1529 
1530   /// See AbstractAttribute::initialize(...).
1531   void initialize(Attributor &A) override {
1532     AANoSyncImpl::initialize(A);
1533     Function *F = getAssociatedFunction();
1534     if (!F)
1535       indicatePessimisticFixpoint();
1536   }
1537 
1538   /// See AbstractAttribute::updateImpl(...).
1539   ChangeStatus updateImpl(Attributor &A) override {
1540     // TODO: Once we have call site specific value information we can provide
1541     //       call site specific liveness information and then it makes
1542     //       sense to specialize attributes for call sites arguments instead of
1543     //       redirecting requests to the callee argument.
1544     Function *F = getAssociatedFunction();
1545     const IRPosition &FnPos = IRPosition::function(*F);
1546     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1547     return clampStateAndIndicateChange(
1548         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1549   }
1550 
1551   /// See AbstractAttribute::trackStatistics()
1552   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1553 };
1554 
1555 /// ------------------------ No-Free Attributes ----------------------------
1556 
1557 struct AANoFreeImpl : public AANoFree {
1558   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1559 
1560   /// See AbstractAttribute::updateImpl(...).
1561   ChangeStatus updateImpl(Attributor &A) override {
1562     auto CheckForNoFree = [&](Instruction &I) {
1563       ImmutableCallSite ICS(&I);
1564       if (ICS.hasFnAttr(Attribute::NoFree))
1565         return true;
1566 
1567       const auto &NoFreeAA =
1568           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1569       return NoFreeAA.isAssumedNoFree();
1570     };
1571 
1572     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1573       return indicatePessimisticFixpoint();
1574     return ChangeStatus::UNCHANGED;
1575   }
1576 
1577   /// See AbstractAttribute::getAsStr().
1578   const std::string getAsStr() const override {
1579     return getAssumed() ? "nofree" : "may-free";
1580   }
1581 };
1582 
1583 struct AANoFreeFunction final : public AANoFreeImpl {
1584   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1585 
1586   /// See AbstractAttribute::trackStatistics()
1587   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1588 };
1589 
1590 /// NoFree attribute deduction for a call sites.
1591 struct AANoFreeCallSite final : AANoFreeImpl {
1592   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1593 
1594   /// See AbstractAttribute::initialize(...).
1595   void initialize(Attributor &A) override {
1596     AANoFreeImpl::initialize(A);
1597     Function *F = getAssociatedFunction();
1598     if (!F)
1599       indicatePessimisticFixpoint();
1600   }
1601 
1602   /// See AbstractAttribute::updateImpl(...).
1603   ChangeStatus updateImpl(Attributor &A) override {
1604     // TODO: Once we have call site specific value information we can provide
1605     //       call site specific liveness information and then it makes
1606     //       sense to specialize attributes for call sites arguments instead of
1607     //       redirecting requests to the callee argument.
1608     Function *F = getAssociatedFunction();
1609     const IRPosition &FnPos = IRPosition::function(*F);
1610     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1611     return clampStateAndIndicateChange(
1612         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1613   }
1614 
1615   /// See AbstractAttribute::trackStatistics()
1616   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1617 };
1618 
1619 /// NoFree attribute for floating values.
1620 struct AANoFreeFloating : AANoFreeImpl {
1621   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1622 
1623   /// See AbstractAttribute::trackStatistics()
1624   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1625 
1626   /// See Abstract Attribute::updateImpl(...).
1627   ChangeStatus updateImpl(Attributor &A) override {
1628     const IRPosition &IRP = getIRPosition();
1629 
1630     const auto &NoFreeAA =
1631         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1632     if (NoFreeAA.isAssumedNoFree())
1633       return ChangeStatus::UNCHANGED;
1634 
1635     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1636     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1637       Instruction *UserI = cast<Instruction>(U.getUser());
1638       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1639         if (CB->isBundleOperand(&U))
1640           return false;
1641         if (!CB->isArgOperand(&U))
1642           return true;
1643         unsigned ArgNo = CB->getArgOperandNo(&U);
1644 
1645         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1646             *this, IRPosition::callsite_argument(*CB, ArgNo));
1647         return NoFreeArg.isAssumedNoFree();
1648       }
1649 
1650       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1651           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1652         Follow = true;
1653         return true;
1654       }
1655 
1656       // Unknown user.
1657       return false;
1658     };
1659     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1660       return indicatePessimisticFixpoint();
1661 
1662     return ChangeStatus::UNCHANGED;
1663   }
1664 };
1665 
1666 /// NoFree attribute for a call site argument.
1667 struct AANoFreeArgument final : AANoFreeFloating {
1668   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1669 
1670   /// See AbstractAttribute::trackStatistics()
1671   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1672 };
1673 
1674 /// NoFree attribute for call site arguments.
1675 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1676   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1677 
1678   /// See AbstractAttribute::updateImpl(...).
1679   ChangeStatus updateImpl(Attributor &A) override {
1680     // TODO: Once we have call site specific value information we can provide
1681     //       call site specific liveness information and then it makes
1682     //       sense to specialize attributes for call sites arguments instead of
1683     //       redirecting requests to the callee argument.
1684     Argument *Arg = getAssociatedArgument();
1685     if (!Arg)
1686       return indicatePessimisticFixpoint();
1687     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1688     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1689     return clampStateAndIndicateChange(
1690         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1691   }
1692 
1693   /// See AbstractAttribute::trackStatistics()
1694   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1695 };
1696 
1697 /// NoFree attribute for function return value.
1698 struct AANoFreeReturned final : AANoFreeFloating {
1699   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1700     llvm_unreachable("NoFree is not applicable to function returns!");
1701   }
1702 
1703   /// See AbstractAttribute::initialize(...).
1704   void initialize(Attributor &A) override {
1705     llvm_unreachable("NoFree is not applicable to function returns!");
1706   }
1707 
1708   /// See AbstractAttribute::updateImpl(...).
1709   ChangeStatus updateImpl(Attributor &A) override {
1710     llvm_unreachable("NoFree is not applicable to function returns!");
1711   }
1712 
1713   /// See AbstractAttribute::trackStatistics()
1714   void trackStatistics() const override {}
1715 };
1716 
1717 /// NoFree attribute deduction for a call site return value.
1718 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1719   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1720 
1721   ChangeStatus manifest(Attributor &A) override {
1722     return ChangeStatus::UNCHANGED;
1723   }
1724   /// See AbstractAttribute::trackStatistics()
1725   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1726 };
1727 
1728 /// ------------------------ NonNull Argument Attribute ------------------------
1729 static int64_t getKnownNonNullAndDerefBytesForUse(
1730     Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
1731     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1732   TrackUse = false;
1733 
1734   const Value *UseV = U->get();
1735   if (!UseV->getType()->isPointerTy())
1736     return 0;
1737 
1738   Type *PtrTy = UseV->getType();
1739   const Function *F = I->getFunction();
1740   bool NullPointerIsDefined =
1741       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1742   const DataLayout &DL = A.getInfoCache().getDL();
1743   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1744     if (ICS.isBundleOperand(U))
1745       return 0;
1746 
1747     if (ICS.isCallee(U)) {
1748       IsNonNull |= !NullPointerIsDefined;
1749       return 0;
1750     }
1751 
1752     unsigned ArgNo = ICS.getArgumentNo(U);
1753     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1754     // As long as we only use known information there is no need to track
1755     // dependences here.
1756     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1757                                                   /* TrackDependence */ false);
1758     IsNonNull |= DerefAA.isKnownNonNull();
1759     return DerefAA.getKnownDereferenceableBytes();
1760   }
1761 
1762   // We need to follow common pointer manipulation uses to the accesses they
1763   // feed into. We can try to be smart to avoid looking through things we do not
1764   // like for now, e.g., non-inbounds GEPs.
1765   if (isa<CastInst>(I)) {
1766     TrackUse = true;
1767     return 0;
1768   }
1769   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1770     if (GEP->hasAllConstantIndices()) {
1771       TrackUse = true;
1772       return 0;
1773     }
1774 
1775   int64_t Offset;
1776   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1777     if (Base == &AssociatedValue &&
1778         Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1779       int64_t DerefBytes =
1780           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1781 
1782       IsNonNull |= !NullPointerIsDefined;
1783       return std::max(int64_t(0), DerefBytes);
1784     }
1785   }
1786 
1787   /// Corner case when an offset is 0.
1788   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1789           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1790     if (Offset == 0 && Base == &AssociatedValue &&
1791         Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1792       int64_t DerefBytes =
1793           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1794       IsNonNull |= !NullPointerIsDefined;
1795       return std::max(int64_t(0), DerefBytes);
1796     }
1797   }
1798   if (const Value *Base =
1799           GetPointerBaseWithConstantOffset(UseV, Offset, DL,
1800                                            /*AllowNonInbounds*/ false)) {
1801     if (Base == &AssociatedValue) {
1802       // As long as we only use known information there is no need to track
1803       // dependences here.
1804       auto &DerefAA = A.getAAFor<AADereferenceable>(
1805           QueryingAA, IRPosition::value(*Base), /* TrackDependence */ false);
1806       IsNonNull |= (!NullPointerIsDefined && DerefAA.isKnownNonNull());
1807       IsNonNull |= (!NullPointerIsDefined && (Offset != 0));
1808       int64_t DerefBytes = DerefAA.getKnownDereferenceableBytes();
1809       return std::max(int64_t(0), DerefBytes - std::max(int64_t(0), Offset));
1810     }
1811   }
1812 
1813   return 0;
1814 }
1815 
1816 struct AANonNullImpl : AANonNull {
1817   AANonNullImpl(const IRPosition &IRP)
1818       : AANonNull(IRP),
1819         NullIsDefined(NullPointerIsDefined(
1820             getAnchorScope(),
1821             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1822 
1823   /// See AbstractAttribute::initialize(...).
1824   void initialize(Attributor &A) override {
1825     if (!NullIsDefined &&
1826         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1827       indicateOptimisticFixpoint();
1828     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1829       indicatePessimisticFixpoint();
1830     else
1831       AANonNull::initialize(A);
1832   }
1833 
1834   /// See AAFromMustBeExecutedContext
1835   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1836     bool IsNonNull = false;
1837     bool TrackUse = false;
1838     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1839                                        IsNonNull, TrackUse);
1840     setKnown(IsNonNull);
1841     return TrackUse;
1842   }
1843 
1844   /// See AbstractAttribute::getAsStr().
1845   const std::string getAsStr() const override {
1846     return getAssumed() ? "nonnull" : "may-null";
1847   }
1848 
1849   /// Flag to determine if the underlying value can be null and still allow
1850   /// valid accesses.
1851   const bool NullIsDefined;
1852 };
1853 
1854 /// NonNull attribute for a floating value.
1855 struct AANonNullFloating
1856     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1857   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1858   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1859 
1860   /// See AbstractAttribute::updateImpl(...).
1861   ChangeStatus updateImpl(Attributor &A) override {
1862     ChangeStatus Change = Base::updateImpl(A);
1863     if (isKnownNonNull())
1864       return Change;
1865 
1866     if (!NullIsDefined) {
1867       const auto &DerefAA =
1868           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1869       if (DerefAA.getAssumedDereferenceableBytes())
1870         return Change;
1871     }
1872 
1873     const DataLayout &DL = A.getDataLayout();
1874 
1875     DominatorTree *DT = nullptr;
1876     InformationCache &InfoCache = A.getInfoCache();
1877     if (const Function *Fn = getAnchorScope())
1878       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1879 
1880     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1881                             bool Stripped) -> bool {
1882       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1883       if (!Stripped && this == &AA) {
1884         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1885           T.indicatePessimisticFixpoint();
1886       } else {
1887         // Use abstract attribute information.
1888         const AANonNull::StateType &NS =
1889             static_cast<const AANonNull::StateType &>(AA.getState());
1890         T ^= NS;
1891       }
1892       return T.isValidState();
1893     };
1894 
1895     StateType T;
1896     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1897                                                      T, VisitValueCB))
1898       return indicatePessimisticFixpoint();
1899 
1900     return clampStateAndIndicateChange(getState(), T);
1901   }
1902 
1903   /// See AbstractAttribute::trackStatistics()
1904   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1905 };
1906 
1907 /// NonNull attribute for function return value.
1908 struct AANonNullReturned final
1909     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1910   AANonNullReturned(const IRPosition &IRP)
1911       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1912 
1913   /// See AbstractAttribute::trackStatistics()
1914   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1915 };
1916 
1917 /// NonNull attribute for function argument.
1918 struct AANonNullArgument final
1919     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1920                                                               AANonNullImpl> {
1921   AANonNullArgument(const IRPosition &IRP)
1922       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1923                                                                 AANonNullImpl>(
1924             IRP) {}
1925 
1926   /// See AbstractAttribute::trackStatistics()
1927   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1928 };
1929 
1930 struct AANonNullCallSiteArgument final : AANonNullFloating {
1931   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1932 
1933   /// See AbstractAttribute::trackStatistics()
1934   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1935 };
1936 
1937 /// NonNull attribute for a call site return position.
1938 struct AANonNullCallSiteReturned final
1939     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1940                                                              AANonNullImpl> {
1941   AANonNullCallSiteReturned(const IRPosition &IRP)
1942       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1943                                                                AANonNullImpl>(
1944             IRP) {}
1945 
1946   /// See AbstractAttribute::trackStatistics()
1947   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1948 };
1949 
1950 /// ------------------------ No-Recurse Attributes ----------------------------
1951 
1952 struct AANoRecurseImpl : public AANoRecurse {
1953   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1954 
1955   /// See AbstractAttribute::getAsStr()
1956   const std::string getAsStr() const override {
1957     return getAssumed() ? "norecurse" : "may-recurse";
1958   }
1959 };
1960 
1961 struct AANoRecurseFunction final : AANoRecurseImpl {
1962   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1963 
1964   /// See AbstractAttribute::initialize(...).
1965   void initialize(Attributor &A) override {
1966     AANoRecurseImpl::initialize(A);
1967     if (const Function *F = getAnchorScope())
1968       if (A.getInfoCache().getSccSize(*F) == 1)
1969         return;
1970     indicatePessimisticFixpoint();
1971   }
1972 
1973   /// See AbstractAttribute::updateImpl(...).
1974   ChangeStatus updateImpl(Attributor &A) override {
1975 
1976     auto CheckForNoRecurse = [&](Instruction &I) {
1977       ImmutableCallSite ICS(&I);
1978       if (ICS.hasFnAttr(Attribute::NoRecurse))
1979         return true;
1980 
1981       const auto &NoRecurseAA =
1982           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1983       if (!NoRecurseAA.isAssumedNoRecurse())
1984         return false;
1985 
1986       // Recursion to the same function
1987       if (ICS.getCalledFunction() == getAnchorScope())
1988         return false;
1989 
1990       return true;
1991     };
1992 
1993     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1994       return indicatePessimisticFixpoint();
1995     return ChangeStatus::UNCHANGED;
1996   }
1997 
1998   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1999 };
2000 
2001 /// NoRecurse attribute deduction for a call sites.
2002 struct AANoRecurseCallSite final : AANoRecurseImpl {
2003   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
2004 
2005   /// See AbstractAttribute::initialize(...).
2006   void initialize(Attributor &A) override {
2007     AANoRecurseImpl::initialize(A);
2008     Function *F = getAssociatedFunction();
2009     if (!F)
2010       indicatePessimisticFixpoint();
2011   }
2012 
2013   /// See AbstractAttribute::updateImpl(...).
2014   ChangeStatus updateImpl(Attributor &A) override {
2015     // TODO: Once we have call site specific value information we can provide
2016     //       call site specific liveness information and then it makes
2017     //       sense to specialize attributes for call sites arguments instead of
2018     //       redirecting requests to the callee argument.
2019     Function *F = getAssociatedFunction();
2020     const IRPosition &FnPos = IRPosition::function(*F);
2021     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
2022     return clampStateAndIndicateChange(
2023         getState(),
2024         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
2025   }
2026 
2027   /// See AbstractAttribute::trackStatistics()
2028   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2029 };
2030 
2031 /// -------------------- Undefined-Behavior Attributes ------------------------
2032 
2033 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2034   AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
2035 
2036   /// See AbstractAttribute::updateImpl(...).
2037   // through a pointer (i.e. also branches etc.)
2038   ChangeStatus updateImpl(Attributor &A) override {
2039     const size_t UBPrevSize = KnownUBInsts.size();
2040     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2041 
2042     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2043       // Skip instructions that are already saved.
2044       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2045         return true;
2046 
2047       // If we reach here, we know we have an instruction
2048       // that accesses memory through a pointer operand,
2049       // for which getPointerOperand() should give it to us.
2050       const Value *PtrOp =
2051           Attributor::getPointerOperand(&I, /* AllowVolatile */ true);
2052       assert(PtrOp &&
2053              "Expected pointer operand of memory accessing instruction");
2054 
2055       // A memory access through a pointer is considered UB
2056       // only if the pointer has constant null value.
2057       // TODO: Expand it to not only check constant values.
2058       if (!isa<ConstantPointerNull>(PtrOp)) {
2059         AssumedNoUBInsts.insert(&I);
2060         return true;
2061       }
2062       const Type *PtrTy = PtrOp->getType();
2063 
2064       // Because we only consider instructions inside functions,
2065       // assume that a parent function exists.
2066       const Function *F = I.getFunction();
2067 
2068       // A memory access using constant null pointer is only considered UB
2069       // if null pointer is _not_ defined for the target platform.
2070       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2071         AssumedNoUBInsts.insert(&I);
2072       else
2073         KnownUBInsts.insert(&I);
2074       return true;
2075     };
2076 
2077     auto InspectBrInstForUB = [&](Instruction &I) {
2078       // A conditional branch instruction is considered UB if it has `undef`
2079       // condition.
2080 
2081       // Skip instructions that are already saved.
2082       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2083         return true;
2084 
2085       // We know we have a branch instruction.
2086       auto BrInst = cast<BranchInst>(&I);
2087 
2088       // Unconditional branches are never considered UB.
2089       if (BrInst->isUnconditional())
2090         return true;
2091 
2092       // Either we stopped and the appropriate action was taken,
2093       // or we got back a simplified value to continue.
2094       Optional<Value *> SimplifiedCond =
2095           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2096       if (!SimplifiedCond.hasValue())
2097         return true;
2098       AssumedNoUBInsts.insert(&I);
2099       return true;
2100     };
2101 
2102     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2103                               {Instruction::Load, Instruction::Store,
2104                                Instruction::AtomicCmpXchg,
2105                                Instruction::AtomicRMW});
2106     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br});
2107     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2108         UBPrevSize != KnownUBInsts.size())
2109       return ChangeStatus::CHANGED;
2110     return ChangeStatus::UNCHANGED;
2111   }
2112 
2113   bool isKnownToCauseUB(Instruction *I) const override {
2114     return KnownUBInsts.count(I);
2115   }
2116 
2117   bool isAssumedToCauseUB(Instruction *I) const override {
2118     // In simple words, if an instruction is not in the assumed to _not_
2119     // cause UB, then it is assumed UB (that includes those
2120     // in the KnownUBInsts set). The rest is boilerplate
2121     // is to ensure that it is one of the instructions we test
2122     // for UB.
2123 
2124     switch (I->getOpcode()) {
2125     case Instruction::Load:
2126     case Instruction::Store:
2127     case Instruction::AtomicCmpXchg:
2128     case Instruction::AtomicRMW:
2129       return !AssumedNoUBInsts.count(I);
2130     case Instruction::Br: {
2131       auto BrInst = cast<BranchInst>(I);
2132       if (BrInst->isUnconditional())
2133         return false;
2134       return !AssumedNoUBInsts.count(I);
2135     } break;
2136     default:
2137       return false;
2138     }
2139     return false;
2140   }
2141 
2142   ChangeStatus manifest(Attributor &A) override {
2143     if (KnownUBInsts.empty())
2144       return ChangeStatus::UNCHANGED;
2145     for (Instruction *I : KnownUBInsts)
2146       A.changeToUnreachableAfterManifest(I);
2147     return ChangeStatus::CHANGED;
2148   }
2149 
2150   /// See AbstractAttribute::getAsStr()
2151   const std::string getAsStr() const override {
2152     return getAssumed() ? "undefined-behavior" : "no-ub";
2153   }
2154 
2155   /// Note: The correctness of this analysis depends on the fact that the
2156   /// following 2 sets will stop changing after some point.
2157   /// "Change" here means that their size changes.
2158   /// The size of each set is monotonically increasing
2159   /// (we only add items to them) and it is upper bounded by the number of
2160   /// instructions in the processed function (we can never save more
2161   /// elements in either set than this number). Hence, at some point,
2162   /// they will stop increasing.
2163   /// Consequently, at some point, both sets will have stopped
2164   /// changing, effectively making the analysis reach a fixpoint.
2165 
2166   /// Note: These 2 sets are disjoint and an instruction can be considered
2167   /// one of 3 things:
2168   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2169   ///    the KnownUBInsts set.
2170   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2171   ///    has a reason to assume it).
2172   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2173   ///    could not find a reason to assume or prove that it can cause UB,
2174   ///    hence it assumes it doesn't. We have a set for these instructions
2175   ///    so that we don't reprocess them in every update.
2176   ///    Note however that instructions in this set may cause UB.
2177 
2178 protected:
2179   /// A set of all live instructions _known_ to cause UB.
2180   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2181 
2182 private:
2183   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2184   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2185 
2186   // Should be called on updates in which if we're processing an instruction
2187   // \p I that depends on a value \p V, one of the following has to happen:
2188   // - If the value is assumed, then stop.
2189   // - If the value is known but undef, then consider it UB.
2190   // - Otherwise, do specific processing with the simplified value.
2191   // We return None in the first 2 cases to signify that an appropriate
2192   // action was taken and the caller should stop.
2193   // Otherwise, we return the simplified value that the caller should
2194   // use for specific processing.
2195   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2196                                          Instruction *I) {
2197     const auto &ValueSimplifyAA =
2198         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2199     Optional<Value *> SimplifiedV =
2200         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2201     if (!ValueSimplifyAA.isKnown()) {
2202       // Don't depend on assumed values.
2203       return llvm::None;
2204     }
2205     if (!SimplifiedV.hasValue()) {
2206       // If it is known (which we tested above) but it doesn't have a value,
2207       // then we can assume `undef` and hence the instruction is UB.
2208       KnownUBInsts.insert(I);
2209       return llvm::None;
2210     }
2211     Value *Val = SimplifiedV.getValue();
2212     if (isa<UndefValue>(Val)) {
2213       KnownUBInsts.insert(I);
2214       return llvm::None;
2215     }
2216     return Val;
2217   }
2218 };
2219 
2220 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2221   AAUndefinedBehaviorFunction(const IRPosition &IRP)
2222       : AAUndefinedBehaviorImpl(IRP) {}
2223 
2224   /// See AbstractAttribute::trackStatistics()
2225   void trackStatistics() const override {
2226     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2227                "Number of instructions known to have UB");
2228     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2229         KnownUBInsts.size();
2230   }
2231 };
2232 
2233 /// ------------------------ Will-Return Attributes ----------------------------
2234 
2235 // Helper function that checks whether a function has any cycle.
2236 // TODO: Replace with more efficent code
2237 static bool containsCycle(Function &F) {
2238   SmallPtrSet<BasicBlock *, 32> Visited;
2239 
2240   // Traverse BB by dfs and check whether successor is already visited.
2241   for (BasicBlock *BB : depth_first(&F)) {
2242     Visited.insert(BB);
2243     for (auto *SuccBB : successors(BB)) {
2244       if (Visited.count(SuccBB))
2245         return true;
2246     }
2247   }
2248   return false;
2249 }
2250 
2251 // Helper function that checks the function have a loop which might become an
2252 // endless loop
2253 // FIXME: Any cycle is regarded as endless loop for now.
2254 //        We have to allow some patterns.
2255 static bool containsPossiblyEndlessLoop(Function *F) {
2256   return !F || !F->hasExactDefinition() || containsCycle(*F);
2257 }
2258 
2259 struct AAWillReturnImpl : public AAWillReturn {
2260   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2261 
2262   /// See AbstractAttribute::initialize(...).
2263   void initialize(Attributor &A) override {
2264     AAWillReturn::initialize(A);
2265 
2266     Function *F = getAssociatedFunction();
2267     if (containsPossiblyEndlessLoop(F))
2268       indicatePessimisticFixpoint();
2269   }
2270 
2271   /// See AbstractAttribute::updateImpl(...).
2272   ChangeStatus updateImpl(Attributor &A) override {
2273     auto CheckForWillReturn = [&](Instruction &I) {
2274       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2275       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2276       if (WillReturnAA.isKnownWillReturn())
2277         return true;
2278       if (!WillReturnAA.isAssumedWillReturn())
2279         return false;
2280       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2281       return NoRecurseAA.isAssumedNoRecurse();
2282     };
2283 
2284     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2285       return indicatePessimisticFixpoint();
2286 
2287     return ChangeStatus::UNCHANGED;
2288   }
2289 
2290   /// See AbstractAttribute::getAsStr()
2291   const std::string getAsStr() const override {
2292     return getAssumed() ? "willreturn" : "may-noreturn";
2293   }
2294 };
2295 
2296 struct AAWillReturnFunction final : AAWillReturnImpl {
2297   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2298 
2299   /// See AbstractAttribute::trackStatistics()
2300   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2301 };
2302 
2303 /// WillReturn attribute deduction for a call sites.
2304 struct AAWillReturnCallSite final : AAWillReturnImpl {
2305   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2306 
2307   /// See AbstractAttribute::initialize(...).
2308   void initialize(Attributor &A) override {
2309     AAWillReturnImpl::initialize(A);
2310     Function *F = getAssociatedFunction();
2311     if (!F)
2312       indicatePessimisticFixpoint();
2313   }
2314 
2315   /// See AbstractAttribute::updateImpl(...).
2316   ChangeStatus updateImpl(Attributor &A) override {
2317     // TODO: Once we have call site specific value information we can provide
2318     //       call site specific liveness information and then it makes
2319     //       sense to specialize attributes for call sites arguments instead of
2320     //       redirecting requests to the callee argument.
2321     Function *F = getAssociatedFunction();
2322     const IRPosition &FnPos = IRPosition::function(*F);
2323     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2324     return clampStateAndIndicateChange(
2325         getState(),
2326         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2327   }
2328 
2329   /// See AbstractAttribute::trackStatistics()
2330   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2331 };
2332 
2333 /// -------------------AAReachability Attribute--------------------------
2334 
2335 struct AAReachabilityImpl : AAReachability {
2336   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2337 
2338   const std::string getAsStr() const override {
2339     // TODO: Return the number of reachable queries.
2340     return "reachable";
2341   }
2342 
2343   /// See AbstractAttribute::initialize(...).
2344   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2345 
2346   /// See AbstractAttribute::updateImpl(...).
2347   ChangeStatus updateImpl(Attributor &A) override {
2348     return indicatePessimisticFixpoint();
2349   }
2350 };
2351 
2352 struct AAReachabilityFunction final : public AAReachabilityImpl {
2353   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2354 
2355   /// See AbstractAttribute::trackStatistics()
2356   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2357 };
2358 
2359 /// ------------------------ NoAlias Argument Attribute ------------------------
2360 
2361 struct AANoAliasImpl : AANoAlias {
2362   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2363 
2364   const std::string getAsStr() const override {
2365     return getAssumed() ? "noalias" : "may-alias";
2366   }
2367 };
2368 
2369 /// NoAlias attribute for a floating value.
2370 struct AANoAliasFloating final : AANoAliasImpl {
2371   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2372 
2373   /// See AbstractAttribute::initialize(...).
2374   void initialize(Attributor &A) override {
2375     AANoAliasImpl::initialize(A);
2376     Value &Val = getAssociatedValue();
2377     if (isa<AllocaInst>(Val))
2378       indicateOptimisticFixpoint();
2379     if (isa<ConstantPointerNull>(Val) &&
2380         Val.getType()->getPointerAddressSpace() == 0)
2381       indicateOptimisticFixpoint();
2382   }
2383 
2384   /// See AbstractAttribute::updateImpl(...).
2385   ChangeStatus updateImpl(Attributor &A) override {
2386     // TODO: Implement this.
2387     return indicatePessimisticFixpoint();
2388   }
2389 
2390   /// See AbstractAttribute::trackStatistics()
2391   void trackStatistics() const override {
2392     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2393   }
2394 };
2395 
2396 /// NoAlias attribute for an argument.
2397 struct AANoAliasArgument final
2398     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2399   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2400   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2401 
2402   /// See AbstractAttribute::update(...).
2403   ChangeStatus updateImpl(Attributor &A) override {
2404     // We have to make sure no-alias on the argument does not break
2405     // synchronization when this is a callback argument, see also [1] below.
2406     // If synchronization cannot be affected, we delegate to the base updateImpl
2407     // function, otherwise we give up for now.
2408 
2409     // If the function is no-sync, no-alias cannot break synchronization.
2410     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2411         *this, IRPosition::function_scope(getIRPosition()));
2412     if (NoSyncAA.isAssumedNoSync())
2413       return Base::updateImpl(A);
2414 
2415     // If the argument is read-only, no-alias cannot break synchronization.
2416     const auto &MemBehaviorAA =
2417         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2418     if (MemBehaviorAA.isAssumedReadOnly())
2419       return Base::updateImpl(A);
2420 
2421     // If the argument is never passed through callbacks, no-alias cannot break
2422     // synchronization.
2423     if (A.checkForAllCallSites(
2424             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2425             true))
2426       return Base::updateImpl(A);
2427 
2428     // TODO: add no-alias but make sure it doesn't break synchronization by
2429     // introducing fake uses. See:
2430     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2431     //     International Workshop on OpenMP 2018,
2432     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2433 
2434     return indicatePessimisticFixpoint();
2435   }
2436 
2437   /// See AbstractAttribute::trackStatistics()
2438   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2439 };
2440 
2441 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2442   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2443 
2444   /// See AbstractAttribute::initialize(...).
2445   void initialize(Attributor &A) override {
2446     // See callsite argument attribute and callee argument attribute.
2447     ImmutableCallSite ICS(&getAnchorValue());
2448     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2449       indicateOptimisticFixpoint();
2450   }
2451 
2452   /// See AbstractAttribute::updateImpl(...).
2453   ChangeStatus updateImpl(Attributor &A) override {
2454     // We can deduce "noalias" if the following conditions hold.
2455     // (i)   Associated value is assumed to be noalias in the definition.
2456     // (ii)  Associated value is assumed to be no-capture in all the uses
2457     //       possibly executed before this callsite.
2458     // (iii) There is no other pointer argument which could alias with the
2459     //       value.
2460 
2461     const Value &V = getAssociatedValue();
2462     const IRPosition IRP = IRPosition::value(V);
2463 
2464     // (i) Check whether noalias holds in the definition.
2465 
2466     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
2467     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] check definition: " << V
2468                       << " :: " << NoAliasAA << "\n");
2469 
2470     if (!NoAliasAA.isAssumedNoAlias())
2471       return indicatePessimisticFixpoint();
2472 
2473     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
2474                       << " is assumed NoAlias in the definition\n");
2475 
2476     // (ii) Check whether the value is captured in the scope using AANoCapture.
2477     //      FIXME: This is conservative though, it is better to look at CFG and
2478     //             check only uses possibly executed before this callsite.
2479 
2480     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2481     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2482       LLVM_DEBUG(
2483           dbgs() << "[Attributor][AANoAliasCSArg] " << V
2484                  << " cannot be noalias as it is potentially captured\n");
2485       return indicatePessimisticFixpoint();
2486     }
2487 
2488     // (iii) Check there is no other pointer argument which could alias with the
2489     // value.
2490     // TODO: AbstractCallSite
2491     ImmutableCallSite ICS(&getAnchorValue());
2492     for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2493       if (getArgNo() == (int)i)
2494         continue;
2495       const Value *ArgOp = ICS.getArgOperand(i);
2496       if (!ArgOp->getType()->isPointerTy())
2497         continue;
2498 
2499       if (const Function *F = getAnchorScope()) {
2500         if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2501           bool IsAliasing = !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2502           LLVM_DEBUG(dbgs()
2503                      << "[Attributor][NoAliasCSArg] Check alias between "
2504                         "callsite arguments "
2505                      << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
2506                      << getAssociatedValue() << " " << *ArgOp << " => "
2507                      << (IsAliasing ? "" : "no-") << "alias \n");
2508 
2509           if (!IsAliasing)
2510             continue;
2511         }
2512       }
2513       return indicatePessimisticFixpoint();
2514     }
2515 
2516     return ChangeStatus::UNCHANGED;
2517   }
2518 
2519   /// See AbstractAttribute::trackStatistics()
2520   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2521 };
2522 
2523 /// NoAlias attribute for function return value.
2524 struct AANoAliasReturned final : AANoAliasImpl {
2525   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2526 
2527   /// See AbstractAttribute::updateImpl(...).
2528   virtual ChangeStatus updateImpl(Attributor &A) override {
2529 
2530     auto CheckReturnValue = [&](Value &RV) -> bool {
2531       if (Constant *C = dyn_cast<Constant>(&RV))
2532         if (C->isNullValue() || isa<UndefValue>(C))
2533           return true;
2534 
2535       /// For now, we can only deduce noalias if we have call sites.
2536       /// FIXME: add more support.
2537       ImmutableCallSite ICS(&RV);
2538       if (!ICS)
2539         return false;
2540 
2541       const IRPosition &RVPos = IRPosition::value(RV);
2542       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2543       if (!NoAliasAA.isAssumedNoAlias())
2544         return false;
2545 
2546       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2547       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2548     };
2549 
2550     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2551       return indicatePessimisticFixpoint();
2552 
2553     return ChangeStatus::UNCHANGED;
2554   }
2555 
2556   /// See AbstractAttribute::trackStatistics()
2557   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2558 };
2559 
2560 /// NoAlias attribute deduction for a call site return value.
2561 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2562   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2563 
2564   /// See AbstractAttribute::initialize(...).
2565   void initialize(Attributor &A) override {
2566     AANoAliasImpl::initialize(A);
2567     Function *F = getAssociatedFunction();
2568     if (!F)
2569       indicatePessimisticFixpoint();
2570   }
2571 
2572   /// See AbstractAttribute::updateImpl(...).
2573   ChangeStatus updateImpl(Attributor &A) override {
2574     // TODO: Once we have call site specific value information we can provide
2575     //       call site specific liveness information and then it makes
2576     //       sense to specialize attributes for call sites arguments instead of
2577     //       redirecting requests to the callee argument.
2578     Function *F = getAssociatedFunction();
2579     const IRPosition &FnPos = IRPosition::returned(*F);
2580     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2581     return clampStateAndIndicateChange(
2582         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2583   }
2584 
2585   /// See AbstractAttribute::trackStatistics()
2586   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2587 };
2588 
2589 /// -------------------AAIsDead Function Attribute-----------------------
2590 
2591 struct AAIsDeadValueImpl : public AAIsDead {
2592   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2593 
2594   /// See AAIsDead::isAssumedDead().
2595   bool isAssumedDead() const override { return getAssumed(); }
2596 
2597   /// See AAIsDead::isAssumedDead(BasicBlock *).
2598   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2599 
2600   /// See AAIsDead::isKnownDead(BasicBlock *).
2601   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2602 
2603   /// See AAIsDead::isAssumedDead(Instruction *I).
2604   bool isAssumedDead(const Instruction *I) const override {
2605     return I == getCtxI() && isAssumedDead();
2606   }
2607 
2608   /// See AAIsDead::isKnownDead(Instruction *I).
2609   bool isKnownDead(const Instruction *I) const override {
2610     return I == getCtxI() && getKnown();
2611   }
2612 
2613   /// See AbstractAttribute::getAsStr().
2614   const std::string getAsStr() const override {
2615     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2616   }
2617 };
2618 
2619 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2620   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2621 
2622   /// See AbstractAttribute::initialize(...).
2623   void initialize(Attributor &A) override {
2624     if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2625       if (!wouldInstructionBeTriviallyDead(I))
2626         indicatePessimisticFixpoint();
2627     if (isa<UndefValue>(getAssociatedValue()))
2628       indicatePessimisticFixpoint();
2629   }
2630 
2631   /// See AbstractAttribute::updateImpl(...).
2632   ChangeStatus updateImpl(Attributor &A) override {
2633     auto UsePred = [&](const Use &U, bool &Follow) {
2634       Instruction *UserI = cast<Instruction>(U.getUser());
2635       if (CallSite CS = CallSite(UserI)) {
2636         if (!CS.isArgOperand(&U))
2637           return false;
2638         const IRPosition &CSArgPos =
2639             IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2640         const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2641         return CSArgIsDead.isAssumedDead();
2642       }
2643       if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2644         const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2645         const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2646         return RetIsDeadAA.isAssumedDead();
2647       }
2648       Follow = true;
2649       return wouldInstructionBeTriviallyDead(UserI);
2650     };
2651 
2652     if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2653       return indicatePessimisticFixpoint();
2654     return ChangeStatus::UNCHANGED;
2655   }
2656 
2657   /// See AbstractAttribute::manifest(...).
2658   ChangeStatus manifest(Attributor &A) override {
2659     Value &V = getAssociatedValue();
2660     if (auto *I = dyn_cast<Instruction>(&V))
2661       if (wouldInstructionBeTriviallyDead(I)) {
2662         A.deleteAfterManifest(*I);
2663         return ChangeStatus::CHANGED;
2664       }
2665 
2666     if (V.use_empty())
2667       return ChangeStatus::UNCHANGED;
2668 
2669     UndefValue &UV = *UndefValue::get(V.getType());
2670     bool AnyChange = A.changeValueAfterManifest(V, UV);
2671     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2672   }
2673 
2674   /// See AbstractAttribute::trackStatistics()
2675   void trackStatistics() const override {
2676     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2677   }
2678 };
2679 
2680 struct AAIsDeadArgument : public AAIsDeadFloating {
2681   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2682 
2683   /// See AbstractAttribute::initialize(...).
2684   void initialize(Attributor &A) override {
2685     if (!getAssociatedFunction()->hasExactDefinition())
2686       indicatePessimisticFixpoint();
2687   }
2688 
2689   /// See AbstractAttribute::manifest(...).
2690   ChangeStatus manifest(Attributor &A) override {
2691     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2692     Argument &Arg = *getAssociatedArgument();
2693     if (Arg.getParent()->hasLocalLinkage())
2694       if (A.registerFunctionSignatureRewrite(
2695               Arg, /* ReplacementTypes */ {},
2696               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2697               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
2698         return ChangeStatus::CHANGED;
2699     return Changed;
2700   }
2701 
2702   /// See AbstractAttribute::trackStatistics()
2703   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2704 };
2705 
2706 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2707   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2708 
2709   /// See AbstractAttribute::initialize(...).
2710   void initialize(Attributor &A) override {
2711     if (isa<UndefValue>(getAssociatedValue()))
2712       indicatePessimisticFixpoint();
2713   }
2714 
2715   /// See AbstractAttribute::updateImpl(...).
2716   ChangeStatus updateImpl(Attributor &A) override {
2717     // TODO: Once we have call site specific value information we can provide
2718     //       call site specific liveness information and then it makes
2719     //       sense to specialize attributes for call sites arguments instead of
2720     //       redirecting requests to the callee argument.
2721     Argument *Arg = getAssociatedArgument();
2722     if (!Arg)
2723       return indicatePessimisticFixpoint();
2724     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2725     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2726     return clampStateAndIndicateChange(
2727         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2728   }
2729 
2730   /// See AbstractAttribute::manifest(...).
2731   ChangeStatus manifest(Attributor &A) override {
2732     CallBase &CB = cast<CallBase>(getAnchorValue());
2733     Use &U = CB.getArgOperandUse(getArgNo());
2734     assert(!isa<UndefValue>(U.get()) &&
2735            "Expected undef values to be filtered out!");
2736     UndefValue &UV = *UndefValue::get(U->getType());
2737     if (A.changeUseAfterManifest(U, UV))
2738       return ChangeStatus::CHANGED;
2739     return ChangeStatus::UNCHANGED;
2740   }
2741 
2742   /// See AbstractAttribute::trackStatistics()
2743   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2744 };
2745 
2746 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2747   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2748 
2749   /// See AbstractAttribute::updateImpl(...).
2750   ChangeStatus updateImpl(Attributor &A) override {
2751 
2752     auto PredForCallSite = [&](AbstractCallSite ACS) {
2753       if (ACS.isCallbackCall())
2754         return false;
2755       const IRPosition &CSRetPos =
2756           IRPosition::callsite_returned(ACS.getCallSite());
2757       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2758       return RetIsDeadAA.isAssumedDead();
2759     };
2760 
2761     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
2762       return indicatePessimisticFixpoint();
2763 
2764     return ChangeStatus::UNCHANGED;
2765   }
2766 
2767   /// See AbstractAttribute::manifest(...).
2768   ChangeStatus manifest(Attributor &A) override {
2769     // TODO: Rewrite the signature to return void?
2770     bool AnyChange = false;
2771     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2772     auto RetInstPred = [&](Instruction &I) {
2773       ReturnInst &RI = cast<ReturnInst>(I);
2774       if (!isa<UndefValue>(RI.getReturnValue()))
2775         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2776       return true;
2777     };
2778     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2779     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2780   }
2781 
2782   /// See AbstractAttribute::trackStatistics()
2783   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2784 };
2785 
2786 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2787   AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2788 
2789   /// See AbstractAttribute::initialize(...).
2790   void initialize(Attributor &A) override {}
2791 
2792   /// See AbstractAttribute::trackStatistics()
2793   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
2794 };
2795 
2796 struct AAIsDeadFunction : public AAIsDead {
2797   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2798 
2799   /// See AbstractAttribute::initialize(...).
2800   void initialize(Attributor &A) override {
2801     const Function *F = getAssociatedFunction();
2802     if (F && !F->isDeclaration()) {
2803       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2804       assumeLive(A, F->getEntryBlock());
2805     }
2806   }
2807 
2808   /// See AbstractAttribute::getAsStr().
2809   const std::string getAsStr() const override {
2810     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2811            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
2812            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2813            std::to_string(KnownDeadEnds.size()) + "]";
2814   }
2815 
2816   /// See AbstractAttribute::manifest(...).
2817   ChangeStatus manifest(Attributor &A) override {
2818     assert(getState().isValidState() &&
2819            "Attempted to manifest an invalid state!");
2820 
2821     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2822     Function &F = *getAssociatedFunction();
2823 
2824     if (AssumedLiveBlocks.empty()) {
2825       A.deleteAfterManifest(F);
2826       return ChangeStatus::CHANGED;
2827     }
2828 
2829     // Flag to determine if we can change an invoke to a call assuming the
2830     // callee is nounwind. This is not possible if the personality of the
2831     // function allows to catch asynchronous exceptions.
2832     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2833 
2834     KnownDeadEnds.set_union(ToBeExploredFrom);
2835     for (const Instruction *DeadEndI : KnownDeadEnds) {
2836       auto *CB = dyn_cast<CallBase>(DeadEndI);
2837       if (!CB)
2838         continue;
2839       const auto &NoReturnAA =
2840           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2841       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2842       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2843         continue;
2844       Instruction *I = const_cast<Instruction *>(DeadEndI);
2845       BasicBlock *BB = I->getParent();
2846       Instruction *SplitPos = I->getNextNode();
2847       // TODO: mark stuff before unreachable instructions as dead.
2848 
2849       if (auto *II = dyn_cast<InvokeInst>(I)) {
2850         // If we keep the invoke the split position is at the beginning of the
2851         // normal desitination block (it invokes a noreturn function after all).
2852         BasicBlock *NormalDestBB = II->getNormalDest();
2853         SplitPos = &NormalDestBB->front();
2854 
2855         /// Invoke is replaced with a call and unreachable is placed after it if
2856         /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
2857         /// and only place an unreachable in the normal successor.
2858         if (Invoke2CallAllowed) {
2859           if (II->getCalledFunction()) {
2860             const IRPosition &IPos = IRPosition::callsite_function(*II);
2861             const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
2862             if (AANoUnw.isAssumedNoUnwind()) {
2863               LLVM_DEBUG(dbgs()
2864                          << "[AAIsDead] Replace invoke with call inst\n");
2865               CallInst *CI = createCallMatchingInvoke(II);
2866               CI->insertBefore(II);
2867               CI->takeName(II);
2868               replaceAllInstructionUsesWith(*II, *CI);
2869 
2870               // If this is a nounwind + mayreturn invoke we only remove the
2871               // unwind edge. This is done by moving the invoke into a new and
2872               // dead block and connecting the normal destination of the invoke
2873               // with a branch that follows the call replacement we created
2874               // above.
2875               if (MayReturn) {
2876                 BasicBlock *NewDeadBB =
2877                     SplitBlock(BB, II, nullptr, nullptr, nullptr, ".i2c");
2878                 assert(isa<BranchInst>(BB->getTerminator()) &&
2879                        BB->getTerminator()->getNumSuccessors() == 1 &&
2880                        BB->getTerminator()->getSuccessor(0) == NewDeadBB);
2881                 new UnreachableInst(I->getContext(), NewDeadBB);
2882                 BB->getTerminator()->setOperand(0, NormalDestBB);
2883                 A.deleteAfterManifest(*II);
2884                 continue;
2885               }
2886 
2887               // We do not need an invoke (II) but instead want a call followed
2888               // by an unreachable. However, we do not remove II as other
2889               // abstract attributes might have it cached as part of their
2890               // results. Given that we modify the CFG anyway, we simply keep II
2891               // around but in a new dead block. To avoid II being live through
2892               // a different edge we have to ensure the block we place it in is
2893               // only reached from the current block of II and then not reached
2894               // at all when we insert the unreachable.
2895               SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
2896               SplitPos = CI->getNextNode();
2897             }
2898           }
2899         }
2900 
2901         if (SplitPos == &NormalDestBB->front()) {
2902           // If this is an invoke of a noreturn function the edge to the normal
2903           // destination block is dead but not necessarily the block itself.
2904           // TODO: We need to move to an edge based system during deduction and
2905           //       also manifest.
2906           assert(!NormalDestBB->isLandingPad() &&
2907                  "Expected the normal destination not to be a landingpad!");
2908           if (NormalDestBB->getUniquePredecessor() == BB) {
2909             assumeLive(A, *NormalDestBB);
2910           } else {
2911             BasicBlock *SplitBB =
2912                 SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2913             // The split block is live even if it contains only an unreachable
2914             // instruction at the end.
2915             assumeLive(A, *SplitBB);
2916             SplitPos = SplitBB->getTerminator();
2917             HasChanged = ChangeStatus::CHANGED;
2918           }
2919         }
2920       }
2921 
2922       if (isa_and_nonnull<UnreachableInst>(SplitPos))
2923         continue;
2924 
2925       BB = SplitPos->getParent();
2926       SplitBlock(BB, SplitPos);
2927       A.changeToUnreachableAfterManifest(BB->getTerminator());
2928       HasChanged = ChangeStatus::CHANGED;
2929     }
2930 
2931     for (BasicBlock &BB : F)
2932       if (!AssumedLiveBlocks.count(&BB))
2933         A.deleteAfterManifest(BB);
2934 
2935     return HasChanged;
2936   }
2937 
2938   /// See AbstractAttribute::updateImpl(...).
2939   ChangeStatus updateImpl(Attributor &A) override;
2940 
2941   /// See AbstractAttribute::trackStatistics()
2942   void trackStatistics() const override {}
2943 
2944   /// Returns true if the function is assumed dead.
2945   bool isAssumedDead() const override { return false; }
2946 
2947   /// See AAIsDead::isAssumedDead(BasicBlock *).
2948   bool isAssumedDead(const BasicBlock *BB) const override {
2949     assert(BB->getParent() == getAssociatedFunction() &&
2950            "BB must be in the same anchor scope function.");
2951 
2952     if (!getAssumed())
2953       return false;
2954     return !AssumedLiveBlocks.count(BB);
2955   }
2956 
2957   /// See AAIsDead::isKnownDead(BasicBlock *).
2958   bool isKnownDead(const BasicBlock *BB) const override {
2959     return getKnown() && isAssumedDead(BB);
2960   }
2961 
2962   /// See AAIsDead::isAssumed(Instruction *I).
2963   bool isAssumedDead(const Instruction *I) const override {
2964     assert(I->getParent()->getParent() == getAssociatedFunction() &&
2965            "Instruction must be in the same anchor scope function.");
2966 
2967     if (!getAssumed())
2968       return false;
2969 
2970     // If it is not in AssumedLiveBlocks then it for sure dead.
2971     // Otherwise, it can still be after noreturn call in a live block.
2972     if (!AssumedLiveBlocks.count(I->getParent()))
2973       return true;
2974 
2975     // If it is not after a liveness barrier it is live.
2976     const Instruction *PrevI = I->getPrevNode();
2977     while (PrevI) {
2978       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2979         return true;
2980       PrevI = PrevI->getPrevNode();
2981     }
2982     return false;
2983   }
2984 
2985   /// See AAIsDead::isKnownDead(Instruction *I).
2986   bool isKnownDead(const Instruction *I) const override {
2987     return getKnown() && isAssumedDead(I);
2988   }
2989 
2990   /// Determine if \p F might catch asynchronous exceptions.
2991   static bool mayCatchAsynchronousExceptions(const Function &F) {
2992     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2993   }
2994 
2995   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2996   /// that internal function called from \p BB should now be looked at.
2997   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2998     if (!AssumedLiveBlocks.insert(&BB).second)
2999       return false;
3000 
3001     // We assume that all of BB is (probably) live now and if there are calls to
3002     // internal functions we will assume that those are now live as well. This
3003     // is a performance optimization for blocks with calls to a lot of internal
3004     // functions. It can however cause dead functions to be treated as live.
3005     for (const Instruction &I : BB)
3006       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
3007         if (const Function *F = ICS.getCalledFunction())
3008           if (F->hasLocalLinkage())
3009             A.markLiveInternalFunction(*F);
3010     return true;
3011   }
3012 
3013   /// Collection of instructions that need to be explored again, e.g., we
3014   /// did assume they do not transfer control to (one of their) successors.
3015   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3016 
3017   /// Collection of instructions that are known to not transfer control.
3018   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3019 
3020   /// Collection of all assumed live BasicBlocks.
3021   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3022 };
3023 
3024 static bool
3025 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3026                         AbstractAttribute &AA,
3027                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3028   const IRPosition &IPos = IRPosition::callsite_function(CB);
3029 
3030   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3031   if (NoReturnAA.isAssumedNoReturn())
3032     return !NoReturnAA.isKnownNoReturn();
3033   if (CB.isTerminator())
3034     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3035   else
3036     AliveSuccessors.push_back(CB.getNextNode());
3037   return false;
3038 }
3039 
3040 static bool
3041 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3042                         AbstractAttribute &AA,
3043                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3044   bool UsedAssumedInformation =
3045       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3046 
3047   // First, determine if we can change an invoke to a call assuming the
3048   // callee is nounwind. This is not possible if the personality of the
3049   // function allows to catch asynchronous exceptions.
3050   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3051     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3052   } else {
3053     const IRPosition &IPos = IRPosition::callsite_function(II);
3054     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3055     if (AANoUnw.isAssumedNoUnwind()) {
3056       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3057     } else {
3058       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3059     }
3060   }
3061   return UsedAssumedInformation;
3062 }
3063 
3064 static Optional<ConstantInt *>
3065 getAssumedConstant(Attributor &A, const Value &V, AbstractAttribute &AA,
3066                    bool &UsedAssumedInformation) {
3067   const auto &ValueSimplifyAA =
3068       A.getAAFor<AAValueSimplify>(AA, IRPosition::value(V));
3069   Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
3070   UsedAssumedInformation |= !ValueSimplifyAA.isKnown();
3071   if (!SimplifiedV.hasValue())
3072     return llvm::None;
3073   if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue()))
3074     return llvm::None;
3075   return dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
3076 }
3077 
3078 static bool
3079 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3080                         AbstractAttribute &AA,
3081                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3082   bool UsedAssumedInformation = false;
3083   if (BI.getNumSuccessors() == 1) {
3084     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3085   } else {
3086     Optional<ConstantInt *> CI =
3087         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
3088     if (!CI.hasValue()) {
3089       // No value yet, assume both edges are dead.
3090     } else if (CI.getValue()) {
3091       const BasicBlock *SuccBB =
3092           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3093       AliveSuccessors.push_back(&SuccBB->front());
3094     } else {
3095       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3096       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3097       UsedAssumedInformation = false;
3098     }
3099   }
3100   return UsedAssumedInformation;
3101 }
3102 
3103 static bool
3104 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3105                         AbstractAttribute &AA,
3106                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3107   bool UsedAssumedInformation = false;
3108   Optional<ConstantInt *> CI =
3109       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
3110   if (!CI.hasValue()) {
3111     // No value yet, assume all edges are dead.
3112   } else if (CI.getValue()) {
3113     for (auto &CaseIt : SI.cases()) {
3114       if (CaseIt.getCaseValue() == CI.getValue()) {
3115         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3116         return UsedAssumedInformation;
3117       }
3118     }
3119     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3120     return UsedAssumedInformation;
3121   } else {
3122     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3123       AliveSuccessors.push_back(&SuccBB->front());
3124   }
3125   return UsedAssumedInformation;
3126 }
3127 
3128 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3129   ChangeStatus Change = ChangeStatus::UNCHANGED;
3130 
3131   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3132                     << getAssociatedFunction()->size() << "] BBs and "
3133                     << ToBeExploredFrom.size() << " exploration points and "
3134                     << KnownDeadEnds.size() << " known dead ends\n");
3135 
3136   // Copy and clear the list of instructions we need to explore from. It is
3137   // refilled with instructions the next update has to look at.
3138   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3139                                                ToBeExploredFrom.end());
3140   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3141 
3142   SmallVector<const Instruction *, 8> AliveSuccessors;
3143   while (!Worklist.empty()) {
3144     const Instruction *I = Worklist.pop_back_val();
3145     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3146 
3147     AliveSuccessors.clear();
3148 
3149     bool UsedAssumedInformation = false;
3150     switch (I->getOpcode()) {
3151     // TODO: look for (assumed) UB to backwards propagate "deadness".
3152     default:
3153       if (I->isTerminator()) {
3154         for (const BasicBlock *SuccBB : successors(I->getParent()))
3155           AliveSuccessors.push_back(&SuccBB->front());
3156       } else {
3157         AliveSuccessors.push_back(I->getNextNode());
3158       }
3159       break;
3160     case Instruction::Call:
3161       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3162                                                        *this, AliveSuccessors);
3163       break;
3164     case Instruction::Invoke:
3165       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3166                                                        *this, AliveSuccessors);
3167       break;
3168     case Instruction::Br:
3169       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3170                                                        *this, AliveSuccessors);
3171       break;
3172     case Instruction::Switch:
3173       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3174                                                        *this, AliveSuccessors);
3175       break;
3176     }
3177 
3178     if (UsedAssumedInformation) {
3179       NewToBeExploredFrom.insert(I);
3180     } else {
3181       Change = ChangeStatus::CHANGED;
3182       if (AliveSuccessors.empty() ||
3183           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3184         KnownDeadEnds.insert(I);
3185     }
3186 
3187     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3188                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3189                       << UsedAssumedInformation << "\n");
3190 
3191     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3192       if (!I->isTerminator()) {
3193         assert(AliveSuccessors.size() == 1 &&
3194                "Non-terminator expected to have a single successor!");
3195         Worklist.push_back(AliveSuccessor);
3196       } else {
3197         if (assumeLive(A, *AliveSuccessor->getParent()))
3198           Worklist.push_back(AliveSuccessor);
3199       }
3200     }
3201   }
3202 
3203   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3204 
3205   // If we know everything is live there is no need to query for liveness.
3206   // Instead, indicating a pessimistic fixpoint will cause the state to be
3207   // "invalid" and all queries to be answered conservatively without lookups.
3208   // To be in this state we have to (1) finished the exploration and (3) not
3209   // discovered any non-trivial dead end and (2) not ruled unreachable code
3210   // dead.
3211   if (ToBeExploredFrom.empty() &&
3212       getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
3213       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3214         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3215       }))
3216     return indicatePessimisticFixpoint();
3217   return Change;
3218 }
3219 
3220 /// Liveness information for a call sites.
3221 struct AAIsDeadCallSite final : AAIsDeadFunction {
3222   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3223 
3224   /// See AbstractAttribute::initialize(...).
3225   void initialize(Attributor &A) override {
3226     // TODO: Once we have call site specific value information we can provide
3227     //       call site specific liveness information and then it makes
3228     //       sense to specialize attributes for call sites instead of
3229     //       redirecting requests to the callee.
3230     llvm_unreachable("Abstract attributes for liveness are not "
3231                      "supported for call sites yet!");
3232   }
3233 
3234   /// See AbstractAttribute::updateImpl(...).
3235   ChangeStatus updateImpl(Attributor &A) override {
3236     return indicatePessimisticFixpoint();
3237   }
3238 
3239   /// See AbstractAttribute::trackStatistics()
3240   void trackStatistics() const override {}
3241 };
3242 
3243 /// -------------------- Dereferenceable Argument Attribute --------------------
3244 
3245 template <>
3246 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3247                                                      const DerefState &R) {
3248   ChangeStatus CS0 =
3249       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3250   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3251   return CS0 | CS1;
3252 }
3253 
3254 struct AADereferenceableImpl : AADereferenceable {
3255   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
3256   using StateType = DerefState;
3257 
3258   void initialize(Attributor &A) override {
3259     SmallVector<Attribute, 4> Attrs;
3260     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3261              Attrs);
3262     for (const Attribute &Attr : Attrs)
3263       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3264 
3265     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
3266 
3267     const IRPosition &IRP = this->getIRPosition();
3268     bool IsFnInterface = IRP.isFnInterfaceKind();
3269     const Function *FnScope = IRP.getAnchorScope();
3270     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
3271       indicatePessimisticFixpoint();
3272   }
3273 
3274   /// See AbstractAttribute::getState()
3275   /// {
3276   StateType &getState() override { return *this; }
3277   const StateType &getState() const override { return *this; }
3278   /// }
3279 
3280   /// Helper function for collecting accessed bytes in must-be-executed-context
3281   void addAccessedBytesForUse(Attributor &A, const Use *U,
3282                               const Instruction *I) {
3283     const Value *UseV = U->get();
3284     if (!UseV->getType()->isPointerTy())
3285       return;
3286 
3287     Type *PtrTy = UseV->getType();
3288     const DataLayout &DL = A.getDataLayout();
3289     int64_t Offset;
3290     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3291             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3292       if (Base == &getAssociatedValue() &&
3293           Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3294         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3295         addAccessedBytes(Offset, Size);
3296       }
3297     }
3298     return;
3299   }
3300 
3301   /// See AAFromMustBeExecutedContext
3302   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3303     bool IsNonNull = false;
3304     bool TrackUse = false;
3305     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3306         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3307 
3308     addAccessedBytesForUse(A, U, I);
3309     takeKnownDerefBytesMaximum(DerefBytes);
3310     return TrackUse;
3311   }
3312 
3313   /// See AbstractAttribute::manifest(...).
3314   ChangeStatus manifest(Attributor &A) override {
3315     ChangeStatus Change = AADereferenceable::manifest(A);
3316     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3317       removeAttrs({Attribute::DereferenceableOrNull});
3318       return ChangeStatus::CHANGED;
3319     }
3320     return Change;
3321   }
3322 
3323   void getDeducedAttributes(LLVMContext &Ctx,
3324                             SmallVectorImpl<Attribute> &Attrs) const override {
3325     // TODO: Add *_globally support
3326     if (isAssumedNonNull())
3327       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3328           Ctx, getAssumedDereferenceableBytes()));
3329     else
3330       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3331           Ctx, getAssumedDereferenceableBytes()));
3332   }
3333 
3334   /// See AbstractAttribute::getAsStr().
3335   const std::string getAsStr() const override {
3336     if (!getAssumedDereferenceableBytes())
3337       return "unknown-dereferenceable";
3338     return std::string("dereferenceable") +
3339            (isAssumedNonNull() ? "" : "_or_null") +
3340            (isAssumedGlobal() ? "_globally" : "") + "<" +
3341            std::to_string(getKnownDereferenceableBytes()) + "-" +
3342            std::to_string(getAssumedDereferenceableBytes()) + ">";
3343   }
3344 };
3345 
3346 /// Dereferenceable attribute for a floating value.
3347 struct AADereferenceableFloating
3348     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3349   using Base =
3350       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3351   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
3352 
3353   /// See AbstractAttribute::updateImpl(...).
3354   ChangeStatus updateImpl(Attributor &A) override {
3355     ChangeStatus Change = Base::updateImpl(A);
3356 
3357     const DataLayout &DL = A.getDataLayout();
3358 
3359     auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
3360       unsigned IdxWidth =
3361           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3362       APInt Offset(IdxWidth, 0);
3363       const Value *Base =
3364           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3365 
3366       const auto &AA =
3367           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3368       int64_t DerefBytes = 0;
3369       if (!Stripped && this == &AA) {
3370         // Use IR information if we did not strip anything.
3371         // TODO: track globally.
3372         bool CanBeNull;
3373         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3374         T.GlobalState.indicatePessimisticFixpoint();
3375       } else {
3376         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3377         DerefBytes = DS.DerefBytesState.getAssumed();
3378         T.GlobalState &= DS.GlobalState;
3379       }
3380 
3381       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3382 
3383       // For now we do not try to "increase" dereferenceability due to negative
3384       // indices as we first have to come up with code to deal with loops and
3385       // for overflows of the dereferenceable bytes.
3386       int64_t OffsetSExt = Offset.getSExtValue();
3387       if (OffsetSExt < 0)
3388         OffsetSExt = 0;
3389 
3390       T.takeAssumedDerefBytesMinimum(
3391           std::max(int64_t(0), DerefBytes - OffsetSExt));
3392 
3393       if (this == &AA) {
3394         if (!Stripped) {
3395           // If nothing was stripped IR information is all we got.
3396           T.takeKnownDerefBytesMaximum(
3397               std::max(int64_t(0), DerefBytes - OffsetSExt));
3398           T.indicatePessimisticFixpoint();
3399         } else if (OffsetSExt > 0) {
3400           // If something was stripped but there is circular reasoning we look
3401           // for the offset. If it is positive we basically decrease the
3402           // dereferenceable bytes in a circluar loop now, which will simply
3403           // drive them down to the known value in a very slow way which we
3404           // can accelerate.
3405           T.indicatePessimisticFixpoint();
3406         }
3407       }
3408 
3409       return T.isValidState();
3410     };
3411 
3412     DerefState T;
3413     if (!genericValueTraversal<AADereferenceable, DerefState>(
3414             A, getIRPosition(), *this, T, VisitValueCB))
3415       return indicatePessimisticFixpoint();
3416 
3417     return Change | clampStateAndIndicateChange(getState(), T);
3418   }
3419 
3420   /// See AbstractAttribute::trackStatistics()
3421   void trackStatistics() const override {
3422     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3423   }
3424 };
3425 
3426 /// Dereferenceable attribute for a return value.
3427 struct AADereferenceableReturned final
3428     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3429                                    DerefState> {
3430   AADereferenceableReturned(const IRPosition &IRP)
3431       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3432                                      DerefState>(IRP) {}
3433 
3434   /// See AbstractAttribute::trackStatistics()
3435   void trackStatistics() const override {
3436     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3437   }
3438 };
3439 
3440 /// Dereferenceable attribute for an argument
3441 struct AADereferenceableArgument final
3442     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3443           AADereferenceable, AADereferenceableImpl, DerefState> {
3444   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3445       AADereferenceable, AADereferenceableImpl, DerefState>;
3446   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3447 
3448   /// See AbstractAttribute::trackStatistics()
3449   void trackStatistics() const override {
3450     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3451   }
3452 };
3453 
3454 /// Dereferenceable attribute for a call site argument.
3455 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3456   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3457       : AADereferenceableFloating(IRP) {}
3458 
3459   /// See AbstractAttribute::trackStatistics()
3460   void trackStatistics() const override {
3461     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3462   }
3463 };
3464 
3465 /// Dereferenceable attribute deduction for a call site return value.
3466 struct AADereferenceableCallSiteReturned final
3467     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3468           AADereferenceable, AADereferenceableImpl> {
3469   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3470       AADereferenceable, AADereferenceableImpl>;
3471   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3472 
3473   /// See AbstractAttribute::trackStatistics()
3474   void trackStatistics() const override {
3475     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3476   }
3477 };
3478 
3479 // ------------------------ Align Argument Attribute ------------------------
3480 
3481 static unsigned int getKnownAlignForUse(Attributor &A,
3482                                         AbstractAttribute &QueryingAA,
3483                                         Value &AssociatedValue, const Use *U,
3484                                         const Instruction *I, bool &TrackUse) {
3485   // We need to follow common pointer manipulation uses to the accesses they
3486   // feed into.
3487   if (isa<CastInst>(I)) {
3488     TrackUse = true;
3489     return 0;
3490   }
3491   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3492     if (GEP->hasAllConstantIndices()) {
3493       TrackUse = true;
3494       return 0;
3495     }
3496   }
3497 
3498   unsigned Alignment = 0;
3499   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3500     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3501       return 0;
3502 
3503     unsigned ArgNo = ICS.getArgumentNo(U);
3504     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3505     // As long as we only use known information there is no need to track
3506     // dependences here.
3507     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3508                                         /* TrackDependence */ false);
3509     Alignment = AlignAA.getKnownAlign();
3510   }
3511 
3512   const Value *UseV = U->get();
3513   if (auto *SI = dyn_cast<StoreInst>(I))
3514     Alignment = SI->getAlignment();
3515   else if (auto *LI = dyn_cast<LoadInst>(I))
3516     Alignment = LI->getAlignment();
3517 
3518   if (Alignment <= 1)
3519     return 0;
3520 
3521   auto &DL = A.getDataLayout();
3522   int64_t Offset;
3523 
3524   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3525     if (Base == &AssociatedValue) {
3526       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3527       // So we can say that the maximum power of two which is a divisor of
3528       // gcd(Offset, Alignment) is an alignment.
3529 
3530       uint32_t gcd =
3531           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3532       Alignment = llvm::PowerOf2Floor(gcd);
3533     }
3534   }
3535 
3536   return Alignment;
3537 }
3538 struct AAAlignImpl : AAAlign {
3539   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3540 
3541   /// See AbstractAttribute::initialize(...).
3542   void initialize(Attributor &A) override {
3543     SmallVector<Attribute, 4> Attrs;
3544     getAttrs({Attribute::Alignment}, Attrs);
3545     for (const Attribute &Attr : Attrs)
3546       takeKnownMaximum(Attr.getValueAsInt());
3547 
3548     if (getIRPosition().isFnInterfaceKind() &&
3549         (!getAssociatedFunction() ||
3550          !getAssociatedFunction()->hasExactDefinition()))
3551       indicatePessimisticFixpoint();
3552   }
3553 
3554   /// See AbstractAttribute::manifest(...).
3555   ChangeStatus manifest(Attributor &A) override {
3556     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3557 
3558     // Check for users that allow alignment annotations.
3559     Value &AnchorVal = getIRPosition().getAnchorValue();
3560     for (const Use &U : AnchorVal.uses()) {
3561       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3562         if (SI->getPointerOperand() == &AnchorVal)
3563           if (SI->getAlignment() < getAssumedAlign()) {
3564             STATS_DECLTRACK(AAAlign, Store,
3565                             "Number of times alignemnt added to a store");
3566             SI->setAlignment(Align(getAssumedAlign()));
3567             Changed = ChangeStatus::CHANGED;
3568           }
3569       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3570         if (LI->getPointerOperand() == &AnchorVal)
3571           if (LI->getAlignment() < getAssumedAlign()) {
3572             LI->setAlignment(Align(getAssumedAlign()));
3573             STATS_DECLTRACK(AAAlign, Load,
3574                             "Number of times alignemnt added to a load");
3575             Changed = ChangeStatus::CHANGED;
3576           }
3577       }
3578     }
3579 
3580     return AAAlign::manifest(A) | Changed;
3581   }
3582 
3583   // TODO: Provide a helper to determine the implied ABI alignment and check in
3584   //       the existing manifest method and a new one for AAAlignImpl that value
3585   //       to avoid making the alignment explicit if it did not improve.
3586 
3587   /// See AbstractAttribute::getDeducedAttributes
3588   virtual void
3589   getDeducedAttributes(LLVMContext &Ctx,
3590                        SmallVectorImpl<Attribute> &Attrs) const override {
3591     if (getAssumedAlign() > 1)
3592       Attrs.emplace_back(
3593           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3594   }
3595   /// See AAFromMustBeExecutedContext
3596   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3597     bool TrackUse = false;
3598 
3599     unsigned int KnownAlign =
3600         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3601     takeKnownMaximum(KnownAlign);
3602 
3603     return TrackUse;
3604   }
3605 
3606   /// See AbstractAttribute::getAsStr().
3607   const std::string getAsStr() const override {
3608     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3609                                 "-" + std::to_string(getAssumedAlign()) + ">")
3610                              : "unknown-align";
3611   }
3612 };
3613 
3614 /// Align attribute for a floating value.
3615 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3616   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3617   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3618 
3619   /// See AbstractAttribute::updateImpl(...).
3620   ChangeStatus updateImpl(Attributor &A) override {
3621     Base::updateImpl(A);
3622 
3623     const DataLayout &DL = A.getDataLayout();
3624 
3625     auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3626                             bool Stripped) -> bool {
3627       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3628       if (!Stripped && this == &AA) {
3629         // Use only IR information if we did not strip anything.
3630         const MaybeAlign PA = V.getPointerAlignment(DL);
3631         T.takeKnownMaximum(PA ? PA->value() : 0);
3632         T.indicatePessimisticFixpoint();
3633       } else {
3634         // Use abstract attribute information.
3635         const AAAlign::StateType &DS =
3636             static_cast<const AAAlign::StateType &>(AA.getState());
3637         T ^= DS;
3638       }
3639       return T.isValidState();
3640     };
3641 
3642     StateType T;
3643     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3644                                                    VisitValueCB))
3645       return indicatePessimisticFixpoint();
3646 
3647     // TODO: If we know we visited all incoming values, thus no are assumed
3648     // dead, we can take the known information from the state T.
3649     return clampStateAndIndicateChange(getState(), T);
3650   }
3651 
3652   /// See AbstractAttribute::trackStatistics()
3653   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3654 };
3655 
3656 /// Align attribute for function return value.
3657 struct AAAlignReturned final
3658     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3659   AAAlignReturned(const IRPosition &IRP)
3660       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3661 
3662   /// See AbstractAttribute::trackStatistics()
3663   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3664 };
3665 
3666 /// Align attribute for function argument.
3667 struct AAAlignArgument final
3668     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3669                                                               AAAlignImpl> {
3670   AAAlignArgument(const IRPosition &IRP)
3671       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3672                                                                 AAAlignImpl>(
3673             IRP) {}
3674 
3675   /// See AbstractAttribute::trackStatistics()
3676   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3677 };
3678 
3679 struct AAAlignCallSiteArgument final : AAAlignFloating {
3680   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3681 
3682   /// See AbstractAttribute::manifest(...).
3683   ChangeStatus manifest(Attributor &A) override {
3684     return AAAlignImpl::manifest(A);
3685   }
3686 
3687   /// See AbstractAttribute::updateImpl(Attributor &A).
3688   ChangeStatus updateImpl(Attributor &A) override {
3689     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3690     if (Argument *Arg = getAssociatedArgument()) {
3691       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3692           *this, IRPosition::argument(*Arg), /* TrackDependence */ false,
3693           DepClassTy::OPTIONAL);
3694       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3695     }
3696     return Changed;
3697   }
3698 
3699   /// See AbstractAttribute::trackStatistics()
3700   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3701 };
3702 
3703 /// Align attribute deduction for a call site return value.
3704 struct AAAlignCallSiteReturned final
3705     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3706                                                              AAAlignImpl> {
3707   using Base =
3708       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3709                                                              AAAlignImpl>;
3710   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3711 
3712   /// See AbstractAttribute::initialize(...).
3713   void initialize(Attributor &A) override {
3714     Base::initialize(A);
3715     Function *F = getAssociatedFunction();
3716     if (!F)
3717       indicatePessimisticFixpoint();
3718   }
3719 
3720   /// See AbstractAttribute::trackStatistics()
3721   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3722 };
3723 
3724 /// ------------------ Function No-Return Attribute ----------------------------
3725 struct AANoReturnImpl : public AANoReturn {
3726   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3727 
3728   /// See AbstractAttribute::initialize(...).
3729   void initialize(Attributor &A) override {
3730     AANoReturn::initialize(A);
3731     Function *F = getAssociatedFunction();
3732     if (!F)
3733       indicatePessimisticFixpoint();
3734   }
3735 
3736   /// See AbstractAttribute::getAsStr().
3737   const std::string getAsStr() const override {
3738     return getAssumed() ? "noreturn" : "may-return";
3739   }
3740 
3741   /// See AbstractAttribute::updateImpl(Attributor &A).
3742   virtual ChangeStatus updateImpl(Attributor &A) override {
3743     auto CheckForNoReturn = [](Instruction &) { return false; };
3744     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3745                                    {(unsigned)Instruction::Ret}))
3746       return indicatePessimisticFixpoint();
3747     return ChangeStatus::UNCHANGED;
3748   }
3749 };
3750 
3751 struct AANoReturnFunction final : AANoReturnImpl {
3752   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3753 
3754   /// See AbstractAttribute::trackStatistics()
3755   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3756 };
3757 
3758 /// NoReturn attribute deduction for a call sites.
3759 struct AANoReturnCallSite final : AANoReturnImpl {
3760   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3761 
3762   /// See AbstractAttribute::updateImpl(...).
3763   ChangeStatus updateImpl(Attributor &A) override {
3764     // TODO: Once we have call site specific value information we can provide
3765     //       call site specific liveness information and then it makes
3766     //       sense to specialize attributes for call sites arguments instead of
3767     //       redirecting requests to the callee argument.
3768     Function *F = getAssociatedFunction();
3769     const IRPosition &FnPos = IRPosition::function(*F);
3770     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3771     return clampStateAndIndicateChange(
3772         getState(),
3773         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3774   }
3775 
3776   /// See AbstractAttribute::trackStatistics()
3777   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3778 };
3779 
3780 /// ----------------------- Variable Capturing ---------------------------------
3781 
3782 /// A class to hold the state of for no-capture attributes.
3783 struct AANoCaptureImpl : public AANoCapture {
3784   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3785 
3786   /// See AbstractAttribute::initialize(...).
3787   void initialize(Attributor &A) override {
3788     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3789       indicateOptimisticFixpoint();
3790       return;
3791     }
3792     Function *AnchorScope = getAnchorScope();
3793     if (isFnInterfaceKind() &&
3794         (!AnchorScope || !AnchorScope->hasExactDefinition())) {
3795       indicatePessimisticFixpoint();
3796       return;
3797     }
3798 
3799     // You cannot "capture" null in the default address space.
3800     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3801         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3802       indicateOptimisticFixpoint();
3803       return;
3804     }
3805 
3806     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3807 
3808     // Check what state the associated function can actually capture.
3809     if (F)
3810       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3811     else
3812       indicatePessimisticFixpoint();
3813   }
3814 
3815   /// See AbstractAttribute::updateImpl(...).
3816   ChangeStatus updateImpl(Attributor &A) override;
3817 
3818   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3819   virtual void
3820   getDeducedAttributes(LLVMContext &Ctx,
3821                        SmallVectorImpl<Attribute> &Attrs) const override {
3822     if (!isAssumedNoCaptureMaybeReturned())
3823       return;
3824 
3825     if (getArgNo() >= 0) {
3826       if (isAssumedNoCapture())
3827         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3828       else if (ManifestInternal)
3829         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3830     }
3831   }
3832 
3833   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3834   /// depending on the ability of the function associated with \p IRP to capture
3835   /// state in memory and through "returning/throwing", respectively.
3836   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3837                                                    const Function &F,
3838                                                    BitIntegerState &State) {
3839     // TODO: Once we have memory behavior attributes we should use them here.
3840 
3841     // If we know we cannot communicate or write to memory, we do not care about
3842     // ptr2int anymore.
3843     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3844         F.getReturnType()->isVoidTy()) {
3845       State.addKnownBits(NO_CAPTURE);
3846       return;
3847     }
3848 
3849     // A function cannot capture state in memory if it only reads memory, it can
3850     // however return/throw state and the state might be influenced by the
3851     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3852     if (F.onlyReadsMemory())
3853       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3854 
3855     // A function cannot communicate state back if it does not through
3856     // exceptions and doesn not return values.
3857     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3858       State.addKnownBits(NOT_CAPTURED_IN_RET);
3859 
3860     // Check existing "returned" attributes.
3861     int ArgNo = IRP.getArgNo();
3862     if (F.doesNotThrow() && ArgNo >= 0) {
3863       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3864         if (F.hasParamAttribute(u, Attribute::Returned)) {
3865           if (u == unsigned(ArgNo))
3866             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3867           else if (F.onlyReadsMemory())
3868             State.addKnownBits(NO_CAPTURE);
3869           else
3870             State.addKnownBits(NOT_CAPTURED_IN_RET);
3871           break;
3872         }
3873     }
3874   }
3875 
3876   /// See AbstractState::getAsStr().
3877   const std::string getAsStr() const override {
3878     if (isKnownNoCapture())
3879       return "known not-captured";
3880     if (isAssumedNoCapture())
3881       return "assumed not-captured";
3882     if (isKnownNoCaptureMaybeReturned())
3883       return "known not-captured-maybe-returned";
3884     if (isAssumedNoCaptureMaybeReturned())
3885       return "assumed not-captured-maybe-returned";
3886     return "assumed-captured";
3887   }
3888 };
3889 
3890 /// Attributor-aware capture tracker.
3891 struct AACaptureUseTracker final : public CaptureTracker {
3892 
3893   /// Create a capture tracker that can lookup in-flight abstract attributes
3894   /// through the Attributor \p A.
3895   ///
3896   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3897   /// search is stopped. If a use leads to a return instruction,
3898   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3899   /// If a use leads to a ptr2int which may capture the value,
3900   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3901   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3902   /// set. All values in \p PotentialCopies are later tracked as well. For every
3903   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3904   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3905   /// conservatively set to true.
3906   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3907                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3908                       SmallVectorImpl<const Value *> &PotentialCopies,
3909                       unsigned &RemainingUsesToExplore)
3910       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3911         PotentialCopies(PotentialCopies),
3912         RemainingUsesToExplore(RemainingUsesToExplore) {}
3913 
3914   /// Determine if \p V maybe captured. *Also updates the state!*
3915   bool valueMayBeCaptured(const Value *V) {
3916     if (V->getType()->isPointerTy()) {
3917       PointerMayBeCaptured(V, this);
3918     } else {
3919       State.indicatePessimisticFixpoint();
3920     }
3921     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3922   }
3923 
3924   /// See CaptureTracker::tooManyUses().
3925   void tooManyUses() override {
3926     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3927   }
3928 
3929   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3930     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3931       return true;
3932     const auto &DerefAA =
3933         A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
3934     return DerefAA.getAssumedDereferenceableBytes();
3935   }
3936 
3937   /// See CaptureTracker::captured(...).
3938   bool captured(const Use *U) override {
3939     Instruction *UInst = cast<Instruction>(U->getUser());
3940     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3941                       << "\n");
3942 
3943     // Because we may reuse the tracker multiple times we keep track of the
3944     // number of explored uses ourselves as well.
3945     if (RemainingUsesToExplore-- == 0) {
3946       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3947       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3948                           /* Return */ true);
3949     }
3950 
3951     // Deal with ptr2int by following uses.
3952     if (isa<PtrToIntInst>(UInst)) {
3953       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3954       return valueMayBeCaptured(UInst);
3955     }
3956 
3957     // Explicitly catch return instructions.
3958     if (isa<ReturnInst>(UInst))
3959       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3960                           /* Return */ true);
3961 
3962     // For now we only use special logic for call sites. However, the tracker
3963     // itself knows about a lot of other non-capturing cases already.
3964     CallSite CS(UInst);
3965     if (!CS || !CS.isArgOperand(U))
3966       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3967                           /* Return */ true);
3968 
3969     unsigned ArgNo = CS.getArgumentNo(U);
3970     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3971     // If we have a abstract no-capture attribute for the argument we can use
3972     // it to justify a non-capture attribute here. This allows recursion!
3973     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3974     if (ArgNoCaptureAA.isAssumedNoCapture())
3975       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3976                           /* Return */ false);
3977     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3978       addPotentialCopy(CS);
3979       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3980                           /* Return */ false);
3981     }
3982 
3983     // Lastly, we could not find a reason no-capture can be assumed so we don't.
3984     return isCapturedIn(/* Memory */ true, /* Integer */ true,
3985                         /* Return */ true);
3986   }
3987 
3988   /// Register \p CS as potential copy of the value we are checking.
3989   void addPotentialCopy(CallSite CS) {
3990     PotentialCopies.push_back(CS.getInstruction());
3991   }
3992 
3993   /// See CaptureTracker::shouldExplore(...).
3994   bool shouldExplore(const Use *U) override {
3995     // Check liveness.
3996     return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
3997   }
3998 
3999   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4000   /// \p CapturedInRet, then return the appropriate value for use in the
4001   /// CaptureTracker::captured() interface.
4002   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4003                     bool CapturedInRet) {
4004     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4005                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4006     if (CapturedInMem)
4007       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4008     if (CapturedInInt)
4009       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4010     if (CapturedInRet)
4011       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4012     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4013   }
4014 
4015 private:
4016   /// The attributor providing in-flight abstract attributes.
4017   Attributor &A;
4018 
4019   /// The abstract attribute currently updated.
4020   AANoCapture &NoCaptureAA;
4021 
4022   /// The abstract liveness state.
4023   const AAIsDead &IsDeadAA;
4024 
4025   /// The state currently updated.
4026   AANoCapture::StateType &State;
4027 
4028   /// Set of potential copies of the tracked value.
4029   SmallVectorImpl<const Value *> &PotentialCopies;
4030 
4031   /// Global counter to limit the number of explored uses.
4032   unsigned &RemainingUsesToExplore;
4033 };
4034 
4035 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4036   const IRPosition &IRP = getIRPosition();
4037   const Value *V =
4038       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4039   if (!V)
4040     return indicatePessimisticFixpoint();
4041 
4042   const Function *F =
4043       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4044   assert(F && "Expected a function!");
4045   const IRPosition &FnPos = IRPosition::function(*F);
4046   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos);
4047 
4048   AANoCapture::StateType T;
4049 
4050   // Readonly means we cannot capture through memory.
4051   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4052   if (FnMemAA.isAssumedReadOnly()) {
4053     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4054     if (FnMemAA.isKnownReadOnly())
4055       addKnownBits(NOT_CAPTURED_IN_MEM);
4056   }
4057 
4058   // Make sure all returned values are different than the underlying value.
4059   // TODO: we could do this in a more sophisticated way inside
4060   //       AAReturnedValues, e.g., track all values that escape through returns
4061   //       directly somehow.
4062   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4063     bool SeenConstant = false;
4064     for (auto &It : RVAA.returned_values()) {
4065       if (isa<Constant>(It.first)) {
4066         if (SeenConstant)
4067           return false;
4068         SeenConstant = true;
4069       } else if (!isa<Argument>(It.first) ||
4070                  It.first == getAssociatedArgument())
4071         return false;
4072     }
4073     return true;
4074   };
4075 
4076   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, FnPos);
4077   if (NoUnwindAA.isAssumedNoUnwind()) {
4078     bool IsVoidTy = F->getReturnType()->isVoidTy();
4079     const AAReturnedValues *RVAA =
4080         IsVoidTy ? nullptr : &A.getAAFor<AAReturnedValues>(*this, FnPos);
4081     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4082       T.addKnownBits(NOT_CAPTURED_IN_RET);
4083       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4084         return ChangeStatus::UNCHANGED;
4085       if (NoUnwindAA.isKnownNoUnwind() &&
4086           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4087         addKnownBits(NOT_CAPTURED_IN_RET);
4088         if (isKnown(NOT_CAPTURED_IN_MEM))
4089           return indicateOptimisticFixpoint();
4090       }
4091     }
4092   }
4093 
4094   // Use the CaptureTracker interface and logic with the specialized tracker,
4095   // defined in AACaptureUseTracker, that can look at in-flight abstract
4096   // attributes and directly updates the assumed state.
4097   SmallVector<const Value *, 4> PotentialCopies;
4098   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
4099   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4100                               RemainingUsesToExplore);
4101 
4102   // Check all potential copies of the associated value until we can assume
4103   // none will be captured or we have to assume at least one might be.
4104   unsigned Idx = 0;
4105   PotentialCopies.push_back(V);
4106   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4107     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4108 
4109   AANoCapture::StateType &S = getState();
4110   auto Assumed = S.getAssumed();
4111   S.intersectAssumedBits(T.getAssumed());
4112   if (!isAssumedNoCaptureMaybeReturned())
4113     return indicatePessimisticFixpoint();
4114   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4115                                    : ChangeStatus::CHANGED;
4116 }
4117 
4118 /// NoCapture attribute for function arguments.
4119 struct AANoCaptureArgument final : AANoCaptureImpl {
4120   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4121 
4122   /// See AbstractAttribute::trackStatistics()
4123   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4124 };
4125 
4126 /// NoCapture attribute for call site arguments.
4127 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4128   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4129 
4130   /// See AbstractAttribute::initialize(...).
4131   void initialize(Attributor &A) override {
4132     if (Argument *Arg = getAssociatedArgument())
4133       if (Arg->hasByValAttr())
4134         indicateOptimisticFixpoint();
4135     AANoCaptureImpl::initialize(A);
4136   }
4137 
4138   /// See AbstractAttribute::updateImpl(...).
4139   ChangeStatus updateImpl(Attributor &A) override {
4140     // TODO: Once we have call site specific value information we can provide
4141     //       call site specific liveness information and then it makes
4142     //       sense to specialize attributes for call sites arguments instead of
4143     //       redirecting requests to the callee argument.
4144     Argument *Arg = getAssociatedArgument();
4145     if (!Arg)
4146       return indicatePessimisticFixpoint();
4147     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4148     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4149     return clampStateAndIndicateChange(
4150         getState(),
4151         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4152   }
4153 
4154   /// See AbstractAttribute::trackStatistics()
4155   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4156 };
4157 
4158 /// NoCapture attribute for floating values.
4159 struct AANoCaptureFloating final : AANoCaptureImpl {
4160   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4161 
4162   /// See AbstractAttribute::trackStatistics()
4163   void trackStatistics() const override {
4164     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4165   }
4166 };
4167 
4168 /// NoCapture attribute for function return value.
4169 struct AANoCaptureReturned final : AANoCaptureImpl {
4170   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4171     llvm_unreachable("NoCapture is not applicable to function returns!");
4172   }
4173 
4174   /// See AbstractAttribute::initialize(...).
4175   void initialize(Attributor &A) override {
4176     llvm_unreachable("NoCapture is not applicable to function returns!");
4177   }
4178 
4179   /// See AbstractAttribute::updateImpl(...).
4180   ChangeStatus updateImpl(Attributor &A) override {
4181     llvm_unreachable("NoCapture is not applicable to function returns!");
4182   }
4183 
4184   /// See AbstractAttribute::trackStatistics()
4185   void trackStatistics() const override {}
4186 };
4187 
4188 /// NoCapture attribute deduction for a call site return value.
4189 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4190   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4191 
4192   /// See AbstractAttribute::trackStatistics()
4193   void trackStatistics() const override {
4194     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4195   }
4196 };
4197 
4198 /// ------------------ Value Simplify Attribute ----------------------------
4199 struct AAValueSimplifyImpl : AAValueSimplify {
4200   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
4201 
4202   /// See AbstractAttribute::getAsStr().
4203   const std::string getAsStr() const override {
4204     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4205                         : "not-simple";
4206   }
4207 
4208   /// See AbstractAttribute::trackStatistics()
4209   void trackStatistics() const override {}
4210 
4211   /// See AAValueSimplify::getAssumedSimplifiedValue()
4212   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4213     if (!getAssumed())
4214       return const_cast<Value *>(&getAssociatedValue());
4215     return SimplifiedAssociatedValue;
4216   }
4217   void initialize(Attributor &A) override {}
4218 
4219   /// Helper function for querying AAValueSimplify and updating candicate.
4220   /// \param QueryingValue Value trying to unify with SimplifiedValue
4221   /// \param AccumulatedSimplifiedValue Current simplification result.
4222   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4223                              Value &QueryingValue,
4224                              Optional<Value *> &AccumulatedSimplifiedValue) {
4225     // FIXME: Add a typecast support.
4226 
4227     auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
4228         QueryingAA, IRPosition::value(QueryingValue));
4229 
4230     Optional<Value *> QueryingValueSimplified =
4231         ValueSimpifyAA.getAssumedSimplifiedValue(A);
4232 
4233     if (!QueryingValueSimplified.hasValue())
4234       return true;
4235 
4236     if (!QueryingValueSimplified.getValue())
4237       return false;
4238 
4239     Value &QueryingValueSimplifiedUnwrapped =
4240         *QueryingValueSimplified.getValue();
4241 
4242     if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4243       return true;
4244 
4245     if (AccumulatedSimplifiedValue.hasValue())
4246       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4247 
4248     LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
4249                       << " is assumed to be "
4250                       << QueryingValueSimplifiedUnwrapped << "\n");
4251 
4252     AccumulatedSimplifiedValue = QueryingValueSimplified;
4253     return true;
4254   }
4255 
4256   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4257     if (!getAssociatedValue().getType()->isIntegerTy())
4258       return false;
4259 
4260     const auto &ValueConstantRangeAA =
4261         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4262 
4263     Optional<ConstantInt *> COpt =
4264         ValueConstantRangeAA.getAssumedConstantInt(A);
4265     if (COpt.hasValue()) {
4266       if (auto *C = COpt.getValue())
4267         SimplifiedAssociatedValue = C;
4268       else
4269         return false;
4270     } else {
4271       // FIXME: It should be llvm::None but if you set llvm::None,
4272       //        values are mistakenly infered as `undef` now.
4273       SimplifiedAssociatedValue = &getAssociatedValue();
4274     }
4275     return true;
4276   }
4277 
4278   /// See AbstractAttribute::manifest(...).
4279   ChangeStatus manifest(Attributor &A) override {
4280     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4281 
4282     if (!SimplifiedAssociatedValue.hasValue() ||
4283         !SimplifiedAssociatedValue.getValue())
4284       return Changed;
4285 
4286     if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
4287       // We can replace the AssociatedValue with the constant.
4288       Value &V = getAssociatedValue();
4289       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4290         LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
4291                           << "\n");
4292         A.changeValueAfterManifest(V, *C);
4293         Changed = ChangeStatus::CHANGED;
4294       }
4295     }
4296 
4297     return Changed | AAValueSimplify::manifest(A);
4298   }
4299 
4300   /// See AbstractState::indicatePessimisticFixpoint(...).
4301   ChangeStatus indicatePessimisticFixpoint() override {
4302     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4303     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4304     SimplifiedAssociatedValue = &getAssociatedValue();
4305     return indicateOptimisticFixpoint();
4306   }
4307 
4308 protected:
4309   // An assumed simplified value. Initially, it is set to Optional::None, which
4310   // means that the value is not clear under current assumption. If in the
4311   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4312   // returns orignal associated value.
4313   Optional<Value *> SimplifiedAssociatedValue;
4314 };
4315 
4316 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4317   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4318 
4319   void initialize(Attributor &A) override {
4320     AAValueSimplifyImpl::initialize(A);
4321     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
4322       indicatePessimisticFixpoint();
4323     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4324                 /* IgnoreSubsumingPositions */ true))
4325       indicatePessimisticFixpoint();
4326   }
4327 
4328   /// See AbstractAttribute::updateImpl(...).
4329   ChangeStatus updateImpl(Attributor &A) override {
4330     // Byval is only replacable if it is readonly otherwise we would write into
4331     // the replaced value and not the copy that byval creates implicitly.
4332     Argument *Arg = getAssociatedArgument();
4333     if (Arg->hasByValAttr()) {
4334       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4335       if (!MemAA.isAssumedReadOnly())
4336         return indicatePessimisticFixpoint();
4337     }
4338 
4339     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4340 
4341     auto PredForCallSite = [&](AbstractCallSite ACS) {
4342       // Check if we have an associated argument or not (which can happen for
4343       // callback calls).
4344       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
4345       if (!ArgOp)
4346         return false;
4347       // We can only propagate thread independent values through callbacks.
4348       // This is different to direct/indirect call sites because for them we
4349       // know the thread executing the caller and callee is the same. For
4350       // callbacks this is not guaranteed, thus a thread dependent value could
4351       // be different for the caller and callee, making it invalid to propagate.
4352       if (ACS.isCallbackCall())
4353         if (auto *C = dyn_cast<Constant>(ArgOp))
4354           if (C->isThreadDependent())
4355             return false;
4356       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
4357     };
4358 
4359     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
4360       if (!askSimplifiedValueForAAValueConstantRange(A))
4361         return indicatePessimisticFixpoint();
4362 
4363     // If a candicate was found in this update, return CHANGED.
4364     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4365                ? ChangeStatus::UNCHANGED
4366                : ChangeStatus ::CHANGED;
4367   }
4368 
4369   /// See AbstractAttribute::trackStatistics()
4370   void trackStatistics() const override {
4371     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4372   }
4373 };
4374 
4375 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4376   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4377 
4378   /// See AbstractAttribute::updateImpl(...).
4379   ChangeStatus updateImpl(Attributor &A) override {
4380     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4381 
4382     auto PredForReturned = [&](Value &V) {
4383       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4384     };
4385 
4386     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4387       if (!askSimplifiedValueForAAValueConstantRange(A))
4388         return indicatePessimisticFixpoint();
4389 
4390     // If a candicate was found in this update, return CHANGED.
4391     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4392                ? ChangeStatus::UNCHANGED
4393                : ChangeStatus ::CHANGED;
4394   }
4395   /// See AbstractAttribute::trackStatistics()
4396   void trackStatistics() const override {
4397     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4398   }
4399 };
4400 
4401 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4402   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4403 
4404   /// See AbstractAttribute::initialize(...).
4405   void initialize(Attributor &A) override {
4406     Value &V = getAnchorValue();
4407 
4408     // TODO: add other stuffs
4409     if (isa<Constant>(V))
4410       indicatePessimisticFixpoint();
4411   }
4412 
4413   /// See AbstractAttribute::updateImpl(...).
4414   ChangeStatus updateImpl(Attributor &A) override {
4415     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4416 
4417     auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
4418       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4419       if (!Stripped && this == &AA) {
4420         // TODO: Look the instruction and check recursively.
4421 
4422         LLVM_DEBUG(
4423             dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
4424                    << V << "\n");
4425         return false;
4426       }
4427       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4428     };
4429 
4430     if (!genericValueTraversal<AAValueSimplify, BooleanState>(
4431             A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
4432             VisitValueCB))
4433       if (!askSimplifiedValueForAAValueConstantRange(A))
4434         return indicatePessimisticFixpoint();
4435 
4436     // If a candicate was found in this update, return CHANGED.
4437 
4438     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4439                ? ChangeStatus::UNCHANGED
4440                : ChangeStatus ::CHANGED;
4441   }
4442 
4443   /// See AbstractAttribute::trackStatistics()
4444   void trackStatistics() const override {
4445     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4446   }
4447 };
4448 
4449 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4450   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4451 
4452   /// See AbstractAttribute::initialize(...).
4453   void initialize(Attributor &A) override {
4454     SimplifiedAssociatedValue = &getAnchorValue();
4455     indicateOptimisticFixpoint();
4456   }
4457   /// See AbstractAttribute::initialize(...).
4458   ChangeStatus updateImpl(Attributor &A) override {
4459     llvm_unreachable(
4460         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4461   }
4462   /// See AbstractAttribute::trackStatistics()
4463   void trackStatistics() const override {
4464     STATS_DECLTRACK_FN_ATTR(value_simplify)
4465   }
4466 };
4467 
4468 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4469   AAValueSimplifyCallSite(const IRPosition &IRP)
4470       : AAValueSimplifyFunction(IRP) {}
4471   /// See AbstractAttribute::trackStatistics()
4472   void trackStatistics() const override {
4473     STATS_DECLTRACK_CS_ATTR(value_simplify)
4474   }
4475 };
4476 
4477 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4478   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4479       : AAValueSimplifyReturned(IRP) {}
4480 
4481   void trackStatistics() const override {
4482     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4483   }
4484 };
4485 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4486   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4487       : AAValueSimplifyFloating(IRP) {}
4488 
4489   void trackStatistics() const override {
4490     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4491   }
4492 };
4493 
4494 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4495 struct AAHeapToStackImpl : public AAHeapToStack {
4496   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4497 
4498   const std::string getAsStr() const override {
4499     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4500   }
4501 
4502   ChangeStatus manifest(Attributor &A) override {
4503     assert(getState().isValidState() &&
4504            "Attempted to manifest an invalid state!");
4505 
4506     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4507     Function *F = getAssociatedFunction();
4508     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4509 
4510     for (Instruction *MallocCall : MallocCalls) {
4511       // This malloc cannot be replaced.
4512       if (BadMallocCalls.count(MallocCall))
4513         continue;
4514 
4515       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4516         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4517         A.deleteAfterManifest(*FreeCall);
4518         HasChanged = ChangeStatus::CHANGED;
4519       }
4520 
4521       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4522                         << "\n");
4523 
4524       Constant *Size;
4525       if (isCallocLikeFn(MallocCall, TLI)) {
4526         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4527         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4528         APInt TotalSize = SizeT->getValue() * Num->getValue();
4529         Size =
4530             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4531       } else {
4532         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4533       }
4534 
4535       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4536       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4537                                        Size, "", MallocCall->getNextNode());
4538 
4539       if (AI->getType() != MallocCall->getType())
4540         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4541                              AI->getNextNode());
4542 
4543       replaceAllInstructionUsesWith(*MallocCall, *AI);
4544 
4545       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4546         auto *NBB = II->getNormalDest();
4547         BranchInst::Create(NBB, MallocCall->getParent());
4548         A.deleteAfterManifest(*MallocCall);
4549       } else {
4550         A.deleteAfterManifest(*MallocCall);
4551       }
4552 
4553       if (isCallocLikeFn(MallocCall, TLI)) {
4554         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4555                                    AI->getNextNode());
4556         Value *Ops[] = {
4557             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4558             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4559 
4560         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4561         Module *M = F->getParent();
4562         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4563         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4564       }
4565       HasChanged = ChangeStatus::CHANGED;
4566     }
4567 
4568     return HasChanged;
4569   }
4570 
4571   /// Collection of all malloc calls in a function.
4572   SmallSetVector<Instruction *, 4> MallocCalls;
4573 
4574   /// Collection of malloc calls that cannot be converted.
4575   DenseSet<const Instruction *> BadMallocCalls;
4576 
4577   /// A map for each malloc call to the set of associated free calls.
4578   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4579 
4580   ChangeStatus updateImpl(Attributor &A) override;
4581 };
4582 
4583 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4584   const Function *F = getAssociatedFunction();
4585   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4586 
4587   MustBeExecutedContextExplorer &Explorer =
4588       A.getInfoCache().getMustBeExecutedContextExplorer();
4589 
4590   auto FreeCheck = [&](Instruction &I) {
4591     const auto &Frees = FreesForMalloc.lookup(&I);
4592     if (Frees.size() != 1)
4593       return false;
4594     Instruction *UniqueFree = *Frees.begin();
4595     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4596   };
4597 
4598   auto UsesCheck = [&](Instruction &I) {
4599     bool ValidUsesOnly = true;
4600     bool MustUse = true;
4601     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4602       Instruction *UserI = cast<Instruction>(U.getUser());
4603       if (isa<LoadInst>(UserI))
4604         return true;
4605       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4606         if (SI->getValueOperand() == U.get()) {
4607           LLVM_DEBUG(dbgs()
4608                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4609           ValidUsesOnly = false;
4610         } else {
4611           // A store into the malloc'ed memory is fine.
4612         }
4613         return true;
4614       }
4615       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4616         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4617           return true;
4618         // Record malloc.
4619         if (isFreeCall(UserI, TLI)) {
4620           if (MustUse) {
4621             FreesForMalloc[&I].insert(UserI);
4622           } else {
4623             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4624                               << *UserI << "\n");
4625             ValidUsesOnly = false;
4626           }
4627           return true;
4628         }
4629 
4630         unsigned ArgNo = CB->getArgOperandNo(&U);
4631 
4632         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4633             *this, IRPosition::callsite_argument(*CB, ArgNo));
4634 
4635         // If a callsite argument use is nofree, we are fine.
4636         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4637             *this, IRPosition::callsite_argument(*CB, ArgNo));
4638 
4639         if (!NoCaptureAA.isAssumedNoCapture() ||
4640             !ArgNoFreeAA.isAssumedNoFree()) {
4641           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4642           ValidUsesOnly = false;
4643         }
4644         return true;
4645       }
4646 
4647       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4648           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4649         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4650         Follow = true;
4651         return true;
4652       }
4653       // Unknown user for which we can not track uses further (in a way that
4654       // makes sense).
4655       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4656       ValidUsesOnly = false;
4657       return true;
4658     };
4659     A.checkForAllUses(Pred, *this, I);
4660     return ValidUsesOnly;
4661   };
4662 
4663   auto MallocCallocCheck = [&](Instruction &I) {
4664     if (BadMallocCalls.count(&I))
4665       return true;
4666 
4667     bool IsMalloc = isMallocLikeFn(&I, TLI);
4668     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4669     if (!IsMalloc && !IsCalloc) {
4670       BadMallocCalls.insert(&I);
4671       return true;
4672     }
4673 
4674     if (IsMalloc) {
4675       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4676         if (Size->getValue().ule(MaxHeapToStackSize))
4677           if (UsesCheck(I) || FreeCheck(I)) {
4678             MallocCalls.insert(&I);
4679             return true;
4680           }
4681     } else if (IsCalloc) {
4682       bool Overflow = false;
4683       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4684         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4685           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4686                   .ule(MaxHeapToStackSize))
4687             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4688               MallocCalls.insert(&I);
4689               return true;
4690             }
4691     }
4692 
4693     BadMallocCalls.insert(&I);
4694     return true;
4695   };
4696 
4697   size_t NumBadMallocs = BadMallocCalls.size();
4698 
4699   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4700 
4701   if (NumBadMallocs != BadMallocCalls.size())
4702     return ChangeStatus::CHANGED;
4703 
4704   return ChangeStatus::UNCHANGED;
4705 }
4706 
4707 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4708   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4709 
4710   /// See AbstractAttribute::trackStatistics()
4711   void trackStatistics() const override {
4712     STATS_DECL(MallocCalls, Function,
4713                "Number of malloc calls converted to allocas");
4714     for (auto *C : MallocCalls)
4715       if (!BadMallocCalls.count(C))
4716         ++BUILD_STAT_NAME(MallocCalls, Function);
4717   }
4718 };
4719 
4720 /// -------------------- Memory Behavior Attributes ----------------------------
4721 /// Includes read-none, read-only, and write-only.
4722 /// ----------------------------------------------------------------------------
4723 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
4724   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
4725 
4726   /// See AbstractAttribute::initialize(...).
4727   void initialize(Attributor &A) override {
4728     intersectAssumedBits(BEST_STATE);
4729     getKnownStateFromValue(getIRPosition(), getState());
4730     IRAttribute::initialize(A);
4731   }
4732 
4733   /// Return the memory behavior information encoded in the IR for \p IRP.
4734   static void getKnownStateFromValue(const IRPosition &IRP,
4735                                      BitIntegerState &State,
4736                                      bool IgnoreSubsumingPositions = false) {
4737     SmallVector<Attribute, 2> Attrs;
4738     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
4739     for (const Attribute &Attr : Attrs) {
4740       switch (Attr.getKindAsEnum()) {
4741       case Attribute::ReadNone:
4742         State.addKnownBits(NO_ACCESSES);
4743         break;
4744       case Attribute::ReadOnly:
4745         State.addKnownBits(NO_WRITES);
4746         break;
4747       case Attribute::WriteOnly:
4748         State.addKnownBits(NO_READS);
4749         break;
4750       default:
4751         llvm_unreachable("Unexpcted attribute!");
4752       }
4753     }
4754 
4755     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
4756       if (!I->mayReadFromMemory())
4757         State.addKnownBits(NO_READS);
4758       if (!I->mayWriteToMemory())
4759         State.addKnownBits(NO_WRITES);
4760     }
4761   }
4762 
4763   /// See AbstractAttribute::getDeducedAttributes(...).
4764   void getDeducedAttributes(LLVMContext &Ctx,
4765                             SmallVectorImpl<Attribute> &Attrs) const override {
4766     assert(Attrs.size() == 0);
4767     if (isAssumedReadNone())
4768       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
4769     else if (isAssumedReadOnly())
4770       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
4771     else if (isAssumedWriteOnly())
4772       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
4773     assert(Attrs.size() <= 1);
4774   }
4775 
4776   /// See AbstractAttribute::manifest(...).
4777   ChangeStatus manifest(Attributor &A) override {
4778     const IRPosition &IRP = getIRPosition();
4779 
4780     // Check if we would improve the existing attributes first.
4781     SmallVector<Attribute, 4> DeducedAttrs;
4782     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
4783     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
4784           return IRP.hasAttr(Attr.getKindAsEnum(),
4785                              /* IgnoreSubsumingPositions */ true);
4786         }))
4787       return ChangeStatus::UNCHANGED;
4788 
4789     // Clear existing attributes.
4790     IRP.removeAttrs(AttrKinds);
4791 
4792     // Use the generic manifest method.
4793     return IRAttribute::manifest(A);
4794   }
4795 
4796   /// See AbstractState::getAsStr().
4797   const std::string getAsStr() const override {
4798     if (isAssumedReadNone())
4799       return "readnone";
4800     if (isAssumedReadOnly())
4801       return "readonly";
4802     if (isAssumedWriteOnly())
4803       return "writeonly";
4804     return "may-read/write";
4805   }
4806 
4807   /// The set of IR attributes AAMemoryBehavior deals with.
4808   static const Attribute::AttrKind AttrKinds[3];
4809 };
4810 
4811 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
4812     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
4813 
4814 /// Memory behavior attribute for a floating value.
4815 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
4816   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4817 
4818   /// See AbstractAttribute::initialize(...).
4819   void initialize(Attributor &A) override {
4820     AAMemoryBehaviorImpl::initialize(A);
4821     // Initialize the use vector with all direct uses of the associated value.
4822     for (const Use &U : getAssociatedValue().uses())
4823       Uses.insert(&U);
4824   }
4825 
4826   /// See AbstractAttribute::updateImpl(...).
4827   ChangeStatus updateImpl(Attributor &A) override;
4828 
4829   /// See AbstractAttribute::trackStatistics()
4830   void trackStatistics() const override {
4831     if (isAssumedReadNone())
4832       STATS_DECLTRACK_FLOATING_ATTR(readnone)
4833     else if (isAssumedReadOnly())
4834       STATS_DECLTRACK_FLOATING_ATTR(readonly)
4835     else if (isAssumedWriteOnly())
4836       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
4837   }
4838 
4839 private:
4840   /// Return true if users of \p UserI might access the underlying
4841   /// variable/location described by \p U and should therefore be analyzed.
4842   bool followUsersOfUseIn(Attributor &A, const Use *U,
4843                           const Instruction *UserI);
4844 
4845   /// Update the state according to the effect of use \p U in \p UserI.
4846   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
4847 
4848 protected:
4849   /// Container for (transitive) uses of the associated argument.
4850   SetVector<const Use *> Uses;
4851 };
4852 
4853 /// Memory behavior attribute for function argument.
4854 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
4855   AAMemoryBehaviorArgument(const IRPosition &IRP)
4856       : AAMemoryBehaviorFloating(IRP) {}
4857 
4858   /// See AbstractAttribute::initialize(...).
4859   void initialize(Attributor &A) override {
4860     intersectAssumedBits(BEST_STATE);
4861     const IRPosition &IRP = getIRPosition();
4862     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
4863     // can query it when we use has/getAttr. That would allow us to reuse the
4864     // initialize of the base class here.
4865     bool HasByVal =
4866         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
4867     getKnownStateFromValue(IRP, getState(),
4868                            /* IgnoreSubsumingPositions */ HasByVal);
4869 
4870     // Initialize the use vector with all direct uses of the associated value.
4871     Argument *Arg = getAssociatedArgument();
4872     if (!Arg || !Arg->getParent()->hasExactDefinition()) {
4873       indicatePessimisticFixpoint();
4874     } else {
4875       // Initialize the use vector with all direct uses of the associated value.
4876       for (const Use &U : Arg->uses())
4877         Uses.insert(&U);
4878     }
4879   }
4880 
4881   ChangeStatus manifest(Attributor &A) override {
4882     // TODO: From readattrs.ll: "inalloca parameters are always
4883     //                           considered written"
4884     if (hasAttr({Attribute::InAlloca})) {
4885       removeKnownBits(NO_WRITES);
4886       removeAssumedBits(NO_WRITES);
4887     }
4888     return AAMemoryBehaviorFloating::manifest(A);
4889   }
4890 
4891   /// See AbstractAttribute::trackStatistics()
4892   void trackStatistics() const override {
4893     if (isAssumedReadNone())
4894       STATS_DECLTRACK_ARG_ATTR(readnone)
4895     else if (isAssumedReadOnly())
4896       STATS_DECLTRACK_ARG_ATTR(readonly)
4897     else if (isAssumedWriteOnly())
4898       STATS_DECLTRACK_ARG_ATTR(writeonly)
4899   }
4900 };
4901 
4902 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
4903   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
4904       : AAMemoryBehaviorArgument(IRP) {}
4905 
4906   /// See AbstractAttribute::initialize(...).
4907   void initialize(Attributor &A) override {
4908     if (Argument *Arg = getAssociatedArgument()) {
4909       if (Arg->hasByValAttr()) {
4910         addKnownBits(NO_WRITES);
4911         removeKnownBits(NO_READS);
4912         removeAssumedBits(NO_READS);
4913       }
4914     } else {
4915     }
4916     AAMemoryBehaviorArgument::initialize(A);
4917   }
4918 
4919   /// See AbstractAttribute::updateImpl(...).
4920   ChangeStatus updateImpl(Attributor &A) override {
4921     // TODO: Once we have call site specific value information we can provide
4922     //       call site specific liveness liveness information and then it makes
4923     //       sense to specialize attributes for call sites arguments instead of
4924     //       redirecting requests to the callee argument.
4925     Argument *Arg = getAssociatedArgument();
4926     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4927     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4928     return clampStateAndIndicateChange(
4929         getState(),
4930         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
4931   }
4932 
4933   /// See AbstractAttribute::trackStatistics()
4934   void trackStatistics() const override {
4935     if (isAssumedReadNone())
4936       STATS_DECLTRACK_CSARG_ATTR(readnone)
4937     else if (isAssumedReadOnly())
4938       STATS_DECLTRACK_CSARG_ATTR(readonly)
4939     else if (isAssumedWriteOnly())
4940       STATS_DECLTRACK_CSARG_ATTR(writeonly)
4941   }
4942 };
4943 
4944 /// Memory behavior attribute for a call site return position.
4945 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
4946   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
4947       : AAMemoryBehaviorFloating(IRP) {}
4948 
4949   /// See AbstractAttribute::manifest(...).
4950   ChangeStatus manifest(Attributor &A) override {
4951     // We do not annotate returned values.
4952     return ChangeStatus::UNCHANGED;
4953   }
4954 
4955   /// See AbstractAttribute::trackStatistics()
4956   void trackStatistics() const override {}
4957 };
4958 
4959 /// An AA to represent the memory behavior function attributes.
4960 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
4961   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4962 
4963   /// See AbstractAttribute::updateImpl(Attributor &A).
4964   virtual ChangeStatus updateImpl(Attributor &A) override;
4965 
4966   /// See AbstractAttribute::manifest(...).
4967   ChangeStatus manifest(Attributor &A) override {
4968     Function &F = cast<Function>(getAnchorValue());
4969     if (isAssumedReadNone()) {
4970       F.removeFnAttr(Attribute::ArgMemOnly);
4971       F.removeFnAttr(Attribute::InaccessibleMemOnly);
4972       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4973     }
4974     return AAMemoryBehaviorImpl::manifest(A);
4975   }
4976 
4977   /// See AbstractAttribute::trackStatistics()
4978   void trackStatistics() const override {
4979     if (isAssumedReadNone())
4980       STATS_DECLTRACK_FN_ATTR(readnone)
4981     else if (isAssumedReadOnly())
4982       STATS_DECLTRACK_FN_ATTR(readonly)
4983     else if (isAssumedWriteOnly())
4984       STATS_DECLTRACK_FN_ATTR(writeonly)
4985   }
4986 };
4987 
4988 /// AAMemoryBehavior attribute for call sites.
4989 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
4990   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4991 
4992   /// See AbstractAttribute::initialize(...).
4993   void initialize(Attributor &A) override {
4994     AAMemoryBehaviorImpl::initialize(A);
4995     Function *F = getAssociatedFunction();
4996     if (!F || !F->hasExactDefinition())
4997       indicatePessimisticFixpoint();
4998   }
4999 
5000   /// See AbstractAttribute::updateImpl(...).
5001   ChangeStatus updateImpl(Attributor &A) override {
5002     // TODO: Once we have call site specific value information we can provide
5003     //       call site specific liveness liveness information and then it makes
5004     //       sense to specialize attributes for call sites arguments instead of
5005     //       redirecting requests to the callee argument.
5006     Function *F = getAssociatedFunction();
5007     const IRPosition &FnPos = IRPosition::function(*F);
5008     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5009     return clampStateAndIndicateChange(
5010         getState(),
5011         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5012   }
5013 
5014   /// See AbstractAttribute::trackStatistics()
5015   void trackStatistics() const override {
5016     if (isAssumedReadNone())
5017       STATS_DECLTRACK_CS_ATTR(readnone)
5018     else if (isAssumedReadOnly())
5019       STATS_DECLTRACK_CS_ATTR(readonly)
5020     else if (isAssumedWriteOnly())
5021       STATS_DECLTRACK_CS_ATTR(writeonly)
5022   }
5023 };
5024 } // namespace
5025 
5026 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5027 
5028   // The current assumed state used to determine a change.
5029   auto AssumedState = getAssumed();
5030 
5031   auto CheckRWInst = [&](Instruction &I) {
5032     // If the instruction has an own memory behavior state, use it to restrict
5033     // the local state. No further analysis is required as the other memory
5034     // state is as optimistic as it gets.
5035     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
5036       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5037           *this, IRPosition::callsite_function(ICS));
5038       intersectAssumedBits(MemBehaviorAA.getAssumed());
5039       return !isAtFixpoint();
5040     }
5041 
5042     // Remove access kind modifiers if necessary.
5043     if (I.mayReadFromMemory())
5044       removeAssumedBits(NO_READS);
5045     if (I.mayWriteToMemory())
5046       removeAssumedBits(NO_WRITES);
5047     return !isAtFixpoint();
5048   };
5049 
5050   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5051     return indicatePessimisticFixpoint();
5052 
5053   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5054                                         : ChangeStatus::UNCHANGED;
5055 }
5056 
5057 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5058 
5059   const IRPosition &IRP = getIRPosition();
5060   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5061   AAMemoryBehavior::StateType &S = getState();
5062 
5063   // First, check the function scope. We take the known information and we avoid
5064   // work if the assumed information implies the current assumed information for
5065   // this attribute. This is a valid for all but byval arguments.
5066   Argument *Arg = IRP.getAssociatedArgument();
5067   AAMemoryBehavior::base_t FnMemAssumedState =
5068       AAMemoryBehavior::StateType::getWorstState();
5069   if (!Arg || !Arg->hasByValAttr()) {
5070     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5071     FnMemAssumedState = FnMemAA.getAssumed();
5072     S.addKnownBits(FnMemAA.getKnown());
5073     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5074       return ChangeStatus::UNCHANGED;
5075   }
5076 
5077   // Make sure the value is not captured (except through "return"), if
5078   // it is, any information derived would be irrelevant anyway as we cannot
5079   // check the potential aliases introduced by the capture. However, no need
5080   // to fall back to anythign less optimistic than the function state.
5081   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5082       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5083   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5084     S.intersectAssumedBits(FnMemAssumedState);
5085     return ChangeStatus::CHANGED;
5086   }
5087 
5088   // The current assumed state used to determine a change.
5089   auto AssumedState = S.getAssumed();
5090 
5091   // Liveness information to exclude dead users.
5092   // TODO: Take the FnPos once we have call site specific liveness information.
5093   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5094       *this, IRPosition::function(*IRP.getAssociatedFunction()));
5095 
5096   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5097   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5098     const Use *U = Uses[i];
5099     Instruction *UserI = cast<Instruction>(U->getUser());
5100     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5101                       << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
5102                       << "]\n");
5103     if (LivenessAA.isAssumedDead(UserI))
5104       continue;
5105 
5106     // Check if the users of UserI should also be visited.
5107     if (followUsersOfUseIn(A, U, UserI))
5108       for (const Use &UserIUse : UserI->uses())
5109         Uses.insert(&UserIUse);
5110 
5111     // If UserI might touch memory we analyze the use in detail.
5112     if (UserI->mayReadOrWriteMemory())
5113       analyzeUseIn(A, U, UserI);
5114   }
5115 
5116   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5117                                         : ChangeStatus::UNCHANGED;
5118 }
5119 
5120 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5121                                                   const Instruction *UserI) {
5122   // The loaded value is unrelated to the pointer argument, no need to
5123   // follow the users of the load.
5124   if (isa<LoadInst>(UserI))
5125     return false;
5126 
5127   // By default we follow all uses assuming UserI might leak information on U,
5128   // we have special handling for call sites operands though.
5129   ImmutableCallSite ICS(UserI);
5130   if (!ICS || !ICS.isArgOperand(U))
5131     return true;
5132 
5133   // If the use is a call argument known not to be captured, the users of
5134   // the call do not need to be visited because they have to be unrelated to
5135   // the input. Note that this check is not trivial even though we disallow
5136   // general capturing of the underlying argument. The reason is that the
5137   // call might the argument "through return", which we allow and for which we
5138   // need to check call users.
5139   unsigned ArgNo = ICS.getArgumentNo(U);
5140   const auto &ArgNoCaptureAA =
5141       A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
5142   return !ArgNoCaptureAA.isAssumedNoCapture();
5143 }
5144 
5145 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5146                                             const Instruction *UserI) {
5147   assert(UserI->mayReadOrWriteMemory());
5148 
5149   switch (UserI->getOpcode()) {
5150   default:
5151     // TODO: Handle all atomics and other side-effect operations we know of.
5152     break;
5153   case Instruction::Load:
5154     // Loads cause the NO_READS property to disappear.
5155     removeAssumedBits(NO_READS);
5156     return;
5157 
5158   case Instruction::Store:
5159     // Stores cause the NO_WRITES property to disappear if the use is the
5160     // pointer operand. Note that we do assume that capturing was taken care of
5161     // somewhere else.
5162     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5163       removeAssumedBits(NO_WRITES);
5164     return;
5165 
5166   case Instruction::Call:
5167   case Instruction::CallBr:
5168   case Instruction::Invoke: {
5169     // For call sites we look at the argument memory behavior attribute (this
5170     // could be recursive!) in order to restrict our own state.
5171     ImmutableCallSite ICS(UserI);
5172 
5173     // Give up on operand bundles.
5174     if (ICS.isBundleOperand(U)) {
5175       indicatePessimisticFixpoint();
5176       return;
5177     }
5178 
5179     // Calling a function does read the function pointer, maybe write it if the
5180     // function is self-modifying.
5181     if (ICS.isCallee(U)) {
5182       removeAssumedBits(NO_READS);
5183       break;
5184     }
5185 
5186     // Adjust the possible access behavior based on the information on the
5187     // argument.
5188     unsigned ArgNo = ICS.getArgumentNo(U);
5189     const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
5190     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5191     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5192     // and at least "known".
5193     intersectAssumedBits(MemBehaviorAA.getAssumed());
5194     return;
5195   }
5196   };
5197 
5198   // Generally, look at the "may-properties" and adjust the assumed state if we
5199   // did not trigger special handling before.
5200   if (UserI->mayReadFromMemory())
5201     removeAssumedBits(NO_READS);
5202   if (UserI->mayWriteToMemory())
5203     removeAssumedBits(NO_WRITES);
5204 }
5205 /// ------------------ Value Constant Range Attribute -------------------------
5206 
5207 struct AAValueConstantRangeImpl : AAValueConstantRange {
5208   using StateType = IntegerRangeState;
5209   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
5210 
5211   /// See AbstractAttribute::getAsStr().
5212   const std::string getAsStr() const override {
5213     std::string Str;
5214     llvm::raw_string_ostream OS(Str);
5215     OS << "range(" << getBitWidth() << ")<";
5216     getKnown().print(OS);
5217     OS << " / ";
5218     getAssumed().print(OS);
5219     OS << ">";
5220     return OS.str();
5221   }
5222 
5223   /// Helper function to get a SCEV expr for the associated value at program
5224   /// point \p I.
5225   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
5226     if (!getAnchorScope())
5227       return nullptr;
5228 
5229     ScalarEvolution *SE =
5230         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
5231             *getAnchorScope());
5232 
5233     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
5234         *getAnchorScope());
5235 
5236     if (!SE || !LI)
5237       return nullptr;
5238 
5239     const SCEV *S = SE->getSCEV(&getAssociatedValue());
5240     if (!I)
5241       return S;
5242 
5243     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
5244   }
5245 
5246   /// Helper function to get a range from SCEV for the associated value at
5247   /// program point \p I.
5248   ConstantRange getConstantRangeFromSCEV(Attributor &A,
5249                                          const Instruction *I = nullptr) const {
5250     if (!getAnchorScope())
5251       return getWorstState(getBitWidth());
5252 
5253     ScalarEvolution *SE =
5254         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
5255             *getAnchorScope());
5256 
5257     const SCEV *S = getSCEV(A, I);
5258     if (!SE || !S)
5259       return getWorstState(getBitWidth());
5260 
5261     return SE->getUnsignedRange(S);
5262   }
5263 
5264   /// Helper function to get a range from LVI for the associated value at
5265   /// program point \p I.
5266   ConstantRange
5267   getConstantRangeFromLVI(Attributor &A,
5268                           const Instruction *CtxI = nullptr) const {
5269     if (!getAnchorScope())
5270       return getWorstState(getBitWidth());
5271 
5272     LazyValueInfo *LVI =
5273         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
5274             *getAnchorScope());
5275 
5276     if (!LVI || !CtxI)
5277       return getWorstState(getBitWidth());
5278     return LVI->getConstantRange(&getAssociatedValue(),
5279                                  const_cast<BasicBlock *>(CtxI->getParent()),
5280                                  const_cast<Instruction *>(CtxI));
5281   }
5282 
5283   /// See AAValueConstantRange::getKnownConstantRange(..).
5284   ConstantRange
5285   getKnownConstantRange(Attributor &A,
5286                         const Instruction *CtxI = nullptr) const override {
5287     if (!CtxI || CtxI == getCtxI())
5288       return getKnown();
5289 
5290     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
5291     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
5292     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
5293   }
5294 
5295   /// See AAValueConstantRange::getAssumedConstantRange(..).
5296   ConstantRange
5297   getAssumedConstantRange(Attributor &A,
5298                           const Instruction *CtxI = nullptr) const override {
5299     // TODO: Make SCEV use Attributor assumption.
5300     //       We may be able to bound a variable range via assumptions in
5301     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
5302     //       evolve to x^2 + x, then we can say that y is in [2, 12].
5303 
5304     if (!CtxI || CtxI == getCtxI())
5305       return getAssumed();
5306 
5307     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
5308     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
5309     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
5310   }
5311 
5312   /// See AbstractAttribute::initialize(..).
5313   void initialize(Attributor &A) override {
5314     // Intersect a range given by SCEV.
5315     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
5316 
5317     // Intersect a range given by LVI.
5318     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
5319   }
5320 
5321   /// Helper function to create MDNode for range metadata.
5322   static MDNode *
5323   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
5324                             const ConstantRange &AssumedConstantRange) {
5325     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
5326                                   Ty, AssumedConstantRange.getLower())),
5327                               ConstantAsMetadata::get(ConstantInt::get(
5328                                   Ty, AssumedConstantRange.getUpper()))};
5329     return MDNode::get(Ctx, LowAndHigh);
5330   }
5331 
5332   /// Return true if \p Assumed is included in \p KnownRanges.
5333   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
5334 
5335     if (Assumed.isFullSet())
5336       return false;
5337 
5338     if (!KnownRanges)
5339       return true;
5340 
5341     // If multiple ranges are annotated in IR, we give up to annotate assumed
5342     // range for now.
5343 
5344     // TODO:  If there exists a known range which containts assumed range, we
5345     // can say assumed range is better.
5346     if (KnownRanges->getNumOperands() > 2)
5347       return false;
5348 
5349     ConstantInt *Lower =
5350         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
5351     ConstantInt *Upper =
5352         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
5353 
5354     ConstantRange Known(Lower->getValue(), Upper->getValue());
5355     return Known.contains(Assumed) && Known != Assumed;
5356   }
5357 
5358   /// Helper function to set range metadata.
5359   static bool
5360   setRangeMetadataIfisBetterRange(Instruction *I,
5361                                   const ConstantRange &AssumedConstantRange) {
5362     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
5363     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
5364       if (!AssumedConstantRange.isEmptySet()) {
5365         I->setMetadata(LLVMContext::MD_range,
5366                        getMDNodeForConstantRange(I->getType(), I->getContext(),
5367                                                  AssumedConstantRange));
5368         return true;
5369       }
5370     }
5371     return false;
5372   }
5373 
5374   /// See AbstractAttribute::manifest()
5375   ChangeStatus manifest(Attributor &A) override {
5376     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5377     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
5378     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
5379 
5380     auto &V = getAssociatedValue();
5381     if (!AssumedConstantRange.isEmptySet() &&
5382         !AssumedConstantRange.isSingleElement()) {
5383       if (Instruction *I = dyn_cast<Instruction>(&V))
5384         if (isa<CallInst>(I) || isa<LoadInst>(I))
5385           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
5386             Changed = ChangeStatus::CHANGED;
5387     }
5388 
5389     return Changed;
5390   }
5391 };
5392 
5393 struct AAValueConstantRangeArgument final : public AAValueConstantRangeImpl {
5394 
5395   AAValueConstantRangeArgument(const IRPosition &IRP)
5396       : AAValueConstantRangeImpl(IRP) {}
5397 
5398   /// See AbstractAttribute::updateImpl(...).
5399   ChangeStatus updateImpl(Attributor &A) override {
5400     // TODO: Use AAArgumentFromCallSiteArguments
5401 
5402     IntegerRangeState S(getBitWidth());
5403     clampCallSiteArgumentStates<AAValueConstantRange, IntegerRangeState>(
5404         A, *this, S);
5405 
5406     // TODO: If we know we visited all incoming values, thus no are assumed
5407     // dead, we can take the known information from the state T.
5408     return clampStateAndIndicateChange<IntegerRangeState>(this->getState(), S);
5409   }
5410 
5411   /// See AbstractAttribute::trackStatistics()
5412   void trackStatistics() const override {
5413     STATS_DECLTRACK_ARG_ATTR(value_range)
5414   }
5415 };
5416 
5417 struct AAValueConstantRangeReturned : AAValueConstantRangeImpl {
5418   AAValueConstantRangeReturned(const IRPosition &IRP)
5419       : AAValueConstantRangeImpl(IRP) {}
5420 
5421   /// See AbstractAttribute::updateImpl(...).
5422   ChangeStatus updateImpl(Attributor &A) override {
5423     // TODO: Use AAReturnedFromReturnedValues
5424 
5425     // TODO: If we know we visited all returned values, thus no are assumed
5426     // dead, we can take the known information from the state T.
5427 
5428     IntegerRangeState S(getBitWidth());
5429 
5430     clampReturnedValueStates<AAValueConstantRange, IntegerRangeState>(A, *this,
5431                                                                       S);
5432     return clampStateAndIndicateChange<StateType>(this->getState(), S);
5433   }
5434 
5435   /// See AbstractAttribute::trackStatistics()
5436   void trackStatistics() const override {
5437     STATS_DECLTRACK_FNRET_ATTR(value_range)
5438   }
5439 };
5440 
5441 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
5442   AAValueConstantRangeFloating(const IRPosition &IRP)
5443       : AAValueConstantRangeImpl(IRP) {}
5444 
5445   /// See AbstractAttribute::initialize(...).
5446   void initialize(Attributor &A) override {
5447     AAValueConstantRange::initialize(A);
5448     Value &V = getAssociatedValue();
5449 
5450     if (auto *C = dyn_cast<ConstantInt>(&V)) {
5451       unionAssumed(ConstantRange(C->getValue()));
5452       indicateOptimisticFixpoint();
5453       return;
5454     }
5455 
5456     if (isa<UndefValue>(&V)) {
5457       indicateOptimisticFixpoint();
5458       return;
5459     }
5460 
5461     if (auto *I = dyn_cast<Instruction>(&V))
5462       if (isa<BinaryOperator>(I) || isa<CmpInst>(I)) {
5463         Value *LHS = I->getOperand(0);
5464         Value *RHS = I->getOperand(1);
5465 
5466         if (LHS->getType()->isIntegerTy() && RHS->getType()->isIntegerTy())
5467           return;
5468       }
5469 
5470     // If it is a load instruction with range metadata, use it.
5471     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
5472       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
5473         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
5474         return;
5475       }
5476 
5477     // Otherwise we give up.
5478     indicatePessimisticFixpoint();
5479 
5480     LLVM_DEBUG(dbgs() << "[Attributor][AAValueConstantRange] We give up: "
5481                       << getAssociatedValue());
5482   }
5483 
5484   bool calculateBinaryOperator(Attributor &A, BinaryOperator *BinOp,
5485                                IntegerRangeState &T, Instruction *CtxI) {
5486     Value *LHS = BinOp->getOperand(0);
5487     Value *RHS = BinOp->getOperand(1);
5488 
5489     auto &LHSAA =
5490         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
5491     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
5492 
5493     auto &RHSAA =
5494         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
5495     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
5496 
5497     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
5498 
5499     T.unionAssumed(AssumedRange);
5500 
5501     // TODO: Track a known state too.
5502 
5503     return T.isValidState();
5504   }
5505 
5506   bool calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
5507                         Instruction *CtxI) {
5508     Value *LHS = CmpI->getOperand(0);
5509     Value *RHS = CmpI->getOperand(1);
5510 
5511     auto &LHSAA =
5512         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
5513     auto &RHSAA =
5514         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
5515 
5516     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
5517     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
5518 
5519     // If one of them is empty set, we can't decide.
5520     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
5521       return true;
5522 
5523     bool MustTrue = false, MustFalse = false;
5524 
5525     auto AllowedRegion =
5526         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
5527 
5528     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
5529         CmpI->getPredicate(), RHSAARange);
5530 
5531     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
5532       MustFalse = true;
5533 
5534     if (SatisfyingRegion.contains(LHSAARange))
5535       MustTrue = true;
5536 
5537     assert((!MustTrue || !MustFalse) &&
5538            "Either MustTrue or MustFalse should be false!");
5539 
5540     if (MustTrue)
5541       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
5542     else if (MustFalse)
5543       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
5544     else
5545       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
5546 
5547     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
5548                       << " " << RHSAA << "\n");
5549 
5550     // TODO: Track a known state too.
5551     return T.isValidState();
5552   }
5553 
5554   /// See AbstractAttribute::updateImpl(...).
5555   ChangeStatus updateImpl(Attributor &A) override {
5556     Instruction *CtxI = getCtxI();
5557     auto VisitValueCB = [&](Value &V, IntegerRangeState &T,
5558                             bool Stripped) -> bool {
5559       Instruction *I = dyn_cast<Instruction>(&V);
5560       if (!I) {
5561 
5562         // If the value is not instruction, we query AA to Attributor.
5563         const auto &AA =
5564             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
5565 
5566         // Clamp operator is not used to utilize a program point CtxI.
5567         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
5568 
5569         return T.isValidState();
5570       }
5571 
5572       if (auto *BinOp = dyn_cast<BinaryOperator>(I))
5573         return calculateBinaryOperator(A, BinOp, T, CtxI);
5574       else if (auto *CmpI = dyn_cast<CmpInst>(I))
5575         return calculateCmpInst(A, CmpI, T, CtxI);
5576       else {
5577         // Give up with other instructions.
5578         // TODO: Add other instructions
5579 
5580         T.indicatePessimisticFixpoint();
5581         return false;
5582       }
5583     };
5584 
5585     IntegerRangeState T(getBitWidth());
5586 
5587     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
5588             A, getIRPosition(), *this, T, VisitValueCB))
5589       return indicatePessimisticFixpoint();
5590 
5591     return clampStateAndIndicateChange(getState(), T);
5592   }
5593 
5594   /// See AbstractAttribute::trackStatistics()
5595   void trackStatistics() const override {
5596     STATS_DECLTRACK_FLOATING_ATTR(value_range)
5597   }
5598 };
5599 
5600 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
5601   AAValueConstantRangeFunction(const IRPosition &IRP)
5602       : AAValueConstantRangeImpl(IRP) {}
5603 
5604   /// See AbstractAttribute::initialize(...).
5605   ChangeStatus updateImpl(Attributor &A) override {
5606     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
5607                      "not be called");
5608   }
5609 
5610   /// See AbstractAttribute::trackStatistics()
5611   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
5612 };
5613 
5614 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
5615   AAValueConstantRangeCallSite(const IRPosition &IRP)
5616       : AAValueConstantRangeFunction(IRP) {}
5617 
5618   /// See AbstractAttribute::trackStatistics()
5619   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
5620 };
5621 
5622 struct AAValueConstantRangeCallSiteReturned : AAValueConstantRangeReturned {
5623   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
5624       : AAValueConstantRangeReturned(IRP) {}
5625 
5626   /// See AbstractAttribute::initialize(...).
5627   void initialize(Attributor &A) override {
5628     // If it is a load instruction with range metadata, use the metadata.
5629     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
5630       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
5631         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
5632 
5633     AAValueConstantRangeReturned::initialize(A);
5634   }
5635 
5636   /// See AbstractAttribute::trackStatistics()
5637   void trackStatistics() const override {
5638     STATS_DECLTRACK_CSRET_ATTR(value_range)
5639   }
5640 };
5641 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
5642   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
5643       : AAValueConstantRangeFloating(IRP) {}
5644 
5645   /// See AbstractAttribute::trackStatistics()
5646   void trackStatistics() const override {
5647     STATS_DECLTRACK_CSARG_ATTR(value_range)
5648   }
5649 };
5650 /// ----------------------------------------------------------------------------
5651 ///                               Attributor
5652 /// ----------------------------------------------------------------------------
5653 
5654 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
5655                                const AAIsDead *LivenessAA) {
5656   const Instruction *CtxI = AA.getIRPosition().getCtxI();
5657   if (!CtxI)
5658     return false;
5659 
5660   // TODO: Find a good way to utilize fine and coarse grained liveness
5661   // information.
5662   if (!LivenessAA)
5663     LivenessAA =
5664         &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
5665                             /* TrackDependence */ false);
5666 
5667   // Don't check liveness for AAIsDead.
5668   if (&AA == LivenessAA)
5669     return false;
5670 
5671   if (!LivenessAA->isAssumedDead(CtxI))
5672     return false;
5673 
5674   // We actually used liveness information so we have to record a dependence.
5675   recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
5676 
5677   return true;
5678 }
5679 
5680 bool Attributor::checkForAllUses(
5681     const function_ref<bool(const Use &, bool &)> &Pred,
5682     const AbstractAttribute &QueryingAA, const Value &V) {
5683   const IRPosition &IRP = QueryingAA.getIRPosition();
5684   SmallVector<const Use *, 16> Worklist;
5685   SmallPtrSet<const Use *, 16> Visited;
5686 
5687   for (const Use &U : V.uses())
5688     Worklist.push_back(&U);
5689 
5690   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
5691                     << " initial uses to check\n");
5692 
5693   if (Worklist.empty())
5694     return true;
5695 
5696   bool AnyDead = false;
5697   const Function *ScopeFn = IRP.getAnchorScope();
5698   const auto *LivenessAA =
5699       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
5700                                     /* TrackDependence */ false)
5701               : nullptr;
5702 
5703   while (!Worklist.empty()) {
5704     const Use *U = Worklist.pop_back_val();
5705     if (!Visited.insert(U).second)
5706       continue;
5707     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
5708     if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
5709       if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
5710         LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
5711                           << *LivenessAA << "\n");
5712         AnyDead = true;
5713         continue;
5714       }
5715 
5716     bool Follow = false;
5717     if (!Pred(*U, Follow))
5718       return false;
5719     if (!Follow)
5720       continue;
5721     for (const Use &UU : U->getUser()->uses())
5722       Worklist.push_back(&UU);
5723   }
5724 
5725   if (AnyDead)
5726     recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5727 
5728   return true;
5729 }
5730 
5731 bool Attributor::checkForAllCallSites(
5732     const function_ref<bool(AbstractCallSite)> &Pred,
5733     const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
5734   // We can try to determine information from
5735   // the call sites. However, this is only possible all call sites are known,
5736   // hence the function has internal linkage.
5737   const IRPosition &IRP = QueryingAA.getIRPosition();
5738   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5739   if (!AssociatedFunction) {
5740     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
5741                       << "\n");
5742     return false;
5743   }
5744 
5745   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
5746                               &QueryingAA);
5747 }
5748 
5749 bool Attributor::checkForAllCallSites(
5750     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
5751     bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
5752   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
5753     LLVM_DEBUG(
5754         dbgs()
5755         << "[Attributor] Function " << Fn.getName()
5756         << " has no internal linkage, hence not all call sites are known\n");
5757     return false;
5758   }
5759 
5760   for (const Use &U : Fn.uses()) {
5761     AbstractCallSite ACS(&U);
5762     if (!ACS) {
5763       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
5764                         << " has non call site use " << *U.get() << " in "
5765                         << *U.getUser() << "\n");
5766       // BlockAddress users are allowed.
5767       if (isa<BlockAddress>(U.getUser()))
5768         continue;
5769       return false;
5770     }
5771 
5772     Instruction *I = ACS.getInstruction();
5773     Function *Caller = I->getFunction();
5774 
5775     const auto *LivenessAA =
5776         lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
5777                               /* TrackDependence */ false);
5778 
5779     // Skip dead calls.
5780     if (LivenessAA && LivenessAA->isAssumedDead(I)) {
5781       // We actually used liveness information so we have to record a
5782       // dependence.
5783       if (QueryingAA)
5784         recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
5785       continue;
5786     }
5787 
5788     const Use *EffectiveUse =
5789         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
5790     if (!ACS.isCallee(EffectiveUse)) {
5791       if (!RequireAllCallSites)
5792         continue;
5793       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
5794                         << " is an invalid use of " << Fn.getName() << "\n");
5795       return false;
5796     }
5797 
5798     if (Pred(ACS))
5799       continue;
5800 
5801     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
5802                       << *ACS.getInstruction() << "\n");
5803     return false;
5804   }
5805 
5806   return true;
5807 }
5808 
5809 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
5810     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
5811         &Pred,
5812     const AbstractAttribute &QueryingAA) {
5813 
5814   const IRPosition &IRP = QueryingAA.getIRPosition();
5815   // Since we need to provide return instructions we have to have an exact
5816   // definition.
5817   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5818   if (!AssociatedFunction)
5819     return false;
5820 
5821   // If this is a call site query we use the call site specific return values
5822   // and liveness information.
5823   // TODO: use the function scope once we have call site AAReturnedValues.
5824   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5825   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
5826   if (!AARetVal.getState().isValidState())
5827     return false;
5828 
5829   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
5830 }
5831 
5832 bool Attributor::checkForAllReturnedValues(
5833     const function_ref<bool(Value &)> &Pred,
5834     const AbstractAttribute &QueryingAA) {
5835 
5836   const IRPosition &IRP = QueryingAA.getIRPosition();
5837   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5838   if (!AssociatedFunction)
5839     return false;
5840 
5841   // TODO: use the function scope once we have call site AAReturnedValues.
5842   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5843   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
5844   if (!AARetVal.getState().isValidState())
5845     return false;
5846 
5847   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
5848       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
5849         return Pred(RV);
5850       });
5851 }
5852 
5853 static bool
5854 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
5855                             const function_ref<bool(Instruction &)> &Pred,
5856                             const AAIsDead *LivenessAA, bool &AnyDead,
5857                             const ArrayRef<unsigned> &Opcodes) {
5858   for (unsigned Opcode : Opcodes) {
5859     for (Instruction *I : OpcodeInstMap[Opcode]) {
5860       // Skip dead instructions.
5861       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
5862         AnyDead = true;
5863         continue;
5864       }
5865 
5866       if (!Pred(*I))
5867         return false;
5868     }
5869   }
5870   return true;
5871 }
5872 
5873 bool Attributor::checkForAllInstructions(
5874     const llvm::function_ref<bool(Instruction &)> &Pred,
5875     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
5876 
5877   const IRPosition &IRP = QueryingAA.getIRPosition();
5878   // Since we need to provide instructions we have to have an exact definition.
5879   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5880   if (!AssociatedFunction)
5881     return false;
5882 
5883   // TODO: use the function scope once we have call site AAReturnedValues.
5884   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5885   const auto &LivenessAA =
5886       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5887   bool AnyDead = false;
5888 
5889   auto &OpcodeInstMap =
5890       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
5891   if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
5892                                    Opcodes))
5893     return false;
5894 
5895   // If we actually used liveness information so we have to record a dependence.
5896   if (AnyDead)
5897     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5898 
5899   return true;
5900 }
5901 
5902 bool Attributor::checkForAllReadWriteInstructions(
5903     const llvm::function_ref<bool(Instruction &)> &Pred,
5904     AbstractAttribute &QueryingAA) {
5905 
5906   const Function *AssociatedFunction =
5907       QueryingAA.getIRPosition().getAssociatedFunction();
5908   if (!AssociatedFunction)
5909     return false;
5910 
5911   // TODO: use the function scope once we have call site AAReturnedValues.
5912   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5913   const auto &LivenessAA =
5914       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5915   bool AnyDead = false;
5916 
5917   for (Instruction *I :
5918        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
5919     // Skip dead instructions.
5920     if (LivenessAA.isAssumedDead(I)) {
5921       AnyDead = true;
5922       continue;
5923     }
5924 
5925     if (!Pred(*I))
5926       return false;
5927   }
5928 
5929   // If we actually used liveness information so we have to record a dependence.
5930   if (AnyDead)
5931     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5932 
5933   return true;
5934 }
5935 
5936 ChangeStatus Attributor::run(Module &M) {
5937   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
5938                     << AllAbstractAttributes.size()
5939                     << " abstract attributes.\n");
5940 
5941   // Now that all abstract attributes are collected and initialized we start
5942   // the abstract analysis.
5943 
5944   unsigned IterationCounter = 1;
5945 
5946   SmallVector<AbstractAttribute *, 64> ChangedAAs;
5947   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
5948   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
5949 
5950   bool RecomputeDependences = false;
5951 
5952   do {
5953     // Remember the size to determine new attributes.
5954     size_t NumAAs = AllAbstractAttributes.size();
5955     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
5956                       << ", Worklist size: " << Worklist.size() << "\n");
5957 
5958     // For invalid AAs we can fix dependent AAs that have a required dependence,
5959     // thereby folding long dependence chains in a single step without the need
5960     // to run updates.
5961     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
5962       AbstractAttribute *InvalidAA = InvalidAAs[u];
5963       auto &QuerriedAAs = QueryMap[InvalidAA];
5964       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
5965                         << QuerriedAAs.RequiredAAs.size() << "/"
5966                         << QuerriedAAs.OptionalAAs.size()
5967                         << " required/optional dependences\n");
5968       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
5969         AbstractState &DOIAAState = DepOnInvalidAA->getState();
5970         DOIAAState.indicatePessimisticFixpoint();
5971         ++NumAttributesFixedDueToRequiredDependences;
5972         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
5973         if (!DOIAAState.isValidState())
5974           InvalidAAs.insert(DepOnInvalidAA);
5975       }
5976       if (!RecomputeDependences)
5977         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5978                         QuerriedAAs.OptionalAAs.end());
5979     }
5980 
5981     // If dependences (=QueryMap) are recomputed we have to look at all abstract
5982     // attributes again, regardless of what changed in the last iteration.
5983     if (RecomputeDependences) {
5984       LLVM_DEBUG(
5985           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
5986       QueryMap.clear();
5987       ChangedAAs.clear();
5988       Worklist.insert(AllAbstractAttributes.begin(),
5989                       AllAbstractAttributes.end());
5990     }
5991 
5992     // Add all abstract attributes that are potentially dependent on one that
5993     // changed to the work list.
5994     for (AbstractAttribute *ChangedAA : ChangedAAs) {
5995       auto &QuerriedAAs = QueryMap[ChangedAA];
5996       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5997                       QuerriedAAs.OptionalAAs.end());
5998       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
5999                       QuerriedAAs.RequiredAAs.end());
6000     }
6001 
6002     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
6003                       << ", Worklist+Dependent size: " << Worklist.size()
6004                       << "\n");
6005 
6006     // Reset the changed and invalid set.
6007     ChangedAAs.clear();
6008     InvalidAAs.clear();
6009 
6010     // Update all abstract attribute in the work list and record the ones that
6011     // changed.
6012     for (AbstractAttribute *AA : Worklist)
6013       if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
6014         QueriedNonFixAA = false;
6015         if (AA->update(*this) == ChangeStatus::CHANGED) {
6016           ChangedAAs.push_back(AA);
6017           if (!AA->getState().isValidState())
6018             InvalidAAs.insert(AA);
6019         } else if (!QueriedNonFixAA) {
6020           // If the attribute did not query any non-fix information, the state
6021           // will not change and we can indicate that right away.
6022           AA->getState().indicateOptimisticFixpoint();
6023         }
6024       }
6025 
6026     // Check if we recompute the dependences in the next iteration.
6027     RecomputeDependences = (DepRecomputeInterval > 0 &&
6028                             IterationCounter % DepRecomputeInterval == 0);
6029 
6030     // Add attributes to the changed set if they have been created in the last
6031     // iteration.
6032     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
6033                       AllAbstractAttributes.end());
6034 
6035     // Reset the work list and repopulate with the changed abstract attributes.
6036     // Note that dependent ones are added above.
6037     Worklist.clear();
6038     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
6039 
6040   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
6041                                  VerifyMaxFixpointIterations));
6042 
6043   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
6044                     << IterationCounter << "/" << MaxFixpointIterations
6045                     << " iterations\n");
6046 
6047   size_t NumFinalAAs = AllAbstractAttributes.size();
6048 
6049   // Reset abstract arguments not settled in a sound fixpoint by now. This
6050   // happens when we stopped the fixpoint iteration early. Note that only the
6051   // ones marked as "changed" *and* the ones transitively depending on them
6052   // need to be reverted to a pessimistic state. Others might not be in a
6053   // fixpoint state but we can use the optimistic results for them anyway.
6054   SmallPtrSet<AbstractAttribute *, 32> Visited;
6055   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
6056     AbstractAttribute *ChangedAA = ChangedAAs[u];
6057     if (!Visited.insert(ChangedAA).second)
6058       continue;
6059 
6060     AbstractState &State = ChangedAA->getState();
6061     if (!State.isAtFixpoint()) {
6062       State.indicatePessimisticFixpoint();
6063 
6064       NumAttributesTimedOut++;
6065     }
6066 
6067     auto &QuerriedAAs = QueryMap[ChangedAA];
6068     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
6069                       QuerriedAAs.OptionalAAs.end());
6070     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
6071                       QuerriedAAs.RequiredAAs.end());
6072   }
6073 
6074   LLVM_DEBUG({
6075     if (!Visited.empty())
6076       dbgs() << "\n[Attributor] Finalized " << Visited.size()
6077              << " abstract attributes.\n";
6078   });
6079 
6080   unsigned NumManifested = 0;
6081   unsigned NumAtFixpoint = 0;
6082   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
6083   for (AbstractAttribute *AA : AllAbstractAttributes) {
6084     AbstractState &State = AA->getState();
6085 
6086     // If there is not already a fixpoint reached, we can now take the
6087     // optimistic state. This is correct because we enforced a pessimistic one
6088     // on abstract attributes that were transitively dependent on a changed one
6089     // already above.
6090     if (!State.isAtFixpoint())
6091       State.indicateOptimisticFixpoint();
6092 
6093     // If the state is invalid, we do not try to manifest it.
6094     if (!State.isValidState())
6095       continue;
6096 
6097     // Skip dead code.
6098     if (isAssumedDead(*AA, nullptr))
6099       continue;
6100     // Manifest the state and record if we changed the IR.
6101     ChangeStatus LocalChange = AA->manifest(*this);
6102     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
6103       AA->trackStatistics();
6104 
6105     ManifestChange = ManifestChange | LocalChange;
6106 
6107     NumAtFixpoint++;
6108     NumManifested += (LocalChange == ChangeStatus::CHANGED);
6109   }
6110 
6111   (void)NumManifested;
6112   (void)NumAtFixpoint;
6113   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
6114                     << " arguments while " << NumAtFixpoint
6115                     << " were in a valid fixpoint state\n");
6116 
6117   NumAttributesManifested += NumManifested;
6118   NumAttributesValidFixpoint += NumAtFixpoint;
6119 
6120   (void)NumFinalAAs;
6121   assert(
6122       NumFinalAAs == AllAbstractAttributes.size() &&
6123       "Expected the final number of abstract attributes to remain unchanged!");
6124 
6125   // Delete stuff at the end to avoid invalid references and a nice order.
6126   {
6127     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
6128                       << ToBeDeletedFunctions.size() << " functions and "
6129                       << ToBeDeletedBlocks.size() << " blocks and "
6130                       << ToBeDeletedInsts.size() << " instructions and "
6131                       << ToBeChangedUses.size() << " uses\n");
6132 
6133     SmallVector<Instruction *, 32> DeadInsts;
6134     SmallVector<Instruction *, 32> TerminatorsToFold;
6135 
6136     for (auto &It : ToBeChangedUses) {
6137       Use *U = It.first;
6138       Value *NewV = It.second;
6139       Value *OldV = U->get();
6140       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
6141                         << " instead of " << *OldV << "\n");
6142       U->set(NewV);
6143       if (Instruction *I = dyn_cast<Instruction>(OldV))
6144         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
6145             isInstructionTriviallyDead(I)) {
6146           DeadInsts.push_back(I);
6147         }
6148       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
6149         Instruction *UserI = cast<Instruction>(U->getUser());
6150         if (isa<UndefValue>(NewV)) {
6151           ToBeChangedToUnreachableInsts.insert(UserI);
6152         } else {
6153           TerminatorsToFold.push_back(UserI);
6154         }
6155       }
6156     }
6157     for (Instruction *I : ToBeChangedToUnreachableInsts)
6158       changeToUnreachable(I, /* UseLLVMTrap */ false);
6159     for (Instruction *I : TerminatorsToFold)
6160       ConstantFoldTerminator(I->getParent());
6161 
6162     for (Instruction *I : ToBeDeletedInsts) {
6163       I->replaceAllUsesWith(UndefValue::get(I->getType()));
6164       if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
6165         DeadInsts.push_back(I);
6166       else
6167         I->eraseFromParent();
6168     }
6169 
6170     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
6171 
6172     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
6173       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
6174       ToBeDeletedBBs.reserve(NumDeadBlocks);
6175       ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
6176       // Actually we do not delete the blocks but squash them into a single
6177       // unreachable but untangling branches that jump here is something we need
6178       // to do in a more generic way.
6179       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
6180       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
6181       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
6182     }
6183 
6184     STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
6185     for (Function *Fn : ToBeDeletedFunctions) {
6186       Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
6187       Fn->eraseFromParent();
6188       STATS_TRACK(AAIsDead, Function);
6189     }
6190 
6191     // Identify dead internal functions and delete them. This happens outside
6192     // the other fixpoint analysis as we might treat potentially dead functions
6193     // as live to lower the number of iterations. If they happen to be dead, the
6194     // below fixpoint loop will identify and eliminate them.
6195     SmallVector<Function *, 8> InternalFns;
6196     for (Function &F : M)
6197       if (F.hasLocalLinkage())
6198         InternalFns.push_back(&F);
6199 
6200     bool FoundDeadFn = true;
6201     while (FoundDeadFn) {
6202       FoundDeadFn = false;
6203       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
6204         Function *F = InternalFns[u];
6205         if (!F)
6206           continue;
6207 
6208         if (!checkForAllCallSites([](AbstractCallSite ACS) { return false; },
6209                                   *F, true, nullptr))
6210           continue;
6211 
6212         STATS_TRACK(AAIsDead, Function);
6213         ToBeDeletedFunctions.insert(F);
6214         F->deleteBody();
6215         F->replaceAllUsesWith(UndefValue::get(F->getType()));
6216         F->eraseFromParent();
6217         InternalFns[u] = nullptr;
6218         FoundDeadFn = true;
6219       }
6220     }
6221   }
6222 
6223   // Rewrite the functions as requested during manifest.
6224   ManifestChange = ManifestChange | rewriteFunctionSignatures();
6225 
6226   if (VerifyMaxFixpointIterations &&
6227       IterationCounter != MaxFixpointIterations) {
6228     errs() << "\n[Attributor] Fixpoint iteration done after: "
6229            << IterationCounter << "/" << MaxFixpointIterations
6230            << " iterations\n";
6231     llvm_unreachable("The fixpoint was not reached with exactly the number of "
6232                      "specified iterations!");
6233   }
6234 
6235   return ManifestChange;
6236 }
6237 
6238 bool Attributor::registerFunctionSignatureRewrite(
6239     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
6240     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
6241     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
6242 
6243   auto CallSiteCanBeChanged = [](AbstractCallSite ACS) {
6244     // Forbid must-tail calls for now.
6245     return !ACS.isCallbackCall() && !ACS.getCallSite().isMustTailCall();
6246   };
6247 
6248   Function *Fn = Arg.getParent();
6249   // Avoid var-arg functions for now.
6250   if (Fn->isVarArg()) {
6251     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
6252     return false;
6253   }
6254 
6255   // Avoid functions with complicated argument passing semantics.
6256   AttributeList FnAttributeList = Fn->getAttributes();
6257   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
6258       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
6259       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca)) {
6260     LLVM_DEBUG(
6261         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
6262     return false;
6263   }
6264 
6265   // Avoid callbacks for now.
6266   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr)) {
6267     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
6268     return false;
6269   }
6270 
6271   auto InstPred = [](Instruction &I) {
6272     if (auto *CI = dyn_cast<CallInst>(&I))
6273       return !CI->isMustTailCall();
6274     return true;
6275   };
6276 
6277   // Forbid must-tail calls for now.
6278   // TODO:
6279   bool AnyDead;
6280   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
6281   if (!checkForAllInstructionsImpl(OpcodeInstMap, InstPred, nullptr, AnyDead,
6282                                    {Instruction::Call})) {
6283     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
6284     return false;
6285   }
6286 
6287   SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = ArgumentReplacementMap[Fn];
6288   if (ARIs.size() == 0)
6289     ARIs.resize(Fn->arg_size());
6290 
6291   // If we have a replacement already with less than or equal new arguments,
6292   // ignore this request.
6293   ArgumentReplacementInfo *&ARI = ARIs[Arg.getArgNo()];
6294   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
6295     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
6296     return false;
6297   }
6298 
6299   // If we have a replacement already but we like the new one better, delete
6300   // the old.
6301   if (ARI)
6302     delete ARI;
6303 
6304   // Remember the replacement.
6305   ARI = new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
6306                                     std::move(CalleeRepairCB),
6307                                     std::move(ACSRepairCB));
6308 
6309   return true;
6310 }
6311 
6312 ChangeStatus Attributor::rewriteFunctionSignatures() {
6313   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6314 
6315   for (auto &It : ArgumentReplacementMap) {
6316     Function *OldFn = It.getFirst();
6317 
6318     // Deleted functions do not require rewrites.
6319     if (ToBeDeletedFunctions.count(OldFn))
6320       continue;
6321 
6322     const SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = It.getSecond();
6323     assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
6324 
6325     SmallVector<Type *, 16> NewArgumentTypes;
6326     SmallVector<AttributeSet, 16> NewArgumentAttributes;
6327 
6328     // Collect replacement argument types and copy over existing attributes.
6329     AttributeList OldFnAttributeList = OldFn->getAttributes();
6330     for (Argument &Arg : OldFn->args()) {
6331       if (ArgumentReplacementInfo *ARI = ARIs[Arg.getArgNo()]) {
6332         NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
6333                                 ARI->ReplacementTypes.end());
6334         NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
6335                                      AttributeSet());
6336       } else {
6337         NewArgumentTypes.push_back(Arg.getType());
6338         NewArgumentAttributes.push_back(
6339             OldFnAttributeList.getParamAttributes(Arg.getArgNo()));
6340       }
6341     }
6342 
6343     FunctionType *OldFnTy = OldFn->getFunctionType();
6344     Type *RetTy = OldFnTy->getReturnType();
6345 
6346     // Construct the new function type using the new arguments types.
6347     FunctionType *NewFnTy =
6348         FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
6349 
6350     LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
6351                       << "' from " << *OldFn->getFunctionType() << " to "
6352                       << *NewFnTy << "\n");
6353 
6354     // Create the new function body and insert it into the module.
6355     Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
6356                                        OldFn->getAddressSpace(), "");
6357     OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
6358     NewFn->takeName(OldFn);
6359     NewFn->copyAttributesFrom(OldFn);
6360 
6361     // Patch the pointer to LLVM function in debug info descriptor.
6362     NewFn->setSubprogram(OldFn->getSubprogram());
6363     OldFn->setSubprogram(nullptr);
6364 
6365     // Recompute the parameter attributes list based on the new arguments for
6366     // the function.
6367     LLVMContext &Ctx = OldFn->getContext();
6368     NewFn->setAttributes(AttributeList::get(
6369         Ctx, OldFnAttributeList.getFnAttributes(),
6370         OldFnAttributeList.getRetAttributes(), NewArgumentAttributes));
6371 
6372     // Since we have now created the new function, splice the body of the old
6373     // function right into the new function, leaving the old rotting hulk of the
6374     // function empty.
6375     NewFn->getBasicBlockList().splice(NewFn->begin(),
6376                                       OldFn->getBasicBlockList());
6377 
6378     // Set of all "call-like" instructions that invoke the old function.
6379     SmallPtrSet<Instruction *, 8> OldCallSites;
6380 
6381     // Callback to create a new "call-like" instruction for a given one.
6382     auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
6383       CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
6384       const AttributeList &OldCallAttributeList = OldCB->getAttributes();
6385 
6386       // Collect the new argument operands for the replacement call site.
6387       SmallVector<Value *, 16> NewArgOperands;
6388       SmallVector<AttributeSet, 16> NewArgOperandAttributes;
6389       for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
6390         unsigned NewFirstArgNum = NewArgOperands.size();
6391         (void)NewFirstArgNum; // only used inside assert.
6392         if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
6393           if (ARI->ACSRepairCB)
6394             ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
6395           assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
6396                      NewArgOperands.size() &&
6397                  "ACS repair callback did not provide as many operand as new "
6398                  "types were registered!");
6399           // TODO: Exose the attribute set to the ACS repair callback
6400           NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
6401                                          AttributeSet());
6402         } else {
6403           NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
6404           NewArgOperandAttributes.push_back(
6405               OldCallAttributeList.getParamAttributes(OldArgNum));
6406         }
6407       }
6408 
6409       assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
6410              "Mismatch # argument operands vs. # argument operand attributes!");
6411       assert(NewArgOperands.size() == NewFn->arg_size() &&
6412              "Mismatch # argument operands vs. # function arguments!");
6413 
6414       SmallVector<OperandBundleDef, 4> OperandBundleDefs;
6415       OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
6416 
6417       // Create a new call or invoke instruction to replace the old one.
6418       CallBase *NewCB;
6419       if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
6420         NewCB =
6421             InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
6422                                NewArgOperands, OperandBundleDefs, "", OldCB);
6423       } else {
6424         auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
6425                                        "", OldCB);
6426         NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
6427         NewCB = NewCI;
6428       }
6429 
6430       // Copy over various properties and the new attributes.
6431       OldCB->replaceAllUsesWith(NewCB);
6432       uint64_t W;
6433       if (OldCB->extractProfTotalWeight(W))
6434         NewCB->setProfWeight(W);
6435       NewCB->setCallingConv(OldCB->getCallingConv());
6436       NewCB->setDebugLoc(OldCB->getDebugLoc());
6437       NewCB->takeName(OldCB);
6438       NewCB->setAttributes(AttributeList::get(
6439           Ctx, OldCallAttributeList.getFnAttributes(),
6440           OldCallAttributeList.getRetAttributes(), NewArgOperandAttributes));
6441 
6442       bool Inserted = OldCallSites.insert(OldCB).second;
6443       assert(Inserted && "Call site was old twice!");
6444       (void)Inserted;
6445 
6446       return true;
6447     };
6448 
6449     // Use the CallSiteReplacementCreator to create replacement call sites.
6450     bool Success =
6451         checkForAllCallSites(CallSiteReplacementCreator, *OldFn, true, nullptr);
6452     (void)Success;
6453     assert(Success && "Assumed call site replacement to succeed!");
6454 
6455     // Rewire the arguments.
6456     auto OldFnArgIt = OldFn->arg_begin();
6457     auto NewFnArgIt = NewFn->arg_begin();
6458     for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
6459          ++OldArgNum, ++OldFnArgIt) {
6460       if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
6461         if (ARI->CalleeRepairCB)
6462           ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
6463         NewFnArgIt += ARI->ReplacementTypes.size();
6464       } else {
6465         NewFnArgIt->takeName(&*OldFnArgIt);
6466         OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
6467         ++NewFnArgIt;
6468       }
6469     }
6470 
6471     // Eliminate the instructions *after* we visited all of them.
6472     for (Instruction *OldCallSite : OldCallSites)
6473       OldCallSite->eraseFromParent();
6474 
6475     assert(OldFn->getNumUses() == 0 && "Unexpected leftover uses!");
6476     OldFn->eraseFromParent();
6477     Changed = ChangeStatus::CHANGED;
6478   }
6479 
6480   return Changed;
6481 }
6482 
6483 void Attributor::initializeInformationCache(Function &F) {
6484 
6485   // Walk all instructions to find interesting instructions that might be
6486   // queried by abstract attributes during their initialization or update.
6487   // This has to happen before we create attributes.
6488   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
6489   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
6490 
6491   for (Instruction &I : instructions(&F)) {
6492     bool IsInterestingOpcode = false;
6493 
6494     // To allow easy access to all instructions in a function with a given
6495     // opcode we store them in the InfoCache. As not all opcodes are interesting
6496     // to concrete attributes we only cache the ones that are as identified in
6497     // the following switch.
6498     // Note: There are no concrete attributes now so this is initially empty.
6499     switch (I.getOpcode()) {
6500     default:
6501       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
6502              "New call site/base instruction type needs to be known int the "
6503              "Attributor.");
6504       break;
6505     case Instruction::Load:
6506       // The alignment of a pointer is interesting for loads.
6507     case Instruction::Store:
6508       // The alignment of a pointer is interesting for stores.
6509     case Instruction::Call:
6510     case Instruction::CallBr:
6511     case Instruction::Invoke:
6512     case Instruction::CleanupRet:
6513     case Instruction::CatchSwitch:
6514     case Instruction::AtomicRMW:
6515     case Instruction::AtomicCmpXchg:
6516     case Instruction::Br:
6517     case Instruction::Resume:
6518     case Instruction::Ret:
6519       IsInterestingOpcode = true;
6520     }
6521     if (IsInterestingOpcode)
6522       InstOpcodeMap[I.getOpcode()].push_back(&I);
6523     if (I.mayReadOrWriteMemory())
6524       ReadOrWriteInsts.push_back(&I);
6525   }
6526 }
6527 
6528 void Attributor::recordDependence(const AbstractAttribute &FromAA,
6529                                   const AbstractAttribute &ToAA,
6530                                   DepClassTy DepClass) {
6531   if (FromAA.getState().isAtFixpoint())
6532     return;
6533 
6534   if (DepClass == DepClassTy::REQUIRED)
6535     QueryMap[&FromAA].RequiredAAs.insert(
6536         const_cast<AbstractAttribute *>(&ToAA));
6537   else
6538     QueryMap[&FromAA].OptionalAAs.insert(
6539         const_cast<AbstractAttribute *>(&ToAA));
6540   QueriedNonFixAA = true;
6541 }
6542 
6543 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
6544   if (!VisitedFunctions.insert(&F).second)
6545     return;
6546   if (F.isDeclaration())
6547     return;
6548 
6549   IRPosition FPos = IRPosition::function(F);
6550 
6551   // Check for dead BasicBlocks in every function.
6552   // We need dead instruction detection because we do not want to deal with
6553   // broken IR in which SSA rules do not apply.
6554   getOrCreateAAFor<AAIsDead>(FPos);
6555 
6556   // Every function might be "will-return".
6557   getOrCreateAAFor<AAWillReturn>(FPos);
6558 
6559   // Every function might contain instructions that cause "undefined behavior".
6560   getOrCreateAAFor<AAUndefinedBehavior>(FPos);
6561 
6562   // Every function can be nounwind.
6563   getOrCreateAAFor<AANoUnwind>(FPos);
6564 
6565   // Every function might be marked "nosync"
6566   getOrCreateAAFor<AANoSync>(FPos);
6567 
6568   // Every function might be "no-free".
6569   getOrCreateAAFor<AANoFree>(FPos);
6570 
6571   // Every function might be "no-return".
6572   getOrCreateAAFor<AANoReturn>(FPos);
6573 
6574   // Every function might be "no-recurse".
6575   getOrCreateAAFor<AANoRecurse>(FPos);
6576 
6577   // Every function might be "readnone/readonly/writeonly/...".
6578   getOrCreateAAFor<AAMemoryBehavior>(FPos);
6579 
6580   // Every function might be applicable for Heap-To-Stack conversion.
6581   if (EnableHeapToStack)
6582     getOrCreateAAFor<AAHeapToStack>(FPos);
6583 
6584   // Return attributes are only appropriate if the return type is non void.
6585   Type *ReturnType = F.getReturnType();
6586   if (!ReturnType->isVoidTy()) {
6587     // Argument attribute "returned" --- Create only one per function even
6588     // though it is an argument attribute.
6589     getOrCreateAAFor<AAReturnedValues>(FPos);
6590 
6591     IRPosition RetPos = IRPosition::returned(F);
6592 
6593     // Every returned value might be dead.
6594     getOrCreateAAFor<AAIsDead>(RetPos);
6595 
6596     // Every function might be simplified.
6597     getOrCreateAAFor<AAValueSimplify>(RetPos);
6598 
6599     if (ReturnType->isPointerTy()) {
6600 
6601       // Every function with pointer return type might be marked align.
6602       getOrCreateAAFor<AAAlign>(RetPos);
6603 
6604       // Every function with pointer return type might be marked nonnull.
6605       getOrCreateAAFor<AANonNull>(RetPos);
6606 
6607       // Every function with pointer return type might be marked noalias.
6608       getOrCreateAAFor<AANoAlias>(RetPos);
6609 
6610       // Every function with pointer return type might be marked
6611       // dereferenceable.
6612       getOrCreateAAFor<AADereferenceable>(RetPos);
6613     }
6614   }
6615 
6616   for (Argument &Arg : F.args()) {
6617     IRPosition ArgPos = IRPosition::argument(Arg);
6618 
6619     // Every argument might be simplified.
6620     getOrCreateAAFor<AAValueSimplify>(ArgPos);
6621 
6622     if (Arg.getType()->isPointerTy()) {
6623       // Every argument with pointer type might be marked nonnull.
6624       getOrCreateAAFor<AANonNull>(ArgPos);
6625 
6626       // Every argument with pointer type might be marked noalias.
6627       getOrCreateAAFor<AANoAlias>(ArgPos);
6628 
6629       // Every argument with pointer type might be marked dereferenceable.
6630       getOrCreateAAFor<AADereferenceable>(ArgPos);
6631 
6632       // Every argument with pointer type might be marked align.
6633       getOrCreateAAFor<AAAlign>(ArgPos);
6634 
6635       // Every argument with pointer type might be marked nocapture.
6636       getOrCreateAAFor<AANoCapture>(ArgPos);
6637 
6638       // Every argument with pointer type might be marked
6639       // "readnone/readonly/writeonly/..."
6640       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
6641 
6642       // Every argument with pointer type might be marked nofree.
6643       getOrCreateAAFor<AANoFree>(ArgPos);
6644     }
6645   }
6646 
6647   auto CallSitePred = [&](Instruction &I) -> bool {
6648     CallSite CS(&I);
6649     if (Function *Callee = CS.getCalledFunction()) {
6650       // Skip declerations except if annotations on their call sites were
6651       // explicitly requested.
6652       if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
6653           !Callee->hasMetadata(LLVMContext::MD_callback))
6654         return true;
6655 
6656       if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
6657 
6658         IRPosition CSRetPos = IRPosition::callsite_returned(CS);
6659 
6660         // Call site return values might be dead.
6661         getOrCreateAAFor<AAIsDead>(CSRetPos);
6662 
6663         // Call site return integer values might be limited by a constant range.
6664         if (Callee->getReturnType()->isIntegerTy()) {
6665           getOrCreateAAFor<AAValueConstantRange>(CSRetPos);
6666         }
6667       }
6668 
6669       for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {
6670 
6671         IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
6672 
6673         // Every call site argument might be dead.
6674         getOrCreateAAFor<AAIsDead>(CSArgPos);
6675 
6676         // Call site argument might be simplified.
6677         getOrCreateAAFor<AAValueSimplify>(CSArgPos);
6678 
6679         if (!CS.getArgument(i)->getType()->isPointerTy())
6680           continue;
6681 
6682         // Call site argument attribute "non-null".
6683         getOrCreateAAFor<AANonNull>(CSArgPos);
6684 
6685         // Call site argument attribute "no-alias".
6686         getOrCreateAAFor<AANoAlias>(CSArgPos);
6687 
6688         // Call site argument attribute "dereferenceable".
6689         getOrCreateAAFor<AADereferenceable>(CSArgPos);
6690 
6691         // Call site argument attribute "align".
6692         getOrCreateAAFor<AAAlign>(CSArgPos);
6693 
6694         // Call site argument attribute
6695         // "readnone/readonly/writeonly/..."
6696         getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);
6697 
6698         // Call site argument attribute "nofree".
6699         getOrCreateAAFor<AANoFree>(CSArgPos);
6700       }
6701     }
6702     return true;
6703   };
6704 
6705   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
6706   bool Success, AnyDead = false;
6707   Success = checkForAllInstructionsImpl(
6708       OpcodeInstMap, CallSitePred, nullptr, AnyDead,
6709       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
6710        (unsigned)Instruction::Call});
6711   (void)Success;
6712   assert(Success && !AnyDead && "Expected the check call to be successful!");
6713 
6714   auto LoadStorePred = [&](Instruction &I) -> bool {
6715     if (isa<LoadInst>(I))
6716       getOrCreateAAFor<AAAlign>(
6717           IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
6718     else
6719       getOrCreateAAFor<AAAlign>(
6720           IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
6721     return true;
6722   };
6723   Success = checkForAllInstructionsImpl(
6724       OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
6725       {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
6726   (void)Success;
6727   assert(Success && !AnyDead && "Expected the check call to be successful!");
6728 }
6729 
6730 /// Helpers to ease debugging through output streams and print calls.
6731 ///
6732 ///{
6733 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
6734   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
6735 }
6736 
6737 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
6738   switch (AP) {
6739   case IRPosition::IRP_INVALID:
6740     return OS << "inv";
6741   case IRPosition::IRP_FLOAT:
6742     return OS << "flt";
6743   case IRPosition::IRP_RETURNED:
6744     return OS << "fn_ret";
6745   case IRPosition::IRP_CALL_SITE_RETURNED:
6746     return OS << "cs_ret";
6747   case IRPosition::IRP_FUNCTION:
6748     return OS << "fn";
6749   case IRPosition::IRP_CALL_SITE:
6750     return OS << "cs";
6751   case IRPosition::IRP_ARGUMENT:
6752     return OS << "arg";
6753   case IRPosition::IRP_CALL_SITE_ARGUMENT:
6754     return OS << "cs_arg";
6755   }
6756   llvm_unreachable("Unknown attribute position!");
6757 }
6758 
6759 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
6760   const Value &AV = Pos.getAssociatedValue();
6761   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
6762             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
6763 }
6764 
6765 template <typename base_ty, base_ty BestState, base_ty WorstState>
6766 raw_ostream &
6767 llvm::operator<<(raw_ostream &OS,
6768                  const IntegerStateBase<base_ty, BestState, WorstState> &S) {
6769   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
6770             << static_cast<const AbstractState &>(S);
6771 }
6772 
6773 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
6774   OS << "range-state(" << S.getBitWidth() << ")<";
6775   S.getKnown().print(OS);
6776   OS << " / ";
6777   S.getAssumed().print(OS);
6778   OS << ">";
6779 
6780   return OS << static_cast<const AbstractState &>(S);
6781 }
6782 
6783 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
6784   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
6785 }
6786 
6787 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
6788   AA.print(OS);
6789   return OS;
6790 }
6791 
6792 void AbstractAttribute::print(raw_ostream &OS) const {
6793   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
6794      << "]";
6795 }
6796 ///}
6797 
6798 /// ----------------------------------------------------------------------------
6799 ///                       Pass (Manager) Boilerplate
6800 /// ----------------------------------------------------------------------------
6801 
6802 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
6803   if (DisableAttributor)
6804     return false;
6805 
6806   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
6807                     << " functions.\n");
6808 
6809   // Create an Attributor and initially empty information cache that is filled
6810   // while we identify default attribute opportunities.
6811   InformationCache InfoCache(M, AG);
6812   Attributor A(InfoCache, DepRecInterval);
6813 
6814   for (Function &F : M)
6815     A.initializeInformationCache(F);
6816 
6817   for (Function &F : M) {
6818     if (F.hasExactDefinition())
6819       NumFnWithExactDefinition++;
6820     else
6821       NumFnWithoutExactDefinition++;
6822 
6823     // We look at internal functions only on-demand but if any use is not a
6824     // direct call, we have to do it eagerly.
6825     if (F.hasLocalLinkage()) {
6826       if (llvm::all_of(F.uses(), [](const Use &U) {
6827             return ImmutableCallSite(U.getUser()) &&
6828                    ImmutableCallSite(U.getUser()).isCallee(&U);
6829           }))
6830         continue;
6831     }
6832 
6833     // Populate the Attributor with abstract attribute opportunities in the
6834     // function and the information cache with IR information.
6835     A.identifyDefaultAbstractAttributes(F);
6836   }
6837 
6838   return A.run(M) == ChangeStatus::CHANGED;
6839 }
6840 
6841 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
6842   AnalysisGetter AG(AM);
6843   if (runAttributorOnModule(M, AG)) {
6844     // FIXME: Think about passes we will preserve and add them here.
6845     return PreservedAnalyses::none();
6846   }
6847   return PreservedAnalyses::all();
6848 }
6849 
6850 namespace {
6851 
6852 struct AttributorLegacyPass : public ModulePass {
6853   static char ID;
6854 
6855   AttributorLegacyPass() : ModulePass(ID) {
6856     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
6857   }
6858 
6859   bool runOnModule(Module &M) override {
6860     if (skipModule(M))
6861       return false;
6862 
6863     AnalysisGetter AG;
6864     return runAttributorOnModule(M, AG);
6865   }
6866 
6867   void getAnalysisUsage(AnalysisUsage &AU) const override {
6868     // FIXME: Think about passes we will preserve and add them here.
6869     AU.addRequired<TargetLibraryInfoWrapperPass>();
6870   }
6871 };
6872 
6873 } // end anonymous namespace
6874 
6875 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
6876 
6877 char AttributorLegacyPass::ID = 0;
6878 
6879 const char AAReturnedValues::ID = 0;
6880 const char AANoUnwind::ID = 0;
6881 const char AANoSync::ID = 0;
6882 const char AANoFree::ID = 0;
6883 const char AANonNull::ID = 0;
6884 const char AANoRecurse::ID = 0;
6885 const char AAWillReturn::ID = 0;
6886 const char AAUndefinedBehavior::ID = 0;
6887 const char AANoAlias::ID = 0;
6888 const char AAReachability::ID = 0;
6889 const char AANoReturn::ID = 0;
6890 const char AAIsDead::ID = 0;
6891 const char AADereferenceable::ID = 0;
6892 const char AAAlign::ID = 0;
6893 const char AANoCapture::ID = 0;
6894 const char AAValueSimplify::ID = 0;
6895 const char AAHeapToStack::ID = 0;
6896 const char AAMemoryBehavior::ID = 0;
6897 const char AAValueConstantRange::ID = 0;
6898 
6899 // Macro magic to create the static generator function for attributes that
6900 // follow the naming scheme.
6901 
6902 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
6903   case IRPosition::PK:                                                         \
6904     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
6905 
6906 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
6907   case IRPosition::PK:                                                         \
6908     AA = new CLASS##SUFFIX(IRP);                                               \
6909     break;
6910 
6911 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
6912   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6913     CLASS *AA = nullptr;                                                       \
6914     switch (IRP.getPositionKind()) {                                           \
6915       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6916       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6917       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6918       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6919       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6920       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6921       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6922       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6923     }                                                                          \
6924     return *AA;                                                                \
6925   }
6926 
6927 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
6928   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6929     CLASS *AA = nullptr;                                                       \
6930     switch (IRP.getPositionKind()) {                                           \
6931       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6932       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
6933       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
6934       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6935       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6936       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
6937       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6938       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6939     }                                                                          \
6940     return *AA;                                                                \
6941   }
6942 
6943 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
6944   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6945     CLASS *AA = nullptr;                                                       \
6946     switch (IRP.getPositionKind()) {                                           \
6947       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6948       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6949       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6950       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6951       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6952       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
6953       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6954       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6955     }                                                                          \
6956     return *AA;                                                                \
6957   }
6958 
6959 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
6960   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6961     CLASS *AA = nullptr;                                                       \
6962     switch (IRP.getPositionKind()) {                                           \
6963       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6964       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6965       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6966       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6967       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6968       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6969       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
6970       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6971     }                                                                          \
6972     return *AA;                                                                \
6973   }
6974 
6975 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
6976   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6977     CLASS *AA = nullptr;                                                       \
6978     switch (IRP.getPositionKind()) {                                           \
6979       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6980       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6981       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6982       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6983       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6984       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6985       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6986       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6987     }                                                                          \
6988     return *AA;                                                                \
6989   }
6990 
6991 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
6992 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
6993 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
6994 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
6995 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
6996 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
6997 
6998 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
6999 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7000 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7001 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7002 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7003 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7004 
7005 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7006 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7007 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7008 
7009 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7010 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7011 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7012 
7013 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7014 
7015 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7016 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7017 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7018 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7019 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7020 #undef SWITCH_PK_CREATE
7021 #undef SWITCH_PK_INV
7022 
7023 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
7024                       "Deduce and propagate attributes", false, false)
7025 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
7026 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
7027                     "Deduce and propagate attributes", false, false)
7028