1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This file implements an inter-procedural pass that deduces and/or propagates
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CFG.h"
32 #include "llvm/IR/InstIterator.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/Verifier.h"
35 #include "llvm/InitializePasses.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
40 #include "llvm/Transforms/Utils/Local.h"
41 
42 #include <cassert>
43 
44 using namespace llvm;
45 
46 #define DEBUG_TYPE "attributor"
47 
// Pass-wide statistics (printed with -stats) about how many functions could be
// analyzed and how the abstract attribute fixpoint iteration concluded.
STATISTIC(NumFnWithExactDefinition,
          "Number of function with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of function without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
STATISTIC(NumAttributesFixedDueToRequiredDependences,
          "Number of abstract attributes fixed due to required dependences");
60 
61 // Some helper macros to deal with statistics tracking.
62 //
63 // Usage:
64 // For simple IR attribute tracking overload trackStatistics in the abstract
65 // attribute and choose the right STATS_DECLTRACK_********* macro,
66 // e.g.,:
67 //  void trackStatistics() const override {
68 //    STATS_DECLTRACK_ARG_ATTR(returned)
69 //  }
70 // If there is a single "increment" side one can use the macro
71 // STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
73 //
74 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
75   ("Number of " #TYPE " marked '" #NAME "'")
76 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
77 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
78 #define STATS_DECL(NAME, TYPE, MSG)                                            \
79   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
80 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
81 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
82   {                                                                            \
83     STATS_DECL(NAME, TYPE, MSG)                                                \
84     STATS_TRACK(NAME, TYPE)                                                    \
85   }
86 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
87   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
88 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
89   STATS_DECLTRACK(NAME, CSArguments,                                           \
90                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
91 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
92   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
93 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
94   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
95 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
96   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
97                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
98 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
99   STATS_DECLTRACK(NAME, CSReturn,                                              \
100                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
101 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
102   STATS_DECLTRACK(NAME, Floating,                                              \
103                   ("Number of floating values known to be '" #NAME "'"))
104 
105 // Specialization of the operator<< for abstract attributes subclasses. This
106 // disambiguates situations where multiple operators are applicable.
107 namespace llvm {
108 #define PIPE_OPERATOR(CLASS)                                                   \
109   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
110     return OS << static_cast<const AbstractAttribute &>(AA);                   \
111   }
112 
113 PIPE_OPERATOR(AAIsDead)
114 PIPE_OPERATOR(AANoUnwind)
115 PIPE_OPERATOR(AANoSync)
116 PIPE_OPERATOR(AANoRecurse)
117 PIPE_OPERATOR(AAWillReturn)
118 PIPE_OPERATOR(AANoReturn)
119 PIPE_OPERATOR(AAReturnedValues)
120 PIPE_OPERATOR(AANonNull)
121 PIPE_OPERATOR(AANoAlias)
122 PIPE_OPERATOR(AADereferenceable)
123 PIPE_OPERATOR(AAAlign)
124 PIPE_OPERATOR(AANoCapture)
125 PIPE_OPERATOR(AAValueSimplify)
126 PIPE_OPERATOR(AANoFree)
127 PIPE_OPERATOR(AAHeapToStack)
128 PIPE_OPERATOR(AAReachability)
129 PIPE_OPERATOR(AAMemoryBehavior)
130 
131 #undef PIPE_OPERATOR
132 } // namespace llvm
133 
134 // TODO: Determine a good default value.
135 //
136 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
137 // (when run with the first 5 abstract attributes). The results also indicate
138 // that we never reach 32 iterations but always find a fixpoint sooner.
139 //
// This will become more involved once we perform two interleaved fixpoint
141 // iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

// NOTE: Default is true, i.e., the pass is disabled unless explicitly enabled.
static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));

// Knobs for the heap-to-stack conversion (presumably consumed by
// AAHeapToStack — confirm against its implementation).
static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);
175 
176 /// Logic operators for the change status enum class.
177 ///
178 ///{
179 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
180   return l == ChangeStatus::CHANGED ? l : r;
181 }
182 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
183   return l == ChangeStatus::UNCHANGED ? l : r;
184 }
185 ///}
186 
187 Argument *IRPosition::getAssociatedArgument() const {
188   if (getPositionKind() == IRP_ARGUMENT)
189     return cast<Argument>(&getAnchorValue());
190 
191   // Not an Argument and no argument number means this is not a call site
192   // argument, thus we cannot find a callback argument to return.
193   int ArgNo = getArgNo();
194   if (ArgNo < 0)
195     return nullptr;
196 
197   // Use abstract call sites to make the connection between the call site
198   // values and the ones in callbacks. If a callback was found that makes use
199   // of the underlying call site operand, we want the corresponding callback
200   // callee argument and not the direct callee argument.
201   Optional<Argument *> CBCandidateArg;
202   SmallVector<const Use *, 4> CBUses;
203   ImmutableCallSite ICS(&getAnchorValue());
204   AbstractCallSite::getCallbackUses(ICS, CBUses);
205   for (const Use *U : CBUses) {
206     AbstractCallSite ACS(U);
207     assert(ACS && ACS.isCallbackCall());
208     if (!ACS.getCalledFunction())
209       continue;
210 
211     for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
212 
213       // Test if the underlying call site operand is argument number u of the
214       // callback callee.
215       if (ACS.getCallArgOperandNo(u) != ArgNo)
216         continue;
217 
218       assert(ACS.getCalledFunction()->arg_size() > u &&
219              "ACS mapped into var-args arguments!");
220       if (CBCandidateArg.hasValue()) {
221         CBCandidateArg = nullptr;
222         break;
223       }
224       CBCandidateArg = ACS.getCalledFunction()->getArg(u);
225     }
226   }
227 
228   // If we found a unique callback candidate argument, return it.
229   if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
230     return CBCandidateArg.getValue();
231 
232   // If no callbacks were found, or none used the underlying call site operand
233   // exclusively, use the direct callee argument if available.
234   const Function *Callee = ICS.getCalledFunction();
235   if (Callee && Callee->arg_size() > unsigned(ArgNo))
236     return Callee->getArg(ArgNo);
237 
238   return nullptr;
239 }
240 
241 /// For calls (and invokes) we will only replace instruction uses to not disturb
242 /// the old style call graph.
243 /// TODO: Remove this once we get rid of the old PM.
244 static void replaceAllInstructionUsesWith(Value &Old, Value &New) {
245   if (!isa<CallBase>(Old))
246     return Old.replaceAllUsesWith(&New);
247   SmallVector<Use *, 8> Uses;
248   for (Use &U : Old.uses())
249     if (isa<Instruction>(U.getUser()))
250       Uses.push_back(&U);
251   for (Use *U : Uses)
252     U->set(&New);
253 }
254 
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
///
/// Returns false if the traversal was aborted, either because \p MaxValues was
/// exceeded or because \p VisitValueCB returned false for some leaf value.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  // Liveness is only queried when the position is anchored in a function; it
  // is used below to skip phi operands coming from dead blocks.
  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      // A call whose callee forwards an argument via the "returned" attribute
      // can be looked through to that argument operand.
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          // Remember that liveness information pruned the traversal so the
          // dependence on LivenessAA is recorded below.
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence so
  // this traversal is redone when the liveness state changes.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
347 
348 /// Return true if \p New is equal or worse than \p Old.
349 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
350   if (!Old.isIntAttribute())
351     return true;
352 
353   return Old.getValueAsInt() >= New.getValueAsInt();
354 }
355 
356 /// Return true if the information provided by \p Attr was added to the
357 /// attribute list \p Attrs. This is only the case if it was not already present
358 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
359 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
360                              AttributeList &Attrs, int AttrIdx) {
361 
362   if (Attr.isEnumAttribute()) {
363     Attribute::AttrKind Kind = Attr.getKindAsEnum();
364     if (Attrs.hasAttribute(AttrIdx, Kind))
365       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
366         return false;
367     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
368     return true;
369   }
370   if (Attr.isStringAttribute()) {
371     StringRef Kind = Attr.getKindAsString();
372     if (Attrs.hasAttribute(AttrIdx, Kind))
373       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
374         return false;
375     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
376     return true;
377   }
378   if (Attr.isIntAttribute()) {
379     Attribute::AttrKind Kind = Attr.getKindAsEnum();
380     if (Attrs.hasAttribute(AttrIdx, Kind))
381       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
382         return false;
383     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
384     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
385     return true;
386   }
387 
388   llvm_unreachable("Expected enum or string attribute!");
389 }
390 
391 static const Value *
392 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
393                                      const DataLayout &DL,
394                                      bool AllowNonInbounds = false) {
395   const Value *Ptr =
396       Attributor::getPointerOperand(I, /* AllowVolatile */ false);
397   if (!Ptr)
398     return nullptr;
399 
400   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
401                                           AllowNonInbounds);
402 }
403 
404 ChangeStatus AbstractAttribute::update(Attributor &A) {
405   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
406   if (getState().isAtFixpoint())
407     return HasChanged;
408 
409   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
410 
411   HasChanged = updateImpl(A);
412 
413   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
414                     << "\n");
415 
416   return HasChanged;
417 }
418 
419 ChangeStatus
420 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
421                                    const ArrayRef<Attribute> &DeducedAttrs) {
422   Function *ScopeFn = IRP.getAssociatedFunction();
423   IRPosition::Kind PK = IRP.getPositionKind();
424 
425   // In the following some generic code that will manifest attributes in
426   // DeducedAttrs if they improve the current IR. Due to the different
427   // annotation positions we use the underlying AttributeList interface.
428 
429   AttributeList Attrs;
430   switch (PK) {
431   case IRPosition::IRP_INVALID:
432   case IRPosition::IRP_FLOAT:
433     return ChangeStatus::UNCHANGED;
434   case IRPosition::IRP_ARGUMENT:
435   case IRPosition::IRP_FUNCTION:
436   case IRPosition::IRP_RETURNED:
437     Attrs = ScopeFn->getAttributes();
438     break;
439   case IRPosition::IRP_CALL_SITE:
440   case IRPosition::IRP_CALL_SITE_RETURNED:
441   case IRPosition::IRP_CALL_SITE_ARGUMENT:
442     Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
443     break;
444   }
445 
446   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
447   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
448   for (const Attribute &Attr : DeducedAttrs) {
449     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
450       continue;
451 
452     HasChanged = ChangeStatus::CHANGED;
453   }
454 
455   if (HasChanged == ChangeStatus::UNCHANGED)
456     return HasChanged;
457 
458   switch (PK) {
459   case IRPosition::IRP_ARGUMENT:
460   case IRPosition::IRP_FUNCTION:
461   case IRPosition::IRP_RETURNED:
462     ScopeFn->setAttributes(Attrs);
463     break;
464   case IRPosition::IRP_CALL_SITE:
465   case IRPosition::IRP_CALL_SITE_RETURNED:
466   case IRPosition::IRP_CALL_SITE_ARGUMENT:
467     CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
468     break;
469   case IRPosition::IRP_INVALID:
470   case IRPosition::IRP_FLOAT:
471     break;
472   }
473 
474   return HasChanged;
475 }
476 
// Sentinel positions with out-of-range encodings (255/256); presumably the
// DenseMap empty/tombstone keys — confirm against DenseMapInfo<IRPosition>.
const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);
479 
/// Collect the positions whose attributes also apply to \p IRP. The first
/// element is always \p IRP itself; callers such as hasAttr/getAttrs rely on
/// that ordering to implement IgnoreSubsumingPositions.
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    // Nothing subsumes these beyond the position itself.
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    // Arguments and returned values are subsumed by their function.
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        // Subsumed by the callee's returned position and the callee itself.
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    // ... and always by the call site (function) position.
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      // Subsumed by the matching callee argument (if in range) and the callee.
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    // ... and always by the floating position of the underlying operand value.
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
532 
533 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
534                          bool IgnoreSubsumingPositions) const {
535   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
536     for (Attribute::AttrKind AK : AKs)
537       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
538         return true;
539     // The first position returned by the SubsumingPositionIterator is
540     // always the position itself. If we ignore subsuming positions we
541     // are done after the first iteration.
542     if (IgnoreSubsumingPositions)
543       break;
544   }
545   return false;
546 }
547 
548 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
549                           SmallVectorImpl<Attribute> &Attrs,
550                           bool IgnoreSubsumingPositions) const {
551   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
552     for (Attribute::AttrKind AK : AKs) {
553       const Attribute &Attr = EquivIRP.getAttr(AK);
554       if (Attr.getKindAsEnum() == AK)
555         Attrs.push_back(Attr);
556     }
557     // The first position returned by the SubsumingPositionIterator is
558     // always the position itself. If we ignore subsuming positions we
559     // are done after the first iteration.
560     if (IgnoreSubsumingPositions)
561       break;
562   }
563 }
564 
/// Assert the internal invariants of this position. The switch is over the
/// combined kind/argument-number encoding: non-negative values encode an
/// argument (or call site argument) index, the named cases are the kinds.
void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    // Positive values: the anchor must be an Argument or a call base whose
    // operand at that index is the associated value.
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (isa<Argument>(AnchorVal)) {
      assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
             "Associated value mismatch!");
    } else {
      assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
                 &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    // Call bases and arguments have dedicated kinds and must not float.
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}
614 
615 namespace {
616 /// Helper function to clamp a state \p S of type \p StateType with the
617 /// information in \p R and indicate/return if \p S did change (as-in update is
618 /// required to be run again).
619 template <typename StateType>
620 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
621   auto Assumed = S.getAssumed();
622   S ^= R;
623   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
624                                    : ChangeStatus::CHANGED;
625 }
626 
627 /// Clamp the information known for all returned values of a function
628 /// (identified by \p QueryingAA) into \p S.
629 template <typename AAType, typename StateType = typename AAType::StateType>
630 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
631                                      StateType &S) {
632   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
633                     << QueryingAA << " into " << S << "\n");
634 
635   assert((QueryingAA.getIRPosition().getPositionKind() ==
636               IRPosition::IRP_RETURNED ||
637           QueryingAA.getIRPosition().getPositionKind() ==
638               IRPosition::IRP_CALL_SITE_RETURNED) &&
639          "Can only clamp returned value states for a function returned or call "
640          "site returned position!");
641 
642   // Use an optional state as there might not be any return values and we want
643   // to join (IntegerState::operator&) the state of all there are.
644   Optional<StateType> T;
645 
646   // Callback for each possibly returned value.
647   auto CheckReturnValue = [&](Value &RV) -> bool {
648     const IRPosition &RVPos = IRPosition::value(RV);
649     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
650     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
651                       << " @ " << RVPos << "\n");
652     const StateType &AAS = static_cast<const StateType &>(AA.getState());
653     if (T.hasValue())
654       *T &= AAS;
655     else
656       T = AAS;
657     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
658                       << "\n");
659     return T->isValidState();
660   };
661 
662   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
663     S.indicatePessimisticFixpoint();
664   else if (T.hasValue())
665     S ^= *T;
666 }
667 
668 /// Helper class to compose two generic deduction
669 template <typename AAType, typename Base, typename StateType,
670           template <typename...> class F, template <typename...> class G>
671 struct AAComposeTwoGenericDeduction
672     : public F<AAType, G<AAType, Base, StateType>, StateType> {
673   AAComposeTwoGenericDeduction(const IRPosition &IRP)
674       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
675 
676   /// See AbstractAttribute::updateImpl(...).
677   ChangeStatus updateImpl(Attributor &A) override {
678     ChangeStatus ChangedF =
679         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
680     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
681     return ChangedF | ChangedG;
682   }
683 };
684 
685 /// Helper class for generic deduction: return value -> returned position.
686 template <typename AAType, typename Base,
687           typename StateType = typename AAType::StateType>
688 struct AAReturnedFromReturnedValues : public Base {
689   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
690 
691   /// See AbstractAttribute::updateImpl(...).
692   ChangeStatus updateImpl(Attributor &A) override {
693     StateType S;
694     clampReturnedValueStates<AAType, StateType>(A, *this, S);
695     // TODO: If we know we visited all returned values, thus no are assumed
696     // dead, we can take the known information from the state T.
697     return clampStateAndIndicateChange<StateType>(this->getState(), S);
698   }
699 };
700 
701 /// Clamp the information known at all call sites for a given argument
702 /// (identified by \p QueryingAA) into \p S.
703 template <typename AAType, typename StateType = typename AAType::StateType>
704 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
705                                         StateType &S) {
706   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
707                     << QueryingAA << " into " << S << "\n");
708 
709   assert(QueryingAA.getIRPosition().getPositionKind() ==
710              IRPosition::IRP_ARGUMENT &&
711          "Can only clamp call site argument states for an argument position!");
712 
713   // Use an optional state as there might not be any return values and we want
714   // to join (IntegerState::operator&) the state of all there are.
715   Optional<StateType> T;
716 
717   // The argument number which is also the call site argument number.
718   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
719 
720   auto CallSiteCheck = [&](AbstractCallSite ACS) {
721     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
722     // Check if a coresponding argument was found or if it is on not associated
723     // (which can happen for callback calls).
724     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
725       return false;
726 
727     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
728     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
729                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
730     const StateType &AAS = static_cast<const StateType &>(AA.getState());
731     if (T.hasValue())
732       *T &= AAS;
733     else
734       T = AAS;
735     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
736                       << "\n");
737     return T->isValidState();
738   };
739 
740   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
741     S.indicatePessimisticFixpoint();
742   else if (T.hasValue())
743     S ^= *T;
744 }
745 
746 /// Helper class for generic deduction: call site argument -> argument position.
747 template <typename AAType, typename Base,
748           typename StateType = typename AAType::StateType>
749 struct AAArgumentFromCallSiteArguments : public Base {
750   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
751 
752   /// See AbstractAttribute::updateImpl(...).
753   ChangeStatus updateImpl(Attributor &A) override {
754     StateType S;
755     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
756     // TODO: If we know we visited all incoming values, thus no are assumed
757     // dead, we can take the known information from the state T.
758     return clampStateAndIndicateChange<StateType>(this->getState(), S);
759   }
760 };
761 
762 /// Helper class for generic replication: function returned -> cs returned.
763 template <typename AAType, typename Base,
764           typename StateType = typename AAType::StateType>
765 struct AACallSiteReturnedFromReturned : public Base {
766   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
767 
768   /// See AbstractAttribute::updateImpl(...).
769   ChangeStatus updateImpl(Attributor &A) override {
770     assert(this->getIRPosition().getPositionKind() ==
771                IRPosition::IRP_CALL_SITE_RETURNED &&
772            "Can only wrap function returned positions for call site returned "
773            "positions!");
774     auto &S = this->getState();
775 
776     const Function *AssociatedFunction =
777         this->getIRPosition().getAssociatedFunction();
778     if (!AssociatedFunction)
779       return S.indicatePessimisticFixpoint();
780 
781     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
782     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
783     return clampStateAndIndicateChange(
784         S, static_cast<const typename AAType::StateType &>(AA.getState()));
785   }
786 };
787 
/// Helper class for generic deduction using the must-be-executed-context
/// explorer. The Base class is required to have a `followUse` method with the
/// following signature and semantics:
///
/// bool followUse(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// `followUse` returns true if the value should be tracked transitively.
///
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    const IRPosition &IRP = this->getIRPosition();
    Instruction *CtxI = IRP.getCtxI();

    // Without a context instruction there is nothing to explore.
    if (!CtxI)
      return;

    // Seed the worklist with the direct uses of the associated value.
    for (const Use &U : IRP.getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Copy the state so we can detect changes at the end.
    auto BeforeState = this->getState();
    auto &S = this->getState();
    Instruction *CtxI = this->getIRPosition().getCtxI();
    if (!CtxI)
      return ChangeStatus::UNCHANGED;

    MustBeExecutedContextExplorer &Explorer =
        A.getInfoCache().getMustBeExecutedContextExplorer();

    // Uses acts as a worklist: `followUse` may cause new (transitive) uses to
    // be appended below, hence Uses.size() is re-evaluated every iteration.
    auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
    for (unsigned u = 0; u < Uses.size(); ++u) {
      const Use *U = Uses[u];
      if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
        // Only consult users guaranteed to execute in the context of CtxI.
        bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
        if (Found && Base::followUse(A, U, UserI))
          for (const Use &Us : UserI->uses())
            Uses.insert(&Us);
      }
    }

    return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

private:
  /// Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
};
842 
/// Argument-position deduction that composes the must-be-executed-context
/// use tracking with the clamping of call site argument states.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AAArgumentFromCallSiteArguments>;

/// Call-site-returned deduction that composes the must-be-executed-context
/// use tracking with the replication of the callee's returned position state.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AACallSiteReturnedFromReturned>;
856 
857 /// -----------------------NoUnwind Function Attribute--------------------------
858 
859 struct AANoUnwindImpl : AANoUnwind {
860   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
861 
862   const std::string getAsStr() const override {
863     return getAssumed() ? "nounwind" : "may-unwind";
864   }
865 
866   /// See AbstractAttribute::updateImpl(...).
867   ChangeStatus updateImpl(Attributor &A) override {
868     auto Opcodes = {
869         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
870         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
871         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
872 
873     auto CheckForNoUnwind = [&](Instruction &I) {
874       if (!I.mayThrow())
875         return true;
876 
877       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
878         const auto &NoUnwindAA =
879             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
880         return NoUnwindAA.isAssumedNoUnwind();
881       }
882       return false;
883     };
884 
885     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
886       return indicatePessimisticFixpoint();
887 
888     return ChangeStatus::UNCHANGED;
889   }
890 };
891 
/// NoUnwind attribute deduction for a function.
struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};
898 
899 /// NoUnwind attribute deduction for a call sites.
900 struct AANoUnwindCallSite final : AANoUnwindImpl {
901   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
902 
903   /// See AbstractAttribute::initialize(...).
904   void initialize(Attributor &A) override {
905     AANoUnwindImpl::initialize(A);
906     Function *F = getAssociatedFunction();
907     if (!F)
908       indicatePessimisticFixpoint();
909   }
910 
911   /// See AbstractAttribute::updateImpl(...).
912   ChangeStatus updateImpl(Attributor &A) override {
913     // TODO: Once we have call site specific value information we can provide
914     //       call site specific liveness information and then it makes
915     //       sense to specialize attributes for call sites arguments instead of
916     //       redirecting requests to the callee argument.
917     Function *F = getAssociatedFunction();
918     const IRPosition &FnPos = IRPosition::function(*F);
919     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
920     return clampStateAndIndicateChange(
921         getState(),
922         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
923   }
924 
925   /// See AbstractAttribute::trackStatistics()
926   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
927 };
928 
929 /// --------------------- Function Return Values -------------------------------
930 
/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    // Without an associated function there is nothing to reason about.
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        // A `returned` argument pins the result: associate the argument with
        // every return instruction and fix the state optimistically.
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    // Without an exact definition other definitions of this function could be
    // chosen at link time, so nothing can be assumed.
    if (!F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown. Note that
  /// the return type is size_t, so -1 is reported as the maximal value.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};
1051 
1052 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1053   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1054 
1055   // Bookkeeping.
1056   assert(isValidState());
1057   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1058                   "Number of function with known return values");
1059 
1060   // Check if we have an assumed unique return value that we could manifest.
1061   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1062 
1063   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1064     return Changed;
1065 
1066   // Bookkeeping.
1067   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1068                   "Number of function with unique return");
1069 
1070   // Callback to replace the uses of CB with the constant C.
1071   auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
1072     if (CB.getNumUses() == 0 || CB.isMustTailCall())
1073       return ChangeStatus::UNCHANGED;
1074     replaceAllInstructionUsesWith(CB, C);
1075     return ChangeStatus::CHANGED;
1076   };
1077 
1078   // If the assumed unique return value is an argument, annotate it.
1079   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1080     // TODO: This should be handled differently!
1081     this->AnchorVal = UniqueRVArg;
1082     this->KindOrArgNo = UniqueRVArg->getArgNo();
1083     Changed = IRAttribute::manifest(A);
1084   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
1085     // We can replace the returned value with the unique returned constant.
1086     Value &AnchorValue = getAnchorValue();
1087     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1088       for (const Use &U : F->uses())
1089         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1090           if (CB->isCallee(&U)) {
1091             Constant *RVCCast =
1092                 CB->getType() == RVC->getType()
1093                     ? RVC
1094                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1095             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1096           }
1097     } else {
1098       assert(isa<CallBase>(AnchorValue) &&
1099              "Expcected a function or call base anchor!");
1100       Constant *RVCCast =
1101           AnchorValue.getType() == RVC->getType()
1102               ? RVC
1103               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1104       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1105     }
1106     if (Changed == ChangeStatus::CHANGED)
1107       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1108                       "Number of function returns replaced by constant return");
1109   }
1110 
1111   return Changed;
1112 }
1113 
1114 const std::string AAReturnedValuesImpl::getAsStr() const {
1115   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1116          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1117          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1118 }
1119 
Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  // If not all returned values could be inspected there is no unique value.
  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}
1151 
1152 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1153     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1154         &Pred) const {
1155   if (!isValidState())
1156     return false;
1157 
1158   // Check all returned values but ignore call sites as long as we have not
1159   // encountered an overdefined one during an update.
1160   for (auto &It : ReturnedValues) {
1161     Value *RV = It.first;
1162 
1163     CallBase *CB = dyn_cast<CallBase>(RV);
1164     if (CB && !UnresolvedCalls.count(CB))
1165       continue;
1166 
1167     if (!Pred(*RV, It.second))
1168       return false;
1169   }
1170 
1171   return true;
1172 }
1173 
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  // Remember the number of unresolved calls so growth can be detected below.
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    // Associate the value with all return instructions it can flow to and
    // flag a change if any of these associations is new.
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    // Calls without a statically known callee cannot be resolved.
    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible,
    // i.e., if all returned values can be represented in the current scope,
    // do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  // A change in the number of unresolved calls is also a change of this
  // attribute's state.
  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}
1329 
/// Returned values information for a function.
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  // Note: the manifested `returned` attribute is placed on an argument (see
  // AAReturnedValuesImpl::manifest), hence the argument-attribute statistic.
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
1336 
/// Returned values information for a call sites.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    // This attribute is not expected to be created for call site positions.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1359 
1360 /// ------------------------ NoSync Function Attribute -------------------------
1361 
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is nosync (currently only
  /// the mem* intrinsics: memcpy, memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};
1384 
1385 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1386   if (!I->isAtomic())
1387     return false;
1388 
1389   AtomicOrdering Ordering;
1390   switch (I->getOpcode()) {
1391   case Instruction::AtomicRMW:
1392     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1393     break;
1394   case Instruction::Store:
1395     Ordering = cast<StoreInst>(I)->getOrdering();
1396     break;
1397   case Instruction::Load:
1398     Ordering = cast<LoadInst>(I)->getOrdering();
1399     break;
1400   case Instruction::Fence: {
1401     auto *FI = cast<FenceInst>(I);
1402     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1403       return false;
1404     Ordering = FI->getOrdering();
1405     break;
1406   }
1407   case Instruction::AtomicCmpXchg: {
1408     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1409     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1410     // Only if both are relaxed, than it can be treated as relaxed.
1411     // Otherwise it is non-relaxed.
1412     if (Success != AtomicOrdering::Unordered &&
1413         Success != AtomicOrdering::Monotonic)
1414       return true;
1415     if (Failure != AtomicOrdering::Unordered &&
1416         Failure != AtomicOrdering::Monotonic)
1417       return true;
1418     return false;
1419   }
1420   default:
1421     llvm_unreachable(
1422         "New atomic operations need to be known in the attributor.");
1423   }
1424 
1425   // Relaxed.
1426   if (Ordering == AtomicOrdering::Unordered ||
1427       Ordering == AtomicOrdering::Monotonic)
1428     return false;
1429   return true;
1430 }
1431 
1432 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1433 /// FIXME: We should ipmrove the handling of intrinsics.
1434 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1435   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1436     switch (II->getIntrinsicID()) {
1437     /// Element wise atomic memory intrinsics are can only be unordered,
1438     /// therefore nosync.
1439     case Intrinsic::memset_element_unordered_atomic:
1440     case Intrinsic::memmove_element_unordered_atomic:
1441     case Intrinsic::memcpy_element_unordered_atomic:
1442       return true;
1443     case Intrinsic::memset:
1444     case Intrinsic::memmove:
1445     case Intrinsic::memcpy:
1446       if (!cast<MemIntrinsic>(II)->isVolatile())
1447         return true;
1448       return false;
1449     default:
1450       return false;
1451     }
1452   }
1453   return false;
1454 }
1455 
1456 bool AANoSyncImpl::isVolatile(Instruction *I) {
1457   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1458          "Calls should not be checked here");
1459 
1460   switch (I->getOpcode()) {
1461   case Instruction::AtomicRMW:
1462     return cast<AtomicRMWInst>(I)->isVolatile();
1463   case Instruction::Store:
1464     return cast<StoreInst>(I)->isVolatile();
1465   case Instruction::Load:
1466     return cast<LoadInst>(I)->isVolatile();
1467   case Instruction::AtomicCmpXchg:
1468     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1469   default:
1470     return false;
1471   }
1472 }
1473 
1474 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1475 
1476   auto CheckRWInstForNoSync = [&](Instruction &I) {
1477     /// We are looking for volatile instructions or Non-Relaxed atomics.
1478     /// FIXME: We should improve the handling of intrinsics.
1479 
1480     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1481       return true;
1482 
1483     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1484       if (ICS.hasFnAttr(Attribute::NoSync))
1485         return true;
1486 
1487       const auto &NoSyncAA =
1488           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1489       if (NoSyncAA.isAssumedNoSync())
1490         return true;
1491       return false;
1492     }
1493 
1494     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1495       return true;
1496 
1497     return false;
1498   };
1499 
1500   auto CheckForNoSync = [&](Instruction &I) {
1501     // At this point we handled all read/write effects and they are all
1502     // nosync, so they can be skipped.
1503     if (I.mayReadOrWriteMemory())
1504       return true;
1505 
1506     // non-convergent and readnone imply nosync.
1507     return !ImmutableCallSite(&I).isConvergent();
1508   };
1509 
1510   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1511       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1512     return indicatePessimisticFixpoint();
1513 
1514   return ChangeStatus::UNCHANGED;
1515 }
1516 
/// NoSync attribute deduction for a function.
struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
1523 
1524 /// NoSync attribute deduction for a call sites.
1525 struct AANoSyncCallSite final : AANoSyncImpl {
1526   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1527 
1528   /// See AbstractAttribute::initialize(...).
1529   void initialize(Attributor &A) override {
1530     AANoSyncImpl::initialize(A);
1531     Function *F = getAssociatedFunction();
1532     if (!F)
1533       indicatePessimisticFixpoint();
1534   }
1535 
1536   /// See AbstractAttribute::updateImpl(...).
1537   ChangeStatus updateImpl(Attributor &A) override {
1538     // TODO: Once we have call site specific value information we can provide
1539     //       call site specific liveness information and then it makes
1540     //       sense to specialize attributes for call sites arguments instead of
1541     //       redirecting requests to the callee argument.
1542     Function *F = getAssociatedFunction();
1543     const IRPosition &FnPos = IRPosition::function(*F);
1544     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1545     return clampStateAndIndicateChange(
1546         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1547   }
1548 
1549   /// See AbstractAttribute::trackStatistics()
1550   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1551 };
1552 
1553 /// ------------------------ No-Free Attributes ----------------------------
1554 
1555 struct AANoFreeImpl : public AANoFree {
1556   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1557 
1558   /// See AbstractAttribute::updateImpl(...).
1559   ChangeStatus updateImpl(Attributor &A) override {
1560     auto CheckForNoFree = [&](Instruction &I) {
1561       ImmutableCallSite ICS(&I);
1562       if (ICS.hasFnAttr(Attribute::NoFree))
1563         return true;
1564 
1565       const auto &NoFreeAA =
1566           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1567       return NoFreeAA.isAssumedNoFree();
1568     };
1569 
1570     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1571       return indicatePessimisticFixpoint();
1572     return ChangeStatus::UNCHANGED;
1573   }
1574 
1575   /// See AbstractAttribute::getAsStr().
1576   const std::string getAsStr() const override {
1577     return getAssumed() ? "nofree" : "may-free";
1578   }
1579 };
1580 
/// NoFree attribute deduction for a function. Deduction is inherited from
/// AANoFreeImpl; this class only records statistics.
struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
1587 
1588 /// NoFree attribute deduction for a call sites.
1589 struct AANoFreeCallSite final : AANoFreeImpl {
1590   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1591 
1592   /// See AbstractAttribute::initialize(...).
1593   void initialize(Attributor &A) override {
1594     AANoFreeImpl::initialize(A);
1595     Function *F = getAssociatedFunction();
1596     if (!F)
1597       indicatePessimisticFixpoint();
1598   }
1599 
1600   /// See AbstractAttribute::updateImpl(...).
1601   ChangeStatus updateImpl(Attributor &A) override {
1602     // TODO: Once we have call site specific value information we can provide
1603     //       call site specific liveness information and then it makes
1604     //       sense to specialize attributes for call sites arguments instead of
1605     //       redirecting requests to the callee argument.
1606     Function *F = getAssociatedFunction();
1607     const IRPosition &FnPos = IRPosition::function(*F);
1608     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1609     return clampStateAndIndicateChange(
1610         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1611   }
1612 
1613   /// See AbstractAttribute::trackStatistics()
1614   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1615 };
1616 
1617 /// NoFree attribute for floating values.
1618 struct AANoFreeFloating : AANoFreeImpl {
1619   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1620 
1621   /// See AbstractAttribute::trackStatistics()
1622   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1623 
1624   /// See Abstract Attribute::updateImpl(...).
1625   ChangeStatus updateImpl(Attributor &A) override {
1626     const IRPosition &IRP = getIRPosition();
1627 
1628     const auto &NoFreeAA =
1629         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1630     if (NoFreeAA.isAssumedNoFree())
1631       return ChangeStatus::UNCHANGED;
1632 
1633     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1634     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1635       Instruction *UserI = cast<Instruction>(U.getUser());
1636       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1637         if (CB->isBundleOperand(&U))
1638           return false;
1639         if (!CB->isArgOperand(&U))
1640           return true;
1641         unsigned ArgNo = CB->getArgOperandNo(&U);
1642 
1643         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1644             *this, IRPosition::callsite_argument(*CB, ArgNo));
1645         return NoFreeArg.isAssumedNoFree();
1646       }
1647 
1648       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1649           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1650         Follow = true;
1651         return true;
1652       }
1653 
1654       // Unknown user.
1655       return false;
1656     };
1657     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1658       return indicatePessimisticFixpoint();
1659 
1660     return ChangeStatus::UNCHANGED;
1661   }
1662 };
1663 
/// NoFree attribute for a function argument; reuses the floating-value
/// use-tracking deduction from AANoFreeFloating.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};
1671 
1672 /// NoFree attribute for call site arguments.
1673 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1674   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1675 
1676   /// See AbstractAttribute::updateImpl(...).
1677   ChangeStatus updateImpl(Attributor &A) override {
1678     // TODO: Once we have call site specific value information we can provide
1679     //       call site specific liveness information and then it makes
1680     //       sense to specialize attributes for call sites arguments instead of
1681     //       redirecting requests to the callee argument.
1682     Argument *Arg = getAssociatedArgument();
1683     if (!Arg)
1684       return indicatePessimisticFixpoint();
1685     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1686     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1687     return clampStateAndIndicateChange(
1688         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1689   }
1690 
1691   /// See AbstractAttribute::trackStatistics()
1692   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1693 };
1694 
/// NoFree attribute for function return value. The position is invalid for
/// nofree, so every entry point is unreachable by construction.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1714 
/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  // NOTE(review): manifest is deliberately a no-op here — the deduced state
  // is presumably only consumed by other attributes, not materialized as an
  // IR attribute at this position; confirm against the manifest framework.
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};
1725 
1726 /// ------------------------ NonNull Argument Attribute ------------------------
/// Determine what is known about non-null-ness and dereferenceable bytes of
/// \p AssociatedValue from the single use \p U at instruction \p I.
///
/// \param IsNonNull [inout] Or-ed with true if this use proves non-null-ness.
/// \param TrackUse  [out] Set to true if uses of \p I should be followed too.
/// \returns The number of bytes known dereferenceable through this use.
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  // Only pointer-typed uses carry non-null/dereferenceability information.
  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  // Without a function context, conservatively assume null is dereferenceable.
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    // Operand bundle uses carry no (non-)null information.
    if (ICS.isBundleOperand(U))
      return 0;

    // A called pointer is non-null whenever null is not a valid pointer.
    if (ICS.isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    // Argument operand: ask the dereferenceability AA of the call site
    // argument position for known facts.
    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
                                                  /* TrackDependence */ false);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart to avoid looking through things we do not
  // like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllConstantIndices()) {
      TrackUse = true;
      return 0;
    }

  int64_t Offset;
  if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
    if (Base == &AssociatedValue &&
        Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      // The access proves Offset bytes plus the store size of the pointee
      // type to be dereferenceable (clamped at zero for negative offsets).
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;

      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  /// Corner case when an offset is 0.
  if (const Value *Base = getBasePointerOfAccessPointerOperand(
          I, Offset, DL, /*AllowNonInbounds*/ true)) {
    if (Offset == 0 && Base == &AssociatedValue &&
        Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  return 0;
}
1799 
1800 struct AANonNullImpl : AANonNull {
1801   AANonNullImpl(const IRPosition &IRP)
1802       : AANonNull(IRP),
1803         NullIsDefined(NullPointerIsDefined(
1804             getAnchorScope(),
1805             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1806 
1807   /// See AbstractAttribute::initialize(...).
1808   void initialize(Attributor &A) override {
1809     if (!NullIsDefined &&
1810         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1811       indicateOptimisticFixpoint();
1812     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1813       indicatePessimisticFixpoint();
1814     else
1815       AANonNull::initialize(A);
1816   }
1817 
1818   /// See AAFromMustBeExecutedContext
1819   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1820     bool IsNonNull = false;
1821     bool TrackUse = false;
1822     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1823                                        IsNonNull, TrackUse);
1824     setKnown(IsNonNull);
1825     return TrackUse;
1826   }
1827 
1828   /// See AbstractAttribute::getAsStr().
1829   const std::string getAsStr() const override {
1830     return getAssumed() ? "nonnull" : "may-null";
1831   }
1832 
1833   /// Flag to determine if the underlying value can be null and still allow
1834   /// valid accesses.
1835   const bool NullIsDefined;
1836 };
1837 
1838 /// NonNull attribute for a floating value.
1839 struct AANonNullFloating
1840     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1841   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1842   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1843 
1844   /// See AbstractAttribute::updateImpl(...).
1845   ChangeStatus updateImpl(Attributor &A) override {
1846     ChangeStatus Change = Base::updateImpl(A);
1847     if (isKnownNonNull())
1848       return Change;
1849 
1850     if (!NullIsDefined) {
1851       const auto &DerefAA =
1852           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1853       if (DerefAA.getAssumedDereferenceableBytes())
1854         return Change;
1855     }
1856 
1857     const DataLayout &DL = A.getDataLayout();
1858 
1859     DominatorTree *DT = nullptr;
1860     InformationCache &InfoCache = A.getInfoCache();
1861     if (const Function *Fn = getAnchorScope())
1862       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1863 
1864     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1865                             bool Stripped) -> bool {
1866       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1867       if (!Stripped && this == &AA) {
1868         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1869           T.indicatePessimisticFixpoint();
1870       } else {
1871         // Use abstract attribute information.
1872         const AANonNull::StateType &NS =
1873             static_cast<const AANonNull::StateType &>(AA.getState());
1874         T ^= NS;
1875       }
1876       return T.isValidState();
1877     };
1878 
1879     StateType T;
1880     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1881                                                      T, VisitValueCB))
1882       return indicatePessimisticFixpoint();
1883 
1884     return clampStateAndIndicateChange(getState(), T);
1885   }
1886 
1887   /// See AbstractAttribute::trackStatistics()
1888   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1889 };
1890 
/// NonNull attribute for function return value. Deduction happens in the
/// AAReturnedFromReturnedValues base by clamping over all returned values.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
  AANonNullReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
1900 
/// NonNull attribute for function argument. Deduction combines the call site
/// arguments with the must-be-executed context (see the base template).
struct AANonNullArgument final
    : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
                                                              AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
                                                                AANonNullImpl>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};
1913 
/// NonNull attribute for a call site argument; reuses the floating-value
/// deduction from AANonNullFloating.
struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
1920 
/// NonNull attribute for a call site return position. Deduction combines the
/// callee's returned values with the must-be-executed context (see base).
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
                                                             AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
                                                               AANonNullImpl>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
1933 
1934 /// ------------------------ No-Recurse Attributes ----------------------------
1935 
/// Common base for norecurse deduction; only provides the textual state.
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};
1944 
1945 struct AANoRecurseFunction final : AANoRecurseImpl {
1946   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1947 
1948   /// See AbstractAttribute::initialize(...).
1949   void initialize(Attributor &A) override {
1950     AANoRecurseImpl::initialize(A);
1951     if (const Function *F = getAnchorScope())
1952       if (A.getInfoCache().getSccSize(*F) == 1)
1953         return;
1954     indicatePessimisticFixpoint();
1955   }
1956 
1957   /// See AbstractAttribute::updateImpl(...).
1958   ChangeStatus updateImpl(Attributor &A) override {
1959 
1960     auto CheckForNoRecurse = [&](Instruction &I) {
1961       ImmutableCallSite ICS(&I);
1962       if (ICS.hasFnAttr(Attribute::NoRecurse))
1963         return true;
1964 
1965       const auto &NoRecurseAA =
1966           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1967       if (!NoRecurseAA.isAssumedNoRecurse())
1968         return false;
1969 
1970       // Recursion to the same function
1971       if (ICS.getCalledFunction() == getAnchorScope())
1972         return false;
1973 
1974       return true;
1975     };
1976 
1977     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1978       return indicatePessimisticFixpoint();
1979     return ChangeStatus::UNCHANGED;
1980   }
1981 
1982   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1983 };
1984 
1985 /// NoRecurse attribute deduction for a call sites.
1986 struct AANoRecurseCallSite final : AANoRecurseImpl {
1987   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1988 
1989   /// See AbstractAttribute::initialize(...).
1990   void initialize(Attributor &A) override {
1991     AANoRecurseImpl::initialize(A);
1992     Function *F = getAssociatedFunction();
1993     if (!F)
1994       indicatePessimisticFixpoint();
1995   }
1996 
1997   /// See AbstractAttribute::updateImpl(...).
1998   ChangeStatus updateImpl(Attributor &A) override {
1999     // TODO: Once we have call site specific value information we can provide
2000     //       call site specific liveness information and then it makes
2001     //       sense to specialize attributes for call sites arguments instead of
2002     //       redirecting requests to the callee argument.
2003     Function *F = getAssociatedFunction();
2004     const IRPosition &FnPos = IRPosition::function(*F);
2005     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
2006     return clampStateAndIndicateChange(
2007         getState(),
2008         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
2009   }
2010 
2011   /// See AbstractAttribute::trackStatistics()
2012   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2013 };
2014 
2015 /// -------------------- Undefined-Behavior Attributes ------------------------
2016 
/// Base implementation for undefined-behavior deduction. It incrementally
/// classifies instructions as known-UB or assumed-not-UB and manifests
/// known-UB instructions by turning them into 'unreachable'.
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  /// Inspects memory accesses through a pointer as well as conditional
  /// branches for undefined behavior.
  ChangeStatus updateImpl(Attributor &A) override {
    // Remember the set sizes so we can report CHANGED iff something was added.
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      const Value *PtrOp =
          Attributor::getPointerOperand(&I, /* AllowVolatile */ true);
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOp)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOp->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW});
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br});
    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the assumed to _not_
    // cause UB, then it is assumed UB (that includes those
    // in the KnownUBInsts set). The rest is boilerplate
    // is to ensure that it is one of the instructions we test
    // for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    } break;
    default:
      return false;
    }
    return false;
  }

  /// See AbstractAttribute::manifest(...).
  /// Replace every known-UB instruction with 'unreachable'.
  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called on updates in which if we're processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
                                         Instruction *I) {
    const auto &ValueSimplifyAA =
        A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
    Optional<Value *> SimplifiedV =
        ValueSimplifyAA.getAssumedSimplifiedValue(A);
    if (!ValueSimplifyAA.isKnown()) {
      // Don't depend on assumed values.
      return llvm::None;
    }
    if (!SimplifiedV.hasValue()) {
      // If it is known (which we tested above) but it doesn't have a value,
      // then we can assume `undef` and hence the instruction is UB.
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    Value *Val = SimplifiedV.getValue();
    if (isa<UndefValue>(Val)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return Val;
  }
};
2203 
/// Undefined-behavior deduction for a function; deduction is inherited from
/// AAUndefinedBehaviorImpl, this class only records statistics.
struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP)
      : AAUndefinedBehaviorImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
2216 
2217 /// ------------------------ Will-Return Attributes ----------------------------
2218 
2219 // Helper function that checks whether a function has any cycle.
2220 // TODO: Replace with more efficent code
2221 static bool containsCycle(Function &F) {
2222   SmallPtrSet<BasicBlock *, 32> Visited;
2223 
2224   // Traverse BB by dfs and check whether successor is already visited.
2225   for (BasicBlock *BB : depth_first(&F)) {
2226     Visited.insert(BB);
2227     for (auto *SuccBB : successors(BB)) {
2228       if (Visited.count(SuccBB))
2229         return true;
2230     }
2231   }
2232   return false;
2233 }
2234 
2235 // Helper function that checks the function have a loop which might become an
2236 // endless loop
2237 // FIXME: Any cycle is regarded as endless loop for now.
2238 //        We have to allow some patterns.
2239 static bool containsPossiblyEndlessLoop(Function *F) {
2240   return !F || !F->hasExactDefinition() || containsCycle(*F);
2241 }
2242 
2243 struct AAWillReturnImpl : public AAWillReturn {
2244   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2245 
2246   /// See AbstractAttribute::initialize(...).
2247   void initialize(Attributor &A) override {
2248     AAWillReturn::initialize(A);
2249 
2250     Function *F = getAssociatedFunction();
2251     if (containsPossiblyEndlessLoop(F))
2252       indicatePessimisticFixpoint();
2253   }
2254 
2255   /// See AbstractAttribute::updateImpl(...).
2256   ChangeStatus updateImpl(Attributor &A) override {
2257     auto CheckForWillReturn = [&](Instruction &I) {
2258       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2259       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2260       if (WillReturnAA.isKnownWillReturn())
2261         return true;
2262       if (!WillReturnAA.isAssumedWillReturn())
2263         return false;
2264       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2265       return NoRecurseAA.isAssumedNoRecurse();
2266     };
2267 
2268     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2269       return indicatePessimisticFixpoint();
2270 
2271     return ChangeStatus::UNCHANGED;
2272   }
2273 
2274   /// See AbstractAttribute::getAsStr()
2275   const std::string getAsStr() const override {
2276     return getAssumed() ? "willreturn" : "may-noreturn";
2277   }
2278 };
2279 
/// WillReturn attribute deduction for a function; deduction is inherited from
/// AAWillReturnImpl, this class only records statistics.
struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};
2286 
2287 /// WillReturn attribute deduction for a call sites.
2288 struct AAWillReturnCallSite final : AAWillReturnImpl {
2289   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2290 
2291   /// See AbstractAttribute::initialize(...).
2292   void initialize(Attributor &A) override {
2293     AAWillReturnImpl::initialize(A);
2294     Function *F = getAssociatedFunction();
2295     if (!F)
2296       indicatePessimisticFixpoint();
2297   }
2298 
2299   /// See AbstractAttribute::updateImpl(...).
2300   ChangeStatus updateImpl(Attributor &A) override {
2301     // TODO: Once we have call site specific value information we can provide
2302     //       call site specific liveness information and then it makes
2303     //       sense to specialize attributes for call sites arguments instead of
2304     //       redirecting requests to the callee argument.
2305     Function *F = getAssociatedFunction();
2306     const IRPosition &FnPos = IRPosition::function(*F);
2307     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2308     return clampStateAndIndicateChange(
2309         getState(),
2310         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2311   }
2312 
2313   /// See AbstractAttribute::trackStatistics()
2314   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2315 };
2316 
2317 /// -------------------AAReachability Attribute--------------------------
2318 
2319 struct AAReachabilityImpl : AAReachability {
2320   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2321 
2322   const std::string getAsStr() const override {
2323     // TODO: Return the number of reachable queries.
2324     return "reachable";
2325   }
2326 
2327   /// See AbstractAttribute::initialize(...).
2328   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2329 
2330   /// See AbstractAttribute::updateImpl(...).
2331   ChangeStatus updateImpl(Attributor &A) override {
2332     return indicatePessimisticFixpoint();
2333   }
2334 };
2335 
2336 struct AAReachabilityFunction final : public AAReachabilityImpl {
2337   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2338 
2339   /// See AbstractAttribute::trackStatistics()
2340   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2341 };
2342 
2343 /// ------------------------ NoAlias Argument Attribute ------------------------
2344 
2345 struct AANoAliasImpl : AANoAlias {
2346   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2347 
2348   const std::string getAsStr() const override {
2349     return getAssumed() ? "noalias" : "may-alias";
2350   }
2351 };
2352 
2353 /// NoAlias attribute for a floating value.
2354 struct AANoAliasFloating final : AANoAliasImpl {
2355   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2356 
2357   /// See AbstractAttribute::initialize(...).
2358   void initialize(Attributor &A) override {
2359     AANoAliasImpl::initialize(A);
2360     Value &Val = getAssociatedValue();
2361     if (isa<AllocaInst>(Val))
2362       indicateOptimisticFixpoint();
2363     if (isa<ConstantPointerNull>(Val) &&
2364         Val.getType()->getPointerAddressSpace() == 0)
2365       indicateOptimisticFixpoint();
2366   }
2367 
2368   /// See AbstractAttribute::updateImpl(...).
2369   ChangeStatus updateImpl(Attributor &A) override {
2370     // TODO: Implement this.
2371     return indicatePessimisticFixpoint();
2372   }
2373 
2374   /// See AbstractAttribute::trackStatistics()
2375   void trackStatistics() const override {
2376     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2377   }
2378 };
2379 
2380 /// NoAlias attribute for an argument.
2381 struct AANoAliasArgument final
2382     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2383   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2384   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2385 
2386   /// See AbstractAttribute::update(...).
2387   ChangeStatus updateImpl(Attributor &A) override {
2388     // We have to make sure no-alias on the argument does not break
2389     // synchronization when this is a callback argument, see also [1] below.
2390     // If synchronization cannot be affected, we delegate to the base updateImpl
2391     // function, otherwise we give up for now.
2392 
2393     // If the function is no-sync, no-alias cannot break synchronization.
2394     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2395         *this, IRPosition::function_scope(getIRPosition()));
2396     if (NoSyncAA.isAssumedNoSync())
2397       return Base::updateImpl(A);
2398 
2399     // If the argument is read-only, no-alias cannot break synchronization.
2400     const auto &MemBehaviorAA =
2401         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2402     if (MemBehaviorAA.isAssumedReadOnly())
2403       return Base::updateImpl(A);
2404 
2405     // If the argument is never passed through callbacks, no-alias cannot break
2406     // synchronization.
2407     if (A.checkForAllCallSites(
2408             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2409             true))
2410       return Base::updateImpl(A);
2411 
2412     // TODO: add no-alias but make sure it doesn't break synchronization by
2413     // introducing fake uses. See:
2414     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2415     //     International Workshop on OpenMP 2018,
2416     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2417 
2418     return indicatePessimisticFixpoint();
2419   }
2420 
2421   /// See AbstractAttribute::trackStatistics()
2422   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2423 };
2424 
2425 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2426   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2427 
2428   /// See AbstractAttribute::initialize(...).
2429   void initialize(Attributor &A) override {
2430     // See callsite argument attribute and callee argument attribute.
2431     ImmutableCallSite ICS(&getAnchorValue());
2432     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2433       indicateOptimisticFixpoint();
2434   }
2435 
2436   /// See AbstractAttribute::updateImpl(...).
2437   ChangeStatus updateImpl(Attributor &A) override {
2438     // We can deduce "noalias" if the following conditions hold.
2439     // (i)   Associated value is assumed to be noalias in the definition.
2440     // (ii)  Associated value is assumed to be no-capture in all the uses
2441     //       possibly executed before this callsite.
2442     // (iii) There is no other pointer argument which could alias with the
2443     //       value.
2444 
2445     const Value &V = getAssociatedValue();
2446     const IRPosition IRP = IRPosition::value(V);
2447 
2448     // (i) Check whether noalias holds in the definition.
2449 
2450     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
2451     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] check definition: " << V
2452                       << " :: " << NoAliasAA << "\n");
2453 
2454     if (!NoAliasAA.isAssumedNoAlias())
2455       return indicatePessimisticFixpoint();
2456 
2457     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
2458                       << " is assumed NoAlias in the definition\n");
2459 
2460     // (ii) Check whether the value is captured in the scope using AANoCapture.
2461     //      FIXME: This is conservative though, it is better to look at CFG and
2462     //             check only uses possibly executed before this callsite.
2463 
2464     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2465     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2466       LLVM_DEBUG(
2467           dbgs() << "[Attributor][AANoAliasCSArg] " << V
2468                  << " cannot be noalias as it is potentially captured\n");
2469       return indicatePessimisticFixpoint();
2470     }
2471 
2472     // (iii) Check there is no other pointer argument which could alias with the
2473     // value.
2474     // TODO: AbstractCallSite
2475     ImmutableCallSite ICS(&getAnchorValue());
2476     for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2477       if (getArgNo() == (int)i)
2478         continue;
2479       const Value *ArgOp = ICS.getArgOperand(i);
2480       if (!ArgOp->getType()->isPointerTy())
2481         continue;
2482 
2483       if (const Function *F = getAnchorScope()) {
2484         if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2485           bool IsAliasing = !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2486           LLVM_DEBUG(dbgs()
2487                      << "[Attributor][NoAliasCSArg] Check alias between "
2488                         "callsite arguments "
2489                      << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
2490                      << getAssociatedValue() << " " << *ArgOp << " => "
2491                      << (IsAliasing ? "" : "no-") << "alias \n");
2492 
2493           if (!IsAliasing)
2494             continue;
2495         }
2496       }
2497       return indicatePessimisticFixpoint();
2498     }
2499 
2500     return ChangeStatus::UNCHANGED;
2501   }
2502 
2503   /// See AbstractAttribute::trackStatistics()
2504   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2505 };
2506 
2507 /// NoAlias attribute for function return value.
2508 struct AANoAliasReturned final : AANoAliasImpl {
2509   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2510 
2511   /// See AbstractAttribute::updateImpl(...).
2512   virtual ChangeStatus updateImpl(Attributor &A) override {
2513 
2514     auto CheckReturnValue = [&](Value &RV) -> bool {
2515       if (Constant *C = dyn_cast<Constant>(&RV))
2516         if (C->isNullValue() || isa<UndefValue>(C))
2517           return true;
2518 
2519       /// For now, we can only deduce noalias if we have call sites.
2520       /// FIXME: add more support.
2521       ImmutableCallSite ICS(&RV);
2522       if (!ICS)
2523         return false;
2524 
2525       const IRPosition &RVPos = IRPosition::value(RV);
2526       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2527       if (!NoAliasAA.isAssumedNoAlias())
2528         return false;
2529 
2530       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2531       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2532     };
2533 
2534     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2535       return indicatePessimisticFixpoint();
2536 
2537     return ChangeStatus::UNCHANGED;
2538   }
2539 
2540   /// See AbstractAttribute::trackStatistics()
2541   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2542 };
2543 
2544 /// NoAlias attribute deduction for a call site return value.
2545 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2546   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2547 
2548   /// See AbstractAttribute::initialize(...).
2549   void initialize(Attributor &A) override {
2550     AANoAliasImpl::initialize(A);
2551     Function *F = getAssociatedFunction();
2552     if (!F)
2553       indicatePessimisticFixpoint();
2554   }
2555 
2556   /// See AbstractAttribute::updateImpl(...).
2557   ChangeStatus updateImpl(Attributor &A) override {
2558     // TODO: Once we have call site specific value information we can provide
2559     //       call site specific liveness information and then it makes
2560     //       sense to specialize attributes for call sites arguments instead of
2561     //       redirecting requests to the callee argument.
2562     Function *F = getAssociatedFunction();
2563     const IRPosition &FnPos = IRPosition::returned(*F);
2564     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2565     return clampStateAndIndicateChange(
2566         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2567   }
2568 
2569   /// See AbstractAttribute::trackStatistics()
2570   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2571 };
2572 
2573 /// -------------------AAIsDead Function Attribute-----------------------
2574 
2575 struct AAIsDeadValueImpl : public AAIsDead {
2576   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2577 
2578   /// See AAIsDead::isAssumedDead().
2579   bool isAssumedDead() const override { return getAssumed(); }
2580 
2581   /// See AAIsDead::isAssumedDead(BasicBlock *).
2582   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2583 
2584   /// See AAIsDead::isKnownDead(BasicBlock *).
2585   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2586 
2587   /// See AAIsDead::isAssumedDead(Instruction *I).
2588   bool isAssumedDead(const Instruction *I) const override {
2589     return I == getCtxI() && isAssumedDead();
2590   }
2591 
2592   /// See AAIsDead::isKnownDead(Instruction *I).
2593   bool isKnownDead(const Instruction *I) const override {
2594     return I == getCtxI() && getKnown();
2595   }
2596 
2597   /// See AbstractAttribute::getAsStr().
2598   const std::string getAsStr() const override {
2599     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2600   }
2601 };
2602 
2603 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2604   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2605 
2606   /// See AbstractAttribute::initialize(...).
2607   void initialize(Attributor &A) override {
2608     if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2609       if (!wouldInstructionBeTriviallyDead(I))
2610         indicatePessimisticFixpoint();
2611     if (isa<UndefValue>(getAssociatedValue()))
2612       indicatePessimisticFixpoint();
2613   }
2614 
2615   /// See AbstractAttribute::updateImpl(...).
2616   ChangeStatus updateImpl(Attributor &A) override {
2617     auto UsePred = [&](const Use &U, bool &Follow) {
2618       Instruction *UserI = cast<Instruction>(U.getUser());
2619       if (CallSite CS = CallSite(UserI)) {
2620         if (!CS.isArgOperand(&U))
2621           return false;
2622         const IRPosition &CSArgPos =
2623             IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2624         const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2625         return CSArgIsDead.isAssumedDead();
2626       }
2627       if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2628         const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2629         const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2630         return RetIsDeadAA.isAssumedDead();
2631       }
2632       Follow = true;
2633       return wouldInstructionBeTriviallyDead(UserI);
2634     };
2635 
2636     if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2637       return indicatePessimisticFixpoint();
2638     return ChangeStatus::UNCHANGED;
2639   }
2640 
2641   /// See AbstractAttribute::manifest(...).
2642   ChangeStatus manifest(Attributor &A) override {
2643     Value &V = getAssociatedValue();
2644     if (auto *I = dyn_cast<Instruction>(&V))
2645       if (wouldInstructionBeTriviallyDead(I)) {
2646         A.deleteAfterManifest(*I);
2647         return ChangeStatus::CHANGED;
2648       }
2649 
2650     if (V.use_empty())
2651       return ChangeStatus::UNCHANGED;
2652 
2653     UndefValue &UV = *UndefValue::get(V.getType());
2654     bool AnyChange = A.changeValueAfterManifest(V, UV);
2655     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2656   }
2657 
2658   /// See AbstractAttribute::trackStatistics()
2659   void trackStatistics() const override {
2660     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2661   }
2662 };
2663 
2664 struct AAIsDeadArgument : public AAIsDeadFloating {
2665   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2666 
2667   /// See AbstractAttribute::initialize(...).
2668   void initialize(Attributor &A) override {
2669     if (!getAssociatedFunction()->hasExactDefinition())
2670       indicatePessimisticFixpoint();
2671   }
2672 
2673   /// See AbstractAttribute::manifest(...).
2674   ChangeStatus manifest(Attributor &A) override {
2675     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2676     Argument &Arg = *getAssociatedArgument();
2677     if (Arg.getParent()->hasLocalLinkage())
2678       if (A.registerFunctionSignatureRewrite(
2679               Arg, /* ReplacementTypes */ {},
2680               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2681               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
2682         return ChangeStatus::CHANGED;
2683     return Changed;
2684   }
2685 
2686   /// See AbstractAttribute::trackStatistics()
2687   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2688 };
2689 
2690 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2691   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2692 
2693   /// See AbstractAttribute::initialize(...).
2694   void initialize(Attributor &A) override {
2695     if (isa<UndefValue>(getAssociatedValue()))
2696       indicatePessimisticFixpoint();
2697   }
2698 
2699   /// See AbstractAttribute::updateImpl(...).
2700   ChangeStatus updateImpl(Attributor &A) override {
2701     // TODO: Once we have call site specific value information we can provide
2702     //       call site specific liveness information and then it makes
2703     //       sense to specialize attributes for call sites arguments instead of
2704     //       redirecting requests to the callee argument.
2705     Argument *Arg = getAssociatedArgument();
2706     if (!Arg)
2707       return indicatePessimisticFixpoint();
2708     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2709     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2710     return clampStateAndIndicateChange(
2711         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2712   }
2713 
2714   /// See AbstractAttribute::manifest(...).
2715   ChangeStatus manifest(Attributor &A) override {
2716     CallBase &CB = cast<CallBase>(getAnchorValue());
2717     Use &U = CB.getArgOperandUse(getArgNo());
2718     assert(!isa<UndefValue>(U.get()) &&
2719            "Expected undef values to be filtered out!");
2720     UndefValue &UV = *UndefValue::get(U->getType());
2721     if (A.changeUseAfterManifest(U, UV))
2722       return ChangeStatus::CHANGED;
2723     return ChangeStatus::UNCHANGED;
2724   }
2725 
2726   /// See AbstractAttribute::trackStatistics()
2727   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2728 };
2729 
2730 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2731   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2732 
2733   /// See AbstractAttribute::updateImpl(...).
2734   ChangeStatus updateImpl(Attributor &A) override {
2735 
2736     auto PredForCallSite = [&](AbstractCallSite ACS) {
2737       if (ACS.isCallbackCall())
2738         return false;
2739       const IRPosition &CSRetPos =
2740           IRPosition::callsite_returned(ACS.getCallSite());
2741       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2742       return RetIsDeadAA.isAssumedDead();
2743     };
2744 
2745     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
2746       return indicatePessimisticFixpoint();
2747 
2748     return ChangeStatus::UNCHANGED;
2749   }
2750 
2751   /// See AbstractAttribute::manifest(...).
2752   ChangeStatus manifest(Attributor &A) override {
2753     // TODO: Rewrite the signature to return void?
2754     bool AnyChange = false;
2755     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2756     auto RetInstPred = [&](Instruction &I) {
2757       ReturnInst &RI = cast<ReturnInst>(I);
2758       if (!isa<UndefValue>(RI.getReturnValue()))
2759         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2760       return true;
2761     };
2762     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2763     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2764   }
2765 
2766   /// See AbstractAttribute::trackStatistics()
2767   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2768 };
2769 
2770 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2771   AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2772 
2773   /// See AbstractAttribute::initialize(...).
2774   void initialize(Attributor &A) override {}
2775 
2776   /// See AbstractAttribute::trackStatistics()
2777   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
2778 };
2779 
/// Liveness (dead block/instruction) deduction for a whole function body.
struct AAIsDeadFunction : public AAIsDead {
  AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAssociatedFunction();
    // Only functions with a body can be explored; start exploration at the
    // first instruction and mark the entry block live.
    if (F && !F->isDeclaration()) {
      ToBeExploredFrom.insert(&F->getEntryBlock().front());
      assumeLive(A, F->getEntryBlock());
    }
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
           std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
           std::to_string(KnownDeadEnds.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAssociatedFunction();

    // If not even the entry block was assumed live, the whole function is
    // dead and can be removed.
    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows to catch asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    // Treat the remaining exploration points as dead ends too; for call-like
    // dead ends we cut off control flow after the call.
    KnownDeadEnds.set_union(ToBeExploredFrom);
    for (const Instruction *DeadEndI : KnownDeadEnds) {
      auto *CB = dyn_cast<CallBase>(DeadEndI);
      if (!CB)
        continue;
      const auto &NoReturnAA =
          A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
      bool MayReturn = !NoReturnAA.isAssumedNoReturn();
      // Only act on calls that are assumed no-return, or invokes we may
      // still turn into calls.
      if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
        continue;

      // Invokes are registered so their dead successor is handled specially;
      // for plain calls we place an unreachable right after the call.
      if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
        A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
      else
        A.changeToUnreachableAfterManifest(
            const_cast<Instruction *>(DeadEndI->getNextNode()));
      HasChanged = ChangeStatus::CHANGED;
    }

    // Blocks that were never assumed live can be deleted entirely.
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB))
        A.deleteAfterManifest(BB);

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// Returns true if the function is assumed dead.
  bool isAssumedDead() const override { return false; }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAssociatedFunction() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumed(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAssociatedFunction() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks it is for sure dead.
    // Otherwise, it can still be dead if it comes after a noreturn call in a
    // live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a liveness barrier it is live.
    const Instruction *PrevI = I->getPrevNode();
    while (PrevI) {
      if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
        return true;
      PrevI = PrevI->getPrevNode();
    }
    return false;
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Determine if \p F might catch asynchronous exceptions.
  static bool mayCatchAsynchronousExceptions(const Function &F) {
    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal function called from \p BB should now be looked at.
  /// Returns true if \p BB was not assumed live before.
  bool assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return false;

    // We assume that all of BB is (probably) live now and if there are calls to
    // internal functions we will assume that those are now live as well. This
    // is a performance optimization for blocks with calls to a lot of internal
    // functions. It can however cause dead functions to be treated as live.
    for (const Instruction &I : BB)
      if (ImmutableCallSite ICS = ImmutableCallSite(&I))
        if (const Function *F = ICS.getCalledFunction())
          if (F->hasLocalLinkage())
            A.markLiveInternalFunction(*F);
    return true;
  }

  /// Collection of instructions that need to be explored again, e.g., we
  /// did assume they do not transfer control to (one of their) successors.
  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;

  /// Collection of instructions that are known to not transfer control.
  SmallSetVector<const Instruction *, 8> KnownDeadEnds;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;
};
2929 
2930 static bool
2931 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2932                         AbstractAttribute &AA,
2933                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2934   const IRPosition &IPos = IRPosition::callsite_function(CB);
2935 
2936   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2937   if (NoReturnAA.isAssumedNoReturn())
2938     return !NoReturnAA.isKnownNoReturn();
2939   if (CB.isTerminator())
2940     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
2941   else
2942     AliveSuccessors.push_back(CB.getNextNode());
2943   return false;
2944 }
2945 
2946 static bool
2947 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
2948                         AbstractAttribute &AA,
2949                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2950   bool UsedAssumedInformation =
2951       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
2952 
2953   // First, determine if we can change an invoke to a call assuming the
2954   // callee is nounwind. This is not possible if the personality of the
2955   // function allows to catch asynchronous exceptions.
2956   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
2957     AliveSuccessors.push_back(&II.getUnwindDest()->front());
2958   } else {
2959     const IRPosition &IPos = IRPosition::callsite_function(II);
2960     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
2961     if (AANoUnw.isAssumedNoUnwind()) {
2962       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
2963     } else {
2964       AliveSuccessors.push_back(&II.getUnwindDest()->front());
2965     }
2966   }
2967   return UsedAssumedInformation;
2968 }
2969 
2970 static Optional<ConstantInt *>
2971 getAssumedConstant(Attributor &A, const Value &V, AbstractAttribute &AA,
2972                    bool &UsedAssumedInformation) {
2973   const auto &ValueSimplifyAA =
2974       A.getAAFor<AAValueSimplify>(AA, IRPosition::value(V));
2975   Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
2976   UsedAssumedInformation |= !ValueSimplifyAA.isKnown();
2977   if (!SimplifiedV.hasValue())
2978     return llvm::None;
2979   if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue()))
2980     return llvm::None;
2981   return dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
2982 }
2983 
2984 static bool
2985 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
2986                         AbstractAttribute &AA,
2987                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2988   bool UsedAssumedInformation = false;
2989   if (BI.getNumSuccessors() == 1) {
2990     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
2991   } else {
2992     Optional<ConstantInt *> CI =
2993         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
2994     if (!CI.hasValue()) {
2995       // No value yet, assume both edges are dead.
2996     } else if (CI.getValue()) {
2997       const BasicBlock *SuccBB =
2998           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
2999       AliveSuccessors.push_back(&SuccBB->front());
3000     } else {
3001       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3002       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3003       UsedAssumedInformation = false;
3004     }
3005   }
3006   return UsedAssumedInformation;
3007 }
3008 
3009 static bool
3010 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3011                         AbstractAttribute &AA,
3012                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3013   bool UsedAssumedInformation = false;
3014   Optional<ConstantInt *> CI =
3015       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
3016   if (!CI.hasValue()) {
3017     // No value yet, assume all edges are dead.
3018   } else if (CI.getValue()) {
3019     for (auto &CaseIt : SI.cases()) {
3020       if (CaseIt.getCaseValue() == CI.getValue()) {
3021         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3022         return UsedAssumedInformation;
3023       }
3024     }
3025     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3026     return UsedAssumedInformation;
3027   } else {
3028     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3029       AliveSuccessors.push_back(&SuccBB->front());
3030   }
3031   return UsedAssumedInformation;
3032 }
3033 
/// Explore the function from the current exploration points
/// (ToBeExploredFrom) and mark every reached block as assumed-live.
/// Instructions whose successor set still depends on assumed (not yet known)
/// information are kept as exploration points for the next update.
ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAssociatedFunction()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    AliveSuccessors.clear();

    // Determine, per opcode, which successor instructions are (assumed)
    // alive. UsedAssumedInformation is set if the decision relied on
    // information that may still change in later updates.
    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
    default:
      // Generic handling: terminators keep all CFG successors alive,
      // non-terminators simply fall through to the next instruction.
      if (I->isTerminator()) {
        for (const BasicBlock *SuccBB : successors(I->getParent()))
          AliveSuccessors.push_back(&SuccBB->front());
      } else {
        AliveSuccessors.push_back(I->getNextNode());
      }
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      // The successor set may still change; revisit this instruction in the
      // next update.
      NewToBeExploredFrom.insert(I);
    } else {
      // The successor set for I is final now.
      Change = ChangeStatus::CHANGED;
      if (AliveSuccessors.empty() ||
          (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
        KnownDeadEnds.insert(I);
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Only continue exploring a successor block if it was not assumed
        // live before.
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  ToBeExploredFrom = std::move(NewToBeExploredFrom);

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled any unreachable code dead, and (3) not have discovered any
  // non-trivial dead end (all known dead ends are terminators without
  // successors).
  if (ToBeExploredFrom.empty() &&
      getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}
3125 
3126 /// Liveness information for a call sites.
3127 struct AAIsDeadCallSite final : AAIsDeadFunction {
3128   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3129 
3130   /// See AbstractAttribute::initialize(...).
3131   void initialize(Attributor &A) override {
3132     // TODO: Once we have call site specific value information we can provide
3133     //       call site specific liveness information and then it makes
3134     //       sense to specialize attributes for call sites instead of
3135     //       redirecting requests to the callee.
3136     llvm_unreachable("Abstract attributes for liveness are not "
3137                      "supported for call sites yet!");
3138   }
3139 
3140   /// See AbstractAttribute::updateImpl(...).
3141   ChangeStatus updateImpl(Attributor &A) override {
3142     return indicatePessimisticFixpoint();
3143   }
3144 
3145   /// See AbstractAttribute::trackStatistics()
3146   void trackStatistics() const override {}
3147 };
3148 
3149 /// -------------------- Dereferenceable Argument Attribute --------------------
3150 
3151 template <>
3152 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3153                                                      const DerefState &R) {
3154   ChangeStatus CS0 =
3155       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3156   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3157   return CS0 | CS1;
3158 }
3159 
3160 struct AADereferenceableImpl : AADereferenceable {
3161   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
3162   using StateType = DerefState;
3163 
3164   void initialize(Attributor &A) override {
3165     SmallVector<Attribute, 4> Attrs;
3166     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3167              Attrs);
3168     for (const Attribute &Attr : Attrs)
3169       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3170 
3171     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
3172 
3173     const IRPosition &IRP = this->getIRPosition();
3174     bool IsFnInterface = IRP.isFnInterfaceKind();
3175     const Function *FnScope = IRP.getAnchorScope();
3176     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
3177       indicatePessimisticFixpoint();
3178   }
3179 
3180   /// See AbstractAttribute::getState()
3181   /// {
3182   StateType &getState() override { return *this; }
3183   const StateType &getState() const override { return *this; }
3184   /// }
3185 
3186   /// Helper function for collecting accessed bytes in must-be-executed-context
3187   void addAccessedBytesForUse(Attributor &A, const Use *U,
3188                               const Instruction *I) {
3189     const Value *UseV = U->get();
3190     if (!UseV->getType()->isPointerTy())
3191       return;
3192 
3193     Type *PtrTy = UseV->getType();
3194     const DataLayout &DL = A.getDataLayout();
3195     int64_t Offset;
3196     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3197             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3198       if (Base == &getAssociatedValue() &&
3199           Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3200         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3201         addAccessedBytes(Offset, Size);
3202       }
3203     }
3204     return;
3205   }
3206 
3207   /// See AAFromMustBeExecutedContext
3208   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3209     bool IsNonNull = false;
3210     bool TrackUse = false;
3211     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3212         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3213 
3214     addAccessedBytesForUse(A, U, I);
3215     takeKnownDerefBytesMaximum(DerefBytes);
3216     return TrackUse;
3217   }
3218 
3219   /// See AbstractAttribute::manifest(...).
3220   ChangeStatus manifest(Attributor &A) override {
3221     ChangeStatus Change = AADereferenceable::manifest(A);
3222     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3223       removeAttrs({Attribute::DereferenceableOrNull});
3224       return ChangeStatus::CHANGED;
3225     }
3226     return Change;
3227   }
3228 
3229   void getDeducedAttributes(LLVMContext &Ctx,
3230                             SmallVectorImpl<Attribute> &Attrs) const override {
3231     // TODO: Add *_globally support
3232     if (isAssumedNonNull())
3233       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3234           Ctx, getAssumedDereferenceableBytes()));
3235     else
3236       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3237           Ctx, getAssumedDereferenceableBytes()));
3238   }
3239 
3240   /// See AbstractAttribute::getAsStr().
3241   const std::string getAsStr() const override {
3242     if (!getAssumedDereferenceableBytes())
3243       return "unknown-dereferenceable";
3244     return std::string("dereferenceable") +
3245            (isAssumedNonNull() ? "" : "_or_null") +
3246            (isAssumedGlobal() ? "_globally" : "") + "<" +
3247            std::to_string(getKnownDereferenceableBytes()) + "-" +
3248            std::to_string(getAssumedDereferenceableBytes()) + ">";
3249   }
3250 };
3251 
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating
    : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
  using Base =
      AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = Base::updateImpl(A);

    const DataLayout &DL = A.getDataLayout();

    // Callback that folds the dereferenceability of one underlying value V
    // into the temporary state T. \p Stripped indicates whether V was reached
    // by looking through other values.
    auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        // Combine the abstract state of the base value's AA.
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // for overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // for the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return Change | clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
3329 
3330 /// Dereferenceable attribute for a return value.
3331 struct AADereferenceableReturned final
3332     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3333                                    DerefState> {
3334   AADereferenceableReturned(const IRPosition &IRP)
3335       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3336                                      DerefState>(IRP) {}
3337 
3338   /// See AbstractAttribute::trackStatistics()
3339   void trackStatistics() const override {
3340     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3341   }
3342 };
3343 
3344 /// Dereferenceable attribute for an argument
3345 struct AADereferenceableArgument final
3346     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3347           AADereferenceable, AADereferenceableImpl, DerefState> {
3348   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3349       AADereferenceable, AADereferenceableImpl, DerefState>;
3350   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3351 
3352   /// See AbstractAttribute::trackStatistics()
3353   void trackStatistics() const override {
3354     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3355   }
3356 };
3357 
3358 /// Dereferenceable attribute for a call site argument.
3359 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3360   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3361       : AADereferenceableFloating(IRP) {}
3362 
3363   /// See AbstractAttribute::trackStatistics()
3364   void trackStatistics() const override {
3365     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3366   }
3367 };
3368 
3369 /// Dereferenceable attribute deduction for a call site return value.
3370 struct AADereferenceableCallSiteReturned final
3371     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3372           AADereferenceable, AADereferenceableImpl> {
3373   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3374       AADereferenceable, AADereferenceableImpl>;
3375   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3376 
3377   /// See AbstractAttribute::trackStatistics()
3378   void trackStatistics() const override {
3379     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3380   }
3381 };
3382 
3383 // ------------------------ Align Argument Attribute ------------------------
3384 
/// Return the known alignment for \p AssociatedValue that is implied by the
/// use \p U at instruction \p I, or 0 if nothing can be deduced. \p TrackUse
/// is set if the users of \p I should be explored as well (pointer
/// pass-throughs like casts and constant GEPs).
static unsigned int getKnownAlignForUse(Attributor &A,
                                        AbstractAttribute &QueryingAA,
                                        Value &AssociatedValue, const Use *U,
                                        const Instruction *I, bool &TrackUse) {
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into.
  if (isa<CastInst>(I)) {
    // Follow all but ptr2int casts.
    TrackUse = !isa<PtrToIntInst>(I);
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    if (GEP->hasAllConstantIndices()) {
      TrackUse = true;
      return 0;
    }
  }

  unsigned Alignment = 0;
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    // Uses as bundle operands or as the callee itself imply no alignment.
    if (ICS.isBundleOperand(U) || ICS.isCallee(U))
      return 0;

    // Take the known alignment of the corresponding call site argument.
    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
                                        /* TrackDependence */ false);
    Alignment = AlignAA.getKnownAlign();
  }

  const Value *UseV = U->get();
  // NOTE(review): for loads/stores this overwrites (rather than combines
  // with) any call-site derived alignment above; an instruction is presumably
  // never both, but confirm that invariant is intended.
  if (auto *SI = dyn_cast<StoreInst>(I))
    Alignment = SI->getAlignment();
  else if (auto *LI = dyn_cast<LoadInst>(I))
    Alignment = LI->getAlignment();

  // Alignments of 0 ("unknown") and 1 carry no information.
  if (Alignment <= 1)
    return 0;

  auto &DL = A.getDataLayout();
  int64_t Offset;

  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
    if (Base == &AssociatedValue) {
      // BasePointerAddr + Offset = Alignment * Q for some integer Q.
      // So we can say that the maximum power of two which is a divisor of
      // gcd(Offset, Alignment) is an alignment.
      // NOTE(review): Offset is truncated to 32 bits here; presumably
      // constant offsets never exceed that range in practice -- verify.

      uint32_t gcd =
          greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
      Alignment = llvm::PowerOf2Floor(gcd);
    }
  }

  return Alignment;
}
3443 struct AAAlignImpl : AAAlign {
3444   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3445 
3446   /// See AbstractAttribute::initialize(...).
3447   void initialize(Attributor &A) override {
3448     SmallVector<Attribute, 4> Attrs;
3449     getAttrs({Attribute::Alignment}, Attrs);
3450     for (const Attribute &Attr : Attrs)
3451       takeKnownMaximum(Attr.getValueAsInt());
3452 
3453     if (getIRPosition().isFnInterfaceKind() &&
3454         (!getAssociatedFunction() ||
3455          !getAssociatedFunction()->hasExactDefinition()))
3456       indicatePessimisticFixpoint();
3457   }
3458 
3459   /// See AbstractAttribute::manifest(...).
3460   ChangeStatus manifest(Attributor &A) override {
3461     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3462 
3463     // Check for users that allow alignment annotations.
3464     Value &AnchorVal = getIRPosition().getAnchorValue();
3465     for (const Use &U : AnchorVal.uses()) {
3466       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3467         if (SI->getPointerOperand() == &AnchorVal)
3468           if (SI->getAlignment() < getAssumedAlign()) {
3469             STATS_DECLTRACK(AAAlign, Store,
3470                             "Number of times alignment added to a store");
3471             SI->setAlignment(Align(getAssumedAlign()));
3472             Changed = ChangeStatus::CHANGED;
3473           }
3474       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3475         if (LI->getPointerOperand() == &AnchorVal)
3476           if (LI->getAlignment() < getAssumedAlign()) {
3477             LI->setAlignment(Align(getAssumedAlign()));
3478             STATS_DECLTRACK(AAAlign, Load,
3479                             "Number of times alignment added to a load");
3480             Changed = ChangeStatus::CHANGED;
3481           }
3482       }
3483     }
3484 
3485     return AAAlign::manifest(A) | Changed;
3486   }
3487 
3488   // TODO: Provide a helper to determine the implied ABI alignment and check in
3489   //       the existing manifest method and a new one for AAAlignImpl that value
3490   //       to avoid making the alignment explicit if it did not improve.
3491 
3492   /// See AbstractAttribute::getDeducedAttributes
3493   virtual void
3494   getDeducedAttributes(LLVMContext &Ctx,
3495                        SmallVectorImpl<Attribute> &Attrs) const override {
3496     if (getAssumedAlign() > 1)
3497       Attrs.emplace_back(
3498           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3499   }
3500   /// See AAFromMustBeExecutedContext
3501   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3502     bool TrackUse = false;
3503 
3504     unsigned int KnownAlign =
3505         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3506     takeKnownMaximum(KnownAlign);
3507 
3508     return TrackUse;
3509   }
3510 
3511   /// See AbstractAttribute::getAsStr().
3512   const std::string getAsStr() const override {
3513     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3514                                 "-" + std::to_string(getAssumedAlign()) + ">")
3515                              : "unknown-align";
3516   }
3517 };
3518 
/// Align attribute for a floating value.
struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
  using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
  AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Update the must-be-executed-context part first; its return status is
    // discarded, the final status is computed via the clamp below.
    Base::updateImpl(A);

    const DataLayout &DL = A.getDataLayout();

    // Callback that folds the alignment of one underlying value V into the
    // temporary state T.
    auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // Use only IR information if we did not strip anything.
        const MaybeAlign PA = V.getPointerAlignment(DL);
        T.takeKnownMaximum(PA ? PA->value() : 0);
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS =
            static_cast<const AAAlign::StateType &>(AA.getState());
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
                                                   VisitValueCB))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, thus no are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};
3560 
3561 /// Align attribute for function return value.
3562 struct AAAlignReturned final
3563     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3564   AAAlignReturned(const IRPosition &IRP)
3565       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3566 
3567   /// See AbstractAttribute::trackStatistics()
3568   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3569 };
3570 
3571 /// Align attribute for function argument.
3572 struct AAAlignArgument final
3573     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3574                                                               AAAlignImpl> {
3575   AAAlignArgument(const IRPosition &IRP)
3576       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3577                                                                 AAAlignImpl>(
3578             IRP) {}
3579 
3580   /// See AbstractAttribute::trackStatistics()
3581   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3582 };
3583 
3584 struct AAAlignCallSiteArgument final : AAAlignFloating {
3585   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3586 
3587   /// See AbstractAttribute::manifest(...).
3588   ChangeStatus manifest(Attributor &A) override {
3589     return AAAlignImpl::manifest(A);
3590   }
3591 
3592   /// See AbstractAttribute::updateImpl(Attributor &A).
3593   ChangeStatus updateImpl(Attributor &A) override {
3594     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3595     if (Argument *Arg = getAssociatedArgument()) {
3596       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3597           *this, IRPosition::argument(*Arg), /* TrackDependence */ false,
3598           DepClassTy::OPTIONAL);
3599       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3600     }
3601     return Changed;
3602   }
3603 
3604   /// See AbstractAttribute::trackStatistics()
3605   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3606 };
3607 
3608 /// Align attribute deduction for a call site return value.
3609 struct AAAlignCallSiteReturned final
3610     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3611                                                              AAAlignImpl> {
3612   using Base =
3613       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3614                                                              AAAlignImpl>;
3615   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3616 
3617   /// See AbstractAttribute::initialize(...).
3618   void initialize(Attributor &A) override {
3619     Base::initialize(A);
3620     Function *F = getAssociatedFunction();
3621     if (!F)
3622       indicatePessimisticFixpoint();
3623   }
3624 
3625   /// See AbstractAttribute::trackStatistics()
3626   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3627 };
3628 
3629 /// ------------------ Function No-Return Attribute ----------------------------
3630 struct AANoReturnImpl : public AANoReturn {
3631   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3632 
3633   /// See AbstractAttribute::initialize(...).
3634   void initialize(Attributor &A) override {
3635     AANoReturn::initialize(A);
3636     Function *F = getAssociatedFunction();
3637     if (!F)
3638       indicatePessimisticFixpoint();
3639   }
3640 
3641   /// See AbstractAttribute::getAsStr().
3642   const std::string getAsStr() const override {
3643     return getAssumed() ? "noreturn" : "may-return";
3644   }
3645 
3646   /// See AbstractAttribute::updateImpl(Attributor &A).
3647   virtual ChangeStatus updateImpl(Attributor &A) override {
3648     auto CheckForNoReturn = [](Instruction &) { return false; };
3649     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3650                                    {(unsigned)Instruction::Ret}))
3651       return indicatePessimisticFixpoint();
3652     return ChangeStatus::UNCHANGED;
3653   }
3654 };
3655 
3656 struct AANoReturnFunction final : AANoReturnImpl {
3657   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3658 
3659   /// See AbstractAttribute::trackStatistics()
3660   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3661 };
3662 
3663 /// NoReturn attribute deduction for a call sites.
3664 struct AANoReturnCallSite final : AANoReturnImpl {
3665   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3666 
3667   /// See AbstractAttribute::updateImpl(...).
3668   ChangeStatus updateImpl(Attributor &A) override {
3669     // TODO: Once we have call site specific value information we can provide
3670     //       call site specific liveness information and then it makes
3671     //       sense to specialize attributes for call sites arguments instead of
3672     //       redirecting requests to the callee argument.
3673     Function *F = getAssociatedFunction();
3674     const IRPosition &FnPos = IRPosition::function(*F);
3675     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3676     return clampStateAndIndicateChange(
3677         getState(),
3678         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3679   }
3680 
3681   /// See AbstractAttribute::trackStatistics()
3682   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3683 };
3684 
3685 /// ----------------------- Variable Capturing ---------------------------------
3686 
/// A class to hold the state of the no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // An attribute already present at this exact position settles the
    // question.
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    // Without an exact definition we cannot trust (or improve) interface
    // attributes, give up right away.
    if (isFnInterfaceKind() &&
        (!AnchorScope || !AnchorScope->hasExactDefinition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    // For argument positions the capturing function is the associated one,
    // otherwise it is the anchor scope.
    const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    // Only argument positions carry a manifested attribute.
    if (getArgNo() >= 0) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care about
    // ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            // This argument is the returned one; it may be communicated back
            // through the return value, so drop that assumption.
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};
3794 
/// Attributor-aware capture tracker.
///
/// This plugs the Attributor's in-flight abstract attributes (no-capture,
/// dereferenceable, liveness) into the generic CaptureTracking use walker so
/// the analysis can be more precise than the stock tracker and can recurse
/// through call sites.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  ///
  /// NOTE: \p State and \p RemainingUsesToExplore are held by reference so a
  /// single tracker instance can be reused across multiple values (see
  /// AANoCaptureImpl::updateImpl) while sharing one exploration budget.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V maybe captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    // Only pointers can be fed to PointerMayBeCaptured; any other value
    // (e.g., the integer produced by a ptr2int) is conservatively treated as
    // captured by pessimizing the state.
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    // Drop the no-capture bits but keep "maybe returned" assumable.
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  /// See CaptureTracker::isDereferenceableOrNull(...).
  ///
  /// Falls back to the in-flight AADereferenceable attribute when the base
  /// implementation cannot prove dereferenceability from the IR alone.
  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA =
        A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    // NOTE(review): when the budget is already 0 the post-decrement wraps the
    // unsigned counter; this is harmless today because we give up on this use
    // right here, but worth keeping in mind if the budget handling changes.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    CallSite CS(UInst);
    if (!CS || !CS.isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CS.getArgumentNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      // The callee may return the pointer; queue the call site result so its
      // uses are explored as well.
      addPotentialCopy(CS);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed so we don't.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
  void addPotentialCopy(CallSite CS) {
    PotentialCopies.push_back(CS.getInstruction());
  }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness.
    return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  ///
  /// Returns true (= "captured", which stops PointerMayBeCaptured) once even
  /// the weakest assumption, no-capture-maybe-returned, is gone.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  AANoCapture::StateType &State;

  /// Set of potential copies of the tracked value.
  SmallVectorImpl<const Value *> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};
3939 
/// See AbstractAttribute::updateImpl(...).
///
/// Derive as many NOT_CAPTURED_IN_{MEM,INT,RET} bits as possible from
/// function-level attributes first, then verify the remaining assumptions by
/// walking all uses of the associated value (and its potential copies) with
/// AACaptureUseTracker.
ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  // For argument positions track the argument itself; for all other positions
  // (e.g., call site returns) track the associated value.
  const Value *V =
      getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const IRPosition &FnPos = IRPosition::function(*F);
  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos);

  // Temporary state; only intersected into the real state at the end so a
  // failed exploration does not clobber prior knowledge prematurely.
  AANoCapture::StateType T;

  // Readonly means we cannot capture through memory.
  const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
  if (FnMemAA.isAssumedReadOnly()) {
    T.addKnownBits(NOT_CAPTURED_IN_MEM);
    // Only promote to "known" on our own state once the memory behavior is
    // known, not merely assumed.
    if (FnMemAA.isKnownReadOnly())
      addKnownBits(NOT_CAPTURED_IN_MEM);
  }

  // Make sure all returned values are different than the underlying value.
  // TODO: we could do this in a more sophisticated way inside
  //       AAReturnedValues, e.g., track all values that escape through returns
  //       directly somehow.
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    // Accept at most one constant returned value; every other returned value
    // must be an argument different from the one we are reasoning about.
    bool SeenConstant = false;
    for (auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        return false;
    }
    return true;
  };

  // A pointer can only escape through the return path if the function may
  // unwind or actually returns it; check both.
  const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, FnPos);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr : &A.getAAFor<AAReturnedValues>(*this, FnPos);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      // With both MEM and RET settled there is nothing left for the use walk
      // to improve; bail out early.
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly updates the assumed state.
  SmallVector<const Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  // The tracker may append to PotentialCopies while we iterate, hence the
  // index-based loop.
  unsigned Idx = 0;
  PotentialCopies.push_back(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  // Fold the exploration result into the real state; assumptions can only
  // shrink here.
  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
4022 
4023 /// NoCapture attribute for function arguments.
4024 struct AANoCaptureArgument final : AANoCaptureImpl {
4025   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4026 
4027   /// See AbstractAttribute::trackStatistics()
4028   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4029 };
4030 
4031 /// NoCapture attribute for call site arguments.
4032 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4033   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4034 
4035   /// See AbstractAttribute::initialize(...).
4036   void initialize(Attributor &A) override {
4037     if (Argument *Arg = getAssociatedArgument())
4038       if (Arg->hasByValAttr())
4039         indicateOptimisticFixpoint();
4040     AANoCaptureImpl::initialize(A);
4041   }
4042 
4043   /// See AbstractAttribute::updateImpl(...).
4044   ChangeStatus updateImpl(Attributor &A) override {
4045     // TODO: Once we have call site specific value information we can provide
4046     //       call site specific liveness information and then it makes
4047     //       sense to specialize attributes for call sites arguments instead of
4048     //       redirecting requests to the callee argument.
4049     Argument *Arg = getAssociatedArgument();
4050     if (!Arg)
4051       return indicatePessimisticFixpoint();
4052     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4053     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4054     return clampStateAndIndicateChange(
4055         getState(),
4056         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4057   }
4058 
4059   /// See AbstractAttribute::trackStatistics()
4060   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4061 };
4062 
4063 /// NoCapture attribute for floating values.
4064 struct AANoCaptureFloating final : AANoCaptureImpl {
4065   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4066 
4067   /// See AbstractAttribute::trackStatistics()
4068   void trackStatistics() const override {
4069     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4070   }
4071 };
4072 
4073 /// NoCapture attribute for function return value.
4074 struct AANoCaptureReturned final : AANoCaptureImpl {
4075   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4076     llvm_unreachable("NoCapture is not applicable to function returns!");
4077   }
4078 
4079   /// See AbstractAttribute::initialize(...).
4080   void initialize(Attributor &A) override {
4081     llvm_unreachable("NoCapture is not applicable to function returns!");
4082   }
4083 
4084   /// See AbstractAttribute::updateImpl(...).
4085   ChangeStatus updateImpl(Attributor &A) override {
4086     llvm_unreachable("NoCapture is not applicable to function returns!");
4087   }
4088 
4089   /// See AbstractAttribute::trackStatistics()
4090   void trackStatistics() const override {}
4091 };
4092 
4093 /// NoCapture attribute deduction for a call site return value.
4094 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4095   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4096 
4097   /// See AbstractAttribute::trackStatistics()
4098   void trackStatistics() const override {
4099     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4100   }
4101 };
4102 
/// ------------------ Value Simplify Attribute ----------------------------
///
/// Shared implementation for all AAValueSimplify positions. Tracks a single
/// candidate value (SimplifiedAssociatedValue) that all relevant sources
/// (call sites, returned values, stripped values) must agree on.
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
                        : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  ///
  /// In the pessimistic state the associated value itself is returned;
  /// otherwise the current candidate (None means "not yet determined").
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    if (!getAssumed())
      return const_cast<Value *>(&getAssociatedValue());
    return SimplifiedAssociatedValue;
  }
  void initialize(Attributor &A) override {}

  /// Helper function for querying AAValueSimplify and updating candidate.
  /// \param QueryingValue Value trying to unify with SimplifiedValue
  /// \param AccumulatedSimplifiedValue Current simplification result.
  /// \returns false iff \p QueryingValue contradicts the accumulated
  ///          candidate, i.e., no single simplified value exists.
  static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                             Value &QueryingValue,
                             Optional<Value *> &AccumulatedSimplifiedValue) {
    // FIXME: Add a typecast support.

    auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
        QueryingAA, IRPosition::value(QueryingValue));

    Optional<Value *> QueryingValueSimplified =
        ValueSimpifyAA.getAssumedSimplifiedValue(A);

    // None means "no information yet"; compatible with anything.
    if (!QueryingValueSimplified.hasValue())
      return true;

    // A null candidate means simplification already failed for the query.
    if (!QueryingValueSimplified.getValue())
      return false;

    Value &QueryingValueSimplifiedUnwrapped =
        *QueryingValueSimplified.getValue();

    // Undef unifies with any candidate.
    if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
      return true;

    // With an existing candidate, the new one must be identical.
    if (AccumulatedSimplifiedValue.hasValue())
      return AccumulatedSimplifiedValue == QueryingValueSimplified;

    LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
                      << " is assumed to be "
                      << QueryingValueSimplifiedUnwrapped << "\n");

    AccumulatedSimplifiedValue = QueryingValueSimplified;
    return true;
  }

  /// See AbstractAttribute::manifest(...).
  ///
  /// Replaces the associated value with the simplified constant, if any.
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (!SimplifiedAssociatedValue.hasValue() ||
        !SimplifiedAssociatedValue.getValue())
      return Changed;

    if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
      // We can replace the AssociatedValue with the constant.
      Value &V = getAssociatedValue();
      if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
        LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
                          << "\n");
        A.changeValueAfterManifest(V, *C);
        Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
    SimplifiedAssociatedValue = &getAssociatedValue();
    indicateOptimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns original associated value.
  Optional<Value *> SimplifiedAssociatedValue;
};
4199 
4200 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4201   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4202 
4203   void initialize(Attributor &A) override {
4204     AAValueSimplifyImpl::initialize(A);
4205     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
4206       indicatePessimisticFixpoint();
4207     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4208                 /* IgnoreSubsumingPositions */ true))
4209       indicatePessimisticFixpoint();
4210   }
4211 
4212   /// See AbstractAttribute::updateImpl(...).
4213   ChangeStatus updateImpl(Attributor &A) override {
4214     // Byval is only replacable if it is readonly otherwise we would write into
4215     // the replaced value and not the copy that byval creates implicitly.
4216     Argument *Arg = getAssociatedArgument();
4217     if (Arg->hasByValAttr()) {
4218       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4219       if (!MemAA.isAssumedReadOnly())
4220         return indicatePessimisticFixpoint();
4221     }
4222 
4223     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4224 
4225     auto PredForCallSite = [&](AbstractCallSite ACS) {
4226       // Check if we have an associated argument or not (which can happen for
4227       // callback calls).
4228       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
4229       if (!ArgOp)
4230         return false;
4231       // We can only propagate thread independent values through callbacks.
4232       // This is different to direct/indirect call sites because for them we
4233       // know the thread executing the caller and callee is the same. For
4234       // callbacks this is not guaranteed, thus a thread dependent value could
4235       // be different for the caller and callee, making it invalid to propagate.
4236       if (ACS.isCallbackCall())
4237         if (auto *C = dyn_cast<Constant>(ArgOp))
4238           if (C->isThreadDependent())
4239             return false;
4240       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
4241     };
4242 
4243     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
4244       return indicatePessimisticFixpoint();
4245 
4246     // If a candicate was found in this update, return CHANGED.
4247     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4248                ? ChangeStatus::UNCHANGED
4249                : ChangeStatus ::CHANGED;
4250   }
4251 
4252   /// See AbstractAttribute::trackStatistics()
4253   void trackStatistics() const override {
4254     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4255   }
4256 };
4257 
4258 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4259   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4260 
4261   /// See AbstractAttribute::updateImpl(...).
4262   ChangeStatus updateImpl(Attributor &A) override {
4263     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4264 
4265     auto PredForReturned = [&](Value &V) {
4266       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4267     };
4268 
4269     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4270       return indicatePessimisticFixpoint();
4271 
4272     // If a candicate was found in this update, return CHANGED.
4273     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4274                ? ChangeStatus::UNCHANGED
4275                : ChangeStatus ::CHANGED;
4276   }
4277   /// See AbstractAttribute::trackStatistics()
4278   void trackStatistics() const override {
4279     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4280   }
4281 };
4282 
4283 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4284   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4285 
4286   /// See AbstractAttribute::initialize(...).
4287   void initialize(Attributor &A) override {
4288     Value &V = getAnchorValue();
4289 
4290     // TODO: add other stuffs
4291     if (isa<Constant>(V))
4292       indicatePessimisticFixpoint();
4293   }
4294 
4295   /// See AbstractAttribute::updateImpl(...).
4296   ChangeStatus updateImpl(Attributor &A) override {
4297     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4298 
4299     auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
4300       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4301       if (!Stripped && this == &AA) {
4302         // TODO: Look the instruction and check recursively.
4303         LLVM_DEBUG(
4304             dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
4305                    << V << "\n");
4306         indicatePessimisticFixpoint();
4307         return false;
4308       }
4309       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4310     };
4311 
4312     if (!genericValueTraversal<AAValueSimplify, BooleanState>(
4313             A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
4314             VisitValueCB))
4315       return indicatePessimisticFixpoint();
4316 
4317     // If a candicate was found in this update, return CHANGED.
4318 
4319     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4320                ? ChangeStatus::UNCHANGED
4321                : ChangeStatus ::CHANGED;
4322   }
4323 
4324   /// See AbstractAttribute::trackStatistics()
4325   void trackStatistics() const override {
4326     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4327   }
4328 };
4329 
4330 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4331   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4332 
4333   /// See AbstractAttribute::initialize(...).
4334   void initialize(Attributor &A) override {
4335     SimplifiedAssociatedValue = &getAnchorValue();
4336     indicateOptimisticFixpoint();
4337   }
4338   /// See AbstractAttribute::initialize(...).
4339   ChangeStatus updateImpl(Attributor &A) override {
4340     llvm_unreachable(
4341         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4342   }
4343   /// See AbstractAttribute::trackStatistics()
4344   void trackStatistics() const override {
4345     STATS_DECLTRACK_FN_ATTR(value_simplify)
4346   }
4347 };
4348 
4349 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4350   AAValueSimplifyCallSite(const IRPosition &IRP)
4351       : AAValueSimplifyFunction(IRP) {}
4352   /// See AbstractAttribute::trackStatistics()
4353   void trackStatistics() const override {
4354     STATS_DECLTRACK_CS_ATTR(value_simplify)
4355   }
4356 };
4357 
4358 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4359   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4360       : AAValueSimplifyReturned(IRP) {}
4361 
4362   void trackStatistics() const override {
4363     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4364   }
4365 };
4366 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4367   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4368       : AAValueSimplifyFloating(IRP) {}
4369 
4370   void trackStatistics() const override {
4371     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4372   }
4373 };
4374 
4375 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4376 struct AAHeapToStackImpl : public AAHeapToStack {
4377   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4378 
4379   const std::string getAsStr() const override {
4380     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4381   }
4382 
4383   ChangeStatus manifest(Attributor &A) override {
4384     assert(getState().isValidState() &&
4385            "Attempted to manifest an invalid state!");
4386 
4387     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4388     Function *F = getAssociatedFunction();
4389     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4390 
4391     for (Instruction *MallocCall : MallocCalls) {
4392       // This malloc cannot be replaced.
4393       if (BadMallocCalls.count(MallocCall))
4394         continue;
4395 
4396       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4397         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4398         A.deleteAfterManifest(*FreeCall);
4399         HasChanged = ChangeStatus::CHANGED;
4400       }
4401 
4402       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4403                         << "\n");
4404 
4405       Constant *Size;
4406       if (isCallocLikeFn(MallocCall, TLI)) {
4407         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4408         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4409         APInt TotalSize = SizeT->getValue() * Num->getValue();
4410         Size =
4411             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4412       } else {
4413         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4414       }
4415 
4416       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4417       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4418                                        Size, "", MallocCall->getNextNode());
4419 
4420       if (AI->getType() != MallocCall->getType())
4421         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4422                              AI->getNextNode());
4423 
4424       replaceAllInstructionUsesWith(*MallocCall, *AI);
4425 
4426       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4427         auto *NBB = II->getNormalDest();
4428         BranchInst::Create(NBB, MallocCall->getParent());
4429         A.deleteAfterManifest(*MallocCall);
4430       } else {
4431         A.deleteAfterManifest(*MallocCall);
4432       }
4433 
4434       if (isCallocLikeFn(MallocCall, TLI)) {
4435         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4436                                    AI->getNextNode());
4437         Value *Ops[] = {
4438             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4439             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4440 
4441         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4442         Module *M = F->getParent();
4443         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4444         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4445       }
4446       HasChanged = ChangeStatus::CHANGED;
4447     }
4448 
4449     return HasChanged;
4450   }
4451 
4452   /// Collection of all malloc calls in a function.
4453   SmallSetVector<Instruction *, 4> MallocCalls;
4454 
4455   /// Collection of malloc calls that cannot be converted.
4456   DenseSet<const Instruction *> BadMallocCalls;
4457 
4458   /// A map for each malloc call to the set of associated free calls.
4459   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4460 
4461   ChangeStatus updateImpl(Attributor &A) override;
4462 };
4463 
/// See AbstractAttribute::updateImpl(...).
///
/// Classify every call-like instruction: malloc/calloc calls with a small
/// constant size whose uses are all harmless (or whose unique free is
/// must-executed) go into MallocCalls; everything else that was inspected is
/// remembered in BadMallocCalls so it is not re-analyzed.
ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
  const Function *F = getAssociatedFunction();
  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  // Accept an allocation if it has exactly one free and that free is
  // guaranteed to execute whenever the allocation does.
  auto FreeCheck = [&](Instruction &I) {
    const auto &Frees = FreesForMalloc.lookup(&I);
    if (Frees.size() != 1)
      return false;
    Instruction *UniqueFree = *Frees.begin();
    return Explorer.findInContextOf(UniqueFree, I.getNextNode());
  };

  // Accept an allocation if all transitive uses are harmless: loads, stores
  // into (not of) the memory, frees of exactly this allocation, and calls
  // through which the pointer neither escapes nor is freed.
  auto UsesCheck = [&](Instruction &I) {
    bool ValidUsesOnly = true;
    // MustUse is cleared once the pointer flows through a PHI or select, as
    // the user might then refer to a different allocation.
    bool MustUse = true;
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (isa<LoadInst>(UserI))
        return true;
      if (auto *SI = dyn_cast<StoreInst>(UserI)) {
        if (SI->getValueOperand() == U.get()) {
          LLVM_DEBUG(dbgs()
                     << "[H2S] escaping store to memory: " << *UserI << "\n");
          ValidUsesOnly = false;
        } else {
          // A store into the malloc'ed memory is fine.
        }
        return true;
      }
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
          return true;
        // Record the free call associated with this allocation, but only if
        // the freed pointer is certainly this allocation (no PHI/select in
        // between).
        if (isFreeCall(UserI, TLI)) {
          if (MustUse) {
            FreesForMalloc[&I].insert(UserI);
          } else {
            LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
                              << *UserI << "\n");
            ValidUsesOnly = false;
          }
          return true;
        }

        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));

        // If a callsite argument use is nofree, we are fine.
        const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));

        if (!NoCaptureAA.isAssumedNoCapture() ||
            !ArgNoFreeAA.isAssumedNoFree()) {
          LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
          ValidUsesOnly = false;
        }
        return true;
      }

      // Follow pointer-preserving instructions transitively.
      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
        Follow = true;
        return true;
      }
      // Unknown user for which we can not track uses further (in a way that
      // makes sense).
      LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
      ValidUsesOnly = false;
      return true;
    };
    A.checkForAllUses(Pred, *this, I);
    return ValidUsesOnly;
  };

  // The actual check for a single call-like instruction. Always returns true
  // so the instruction walk continues; the result is recorded in the
  // MallocCalls/BadMallocCalls sets instead.
  auto MallocCallocCheck = [&](Instruction &I) {
    if (BadMallocCalls.count(&I))
      return true;

    bool IsMalloc = isMallocLikeFn(&I, TLI);
    bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
    if (!IsMalloc && !IsCalloc) {
      BadMallocCalls.insert(&I);
      return true;
    }

    if (IsMalloc) {
      if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
        if (Size->getValue().ule(MaxHeapToStackSize))
          if (UsesCheck(I) || FreeCheck(I)) {
            MallocCalls.insert(&I);
            return true;
          }
    } else if (IsCalloc) {
      bool Overflow = false;
      if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
        if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
          if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
                  .ule(MaxHeapToStackSize))
            if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
              MallocCalls.insert(&I);
              return true;
            }
    }

    BadMallocCalls.insert(&I);
    return true;
  };

  size_t NumBadMallocs = BadMallocCalls.size();

  A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);

  // A change is signaled only when a new call was ruled out; newly accepted
  // calls are handled at manifest time.
  if (NumBadMallocs != BadMallocCalls.size())
    return ChangeStatus::CHANGED;

  return ChangeStatus::UNCHANGED;
}
4587 
4588 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4589   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4590 
4591   /// See AbstractAttribute::trackStatistics()
4592   void trackStatistics() const override {
4593     STATS_DECL(MallocCalls, Function,
4594                "Number of malloc calls converted to allocas");
4595     for (auto *C : MallocCalls)
4596       if (!BadMallocCalls.count(C))
4597         ++BUILD_STAT_NAME(MallocCalls, Function);
4598   }
4599 };
4600 
4601 /// -------------------- Memory Behavior Attributes ----------------------------
4602 /// Includes read-none, read-only, and write-only.
4603 /// ----------------------------------------------------------------------------
4604 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
4605   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
4606 
4607   /// See AbstractAttribute::initialize(...).
4608   void initialize(Attributor &A) override {
4609     intersectAssumedBits(BEST_STATE);
4610     getKnownStateFromValue(getIRPosition(), getState());
4611     IRAttribute::initialize(A);
4612   }
4613 
4614   /// Return the memory behavior information encoded in the IR for \p IRP.
4615   static void getKnownStateFromValue(const IRPosition &IRP,
4616                                      BitIntegerState &State,
4617                                      bool IgnoreSubsumingPositions = false) {
4618     SmallVector<Attribute, 2> Attrs;
4619     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
4620     for (const Attribute &Attr : Attrs) {
4621       switch (Attr.getKindAsEnum()) {
4622       case Attribute::ReadNone:
4623         State.addKnownBits(NO_ACCESSES);
4624         break;
4625       case Attribute::ReadOnly:
4626         State.addKnownBits(NO_WRITES);
4627         break;
4628       case Attribute::WriteOnly:
4629         State.addKnownBits(NO_READS);
4630         break;
4631       default:
4632         llvm_unreachable("Unexpcted attribute!");
4633       }
4634     }
4635 
4636     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
4637       if (!I->mayReadFromMemory())
4638         State.addKnownBits(NO_READS);
4639       if (!I->mayWriteToMemory())
4640         State.addKnownBits(NO_WRITES);
4641     }
4642   }
4643 
4644   /// See AbstractAttribute::getDeducedAttributes(...).
4645   void getDeducedAttributes(LLVMContext &Ctx,
4646                             SmallVectorImpl<Attribute> &Attrs) const override {
4647     assert(Attrs.size() == 0);
4648     if (isAssumedReadNone())
4649       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
4650     else if (isAssumedReadOnly())
4651       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
4652     else if (isAssumedWriteOnly())
4653       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
4654     assert(Attrs.size() <= 1);
4655   }
4656 
4657   /// See AbstractAttribute::manifest(...).
4658   ChangeStatus manifest(Attributor &A) override {
4659     const IRPosition &IRP = getIRPosition();
4660 
4661     // Check if we would improve the existing attributes first.
4662     SmallVector<Attribute, 4> DeducedAttrs;
4663     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
4664     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
4665           return IRP.hasAttr(Attr.getKindAsEnum(),
4666                              /* IgnoreSubsumingPositions */ true);
4667         }))
4668       return ChangeStatus::UNCHANGED;
4669 
4670     // Clear existing attributes.
4671     IRP.removeAttrs(AttrKinds);
4672 
4673     // Use the generic manifest method.
4674     return IRAttribute::manifest(A);
4675   }
4676 
4677   /// See AbstractState::getAsStr().
4678   const std::string getAsStr() const override {
4679     if (isAssumedReadNone())
4680       return "readnone";
4681     if (isAssumedReadOnly())
4682       return "readonly";
4683     if (isAssumedWriteOnly())
4684       return "writeonly";
4685     return "may-read/write";
4686   }
4687 
4688   /// The set of IR attributes AAMemoryBehavior deals with.
4689   static const Attribute::AttrKind AttrKinds[3];
4690 };
4691 
// Out-of-line definition of the attribute kinds AAMemoryBehavior reasons
// about (see the in-class declaration above).
const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
4694 
4695 /// Memory behavior attribute for a floating value.
4696 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
4697   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4698 
4699   /// See AbstractAttribute::initialize(...).
4700   void initialize(Attributor &A) override {
4701     AAMemoryBehaviorImpl::initialize(A);
4702     // Initialize the use vector with all direct uses of the associated value.
4703     for (const Use &U : getAssociatedValue().uses())
4704       Uses.insert(&U);
4705   }
4706 
4707   /// See AbstractAttribute::updateImpl(...).
4708   ChangeStatus updateImpl(Attributor &A) override;
4709 
4710   /// See AbstractAttribute::trackStatistics()
4711   void trackStatistics() const override {
4712     if (isAssumedReadNone())
4713       STATS_DECLTRACK_FLOATING_ATTR(readnone)
4714     else if (isAssumedReadOnly())
4715       STATS_DECLTRACK_FLOATING_ATTR(readonly)
4716     else if (isAssumedWriteOnly())
4717       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
4718   }
4719 
4720 private:
4721   /// Return true if users of \p UserI might access the underlying
4722   /// variable/location described by \p U and should therefore be analyzed.
4723   bool followUsersOfUseIn(Attributor &A, const Use *U,
4724                           const Instruction *UserI);
4725 
4726   /// Update the state according to the effect of use \p U in \p UserI.
4727   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
4728 
4729 protected:
4730   /// Container for (transitive) uses of the associated argument.
4731   SetVector<const Use *> Uses;
4732 };
4733 
4734 /// Memory behavior attribute for function argument.
/// Memory behavior attribute for function argument.
struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
  AAMemoryBehaviorArgument(const IRPosition &IRP)
      : AAMemoryBehaviorFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    const IRPosition &IRP = getIRPosition();
    // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
    // can query it when we use has/getAttr. That would allow us to reuse the
    // initialize of the base class here.
    bool HasByVal =
        IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
    getKnownStateFromValue(IRP, getState(),
                           /* IgnoreSubsumingPositions */ HasByVal);

    // Without an argument or an exact definition of the parent function we
    // cannot track the uses, so give up right away.
    Argument *Arg = getAssociatedArgument();
    if (!Arg || !Arg->getParent()->hasExactDefinition()) {
      indicatePessimisticFixpoint();
    } else {
      // Initialize the use vector with all direct uses of the associated value.
      for (const Use &U : Arg->uses())
        Uses.insert(&U);
    }
  }

  ChangeStatus manifest(Attributor &A) override {
    // TODO: From readattrs.ll: "inalloca parameters are always
    //                           considered written"
    if (hasAttr({Attribute::InAlloca})) {
      removeKnownBits(NO_WRITES);
      removeAssumedBits(NO_WRITES);
    }
    return AAMemoryBehaviorFloating::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_ARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_ARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_ARG_ATTR(writeonly)
  }
};
4782 
4783 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
4784   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
4785       : AAMemoryBehaviorArgument(IRP) {}
4786 
4787   /// See AbstractAttribute::initialize(...).
4788   void initialize(Attributor &A) override {
4789     if (Argument *Arg = getAssociatedArgument()) {
4790       if (Arg->hasByValAttr()) {
4791         addKnownBits(NO_WRITES);
4792         removeKnownBits(NO_READS);
4793         removeAssumedBits(NO_READS);
4794       }
4795     } else {
4796     }
4797     AAMemoryBehaviorArgument::initialize(A);
4798   }
4799 
4800   /// See AbstractAttribute::updateImpl(...).
4801   ChangeStatus updateImpl(Attributor &A) override {
4802     // TODO: Once we have call site specific value information we can provide
4803     //       call site specific liveness liveness information and then it makes
4804     //       sense to specialize attributes for call sites arguments instead of
4805     //       redirecting requests to the callee argument.
4806     Argument *Arg = getAssociatedArgument();
4807     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4808     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4809     return clampStateAndIndicateChange(
4810         getState(),
4811         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
4812   }
4813 
4814   /// See AbstractAttribute::trackStatistics()
4815   void trackStatistics() const override {
4816     if (isAssumedReadNone())
4817       STATS_DECLTRACK_CSARG_ATTR(readnone)
4818     else if (isAssumedReadOnly())
4819       STATS_DECLTRACK_CSARG_ATTR(readonly)
4820     else if (isAssumedWriteOnly())
4821       STATS_DECLTRACK_CSARG_ATTR(writeonly)
4822   }
4823 };
4824 
4825 /// Memory behavior attribute for a call site return position.
4826 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
4827   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
4828       : AAMemoryBehaviorFloating(IRP) {}
4829 
4830   /// See AbstractAttribute::manifest(...).
4831   ChangeStatus manifest(Attributor &A) override {
4832     // We do not annotate returned values.
4833     return ChangeStatus::UNCHANGED;
4834   }
4835 
4836   /// See AbstractAttribute::trackStatistics()
4837   void trackStatistics() const override {}
4838 };
4839 
4840 /// An AA to represent the memory behavior function attributes.
4841 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
4842   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4843 
4844   /// See AbstractAttribute::updateImpl(Attributor &A).
4845   virtual ChangeStatus updateImpl(Attributor &A) override;
4846 
4847   /// See AbstractAttribute::manifest(...).
4848   ChangeStatus manifest(Attributor &A) override {
4849     Function &F = cast<Function>(getAnchorValue());
4850     if (isAssumedReadNone()) {
4851       F.removeFnAttr(Attribute::ArgMemOnly);
4852       F.removeFnAttr(Attribute::InaccessibleMemOnly);
4853       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4854     }
4855     return AAMemoryBehaviorImpl::manifest(A);
4856   }
4857 
4858   /// See AbstractAttribute::trackStatistics()
4859   void trackStatistics() const override {
4860     if (isAssumedReadNone())
4861       STATS_DECLTRACK_FN_ATTR(readnone)
4862     else if (isAssumedReadOnly())
4863       STATS_DECLTRACK_FN_ATTR(readonly)
4864     else if (isAssumedWriteOnly())
4865       STATS_DECLTRACK_FN_ATTR(writeonly)
4866   }
4867 };
4868 
4869 /// AAMemoryBehavior attribute for call sites.
/// AAMemoryBehavior attribute for call sites.
struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
  AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    // Without an exact definition of the callee we cannot analyze its body,
    // so we have to settle for whatever the IR already states.
    Function *F = getAssociatedFunction();
    if (!F || !F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  void updateImpl
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CS_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CS_ATTR(writeonly)
  }
};
4905 } // namespace
4906 
4907 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
4908 
4909   // The current assumed state used to determine a change.
4910   auto AssumedState = getAssumed();
4911 
4912   auto CheckRWInst = [&](Instruction &I) {
4913     // If the instruction has an own memory behavior state, use it to restrict
4914     // the local state. No further analysis is required as the other memory
4915     // state is as optimistic as it gets.
4916     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4917       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4918           *this, IRPosition::callsite_function(ICS));
4919       intersectAssumedBits(MemBehaviorAA.getAssumed());
4920       return !isAtFixpoint();
4921     }
4922 
4923     // Remove access kind modifiers if necessary.
4924     if (I.mayReadFromMemory())
4925       removeAssumedBits(NO_READS);
4926     if (I.mayWriteToMemory())
4927       removeAssumedBits(NO_WRITES);
4928     return !isAtFixpoint();
4929   };
4930 
4931   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
4932     return indicatePessimisticFixpoint();
4933 
4934   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
4935                                         : ChangeStatus::UNCHANGED;
4936 }
4937 
ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we avoid
  // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    // If the function state subsumes ours there is nothing left to derive.
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // Make sure the value is not captured (except through "return"), if
  // it is, any information derived would be irrelevant anyway as we cannot
  // check the potential aliases introduced by the capture. However, no need
  // to fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
      *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return ChangeStatus::CHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Liveness information to exclude dead users.
  // TODO: Take the FnPos once we have call site specific liveness information.
  const auto &LivenessAA = A.getAAFor<AAIsDead>(
      *this, IRPosition::function(*IRP.getAssociatedFunction()));

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  // Note: Uses may grow while we iterate, so no range-based for here.
  for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
    const Use *U = Uses[i];
    Instruction *UserI = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
                      << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
                      << "]\n");
    if (LivenessAA.isAssumedDead(UserI))
      continue;

    // Check if the users of UserI should also be visited.
    if (followUsersOfUseIn(A, U, UserI))
      for (const Use &UserIUse : UserI->uses())
        Uses.insert(&UserIUse);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);
  }

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}
5000 
5001 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5002                                                   const Instruction *UserI) {
5003   // The loaded value is unrelated to the pointer argument, no need to
5004   // follow the users of the load.
5005   if (isa<LoadInst>(UserI))
5006     return false;
5007 
5008   // By default we follow all uses assuming UserI might leak information on U,
5009   // we have special handling for call sites operands though.
5010   ImmutableCallSite ICS(UserI);
5011   if (!ICS || !ICS.isArgOperand(U))
5012     return true;
5013 
5014   // If the use is a call argument known not to be captured, the users of
5015   // the call do not need to be visited because they have to be unrelated to
5016   // the input. Note that this check is not trivial even though we disallow
5017   // general capturing of the underlying argument. The reason is that the
5018   // call might the argument "through return", which we allow and for which we
5019   // need to check call users.
5020   unsigned ArgNo = ICS.getArgumentNo(U);
5021   const auto &ArgNoCaptureAA =
5022       A.getAAFor<AANoCapture>(*this, IRPosition::callsite_argument(ICS, ArgNo));
5023   return !ArgNoCaptureAA.isAssumedNoCapture();
5024 }
5025 
5026 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5027                                             const Instruction *UserI) {
5028   assert(UserI->mayReadOrWriteMemory());
5029 
5030   switch (UserI->getOpcode()) {
5031   default:
5032     // TODO: Handle all atomics and other side-effect operations we know of.
5033     break;
5034   case Instruction::Load:
5035     // Loads cause the NO_READS property to disappear.
5036     removeAssumedBits(NO_READS);
5037     return;
5038 
5039   case Instruction::Store:
5040     // Stores cause the NO_WRITES property to disappear if the use is the
5041     // pointer operand. Note that we do assume that capturing was taken care of
5042     // somewhere else.
5043     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5044       removeAssumedBits(NO_WRITES);
5045     return;
5046 
5047   case Instruction::Call:
5048   case Instruction::CallBr:
5049   case Instruction::Invoke: {
5050     // For call sites we look at the argument memory behavior attribute (this
5051     // could be recursive!) in order to restrict our own state.
5052     ImmutableCallSite ICS(UserI);
5053 
5054     // Give up on operand bundles.
5055     if (ICS.isBundleOperand(U)) {
5056       indicatePessimisticFixpoint();
5057       return;
5058     }
5059 
5060     // Calling a function does read the function pointer, maybe write it if the
5061     // function is self-modifying.
5062     if (ICS.isCallee(U)) {
5063       removeAssumedBits(NO_READS);
5064       break;
5065     }
5066 
5067     // Adjust the possible access behavior based on the information on the
5068     // argument.
5069     unsigned ArgNo = ICS.getArgumentNo(U);
5070     const IRPosition &ArgPos = IRPosition::callsite_argument(ICS, ArgNo);
5071     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5072     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5073     // and at least "known".
5074     intersectAssumedBits(MemBehaviorAA.getAssumed());
5075     return;
5076   }
5077   };
5078 
5079   // Generally, look at the "may-properties" and adjust the assumed state if we
5080   // did not trigger special handling before.
5081   if (UserI->mayReadFromMemory())
5082     removeAssumedBits(NO_READS);
5083   if (UserI->mayWriteToMemory())
5084     removeAssumedBits(NO_WRITES);
5085 }
5086 
5087 /// ----------------------------------------------------------------------------
5088 ///                               Attributor
5089 /// ----------------------------------------------------------------------------
5090 
5091 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
5092                                const AAIsDead *LivenessAA) {
5093   const Instruction *CtxI = AA.getIRPosition().getCtxI();
5094   if (!CtxI)
5095     return false;
5096 
5097   // TODO: Find a good way to utilize fine and coarse grained liveness
5098   // information.
5099   if (!LivenessAA)
5100     LivenessAA =
5101         &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
5102                             /* TrackDependence */ false);
5103 
5104   // Don't check liveness for AAIsDead.
5105   if (&AA == LivenessAA)
5106     return false;
5107 
5108   if (!LivenessAA->isAssumedDead(CtxI))
5109     return false;
5110 
5111   // We actually used liveness information so we have to record a dependence.
5112   recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
5113 
5114   return true;
5115 }
5116 
5117 bool Attributor::checkForAllUses(
5118     const function_ref<bool(const Use &, bool &)> &Pred,
5119     const AbstractAttribute &QueryingAA, const Value &V) {
5120   const IRPosition &IRP = QueryingAA.getIRPosition();
5121   SmallVector<const Use *, 16> Worklist;
5122   SmallPtrSet<const Use *, 16> Visited;
5123 
5124   for (const Use &U : V.uses())
5125     Worklist.push_back(&U);
5126 
5127   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
5128                     << " initial uses to check\n");
5129 
5130   if (Worklist.empty())
5131     return true;
5132 
5133   bool AnyDead = false;
5134   const Function *ScopeFn = IRP.getAnchorScope();
5135   const auto *LivenessAA =
5136       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
5137                                     /* TrackDependence */ false)
5138               : nullptr;
5139 
5140   while (!Worklist.empty()) {
5141     const Use *U = Worklist.pop_back_val();
5142     if (!Visited.insert(U).second)
5143       continue;
5144     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
5145     if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
5146       if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
5147         LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
5148                           << *LivenessAA << "\n");
5149         AnyDead = true;
5150         continue;
5151       }
5152 
5153     bool Follow = false;
5154     if (!Pred(*U, Follow))
5155       return false;
5156     if (!Follow)
5157       continue;
5158     for (const Use &UU : U->getUser()->uses())
5159       Worklist.push_back(&UU);
5160   }
5161 
5162   if (AnyDead)
5163     recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5164 
5165   return true;
5166 }
5167 
5168 bool Attributor::checkForAllCallSites(
5169     const function_ref<bool(AbstractCallSite)> &Pred,
5170     const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
5171   // We can try to determine information from
5172   // the call sites. However, this is only possible all call sites are known,
5173   // hence the function has internal linkage.
5174   const IRPosition &IRP = QueryingAA.getIRPosition();
5175   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5176   if (!AssociatedFunction) {
5177     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
5178                       << "\n");
5179     return false;
5180   }
5181 
5182   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
5183                               &QueryingAA);
5184 }
5185 
5186 bool Attributor::checkForAllCallSites(
5187     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
5188     bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
5189   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
5190     LLVM_DEBUG(
5191         dbgs()
5192         << "[Attributor] Function " << Fn.getName()
5193         << " has no internal linkage, hence not all call sites are known\n");
5194     return false;
5195   }
5196 
5197   for (const Use &U : Fn.uses()) {
5198     AbstractCallSite ACS(&U);
5199     if (!ACS) {
5200       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
5201                         << " has non call site use " << *U.get() << " in "
5202                         << *U.getUser() << "\n");
5203       // BlockAddress users are allowed.
5204       if (isa<BlockAddress>(U.getUser()))
5205         continue;
5206       return false;
5207     }
5208 
5209     Instruction *I = ACS.getInstruction();
5210     Function *Caller = I->getFunction();
5211 
5212     const auto *LivenessAA =
5213         lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
5214                               /* TrackDependence */ false);
5215 
5216     // Skip dead calls.
5217     if (LivenessAA && LivenessAA->isAssumedDead(I)) {
5218       // We actually used liveness information so we have to record a
5219       // dependence.
5220       if (QueryingAA)
5221         recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
5222       continue;
5223     }
5224 
5225     const Use *EffectiveUse =
5226         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
5227     if (!ACS.isCallee(EffectiveUse)) {
5228       if (!RequireAllCallSites)
5229         continue;
5230       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
5231                         << " is an invalid use of " << Fn.getName() << "\n");
5232       return false;
5233     }
5234 
5235     if (Pred(ACS))
5236       continue;
5237 
5238     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
5239                       << *ACS.getInstruction() << "\n");
5240     return false;
5241   }
5242 
5243   return true;
5244 }
5245 
5246 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
5247     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
5248         &Pred,
5249     const AbstractAttribute &QueryingAA) {
5250 
5251   const IRPosition &IRP = QueryingAA.getIRPosition();
5252   // Since we need to provide return instructions we have to have an exact
5253   // definition.
5254   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5255   if (!AssociatedFunction)
5256     return false;
5257 
5258   // If this is a call site query we use the call site specific return values
5259   // and liveness information.
5260   // TODO: use the function scope once we have call site AAReturnedValues.
5261   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5262   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
5263   if (!AARetVal.getState().isValidState())
5264     return false;
5265 
5266   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
5267 }
5268 
5269 bool Attributor::checkForAllReturnedValues(
5270     const function_ref<bool(Value &)> &Pred,
5271     const AbstractAttribute &QueryingAA) {
5272 
5273   const IRPosition &IRP = QueryingAA.getIRPosition();
5274   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5275   if (!AssociatedFunction)
5276     return false;
5277 
5278   // TODO: use the function scope once we have call site AAReturnedValues.
5279   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5280   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
5281   if (!AARetVal.getState().isValidState())
5282     return false;
5283 
5284   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
5285       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
5286         return Pred(RV);
5287       });
5288 }
5289 
5290 static bool
5291 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
5292                             const function_ref<bool(Instruction &)> &Pred,
5293                             const AAIsDead *LivenessAA, bool &AnyDead,
5294                             const ArrayRef<unsigned> &Opcodes) {
5295   for (unsigned Opcode : Opcodes) {
5296     for (Instruction *I : OpcodeInstMap[Opcode]) {
5297       // Skip dead instructions.
5298       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
5299         AnyDead = true;
5300         continue;
5301       }
5302 
5303       if (!Pred(*I))
5304         return false;
5305     }
5306   }
5307   return true;
5308 }
5309 
5310 bool Attributor::checkForAllInstructions(
5311     const llvm::function_ref<bool(Instruction &)> &Pred,
5312     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
5313 
5314   const IRPosition &IRP = QueryingAA.getIRPosition();
5315   // Since we need to provide instructions we have to have an exact definition.
5316   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5317   if (!AssociatedFunction)
5318     return false;
5319 
5320   // TODO: use the function scope once we have call site AAReturnedValues.
5321   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5322   const auto &LivenessAA =
5323       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5324   bool AnyDead = false;
5325 
5326   auto &OpcodeInstMap =
5327       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
5328   if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
5329                                    Opcodes))
5330     return false;
5331 
5332   // If we actually used liveness information so we have to record a dependence.
5333   if (AnyDead)
5334     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5335 
5336   return true;
5337 }
5338 
5339 bool Attributor::checkForAllReadWriteInstructions(
5340     const llvm::function_ref<bool(Instruction &)> &Pred,
5341     AbstractAttribute &QueryingAA) {
5342 
5343   const Function *AssociatedFunction =
5344       QueryingAA.getIRPosition().getAssociatedFunction();
5345   if (!AssociatedFunction)
5346     return false;
5347 
5348   // TODO: use the function scope once we have call site AAReturnedValues.
5349   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5350   const auto &LivenessAA =
5351       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5352   bool AnyDead = false;
5353 
5354   for (Instruction *I :
5355        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
5356     // Skip dead instructions.
5357     if (LivenessAA.isAssumedDead(I)) {
5358       AnyDead = true;
5359       continue;
5360     }
5361 
5362     if (!Pred(*I))
5363       return false;
5364   }
5365 
5366   // If we actually used liveness information so we have to record a dependence.
5367   if (AnyDead)
5368     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5369 
5370   return true;
5371 }
5372 
ChangeStatus Attributor::run(Module &M) {
  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
                    << AllAbstractAttributes.size()
                    << " abstract attributes.\n");

  // Now that all abstract attributes are collected and initialized we start
  // the abstract analysis.

  unsigned IterationCounter = 1;

  SmallVector<AbstractAttribute *, 64> ChangedAAs;
  SetVector<AbstractAttribute *> Worklist, InvalidAAs;
  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());

  bool RecomputeDependences = false;

  do {
    // Remember the size to determine new attributes.
    size_t NumAAs = AllAbstractAttributes.size();
    LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist size: " << Worklist.size() << "\n");

    // For invalid AAs we can fix dependent AAs that have a required dependence,
    // thereby folding long dependence chains in a single step without the need
    // to run updates.
    for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
      AbstractAttribute *InvalidAA = InvalidAAs[u];
      auto &QuerriedAAs = QueryMap[InvalidAA];
      LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
                        << QuerriedAAs.RequiredAAs.size() << "/"
                        << QuerriedAAs.OptionalAAs.size()
                        << " required/optional dependences\n");
      // Required dependences on an invalid AA force a pessimistic fixpoint;
      // newly invalidated AAs are appended so the chain is folded here.
      for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
        AbstractState &DOIAAState = DepOnInvalidAA->getState();
        DOIAAState.indicatePessimisticFixpoint();
        ++NumAttributesFixedDueToRequiredDependences;
        assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
        if (!DOIAAState.isValidState())
          InvalidAAs.insert(DepOnInvalidAA);
      }
      // Optional dependences merely need a re-run; if dependences are about to
      // be recomputed everything is revisited anyway.
      if (!RecomputeDependences)
        Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
                        QuerriedAAs.OptionalAAs.end());
    }

    // If dependences (=QueryMap) are recomputed we have to look at all abstract
    // attributes again, regardless of what changed in the last iteration.
    if (RecomputeDependences) {
      LLVM_DEBUG(
          dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
      QueryMap.clear();
      ChangedAAs.clear();
      Worklist.insert(AllAbstractAttributes.begin(),
                      AllAbstractAttributes.end());
    }

    // Add all abstract attributes that are potentially dependent on one that
    // changed to the work list.
    for (AbstractAttribute *ChangedAA : ChangedAAs) {
      auto &QuerriedAAs = QueryMap[ChangedAA];
      Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
                      QuerriedAAs.OptionalAAs.end());
      Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
                      QuerriedAAs.RequiredAAs.end());
    }

    LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist+Dependent size: " << Worklist.size()
                      << "\n");

    // Reset the changed and invalid set.
    ChangedAAs.clear();
    InvalidAAs.clear();

    // Update all abstract attributes in the work list and record the ones that
    // changed.
    for (AbstractAttribute *AA : Worklist)
      if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
        // Reset the flag that recordDependence sets when a non-fixed
        // attribute is queried during the update.
        QueriedNonFixAA = false;
        if (AA->update(*this) == ChangeStatus::CHANGED) {
          ChangedAAs.push_back(AA);
          if (!AA->getState().isValidState())
            InvalidAAs.insert(AA);
        } else if (!QueriedNonFixAA) {
          // If the attribute did not query any non-fix information, the state
          // will not change and we can indicate that right away.
          AA->getState().indicateOptimisticFixpoint();
        }
      }

    // Check if we recompute the dependences in the next iteration.
    RecomputeDependences = (DepRecomputeInterval > 0 &&
                            IterationCounter % DepRecomputeInterval == 0);

    // Add attributes to the changed set if they have been created in the last
    // iteration.
    ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
                      AllAbstractAttributes.end());

    // Reset the work list and repopulate with the changed abstract attributes.
    // Note that dependent ones are added above.
    Worklist.clear();
    Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());

    // With VerifyMaxFixpointIterations we iterate until a fixpoint is reached
    // regardless of the limit so the iteration count can be verified below.
  } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
                                 VerifyMaxFixpointIterations));

  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
                    << IterationCounter << "/" << MaxFixpointIterations
                    << " iterations\n");

  size_t NumFinalAAs = AllAbstractAttributes.size();

  // Reset abstract arguments not settled in a sound fixpoint by now. This
  // happens when we stopped the fixpoint iteration early. Note that only the
  // ones marked as "changed" *and* the ones transitively depending on them
  // need to be reverted to a pessimistic state. Others might not be in a
  // fixpoint state but we can use the optimistic results for them anyway.
  SmallPtrSet<AbstractAttribute *, 32> Visited;
  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
    AbstractAttribute *ChangedAA = ChangedAAs[u];
    if (!Visited.insert(ChangedAA).second)
      continue;

    AbstractState &State = ChangedAA->getState();
    if (!State.isAtFixpoint()) {
      State.indicatePessimisticFixpoint();

      NumAttributesTimedOut++;
    }

    // Append transitive dependences; the loop above iterates over the growing
    // vector, the Visited set avoids re-processing.
    auto &QuerriedAAs = QueryMap[ChangedAA];
    ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
                      QuerriedAAs.OptionalAAs.end());
    ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
                      QuerriedAAs.RequiredAAs.end());
  }

  LLVM_DEBUG({
    if (!Visited.empty())
      dbgs() << "\n[Attributor] Finalized " << Visited.size()
             << " abstract attributes.\n";
  });

  unsigned NumManifested = 0;
  unsigned NumAtFixpoint = 0;
  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
  for (AbstractAttribute *AA : AllAbstractAttributes) {
    AbstractState &State = AA->getState();

    // If there is not already a fixpoint reached, we can now take the
    // optimistic state. This is correct because we enforced a pessimistic one
    // on abstract attributes that were transitively dependent on a changed one
    // already above.
    if (!State.isAtFixpoint())
      State.indicateOptimisticFixpoint();

    // If the state is invalid, we do not try to manifest it.
    if (!State.isValidState())
      continue;

    // Skip dead code.
    if (isAssumedDead(*AA, nullptr))
      continue;
    // Manifest the state and record if we changed the IR.
    ChangeStatus LocalChange = AA->manifest(*this);
    if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
      AA->trackStatistics();

    ManifestChange = ManifestChange | LocalChange;

    NumAtFixpoint++;
    NumManifested += (LocalChange == ChangeStatus::CHANGED);
  }

  (void)NumManifested;
  (void)NumAtFixpoint;
  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
                    << " arguments while " << NumAtFixpoint
                    << " were in a valid fixpoint state\n");

  NumAttributesManifested += NumManifested;
  NumAttributesValidFixpoint += NumAtFixpoint;

  (void)NumFinalAAs;
  assert(
      NumFinalAAs == AllAbstractAttributes.size() &&
      "Expected the final number of abstract attributes to remain unchanged!");

  // Delete stuff at the end to avoid invalid references and a nice order.
  {
    LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
                      << ToBeDeletedFunctions.size() << " functions and "
                      << ToBeDeletedBlocks.size() << " blocks and "
                      << ToBeDeletedInsts.size() << " instructions and "
                      << ToBeChangedUses.size() << " uses\n");

    SmallVector<Instruction *, 32> DeadInsts;
    SmallVector<Instruction *, 32> TerminatorsToFold;

    // Perform the requested use replacements first.
    for (auto &It : ToBeChangedUses) {
      Use *U = It.first;
      Value *NewV = It.second;
      Value *OldV = U->get();
      LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
                        << " instead of " << *OldV << "\n");
      U->set(NewV);
      // Collect now-trivially-dead non-PHI values that are not already
      // scheduled for deletion.
      if (Instruction *I = dyn_cast<Instruction>(OldV))
        if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
            isInstructionTriviallyDead(I)) {
          DeadInsts.push_back(I);
        }
      // A branch on a constant either becomes foldable or, if the constant is
      // undef, the whole terminator is replaced by unreachable below.
      if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
        Instruction *UserI = cast<Instruction>(U->getUser());
        if (isa<UndefValue>(NewV)) {
          ToBeChangedToUnreachableInsts.insert(UserI);
        } else {
          TerminatorsToFold.push_back(UserI);
        }
      }
    }
    // Rewrite invokes with a dead normal and/or unwind destination.
    for (auto &V : InvokeWithDeadSuccessor)
      if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
        bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
        bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
        // Invokes can only be turned into calls if the function is not
        // expected to catch asynchronous exceptions.
        bool Invoke2CallAllowed =
            !AAIsDeadFunction::mayCatchAsynchronousExceptions(
                *II->getFunction());
        assert((UnwindBBIsDead || NormalBBIsDead) &&
               "Invoke does not have dead successors!");
        BasicBlock *BB = II->getParent();
        BasicBlock *NormalDestBB = II->getNormalDest();
        if (UnwindBBIsDead) {
          Instruction *NormalNextIP = &NormalDestBB->front();
          if (Invoke2CallAllowed) {
            changeToCall(II);
            NormalNextIP = BB->getTerminator();
          }
          if (NormalBBIsDead)
            ToBeChangedToUnreachableInsts.insert(NormalNextIP);
        } else {
          assert(NormalBBIsDead && "Broken invariant!");
          // Split so that the unreachable affects only this invoke's edge.
          if (!NormalDestBB->getUniquePredecessor())
            NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
          ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
        }
      }
    for (auto &V : ToBeChangedToUnreachableInsts)
      if (Instruction *I = dyn_cast_or_null<Instruction>(V))
        changeToUnreachable(I, /* UseLLVMTrap */ false);
    for (Instruction *I : TerminatorsToFold)
      ConstantFoldTerminator(I->getParent());

    for (Instruction *I : ToBeDeletedInsts) {
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
        DeadInsts.push_back(I);
      else
        I->eraseFromParent();
    }

    RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);

    if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
      SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
      ToBeDeletedBBs.reserve(NumDeadBlocks);
      ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
      // Actually we do not delete the blocks but squash them into a single
      // unreachable but untangling branches that jump here is something we need
      // to do in a more generic way.
      DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
      STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
      BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
    }

    // Identify dead internal functions and delete them. This happens outside
    // the other fixpoint analysis as we might treat potentially dead functions
    // as live to lower the number of iterations. If they happen to be dead, the
    // below fixpoint loop will identify and eliminate them.
    SmallVector<Function *, 8> InternalFns;
    for (Function &F : M)
      if (F.hasLocalLinkage())
        InternalFns.push_back(&F);

    bool FoundDeadFn = true;
    while (FoundDeadFn) {
      FoundDeadFn = false;
      for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
        Function *F = InternalFns[u];
        if (!F)
          continue;

        // An internal function is dead if every call site lives in a function
        // that is itself scheduled for deletion.
        if (!checkForAllCallSites(
                [this](AbstractCallSite ACS) {
                  return ToBeDeletedFunctions.count(
                      ACS.getInstruction()->getFunction());
                },
                *F, true, nullptr))
          continue;

        ToBeDeletedFunctions.insert(F);
        InternalFns[u] = nullptr;
        FoundDeadFn = true;
      }
    }
  }

  STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
  BUILD_STAT_NAME(AAIsDead, Function) += ToBeDeletedFunctions.size();

  // Rewrite the functions as requested during manifest.
  ManifestChange = ManifestChange | rewriteFunctionSignatures();

  for (Function *Fn : ToBeDeletedFunctions) {
    Fn->deleteBody();
    Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
    Fn->eraseFromParent();
  }

  if (VerifyMaxFixpointIterations &&
      IterationCounter != MaxFixpointIterations) {
    errs() << "\n[Attributor] Fixpoint iteration done after: "
           << IterationCounter << "/" << MaxFixpointIterations
           << " iterations\n";
    llvm_unreachable("The fixpoint was not reached with exactly the number of "
                     "specified iterations!");
  }

  return ManifestChange;
}
5703 
5704 bool Attributor::registerFunctionSignatureRewrite(
5705     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
5706     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
5707     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
5708 
5709   auto CallSiteCanBeChanged = [](AbstractCallSite ACS) {
5710     // Forbid must-tail calls for now.
5711     return !ACS.isCallbackCall() && !ACS.getCallSite().isMustTailCall();
5712   };
5713 
5714   Function *Fn = Arg.getParent();
5715   // Avoid var-arg functions for now.
5716   if (Fn->isVarArg()) {
5717     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
5718     return false;
5719   }
5720 
5721   // Avoid functions with complicated argument passing semantics.
5722   AttributeList FnAttributeList = Fn->getAttributes();
5723   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
5724       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
5725       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca)) {
5726     LLVM_DEBUG(
5727         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
5728     return false;
5729   }
5730 
5731   // Avoid callbacks for now.
5732   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr)) {
5733     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
5734     return false;
5735   }
5736 
5737   auto InstPred = [](Instruction &I) {
5738     if (auto *CI = dyn_cast<CallInst>(&I))
5739       return !CI->isMustTailCall();
5740     return true;
5741   };
5742 
5743   // Forbid must-tail calls for now.
5744   // TODO:
5745   bool AnyDead;
5746   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
5747   if (!checkForAllInstructionsImpl(OpcodeInstMap, InstPred, nullptr, AnyDead,
5748                                    {Instruction::Call})) {
5749     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
5750     return false;
5751   }
5752 
5753   SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = ArgumentReplacementMap[Fn];
5754   if (ARIs.size() == 0)
5755     ARIs.resize(Fn->arg_size());
5756 
5757   // If we have a replacement already with less than or equal new arguments,
5758   // ignore this request.
5759   ArgumentReplacementInfo *&ARI = ARIs[Arg.getArgNo()];
5760   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
5761     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
5762     return false;
5763   }
5764 
5765   // If we have a replacement already but we like the new one better, delete
5766   // the old.
5767   if (ARI)
5768     delete ARI;
5769 
5770   // Remember the replacement.
5771   ARI = new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
5772                                     std::move(CalleeRepairCB),
5773                                     std::move(ACSRepairCB));
5774 
5775   return true;
5776 }
5777 
ChangeStatus Attributor::rewriteFunctionSignatures() {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  for (auto &It : ArgumentReplacementMap) {
    Function *OldFn = It.getFirst();

    // Deleted functions do not require rewrites.
    if (ToBeDeletedFunctions.count(OldFn))
      continue;

    // One (possibly null) replacement info slot per original argument.
    const SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = It.getSecond();
    assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");

    SmallVector<Type *, 16> NewArgumentTypes;
    SmallVector<AttributeSet, 16> NewArgumentAttributes;

    // Collect replacement argument types and copy over existing attributes.
    // Replaced arguments may expand into multiple new ones, which start with
    // empty attribute sets.
    AttributeList OldFnAttributeList = OldFn->getAttributes();
    for (Argument &Arg : OldFn->args()) {
      if (ArgumentReplacementInfo *ARI = ARIs[Arg.getArgNo()]) {
        NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
                                ARI->ReplacementTypes.end());
        NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
                                     AttributeSet());
      } else {
        NewArgumentTypes.push_back(Arg.getType());
        NewArgumentAttributes.push_back(
            OldFnAttributeList.getParamAttributes(Arg.getArgNo()));
      }
    }

    FunctionType *OldFnTy = OldFn->getFunctionType();
    Type *RetTy = OldFnTy->getReturnType();

    // Construct the new function type using the new arguments types.
    FunctionType *NewFnTy =
        FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());

    LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
                      << "' from " << *OldFn->getFunctionType() << " to "
                      << *NewFnTy << "\n");

    // Create the new function body and insert it into the module.
    Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
                                       OldFn->getAddressSpace(), "");
    OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
    NewFn->takeName(OldFn);
    NewFn->copyAttributesFrom(OldFn);

    // Patch the pointer to LLVM function in debug info descriptor.
    NewFn->setSubprogram(OldFn->getSubprogram());
    OldFn->setSubprogram(nullptr);

    // Recompute the parameter attributes list based on the new arguments for
    // the function.
    LLVMContext &Ctx = OldFn->getContext();
    NewFn->setAttributes(AttributeList::get(
        Ctx, OldFnAttributeList.getFnAttributes(),
        OldFnAttributeList.getRetAttributes(), NewArgumentAttributes));

    // Since we have now created the new function, splice the body of the old
    // function right into the new function, leaving the old rotting hulk of the
    // function empty.
    NewFn->getBasicBlockList().splice(NewFn->begin(),
                                      OldFn->getBasicBlockList());

    // Set of all "call-like" instructions that invoke the old function mapped
    // to their new replacements.
    SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;

    // Callback to create a new "call-like" instruction for a given one.
    auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
      CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
      const AttributeList &OldCallAttributeList = OldCB->getAttributes();

      // Collect the new argument operands for the replacement call site.
      // Replaced arguments get their operands from the ACS repair callback,
      // others are forwarded unchanged.
      SmallVector<Value *, 16> NewArgOperands;
      SmallVector<AttributeSet, 16> NewArgOperandAttributes;
      for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
        unsigned NewFirstArgNum = NewArgOperands.size();
        (void)NewFirstArgNum; // only used inside assert.
        if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
          if (ARI->ACSRepairCB)
            ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
          assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
                     NewArgOperands.size() &&
                 "ACS repair callback did not provide as many operand as new "
                 "types were registered!");
          // TODO: Expose the attribute set to the ACS repair callback
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttributes(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB =
            InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
                               NewArgOperands, OperandBundleDefs, "", OldCB);
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
                                       "", OldCB);
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      uint64_t W;
      if (OldCB->extractProfTotalWeight(W))
        NewCB->setProfWeight(W);
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->setDebugLoc(OldCB->getDebugLoc());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttributes(),
          OldCallAttributeList.getRetAttributes(), NewArgOperandAttributes));

      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    bool Success =
        checkForAllCallSites(CallSiteReplacementCreator, *OldFn, true, nullptr);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments. Replaced arguments are handled by the callee
    // repair callback (which may consume several new arguments), others are
    // renamed and RAUW'ed one to one.
    auto OldFnArgIt = OldFn->arg_begin();
    auto NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // The old function is now unused; schedule it for deletion.
    ToBeDeletedFunctions.insert(OldFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}
5949 
5950 void Attributor::initializeInformationCache(Function &F) {
5951 
5952   // Walk all instructions to find interesting instructions that might be
5953   // queried by abstract attributes during their initialization or update.
5954   // This has to happen before we create attributes.
5955   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
5956   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
5957 
5958   for (Instruction &I : instructions(&F)) {
5959     bool IsInterestingOpcode = false;
5960 
5961     // To allow easy access to all instructions in a function with a given
5962     // opcode we store them in the InfoCache. As not all opcodes are interesting
5963     // to concrete attributes we only cache the ones that are as identified in
5964     // the following switch.
5965     // Note: There are no concrete attributes now so this is initially empty.
5966     switch (I.getOpcode()) {
5967     default:
5968       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
5969              "New call site/base instruction type needs to be known int the "
5970              "Attributor.");
5971       break;
5972     case Instruction::Load:
5973       // The alignment of a pointer is interesting for loads.
5974     case Instruction::Store:
5975       // The alignment of a pointer is interesting for stores.
5976     case Instruction::Call:
5977     case Instruction::CallBr:
5978     case Instruction::Invoke:
5979     case Instruction::CleanupRet:
5980     case Instruction::CatchSwitch:
5981     case Instruction::AtomicRMW:
5982     case Instruction::AtomicCmpXchg:
5983     case Instruction::Br:
5984     case Instruction::Resume:
5985     case Instruction::Ret:
5986       IsInterestingOpcode = true;
5987     }
5988     if (IsInterestingOpcode)
5989       InstOpcodeMap[I.getOpcode()].push_back(&I);
5990     if (I.mayReadOrWriteMemory())
5991       ReadOrWriteInsts.push_back(&I);
5992   }
5993 }
5994 
5995 void Attributor::recordDependence(const AbstractAttribute &FromAA,
5996                                   const AbstractAttribute &ToAA,
5997                                   DepClassTy DepClass) {
5998   if (FromAA.getState().isAtFixpoint())
5999     return;
6000 
6001   if (DepClass == DepClassTy::REQUIRED)
6002     QueryMap[&FromAA].RequiredAAs.insert(
6003         const_cast<AbstractAttribute *>(&ToAA));
6004   else
6005     QueryMap[&FromAA].OptionalAAs.insert(
6006         const_cast<AbstractAttribute *>(&ToAA));
6007   QueriedNonFixAA = true;
6008 }
6009 
6010 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
6011   if (!VisitedFunctions.insert(&F).second)
6012     return;
6013   if (F.isDeclaration())
6014     return;
6015 
6016   IRPosition FPos = IRPosition::function(F);
6017 
6018   // Check for dead BasicBlocks in every function.
6019   // We need dead instruction detection because we do not want to deal with
6020   // broken IR in which SSA rules do not apply.
6021   getOrCreateAAFor<AAIsDead>(FPos);
6022 
6023   // Every function might be "will-return".
6024   getOrCreateAAFor<AAWillReturn>(FPos);
6025 
6026   // Every function might contain instructions that cause "undefined behavior".
6027   getOrCreateAAFor<AAUndefinedBehavior>(FPos);
6028 
6029   // Every function can be nounwind.
6030   getOrCreateAAFor<AANoUnwind>(FPos);
6031 
6032   // Every function might be marked "nosync"
6033   getOrCreateAAFor<AANoSync>(FPos);
6034 
6035   // Every function might be "no-free".
6036   getOrCreateAAFor<AANoFree>(FPos);
6037 
6038   // Every function might be "no-return".
6039   getOrCreateAAFor<AANoReturn>(FPos);
6040 
6041   // Every function might be "no-recurse".
6042   getOrCreateAAFor<AANoRecurse>(FPos);
6043 
6044   // Every function might be "readnone/readonly/writeonly/...".
6045   getOrCreateAAFor<AAMemoryBehavior>(FPos);
6046 
6047   // Every function might be applicable for Heap-To-Stack conversion.
6048   if (EnableHeapToStack)
6049     getOrCreateAAFor<AAHeapToStack>(FPos);
6050 
6051   // Return attributes are only appropriate if the return type is non void.
6052   Type *ReturnType = F.getReturnType();
6053   if (!ReturnType->isVoidTy()) {
6054     // Argument attribute "returned" --- Create only one per function even
6055     // though it is an argument attribute.
6056     getOrCreateAAFor<AAReturnedValues>(FPos);
6057 
6058     IRPosition RetPos = IRPosition::returned(F);
6059 
6060     // Every returned value might be dead.
6061     getOrCreateAAFor<AAIsDead>(RetPos);
6062 
6063     // Every function might be simplified.
6064     getOrCreateAAFor<AAValueSimplify>(RetPos);
6065 
6066     if (ReturnType->isPointerTy()) {
6067 
6068       // Every function with pointer return type might be marked align.
6069       getOrCreateAAFor<AAAlign>(RetPos);
6070 
6071       // Every function with pointer return type might be marked nonnull.
6072       getOrCreateAAFor<AANonNull>(RetPos);
6073 
6074       // Every function with pointer return type might be marked noalias.
6075       getOrCreateAAFor<AANoAlias>(RetPos);
6076 
6077       // Every function with pointer return type might be marked
6078       // dereferenceable.
6079       getOrCreateAAFor<AADereferenceable>(RetPos);
6080     }
6081   }
6082 
6083   for (Argument &Arg : F.args()) {
6084     IRPosition ArgPos = IRPosition::argument(Arg);
6085 
6086     // Every argument might be simplified.
6087     getOrCreateAAFor<AAValueSimplify>(ArgPos);
6088 
6089     if (Arg.getType()->isPointerTy()) {
6090       // Every argument with pointer type might be marked nonnull.
6091       getOrCreateAAFor<AANonNull>(ArgPos);
6092 
6093       // Every argument with pointer type might be marked noalias.
6094       getOrCreateAAFor<AANoAlias>(ArgPos);
6095 
6096       // Every argument with pointer type might be marked dereferenceable.
6097       getOrCreateAAFor<AADereferenceable>(ArgPos);
6098 
6099       // Every argument with pointer type might be marked align.
6100       getOrCreateAAFor<AAAlign>(ArgPos);
6101 
6102       // Every argument with pointer type might be marked nocapture.
6103       getOrCreateAAFor<AANoCapture>(ArgPos);
6104 
6105       // Every argument with pointer type might be marked
6106       // "readnone/readonly/writeonly/..."
6107       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
6108 
6109       // Every argument with pointer type might be marked nofree.
6110       getOrCreateAAFor<AANoFree>(ArgPos);
6111     }
6112   }
6113 
6114   auto CallSitePred = [&](Instruction &I) -> bool {
6115     CallSite CS(&I);
6116     if (Function *Callee = CS.getCalledFunction()) {
      // Skip declarations except if annotations on their call sites were
      // explicitly requested.
6119       if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
6120           !Callee->hasMetadata(LLVMContext::MD_callback))
6121         return true;
6122 
6123       if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
6124         IRPosition CSRetPos = IRPosition::callsite_returned(CS);
6125 
6126         // Call site return values might be dead.
6127         getOrCreateAAFor<AAIsDead>(CSRetPos);
6128       }
6129 
6130       for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {
6131 
6132         IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
6133 
6134         // Every call site argument might be dead.
6135         getOrCreateAAFor<AAIsDead>(CSArgPos);
6136 
6137         // Call site argument might be simplified.
6138         getOrCreateAAFor<AAValueSimplify>(CSArgPos);
6139 
6140         if (!CS.getArgument(i)->getType()->isPointerTy())
6141           continue;
6142 
6143         // Call site argument attribute "non-null".
6144         getOrCreateAAFor<AANonNull>(CSArgPos);
6145 
6146         // Call site argument attribute "no-alias".
6147         getOrCreateAAFor<AANoAlias>(CSArgPos);
6148 
6149         // Call site argument attribute "dereferenceable".
6150         getOrCreateAAFor<AADereferenceable>(CSArgPos);
6151 
6152         // Call site argument attribute "align".
6153         getOrCreateAAFor<AAAlign>(CSArgPos);
6154 
6155         // Call site argument attribute
6156         // "readnone/readonly/writeonly/..."
6157         getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);
6158 
6159         // Call site argument attribute "nofree".
6160         getOrCreateAAFor<AANoFree>(CSArgPos);
6161       }
6162     }
6163     return true;
6164   };
6165 
6166   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
6167   bool Success, AnyDead = false;
6168   Success = checkForAllInstructionsImpl(
6169       OpcodeInstMap, CallSitePred, nullptr, AnyDead,
6170       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
6171        (unsigned)Instruction::Call});
6172   (void)Success;
6173   assert(Success && !AnyDead && "Expected the check call to be successful!");
6174 
6175   auto LoadStorePred = [&](Instruction &I) -> bool {
6176     if (isa<LoadInst>(I))
6177       getOrCreateAAFor<AAAlign>(
6178           IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
6179     else
6180       getOrCreateAAFor<AAAlign>(
6181           IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
6182     return true;
6183   };
6184   Success = checkForAllInstructionsImpl(
6185       OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
6186       {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
6187   (void)Success;
6188   assert(Success && !AnyDead && "Expected the check call to be successful!");
6189 }
6190 
6191 /// Helpers to ease debugging through output streams and print calls.
6192 ///
6193 ///{
6194 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
6195   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
6196 }
6197 
6198 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
6199   switch (AP) {
6200   case IRPosition::IRP_INVALID:
6201     return OS << "inv";
6202   case IRPosition::IRP_FLOAT:
6203     return OS << "flt";
6204   case IRPosition::IRP_RETURNED:
6205     return OS << "fn_ret";
6206   case IRPosition::IRP_CALL_SITE_RETURNED:
6207     return OS << "cs_ret";
6208   case IRPosition::IRP_FUNCTION:
6209     return OS << "fn";
6210   case IRPosition::IRP_CALL_SITE:
6211     return OS << "cs";
6212   case IRPosition::IRP_ARGUMENT:
6213     return OS << "arg";
6214   case IRPosition::IRP_CALL_SITE_ARGUMENT:
6215     return OS << "cs_arg";
6216   }
6217   llvm_unreachable("Unknown attribute position!");
6218 }
6219 
6220 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
6221   const Value &AV = Pos.getAssociatedValue();
6222   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
6223             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
6224 }
6225 
6226 template <typename base_ty, base_ty BestState, base_ty WorstState>
6227 raw_ostream &llvm::
6228 operator<<(raw_ostream &OS,
6229            const IntegerStateBase<base_ty, BestState, WorstState> &S) {
6230   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
6231             << static_cast<const AbstractState &>(S);
6232 }
6233 
6234 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
6235   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
6236 }
6237 
6238 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
6239   AA.print(OS);
6240   return OS;
6241 }
6242 
6243 void AbstractAttribute::print(raw_ostream &OS) const {
6244   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
6245      << "]";
6246 }
6247 ///}
6248 
6249 /// ----------------------------------------------------------------------------
6250 ///                       Pass (Manager) Boilerplate
6251 /// ----------------------------------------------------------------------------
6252 
6253 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
6254   if (DisableAttributor)
6255     return false;
6256 
6257   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
6258                     << " functions.\n");
6259 
6260   // Create an Attributor and initially empty information cache that is filled
6261   // while we identify default attribute opportunities.
6262   InformationCache InfoCache(M, AG);
6263   Attributor A(InfoCache, DepRecInterval);
6264 
6265   for (Function &F : M)
6266     A.initializeInformationCache(F);
6267 
6268   for (Function &F : M) {
6269     if (F.hasExactDefinition())
6270       NumFnWithExactDefinition++;
6271     else
6272       NumFnWithoutExactDefinition++;
6273 
6274     // We look at internal functions only on-demand but if any use is not a
6275     // direct call, we have to do it eagerly.
6276     if (F.hasLocalLinkage()) {
6277       if (llvm::all_of(F.uses(), [](const Use &U) {
6278             return ImmutableCallSite(U.getUser()) &&
6279                    ImmutableCallSite(U.getUser()).isCallee(&U);
6280           }))
6281         continue;
6282     }
6283 
6284     // Populate the Attributor with abstract attribute opportunities in the
6285     // function and the information cache with IR information.
6286     A.identifyDefaultAbstractAttributes(F);
6287   }
6288 
6289   bool Changed = A.run(M) == ChangeStatus::CHANGED;
6290   assert(!verifyModule(M, &errs()) && "Module verification failed!");
6291   return Changed;
6292 }
6293 
6294 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
6295   AnalysisGetter AG(AM);
6296   if (runAttributorOnModule(M, AG)) {
6297     // FIXME: Think about passes we will preserve and add them here.
6298     return PreservedAnalyses::none();
6299   }
6300   return PreservedAnalyses::all();
6301 }
6302 
6303 namespace {
6304 
6305 struct AttributorLegacyPass : public ModulePass {
6306   static char ID;
6307 
6308   AttributorLegacyPass() : ModulePass(ID) {
6309     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
6310   }
6311 
6312   bool runOnModule(Module &M) override {
6313     if (skipModule(M))
6314       return false;
6315 
6316     AnalysisGetter AG;
6317     return runAttributorOnModule(M, AG);
6318   }
6319 
6320   void getAnalysisUsage(AnalysisUsage &AU) const override {
6321     // FIXME: Think about passes we will preserve and add them here.
6322     AU.addRequired<TargetLibraryInfoWrapperPass>();
6323   }
6324 };
6325 
6326 } // end anonymous namespace
6327 
6328 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
6329 
char AttributorLegacyPass::ID = 0;

// Unique ID tokens for the abstract attribute classes. Only the address of
// each ID is used, so the value is irrelevant; a definition per class is
// required for the identification scheme to work.
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAMemoryBehavior::ID = 0;
6350 
6351 // Macro magic to create the static generator function for attributes that
6352 // follow the naming scheme.
6353 
// Emit a switch case that aborts: \p CLASS cannot be created for the position
// kind \p PK (described in the message by \p POS_NAME).
#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

// Emit a switch case that instantiates the CLASS##SUFFIX implementation of
// \p CLASS at position \p IRP for the position kind \p PK.
#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new CLASS##SUFFIX(IRP);                                               \
    break;

// Generator for attributes valid at function and call-site positions only;
// all other position kinds are rejected with llvm_unreachable.
#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

// Generator for attributes valid at value positions (floating, argument,
// returned, call-site returned/argument) but not whole-function positions.
#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// Generator for attributes valid at every position kind except IRP_INVALID.
#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// Generator for attributes valid at the function position only.
#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

// Generator for attributes valid everywhere except the returned position.
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
6442 
// Attributes creatable at function and call-site positions.
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)

// Attributes creatable at value positions only.
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)

// Attributes creatable at every (valid) position.
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

// Attributes creatable at the function position only.
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

// Attributes creatable everywhere except the returned position.
CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

// The generator macros are local to this file; clean them up.
#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV
6473 
// Register the legacy pass and its analysis dependency with the legacy pass
// manager under the command-line name "attributor".
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)
6479