1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an inter procedural pass that deduces and/or propagating
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/LazyValueInfo.h"
27 #include "llvm/Analysis/Loads.h"
28 #include "llvm/Analysis/MemoryBuiltins.h"
29 #include "llvm/Analysis/ScalarEvolution.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/CFG.h"
34 #include "llvm/IR/InstIterator.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/Verifier.h"
37 #include "llvm/InitializePasses.h"
38 #include "llvm/Support/CommandLine.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
42 #include "llvm/Transforms/Utils/Local.h"
43 
44 #include <cassert>
45 
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "attributor"
49 
50 STATISTIC(NumFnWithExactDefinition,
51           "Number of function with exact definitions");
52 STATISTIC(NumFnWithoutExactDefinition,
53           "Number of function without exact definitions");
54 STATISTIC(NumAttributesTimedOut,
55           "Number of abstract attributes timed out before fixpoint");
56 STATISTIC(NumAttributesValidFixpoint,
57           "Number of abstract attributes in a valid fixpoint state");
58 STATISTIC(NumAttributesManifested,
59           "Number of abstract attributes manifested in IR");
60 STATISTIC(NumAttributesFixedDueToRequiredDependences,
61           "Number of abstract attributes fixed due to required dependences");
62 
63 // Some helper macros to deal with statistics tracking.
64 //
65 // Usage:
66 // For simple IR attribute tracking overload trackStatistics in the abstract
67 // attribute and choose the right STATS_DECLTRACK_********* macro,
68 // e.g.,:
69 //  void trackStatistics() const override {
70 //    STATS_DECLTRACK_ARG_ATTR(returned)
71 //  }
72 // If there is a single "increment" side one can use the macro
73 // STATS_DECLTRACK with a custom message. If there are multiple increment
74 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
75 //
76 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
77   ("Number of " #TYPE " marked '" #NAME "'")
78 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
79 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
80 #define STATS_DECL(NAME, TYPE, MSG)                                            \
81   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
82 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
83 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
84   {                                                                            \
85     STATS_DECL(NAME, TYPE, MSG)                                                \
86     STATS_TRACK(NAME, TYPE)                                                    \
87   }
88 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
89   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
90 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
91   STATS_DECLTRACK(NAME, CSArguments,                                           \
92                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
93 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
94   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
95 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
96   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
97 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
99                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
100 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
101   STATS_DECLTRACK(NAME, CSReturn,                                              \
102                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
103 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
104   STATS_DECLTRACK(NAME, Floating,                                              \
105                   ("Number of floating values known to be '" #NAME "'"))
106 
107 // Specialization of the operator<< for abstract attributes subclasses. This
108 // disambiguates situations where multiple operators are applicable.
109 namespace llvm {
110 #define PIPE_OPERATOR(CLASS)                                                   \
111   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
112     return OS << static_cast<const AbstractAttribute &>(AA);                   \
113   }
114 
115 PIPE_OPERATOR(AAIsDead)
116 PIPE_OPERATOR(AANoUnwind)
117 PIPE_OPERATOR(AANoSync)
118 PIPE_OPERATOR(AANoRecurse)
119 PIPE_OPERATOR(AAWillReturn)
120 PIPE_OPERATOR(AANoReturn)
121 PIPE_OPERATOR(AAReturnedValues)
122 PIPE_OPERATOR(AANonNull)
123 PIPE_OPERATOR(AANoAlias)
124 PIPE_OPERATOR(AADereferenceable)
125 PIPE_OPERATOR(AAAlign)
126 PIPE_OPERATOR(AANoCapture)
127 PIPE_OPERATOR(AAValueSimplify)
128 PIPE_OPERATOR(AANoFree)
129 PIPE_OPERATOR(AAHeapToStack)
130 PIPE_OPERATOR(AAReachability)
131 PIPE_OPERATOR(AAMemoryBehavior)
132 PIPE_OPERATOR(AAValueConstantRange)
133 
134 #undef PIPE_OPERATOR
135 } // namespace llvm
136 
137 // TODO: Determine a good default value.
138 //
139 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
140 // (when run with the first 5 abstract attributes). The results also indicate
141 // that we never reach 32 iterations but always find a fixpoint sooner.
142 //
143 // This will become more evolved once we perform two interleaved fixpoint
144 // iterations: bottom-up and top-down.
145 static cl::opt<unsigned>
146     MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
147                           cl::desc("Maximal number of fixpoint iterations."),
148                           cl::init(32));
149 static cl::opt<bool> VerifyMaxFixpointIterations(
150     "attributor-max-iterations-verify", cl::Hidden,
151     cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
152     cl::init(false));
153 
154 static cl::opt<bool> DisableAttributor(
155     "attributor-disable", cl::Hidden,
156     cl::desc("Disable the attributor inter-procedural deduction pass."),
157     cl::init(true));
158 
159 static cl::opt<bool> AnnotateDeclarationCallSites(
160     "attributor-annotate-decl-cs", cl::Hidden,
161     cl::desc("Annotate call sites of function declarations."), cl::init(false));
162 
163 static cl::opt<bool> ManifestInternal(
164     "attributor-manifest-internal", cl::Hidden,
165     cl::desc("Manifest Attributor internal string attributes."),
166     cl::init(false));
167 
168 static cl::opt<unsigned> DepRecInterval(
169     "attributor-dependence-recompute-interval", cl::Hidden,
170     cl::desc("Number of iterations until dependences are recomputed."),
171     cl::init(4));
172 
173 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
174                                        cl::init(true), cl::Hidden);
175 
176 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
177                                        cl::Hidden);
178 
179 /// Logic operators for the change status enum class.
180 ///
181 ///{
182 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
183   return l == ChangeStatus::CHANGED ? l : r;
184 }
185 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
186   return l == ChangeStatus::UNCHANGED ? l : r;
187 }
188 ///}
189 
190 Argument *IRPosition::getAssociatedArgument() const {
191   if (getPositionKind() == IRP_ARGUMENT)
192     return cast<Argument>(&getAnchorValue());
193 
194   // Not an Argument and no argument number means this is not a call site
195   // argument, thus we cannot find a callback argument to return.
196   int ArgNo = getArgNo();
197   if (ArgNo < 0)
198     return nullptr;
199 
200   // Use abstract call sites to make the connection between the call site
201   // values and the ones in callbacks. If a callback was found that makes use
202   // of the underlying call site operand, we want the corresponding callback
203   // callee argument and not the direct callee argument.
204   Optional<Argument *> CBCandidateArg;
205   SmallVector<const Use *, 4> CBUses;
206   ImmutableCallSite ICS(&getAnchorValue());
207   AbstractCallSite::getCallbackUses(ICS, CBUses);
208   for (const Use *U : CBUses) {
209     AbstractCallSite ACS(U);
210     assert(ACS && ACS.isCallbackCall());
211     if (!ACS.getCalledFunction())
212       continue;
213 
214     for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
215 
216       // Test if the underlying call site operand is argument number u of the
217       // callback callee.
218       if (ACS.getCallArgOperandNo(u) != ArgNo)
219         continue;
220 
221       assert(ACS.getCalledFunction()->arg_size() > u &&
222              "ACS mapped into var-args arguments!");
223       if (CBCandidateArg.hasValue()) {
224         CBCandidateArg = nullptr;
225         break;
226       }
227       CBCandidateArg = ACS.getCalledFunction()->getArg(u);
228     }
229   }
230 
231   // If we found a unique callback candidate argument, return it.
232   if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
233     return CBCandidateArg.getValue();
234 
235   // If no callbacks were found, or none used the underlying call site operand
236   // exclusively, use the direct callee argument if available.
237   const Function *Callee = ICS.getCalledFunction();
238   if (Callee && Callee->arg_size() > unsigned(ArgNo))
239     return Callee->getArg(ArgNo);
240 
241   return nullptr;
242 }
243 
244 /// For calls (and invokes) we will only replace instruction uses to not disturb
245 /// the old style call graph.
246 /// TODO: Remove this once we get rid of the old PM.
247 static void replaceAllInstructionUsesWith(Value &Old, Value &New) {
248   if (!isa<CallBase>(Old))
249     return Old.replaceAllUsesWith(&New);
250   SmallVector<Use *, 8> Uses;
251   for (Use &U : Old.uses())
252     if (isa<Instruction>(U.getUser()))
253       Uses.push_back(&U);
254   for (Use *U : Uses)
255     U->set(&New);
256 }
257 
258 static Optional<ConstantInt *>
259 getAssumedConstant(Attributor &A, const Value &V, const AbstractAttribute &AA,
260                    bool &UsedAssumedInformation) {
261   const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
262       AA, IRPosition::value(V), /* TrackDependence */ false);
263   Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
264   bool IsKnown = ValueSimplifyAA.isKnown();
265   UsedAssumedInformation |= !IsKnown;
266   if (!SimplifiedV.hasValue()) {
267     A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
268     return llvm::None;
269   }
270   if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
271     A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
272     return llvm::None;
273   }
274   ConstantInt *CI = dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
275   if (CI)
276     A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
277   return CI;
278 }
279 
280 /// Recursively visit all values that might become \p IRP at some point. This
281 /// will be done by looking through cast instructions, selects, phis, and calls
282 /// with the "returned" attribute. Once we cannot look through the value any
283 /// further, the callback \p VisitValueCB is invoked and passed the current
284 /// value, the \p State, and a flag to indicate if we stripped anything. To
285 /// limit how much effort is invested, we will never visit more values than
286 /// specified by \p MaxValues.
287 template <typename AAType, typename StateTy>
288 static bool genericValueTraversal(
289     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
290     const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
291     int MaxValues = 8) {
292 
293   const AAIsDead *LivenessAA = nullptr;
294   if (IRP.getAnchorScope())
295     LivenessAA = &A.getAAFor<AAIsDead>(
296         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
297         /* TrackDependence */ false);
298   bool AnyDead = false;
299 
300   // TODO: Use Positions here to allow context sensitivity in VisitValueCB
301   SmallPtrSet<Value *, 16> Visited;
302   SmallVector<Value *, 16> Worklist;
303   Worklist.push_back(&IRP.getAssociatedValue());
304 
305   int Iteration = 0;
306   do {
307     Value *V = Worklist.pop_back_val();
308 
309     // Check if we should process the current value. To prevent endless
310     // recursion keep a record of the values we followed!
311     if (!Visited.insert(V).second)
312       continue;
313 
314     // Make sure we limit the compile time for complex expressions.
315     if (Iteration++ >= MaxValues)
316       return false;
317 
318     // Explicitly look through calls with a "returned" attribute if we do
319     // not have a pointer as stripPointerCasts only works on them.
320     Value *NewV = nullptr;
321     if (V->getType()->isPointerTy()) {
322       NewV = V->stripPointerCasts();
323     } else {
324       CallSite CS(V);
325       if (CS && CS.getCalledFunction()) {
326         for (Argument &Arg : CS.getCalledFunction()->args())
327           if (Arg.hasReturnedAttr()) {
328             NewV = CS.getArgOperand(Arg.getArgNo());
329             break;
330           }
331       }
332     }
333     if (NewV && NewV != V) {
334       Worklist.push_back(NewV);
335       continue;
336     }
337 
338     // Look through select instructions, visit both potential values.
339     if (auto *SI = dyn_cast<SelectInst>(V)) {
340       Worklist.push_back(SI->getTrueValue());
341       Worklist.push_back(SI->getFalseValue());
342       continue;
343     }
344 
345     // Look through phi nodes, visit all live operands.
346     if (auto *PHI = dyn_cast<PHINode>(V)) {
347       assert(LivenessAA &&
348              "Expected liveness in the presence of instructions!");
349       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
350         const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
351         if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
352           AnyDead = true;
353           continue;
354         }
355         Worklist.push_back(PHI->getIncomingValue(u));
356       }
357       continue;
358     }
359 
360     // Once a leaf is reached we inform the user through the callback.
361     if (!VisitValueCB(*V, State, Iteration > 1))
362       return false;
363   } while (!Worklist.empty());
364 
365   // If we actually used liveness information so we have to record a dependence.
366   if (AnyDead)
367     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
368 
369   // All values have been visited.
370   return true;
371 }
372 
373 /// Return true if \p New is equal or worse than \p Old.
374 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
375   if (!Old.isIntAttribute())
376     return true;
377 
378   return Old.getValueAsInt() >= New.getValueAsInt();
379 }
380 
381 /// Return true if the information provided by \p Attr was added to the
382 /// attribute list \p Attrs. This is only the case if it was not already present
383 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
384 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
385                              AttributeList &Attrs, int AttrIdx) {
386 
387   if (Attr.isEnumAttribute()) {
388     Attribute::AttrKind Kind = Attr.getKindAsEnum();
389     if (Attrs.hasAttribute(AttrIdx, Kind))
390       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
391         return false;
392     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
393     return true;
394   }
395   if (Attr.isStringAttribute()) {
396     StringRef Kind = Attr.getKindAsString();
397     if (Attrs.hasAttribute(AttrIdx, Kind))
398       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
399         return false;
400     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
401     return true;
402   }
403   if (Attr.isIntAttribute()) {
404     Attribute::AttrKind Kind = Attr.getKindAsEnum();
405     if (Attrs.hasAttribute(AttrIdx, Kind))
406       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
407         return false;
408     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
409     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
410     return true;
411   }
412 
413   llvm_unreachable("Expected enum or string attribute!");
414 }
415 
416 static const Value *
417 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
418                                      const DataLayout &DL,
419                                      bool AllowNonInbounds = false) {
420   const Value *Ptr =
421       Attributor::getPointerOperand(I, /* AllowVolatile */ false);
422   if (!Ptr)
423     return nullptr;
424 
425   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
426                                           AllowNonInbounds);
427 }
428 
429 ChangeStatus AbstractAttribute::update(Attributor &A) {
430   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
431   if (getState().isAtFixpoint())
432     return HasChanged;
433 
434   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
435 
436   HasChanged = updateImpl(A);
437 
438   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
439                     << "\n");
440 
441   return HasChanged;
442 }
443 
444 ChangeStatus
445 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
446                                    const ArrayRef<Attribute> &DeducedAttrs) {
447   Function *ScopeFn = IRP.getAssociatedFunction();
448   IRPosition::Kind PK = IRP.getPositionKind();
449 
450   // In the following some generic code that will manifest attributes in
451   // DeducedAttrs if they improve the current IR. Due to the different
452   // annotation positions we use the underlying AttributeList interface.
453 
454   AttributeList Attrs;
455   switch (PK) {
456   case IRPosition::IRP_INVALID:
457   case IRPosition::IRP_FLOAT:
458     return ChangeStatus::UNCHANGED;
459   case IRPosition::IRP_ARGUMENT:
460   case IRPosition::IRP_FUNCTION:
461   case IRPosition::IRP_RETURNED:
462     Attrs = ScopeFn->getAttributes();
463     break;
464   case IRPosition::IRP_CALL_SITE:
465   case IRPosition::IRP_CALL_SITE_RETURNED:
466   case IRPosition::IRP_CALL_SITE_ARGUMENT:
467     Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
468     break;
469   }
470 
471   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
472   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
473   for (const Attribute &Attr : DeducedAttrs) {
474     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
475       continue;
476 
477     HasChanged = ChangeStatus::CHANGED;
478   }
479 
480   if (HasChanged == ChangeStatus::UNCHANGED)
481     return HasChanged;
482 
483   switch (PK) {
484   case IRPosition::IRP_ARGUMENT:
485   case IRPosition::IRP_FUNCTION:
486   case IRPosition::IRP_RETURNED:
487     ScopeFn->setAttributes(Attrs);
488     break;
489   case IRPosition::IRP_CALL_SITE:
490   case IRPosition::IRP_CALL_SITE_RETURNED:
491   case IRPosition::IRP_CALL_SITE_ARGUMENT:
492     CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
493     break;
494   case IRPosition::IRP_INVALID:
495   case IRPosition::IRP_FLOAT:
496     break;
497   }
498 
499   return HasChanged;
500 }
501 
502 const IRPosition IRPosition::EmptyKey(255);
503 const IRPosition IRPosition::TombstoneKey(256);
504 
505 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
506   IRPositions.emplace_back(IRP);
507 
508   ImmutableCallSite ICS(&IRP.getAnchorValue());
509   switch (IRP.getPositionKind()) {
510   case IRPosition::IRP_INVALID:
511   case IRPosition::IRP_FLOAT:
512   case IRPosition::IRP_FUNCTION:
513     return;
514   case IRPosition::IRP_ARGUMENT:
515   case IRPosition::IRP_RETURNED:
516     IRPositions.emplace_back(
517         IRPosition::function(*IRP.getAssociatedFunction()));
518     return;
519   case IRPosition::IRP_CALL_SITE:
520     assert(ICS && "Expected call site!");
521     // TODO: We need to look at the operand bundles similar to the redirection
522     //       in CallBase.
523     if (!ICS.hasOperandBundles())
524       if (const Function *Callee = ICS.getCalledFunction())
525         IRPositions.emplace_back(IRPosition::function(*Callee));
526     return;
527   case IRPosition::IRP_CALL_SITE_RETURNED:
528     assert(ICS && "Expected call site!");
529     // TODO: We need to look at the operand bundles similar to the redirection
530     //       in CallBase.
531     if (!ICS.hasOperandBundles()) {
532       if (const Function *Callee = ICS.getCalledFunction()) {
533         IRPositions.emplace_back(IRPosition::returned(*Callee));
534         IRPositions.emplace_back(IRPosition::function(*Callee));
535       }
536     }
537     IRPositions.emplace_back(
538         IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
539     return;
540   case IRPosition::IRP_CALL_SITE_ARGUMENT: {
541     int ArgNo = IRP.getArgNo();
542     assert(ICS && ArgNo >= 0 && "Expected call site!");
543     // TODO: We need to look at the operand bundles similar to the redirection
544     //       in CallBase.
545     if (!ICS.hasOperandBundles()) {
546       const Function *Callee = ICS.getCalledFunction();
547       if (Callee && Callee->arg_size() > unsigned(ArgNo))
548         IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
549       if (Callee)
550         IRPositions.emplace_back(IRPosition::function(*Callee));
551     }
552     IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
553     return;
554   }
555   }
556 }
557 
558 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
559                          bool IgnoreSubsumingPositions) const {
560   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
561     for (Attribute::AttrKind AK : AKs)
562       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
563         return true;
564     // The first position returned by the SubsumingPositionIterator is
565     // always the position itself. If we ignore subsuming positions we
566     // are done after the first iteration.
567     if (IgnoreSubsumingPositions)
568       break;
569   }
570   return false;
571 }
572 
573 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
574                           SmallVectorImpl<Attribute> &Attrs,
575                           bool IgnoreSubsumingPositions) const {
576   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
577     for (Attribute::AttrKind AK : AKs) {
578       const Attribute &Attr = EquivIRP.getAttr(AK);
579       if (Attr.getKindAsEnum() == AK)
580         Attrs.push_back(Attr);
581     }
582     // The first position returned by the SubsumingPositionIterator is
583     // always the position itself. If we ignore subsuming positions we
584     // are done after the first iteration.
585     if (IgnoreSubsumingPositions)
586       break;
587   }
588 }
589 
590 void IRPosition::verify() {
591   switch (KindOrArgNo) {
592   default:
593     assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
594     assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
595            "Expected call base or argument for positive attribute index!");
596     if (isa<Argument>(AnchorVal)) {
597       assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
598              "Argument number mismatch!");
599       assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
600              "Associated value mismatch!");
601     } else {
602       assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
603              "Call site argument number mismatch!");
604       assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
605                  &getAssociatedValue() &&
606              "Associated value mismatch!");
607     }
608     break;
609   case IRP_INVALID:
610     assert(!AnchorVal && "Expected no value for an invalid position!");
611     break;
612   case IRP_FLOAT:
613     assert((!isa<CallBase>(&getAssociatedValue()) &&
614             !isa<Argument>(&getAssociatedValue())) &&
615            "Expected specialized kind for call base and argument values!");
616     break;
617   case IRP_RETURNED:
618     assert(isa<Function>(AnchorVal) &&
619            "Expected function for a 'returned' position!");
620     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
621     break;
622   case IRP_CALL_SITE_RETURNED:
623     assert((isa<CallBase>(AnchorVal)) &&
624            "Expected call base for 'call site returned' position!");
625     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
626     break;
627   case IRP_CALL_SITE:
628     assert((isa<CallBase>(AnchorVal)) &&
629            "Expected call base for 'call site function' position!");
630     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
631     break;
632   case IRP_FUNCTION:
633     assert(isa<Function>(AnchorVal) &&
634            "Expected function for a 'function' position!");
635     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
636     break;
637   }
638 }
639 
640 namespace {
641 /// Helper function to clamp a state \p S of type \p StateType with the
642 /// information in \p R and indicate/return if \p S did change (as-in update is
643 /// required to be run again).
644 template <typename StateType>
645 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
646   auto Assumed = S.getAssumed();
647   S ^= R;
648   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
649                                    : ChangeStatus::CHANGED;
650 }
651 
652 /// Clamp the information known for all returned values of a function
653 /// (identified by \p QueryingAA) into \p S.
654 template <typename AAType, typename StateType = typename AAType::StateType>
655 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
656                                      StateType &S) {
657   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
658                     << QueryingAA << " into " << S << "\n");
659 
660   assert((QueryingAA.getIRPosition().getPositionKind() ==
661               IRPosition::IRP_RETURNED ||
662           QueryingAA.getIRPosition().getPositionKind() ==
663               IRPosition::IRP_CALL_SITE_RETURNED) &&
664          "Can only clamp returned value states for a function returned or call "
665          "site returned position!");
666 
667   // Use an optional state as there might not be any return values and we want
668   // to join (IntegerState::operator&) the state of all there are.
669   Optional<StateType> T;
670 
671   // Callback for each possibly returned value.
672   auto CheckReturnValue = [&](Value &RV) -> bool {
673     const IRPosition &RVPos = IRPosition::value(RV);
674     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
675     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
676                       << " @ " << RVPos << "\n");
677     const StateType &AAS = static_cast<const StateType &>(AA.getState());
678     if (T.hasValue())
679       *T &= AAS;
680     else
681       T = AAS;
682     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
683                       << "\n");
684     return T->isValidState();
685   };
686 
687   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
688     S.indicatePessimisticFixpoint();
689   else if (T.hasValue())
690     S ^= *T;
691 }
692 
693 /// Helper class to compose two generic deduction
694 template <typename AAType, typename Base, typename StateType,
695           template <typename...> class F, template <typename...> class G>
696 struct AAComposeTwoGenericDeduction
697     : public F<AAType, G<AAType, Base, StateType>, StateType> {
698   AAComposeTwoGenericDeduction(const IRPosition &IRP)
699       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
700 
701   /// See AbstractAttribute::updateImpl(...).
702   ChangeStatus updateImpl(Attributor &A) override {
703     ChangeStatus ChangedF =
704         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
705     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
706     return ChangedF | ChangedG;
707   }
708 };
709 
710 /// Helper class for generic deduction: return value -> returned position.
711 template <typename AAType, typename Base,
712           typename StateType = typename AAType::StateType>
713 struct AAReturnedFromReturnedValues : public Base {
714   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
715 
716   /// See AbstractAttribute::updateImpl(...).
717   ChangeStatus updateImpl(Attributor &A) override {
718     StateType S;
719     clampReturnedValueStates<AAType, StateType>(A, *this, S);
720     // TODO: If we know we visited all returned values, thus no are assumed
721     // dead, we can take the known information from the state T.
722     return clampStateAndIndicateChange<StateType>(this->getState(), S);
723   }
724 };
725 
726 /// Clamp the information known at all call sites for a given argument
727 /// (identified by \p QueryingAA) into \p S.
728 template <typename AAType, typename StateType = typename AAType::StateType>
729 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
730                                         StateType &S) {
731   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
732                     << QueryingAA << " into " << S << "\n");
733 
734   assert(QueryingAA.getIRPosition().getPositionKind() ==
735              IRPosition::IRP_ARGUMENT &&
736          "Can only clamp call site argument states for an argument position!");
737 
738   // Use an optional state as there might not be any return values and we want
739   // to join (IntegerState::operator&) the state of all there are.
740   Optional<StateType> T;
741 
742   // The argument number which is also the call site argument number.
743   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
744 
745   auto CallSiteCheck = [&](AbstractCallSite ACS) {
746     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
747     // Check if a coresponding argument was found or if it is on not associated
748     // (which can happen for callback calls).
749     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
750       return false;
751 
752     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
753     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
754                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
755     const StateType &AAS = static_cast<const StateType &>(AA.getState());
756     if (T.hasValue())
757       *T &= AAS;
758     else
759       T = AAS;
760     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
761                       << "\n");
762     return T->isValidState();
763   };
764 
765   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
766     S.indicatePessimisticFixpoint();
767   else if (T.hasValue())
768     S ^= *T;
769 }
770 
771 /// Helper class for generic deduction: call site argument -> argument position.
772 template <typename AAType, typename Base,
773           typename StateType = typename AAType::StateType>
774 struct AAArgumentFromCallSiteArguments : public Base {
775   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
776 
777   /// See AbstractAttribute::updateImpl(...).
778   ChangeStatus updateImpl(Attributor &A) override {
779     StateType S;
780     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
781     // TODO: If we know we visited all incoming values, thus no are assumed
782     // dead, we can take the known information from the state T.
783     return clampStateAndIndicateChange<StateType>(this->getState(), S);
784   }
785 };
786 
787 /// Helper class for generic replication: function returned -> cs returned.
788 template <typename AAType, typename Base,
789           typename StateType = typename AAType::StateType>
790 struct AACallSiteReturnedFromReturned : public Base {
791   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
792 
793   /// See AbstractAttribute::updateImpl(...).
794   ChangeStatus updateImpl(Attributor &A) override {
795     assert(this->getIRPosition().getPositionKind() ==
796                IRPosition::IRP_CALL_SITE_RETURNED &&
797            "Can only wrap function returned positions for call site returned "
798            "positions!");
799     auto &S = this->getState();
800 
801     const Function *AssociatedFunction =
802         this->getIRPosition().getAssociatedFunction();
803     if (!AssociatedFunction)
804       return S.indicatePessimisticFixpoint();
805 
806     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
807     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
808     return clampStateAndIndicateChange(
809         S, static_cast<const typename AAType::StateType &>(AA.getState()));
810   }
811 };
812 
813 /// Helper class for generic deduction using must-be-executed-context
814 /// Base class is required to have `followUse` method.
815 
816 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
817 /// U - Underlying use.
818 /// I - The user of the \p U.
819 /// `followUse` returns true if the value should be tracked transitively.
820 
821 template <typename AAType, typename Base,
822           typename StateType = typename AAType::StateType>
823 struct AAFromMustBeExecutedContext : public Base {
824   AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
825 
826   void initialize(Attributor &A) override {
827     Base::initialize(A);
828     const IRPosition &IRP = this->getIRPosition();
829     Instruction *CtxI = IRP.getCtxI();
830 
831     if (!CtxI)
832       return;
833 
834     for (const Use &U : IRP.getAssociatedValue().uses())
835       Uses.insert(&U);
836   }
837 
838   /// See AbstractAttribute::updateImpl(...).
839   ChangeStatus updateImpl(Attributor &A) override {
840     auto BeforeState = this->getState();
841     auto &S = this->getState();
842     Instruction *CtxI = this->getIRPosition().getCtxI();
843     if (!CtxI)
844       return ChangeStatus::UNCHANGED;
845 
846     MustBeExecutedContextExplorer &Explorer =
847         A.getInfoCache().getMustBeExecutedContextExplorer();
848 
849     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
850     for (unsigned u = 0; u < Uses.size(); ++u) {
851       const Use *U = Uses[u];
852       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
853         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
854         if (Found && Base::followUse(A, U, UserI))
855           for (const Use &Us : UserI->uses())
856             Uses.insert(&Us);
857       }
858     }
859 
860     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
861   }
862 
863 private:
864   /// Container for (transitive) uses of the associated value.
865   SetVector<const Use *> Uses;
866 };
867 
868 template <typename AAType, typename Base,
869           typename StateType = typename AAType::StateType>
870 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
871     AAComposeTwoGenericDeduction<AAType, Base, StateType,
872                                  AAFromMustBeExecutedContext,
873                                  AAArgumentFromCallSiteArguments>;
874 
875 template <typename AAType, typename Base,
876           typename StateType = typename AAType::StateType>
877 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
878     AAComposeTwoGenericDeduction<AAType, Base, StateType,
879                                  AAFromMustBeExecutedContext,
880                                  AACallSiteReturnedFromReturned>;
881 
882 /// -----------------------NoUnwind Function Attribute--------------------------
883 
884 struct AANoUnwindImpl : AANoUnwind {
885   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
886 
887   const std::string getAsStr() const override {
888     return getAssumed() ? "nounwind" : "may-unwind";
889   }
890 
891   /// See AbstractAttribute::updateImpl(...).
892   ChangeStatus updateImpl(Attributor &A) override {
893     auto Opcodes = {
894         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
895         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
896         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
897 
898     auto CheckForNoUnwind = [&](Instruction &I) {
899       if (!I.mayThrow())
900         return true;
901 
902       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
903         const auto &NoUnwindAA =
904             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
905         return NoUnwindAA.isAssumedNoUnwind();
906       }
907       return false;
908     };
909 
910     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
911       return indicatePessimisticFixpoint();
912 
913     return ChangeStatus::UNCHANGED;
914   }
915 };
916 
917 struct AANoUnwindFunction final : public AANoUnwindImpl {
918   AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
919 
920   /// See AbstractAttribute::trackStatistics()
921   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
922 };
923 
924 /// NoUnwind attribute deduction for a call sites.
925 struct AANoUnwindCallSite final : AANoUnwindImpl {
926   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
927 
928   /// See AbstractAttribute::initialize(...).
929   void initialize(Attributor &A) override {
930     AANoUnwindImpl::initialize(A);
931     Function *F = getAssociatedFunction();
932     if (!F)
933       indicatePessimisticFixpoint();
934   }
935 
936   /// See AbstractAttribute::updateImpl(...).
937   ChangeStatus updateImpl(Attributor &A) override {
938     // TODO: Once we have call site specific value information we can provide
939     //       call site specific liveness information and then it makes
940     //       sense to specialize attributes for call sites arguments instead of
941     //       redirecting requests to the callee argument.
942     Function *F = getAssociatedFunction();
943     const IRPosition &FnPos = IRPosition::function(*F);
944     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
945     return clampStateAndIndicateChange(
946         getState(),
947         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
948   }
949 
950   /// See AbstractAttribute::trackStatistics()
951   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
952 };
953 
954 /// --------------------- Function Return Values -------------------------------
955 
956 /// "Attribute" that collects all potential returned values and the return
957 /// instructions that they arise from.
958 ///
959 /// If there is a unique returned value R, the manifest method will:
960 ///   - mark R with the "returned" attribute, if R is an argument.
961 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
962 
963   /// Mapping of values potentially returned by the associated function to the
964   /// return instructions that might return them.
965   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
966 
967   /// Mapping to remember the number of returned values for a call site such
968   /// that we can avoid updates if nothing changed.
969   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
970 
971   /// Set of unresolved calls returned by the associated function.
972   SmallSetVector<CallBase *, 4> UnresolvedCalls;
973 
974   /// State flags
975   ///
976   ///{
977   bool IsFixed = false;
978   bool IsValidState = true;
979   ///}
980 
981 public:
982   AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
983 
984   /// See AbstractAttribute::initialize(...).
985   void initialize(Attributor &A) override {
986     // Reset the state.
987     IsFixed = false;
988     IsValidState = true;
989     ReturnedValues.clear();
990 
991     Function *F = getAssociatedFunction();
992     if (!F) {
993       indicatePessimisticFixpoint();
994       return;
995     }
996     assert(!F->getReturnType()->isVoidTy() &&
997            "Did not expect a void return type!");
998 
999     // The map from instruction opcodes to those instructions in the function.
1000     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1001 
1002     // Look through all arguments, if one is marked as returned we are done.
1003     for (Argument &Arg : F->args()) {
1004       if (Arg.hasReturnedAttr()) {
1005         auto &ReturnInstSet = ReturnedValues[&Arg];
1006         for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
1007           ReturnInstSet.insert(cast<ReturnInst>(RI));
1008 
1009         indicateOptimisticFixpoint();
1010         return;
1011       }
1012     }
1013 
1014     if (!F->hasExactDefinition())
1015       indicatePessimisticFixpoint();
1016   }
1017 
1018   /// See AbstractAttribute::manifest(...).
1019   ChangeStatus manifest(Attributor &A) override;
1020 
1021   /// See AbstractAttribute::getState(...).
1022   AbstractState &getState() override { return *this; }
1023 
1024   /// See AbstractAttribute::getState(...).
1025   const AbstractState &getState() const override { return *this; }
1026 
1027   /// See AbstractAttribute::updateImpl(Attributor &A).
1028   ChangeStatus updateImpl(Attributor &A) override;
1029 
1030   llvm::iterator_range<iterator> returned_values() override {
1031     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1032   }
1033 
1034   llvm::iterator_range<const_iterator> returned_values() const override {
1035     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1036   }
1037 
1038   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
1039     return UnresolvedCalls;
1040   }
1041 
1042   /// Return the number of potential return values, -1 if unknown.
1043   size_t getNumReturnValues() const override {
1044     return isValidState() ? ReturnedValues.size() : -1;
1045   }
1046 
1047   /// Return an assumed unique return value if a single candidate is found. If
1048   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1049   /// Optional::NoneType.
1050   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1051 
1052   /// See AbstractState::checkForAllReturnedValues(...).
1053   bool checkForAllReturnedValuesAndReturnInsts(
1054       const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1055           &Pred) const override;
1056 
1057   /// Pretty print the attribute similar to the IR representation.
1058   const std::string getAsStr() const override;
1059 
1060   /// See AbstractState::isAtFixpoint().
1061   bool isAtFixpoint() const override { return IsFixed; }
1062 
1063   /// See AbstractState::isValidState().
1064   bool isValidState() const override { return IsValidState; }
1065 
1066   /// See AbstractState::indicateOptimisticFixpoint(...).
1067   ChangeStatus indicateOptimisticFixpoint() override {
1068     IsFixed = true;
1069     return ChangeStatus::UNCHANGED;
1070   }
1071 
1072   ChangeStatus indicatePessimisticFixpoint() override {
1073     IsFixed = true;
1074     IsValidState = false;
1075     return ChangeStatus::CHANGED;
1076   }
1077 };
1078 
1079 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1080   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1081 
1082   // Bookkeeping.
1083   assert(isValidState());
1084   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1085                   "Number of function with known return values");
1086 
1087   // Check if we have an assumed unique return value that we could manifest.
1088   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1089 
1090   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1091     return Changed;
1092 
1093   // Bookkeeping.
1094   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1095                   "Number of function with unique return");
1096 
1097   // Callback to replace the uses of CB with the constant C.
1098   auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
1099     if (CB.getNumUses() == 0 || CB.isMustTailCall())
1100       return ChangeStatus::UNCHANGED;
1101     replaceAllInstructionUsesWith(CB, C);
1102     return ChangeStatus::CHANGED;
1103   };
1104 
1105   // If the assumed unique return value is an argument, annotate it.
1106   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1107     // TODO: This should be handled differently!
1108     this->AnchorVal = UniqueRVArg;
1109     this->KindOrArgNo = UniqueRVArg->getArgNo();
1110     Changed = IRAttribute::manifest(A);
1111   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
1112     // We can replace the returned value with the unique returned constant.
1113     Value &AnchorValue = getAnchorValue();
1114     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1115       for (const Use &U : F->uses())
1116         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1117           if (CB->isCallee(&U)) {
1118             Constant *RVCCast =
1119                 CB->getType() == RVC->getType()
1120                     ? RVC
1121                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1122             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1123           }
1124     } else {
1125       assert(isa<CallBase>(AnchorValue) &&
1126              "Expcected a function or call base anchor!");
1127       Constant *RVCCast =
1128           AnchorValue.getType() == RVC->getType()
1129               ? RVC
1130               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1131       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1132     }
1133     if (Changed == ChangeStatus::CHANGED)
1134       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1135                       "Number of function returns replaced by constant return");
1136   }
1137 
1138   return Changed;
1139 }
1140 
1141 const std::string AAReturnedValuesImpl::getAsStr() const {
1142   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1143          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1144          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1145 }
1146 
1147 Optional<Value *>
1148 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1149   // If checkForAllReturnedValues provides a unique value, ignoring potential
1150   // undef values that can also be present, it is assumed to be the actual
1151   // return value and forwarded to the caller of this method. If there are
1152   // multiple, a nullptr is returned indicating there cannot be a unique
1153   // returned value.
1154   Optional<Value *> UniqueRV;
1155 
1156   auto Pred = [&](Value &RV) -> bool {
1157     // If we found a second returned value and neither the current nor the saved
1158     // one is an undef, there is no unique returned value. Undefs are special
1159     // since we can pretend they have any value.
1160     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1161         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1162       UniqueRV = nullptr;
1163       return false;
1164     }
1165 
1166     // Do not overwrite a value with an undef.
1167     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1168       UniqueRV = &RV;
1169 
1170     return true;
1171   };
1172 
1173   if (!A.checkForAllReturnedValues(Pred, *this))
1174     UniqueRV = nullptr;
1175 
1176   return UniqueRV;
1177 }
1178 
1179 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1180     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1181         &Pred) const {
1182   if (!isValidState())
1183     return false;
1184 
1185   // Check all returned values but ignore call sites as long as we have not
1186   // encountered an overdefined one during an update.
1187   for (auto &It : ReturnedValues) {
1188     Value *RV = It.first;
1189 
1190     CallBase *CB = dyn_cast<CallBase>(RV);
1191     if (CB && !UnresolvedCalls.count(CB))
1192       continue;
1193 
1194     if (!Pred(*RV, It.second))
1195       return false;
1196   }
1197 
1198   return true;
1199 }
1200 
1201 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1202   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1203   bool Changed = false;
1204 
1205   // State used in the value traversals starting in returned values.
1206   struct RVState {
1207     // The map in which we collect return values -> return instrs.
1208     decltype(ReturnedValues) &RetValsMap;
1209     // The flag to indicate a change.
1210     bool &Changed;
1211     // The return instrs we come from.
1212     SmallSetVector<ReturnInst *, 4> RetInsts;
1213   };
1214 
1215   // Callback for a leaf value returned by the associated function.
1216   auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1217     auto Size = RVS.RetValsMap[&Val].size();
1218     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1219     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1220     RVS.Changed |= Inserted;
1221     LLVM_DEBUG({
1222       if (Inserted)
1223         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1224                << " => " << RVS.RetInsts.size() << "\n";
1225     });
1226     return true;
1227   };
1228 
1229   // Helper method to invoke the generic value traversal.
1230   auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1231     IRPosition RetValPos = IRPosition::value(RV);
1232     return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1233                                                             RVS, VisitValueCB);
1234   };
1235 
1236   // Callback for all "return intructions" live in the associated function.
1237   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1238     ReturnInst &Ret = cast<ReturnInst>(I);
1239     RVState RVS({ReturnedValues, Changed, {}});
1240     RVS.RetInsts.insert(&Ret);
1241     return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1242   };
1243 
1244   // Start by discovering returned values from all live returned instructions in
1245   // the associated function.
1246   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1247     return indicatePessimisticFixpoint();
1248 
1249   // Once returned values "directly" present in the code are handled we try to
1250   // resolve returned calls.
1251   decltype(ReturnedValues) NewRVsMap;
1252   for (auto &It : ReturnedValues) {
1253     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1254                       << " by #" << It.second.size() << " RIs\n");
1255     CallBase *CB = dyn_cast<CallBase>(It.first);
1256     if (!CB || UnresolvedCalls.count(CB))
1257       continue;
1258 
1259     if (!CB->getCalledFunction()) {
1260       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1261                         << "\n");
1262       UnresolvedCalls.insert(CB);
1263       continue;
1264     }
1265 
1266     // TODO: use the function scope once we have call site AAReturnedValues.
1267     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1268         *this, IRPosition::function(*CB->getCalledFunction()));
1269     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1270                       << RetValAA << "\n");
1271 
1272     // Skip dead ends, thus if we do not know anything about the returned
1273     // call we mark it as unresolved and it will stay that way.
1274     if (!RetValAA.getState().isValidState()) {
1275       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1276                         << "\n");
1277       UnresolvedCalls.insert(CB);
1278       continue;
1279     }
1280 
1281     // Do not try to learn partial information. If the callee has unresolved
1282     // return values we will treat the call as unresolved/opaque.
1283     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1284     if (!RetValAAUnresolvedCalls.empty()) {
1285       UnresolvedCalls.insert(CB);
1286       continue;
1287     }
1288 
1289     // Now check if we can track transitively returned values. If possible, thus
1290     // if all return value can be represented in the current scope, do so.
1291     bool Unresolved = false;
1292     for (auto &RetValAAIt : RetValAA.returned_values()) {
1293       Value *RetVal = RetValAAIt.first;
1294       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1295           isa<Constant>(RetVal))
1296         continue;
1297       // Anything that did not fit in the above categories cannot be resolved,
1298       // mark the call as unresolved.
1299       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1300                            "cannot be translated: "
1301                         << *RetVal << "\n");
1302       UnresolvedCalls.insert(CB);
1303       Unresolved = true;
1304       break;
1305     }
1306 
1307     if (Unresolved)
1308       continue;
1309 
1310     // Now track transitively returned values.
1311     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1312     if (NumRetAA == RetValAA.getNumReturnValues()) {
1313       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1314                            "changed since it was seen last\n");
1315       continue;
1316     }
1317     NumRetAA = RetValAA.getNumReturnValues();
1318 
1319     for (auto &RetValAAIt : RetValAA.returned_values()) {
1320       Value *RetVal = RetValAAIt.first;
1321       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1322         // Arguments are mapped to call site operands and we begin the traversal
1323         // again.
1324         bool Unused = false;
1325         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1326         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1327         continue;
1328       } else if (isa<CallBase>(RetVal)) {
1329         // Call sites are resolved by the callee attribute over time, no need to
1330         // do anything for us.
1331         continue;
1332       } else if (isa<Constant>(RetVal)) {
1333         // Constants are valid everywhere, we can simply take them.
1334         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1335         continue;
1336       }
1337     }
1338   }
1339 
1340   // To avoid modifications to the ReturnedValues map while we iterate over it
1341   // we kept record of potential new entries in a copy map, NewRVsMap.
1342   for (auto &It : NewRVsMap) {
1343     assert(!It.second.empty() && "Entry does not add anything.");
1344     auto &ReturnInsts = ReturnedValues[It.first];
1345     for (ReturnInst *RI : It.second)
1346       if (ReturnInsts.insert(RI)) {
1347         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1348                           << *It.first << " => " << *RI << "\n");
1349         Changed = true;
1350       }
1351   }
1352 
1353   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1354   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1355 }
1356 
1357 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1358   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1359 
1360   /// See AbstractAttribute::trackStatistics()
1361   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1362 };
1363 
1364 /// Returned values information for a call sites.
1365 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1366   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1367 
1368   /// See AbstractAttribute::initialize(...).
1369   void initialize(Attributor &A) override {
1370     // TODO: Once we have call site specific value information we can provide
1371     //       call site specific liveness information and then it makes
1372     //       sense to specialize attributes for call sites instead of
1373     //       redirecting requests to the callee.
1374     llvm_unreachable("Abstract attributes for returned values are not "
1375                      "supported for call sites yet!");
1376   }
1377 
1378   /// See AbstractAttribute::updateImpl(...).
1379   ChangeStatus updateImpl(Attributor &A) override {
1380     return indicatePessimisticFixpoint();
1381   }
1382 
1383   /// See AbstractAttribute::trackStatistics()
1384   void trackStatistics() const override {}
1385 };
1386 
1387 /// ------------------------ NoSync Function Attribute -------------------------
1388 
1389 struct AANoSyncImpl : AANoSync {
1390   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1391 
1392   const std::string getAsStr() const override {
1393     return getAssumed() ? "nosync" : "may-sync";
1394   }
1395 
1396   /// See AbstractAttribute::updateImpl(...).
1397   ChangeStatus updateImpl(Attributor &A) override;
1398 
1399   /// Helper function used to determine whether an instruction is non-relaxed
1400   /// atomic. In other words, if an atomic instruction does not have unordered
1401   /// or monotonic ordering
1402   static bool isNonRelaxedAtomic(Instruction *I);
1403 
1404   /// Helper function used to determine whether an instruction is volatile.
1405   static bool isVolatile(Instruction *I);
1406 
1407   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1408   /// memset).
1409   static bool isNoSyncIntrinsic(Instruction *I);
1410 };
1411 
1412 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1413   if (!I->isAtomic())
1414     return false;
1415 
1416   AtomicOrdering Ordering;
1417   switch (I->getOpcode()) {
1418   case Instruction::AtomicRMW:
1419     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1420     break;
1421   case Instruction::Store:
1422     Ordering = cast<StoreInst>(I)->getOrdering();
1423     break;
1424   case Instruction::Load:
1425     Ordering = cast<LoadInst>(I)->getOrdering();
1426     break;
1427   case Instruction::Fence: {
1428     auto *FI = cast<FenceInst>(I);
1429     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1430       return false;
1431     Ordering = FI->getOrdering();
1432     break;
1433   }
1434   case Instruction::AtomicCmpXchg: {
1435     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1436     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1437     // Only if both are relaxed, than it can be treated as relaxed.
1438     // Otherwise it is non-relaxed.
1439     if (Success != AtomicOrdering::Unordered &&
1440         Success != AtomicOrdering::Monotonic)
1441       return true;
1442     if (Failure != AtomicOrdering::Unordered &&
1443         Failure != AtomicOrdering::Monotonic)
1444       return true;
1445     return false;
1446   }
1447   default:
1448     llvm_unreachable(
1449         "New atomic operations need to be known in the attributor.");
1450   }
1451 
1452   // Relaxed.
1453   if (Ordering == AtomicOrdering::Unordered ||
1454       Ordering == AtomicOrdering::Monotonic)
1455     return false;
1456   return true;
1457 }
1458 
1459 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1460 /// FIXME: We should ipmrove the handling of intrinsics.
1461 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1462   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1463     switch (II->getIntrinsicID()) {
1464     /// Element wise atomic memory intrinsics are can only be unordered,
1465     /// therefore nosync.
1466     case Intrinsic::memset_element_unordered_atomic:
1467     case Intrinsic::memmove_element_unordered_atomic:
1468     case Intrinsic::memcpy_element_unordered_atomic:
1469       return true;
1470     case Intrinsic::memset:
1471     case Intrinsic::memmove:
1472     case Intrinsic::memcpy:
1473       if (!cast<MemIntrinsic>(II)->isVolatile())
1474         return true;
1475       return false;
1476     default:
1477       return false;
1478     }
1479   }
1480   return false;
1481 }
1482 
1483 bool AANoSyncImpl::isVolatile(Instruction *I) {
1484   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1485          "Calls should not be checked here");
1486 
1487   switch (I->getOpcode()) {
1488   case Instruction::AtomicRMW:
1489     return cast<AtomicRMWInst>(I)->isVolatile();
1490   case Instruction::Store:
1491     return cast<StoreInst>(I)->isVolatile();
1492   case Instruction::Load:
1493     return cast<LoadInst>(I)->isVolatile();
1494   case Instruction::AtomicCmpXchg:
1495     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1496   default:
1497     return false;
1498   }
1499 }
1500 
1501 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1502 
1503   auto CheckRWInstForNoSync = [&](Instruction &I) {
1504     /// We are looking for volatile instructions or Non-Relaxed atomics.
1505     /// FIXME: We should improve the handling of intrinsics.
1506 
1507     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1508       return true;
1509 
1510     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1511       if (ICS.hasFnAttr(Attribute::NoSync))
1512         return true;
1513 
1514       const auto &NoSyncAA =
1515           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1516       if (NoSyncAA.isAssumedNoSync())
1517         return true;
1518       return false;
1519     }
1520 
1521     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1522       return true;
1523 
1524     return false;
1525   };
1526 
1527   auto CheckForNoSync = [&](Instruction &I) {
1528     // At this point we handled all read/write effects and they are all
1529     // nosync, so they can be skipped.
1530     if (I.mayReadOrWriteMemory())
1531       return true;
1532 
1533     // non-convergent and readnone imply nosync.
1534     return !ImmutableCallSite(&I).isConvergent();
1535   };
1536 
1537   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1538       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1539     return indicatePessimisticFixpoint();
1540 
1541   return ChangeStatus::UNCHANGED;
1542 }
1543 
1544 struct AANoSyncFunction final : public AANoSyncImpl {
1545   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1546 
1547   /// See AbstractAttribute::trackStatistics()
1548   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1549 };
1550 
1551 /// NoSync attribute deduction for a call sites.
1552 struct AANoSyncCallSite final : AANoSyncImpl {
1553   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1554 
1555   /// See AbstractAttribute::initialize(...).
1556   void initialize(Attributor &A) override {
1557     AANoSyncImpl::initialize(A);
1558     Function *F = getAssociatedFunction();
1559     if (!F)
1560       indicatePessimisticFixpoint();
1561   }
1562 
1563   /// See AbstractAttribute::updateImpl(...).
1564   ChangeStatus updateImpl(Attributor &A) override {
1565     // TODO: Once we have call site specific value information we can provide
1566     //       call site specific liveness information and then it makes
1567     //       sense to specialize attributes for call sites arguments instead of
1568     //       redirecting requests to the callee argument.
1569     Function *F = getAssociatedFunction();
1570     const IRPosition &FnPos = IRPosition::function(*F);
1571     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1572     return clampStateAndIndicateChange(
1573         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1574   }
1575 
1576   /// See AbstractAttribute::trackStatistics()
1577   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1578 };
1579 
1580 /// ------------------------ No-Free Attributes ----------------------------
1581 
1582 struct AANoFreeImpl : public AANoFree {
1583   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1584 
1585   /// See AbstractAttribute::updateImpl(...).
1586   ChangeStatus updateImpl(Attributor &A) override {
1587     auto CheckForNoFree = [&](Instruction &I) {
1588       ImmutableCallSite ICS(&I);
1589       if (ICS.hasFnAttr(Attribute::NoFree))
1590         return true;
1591 
1592       const auto &NoFreeAA =
1593           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1594       return NoFreeAA.isAssumedNoFree();
1595     };
1596 
1597     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1598       return indicatePessimisticFixpoint();
1599     return ChangeStatus::UNCHANGED;
1600   }
1601 
1602   /// See AbstractAttribute::getAsStr().
1603   const std::string getAsStr() const override {
1604     return getAssumed() ? "nofree" : "may-free";
1605   }
1606 };
1607 
1608 struct AANoFreeFunction final : public AANoFreeImpl {
1609   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1610 
1611   /// See AbstractAttribute::trackStatistics()
1612   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1613 };
1614 
1615 /// NoFree attribute deduction for a call sites.
1616 struct AANoFreeCallSite final : AANoFreeImpl {
1617   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1618 
1619   /// See AbstractAttribute::initialize(...).
1620   void initialize(Attributor &A) override {
1621     AANoFreeImpl::initialize(A);
1622     Function *F = getAssociatedFunction();
1623     if (!F)
1624       indicatePessimisticFixpoint();
1625   }
1626 
1627   /// See AbstractAttribute::updateImpl(...).
1628   ChangeStatus updateImpl(Attributor &A) override {
1629     // TODO: Once we have call site specific value information we can provide
1630     //       call site specific liveness information and then it makes
1631     //       sense to specialize attributes for call sites arguments instead of
1632     //       redirecting requests to the callee argument.
1633     Function *F = getAssociatedFunction();
1634     const IRPosition &FnPos = IRPosition::function(*F);
1635     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1636     return clampStateAndIndicateChange(
1637         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1638   }
1639 
1640   /// See AbstractAttribute::trackStatistics()
1641   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1642 };
1643 
1644 /// NoFree attribute for floating values.
1645 struct AANoFreeFloating : AANoFreeImpl {
1646   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1647 
1648   /// See AbstractAttribute::trackStatistics()
1649   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1650 
1651   /// See Abstract Attribute::updateImpl(...).
1652   ChangeStatus updateImpl(Attributor &A) override {
1653     const IRPosition &IRP = getIRPosition();
1654 
1655     const auto &NoFreeAA =
1656         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1657     if (NoFreeAA.isAssumedNoFree())
1658       return ChangeStatus::UNCHANGED;
1659 
1660     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1661     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1662       Instruction *UserI = cast<Instruction>(U.getUser());
1663       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1664         if (CB->isBundleOperand(&U))
1665           return false;
1666         if (!CB->isArgOperand(&U))
1667           return true;
1668         unsigned ArgNo = CB->getArgOperandNo(&U);
1669 
1670         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1671             *this, IRPosition::callsite_argument(*CB, ArgNo));
1672         return NoFreeArg.isAssumedNoFree();
1673       }
1674 
1675       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1676           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1677         Follow = true;
1678         return true;
1679       }
1680 
1681       // Unknown user.
1682       return false;
1683     };
1684     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1685       return indicatePessimisticFixpoint();
1686 
1687     return ChangeStatus::UNCHANGED;
1688   }
1689 };
1690 
1691 /// NoFree attribute for a call site argument.
1692 struct AANoFreeArgument final : AANoFreeFloating {
1693   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1694 
1695   /// See AbstractAttribute::trackStatistics()
1696   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1697 };
1698 
1699 /// NoFree attribute for call site arguments.
1700 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1701   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1702 
1703   /// See AbstractAttribute::updateImpl(...).
1704   ChangeStatus updateImpl(Attributor &A) override {
1705     // TODO: Once we have call site specific value information we can provide
1706     //       call site specific liveness information and then it makes
1707     //       sense to specialize attributes for call sites arguments instead of
1708     //       redirecting requests to the callee argument.
1709     Argument *Arg = getAssociatedArgument();
1710     if (!Arg)
1711       return indicatePessimisticFixpoint();
1712     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1713     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1714     return clampStateAndIndicateChange(
1715         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1716   }
1717 
1718   /// See AbstractAttribute::trackStatistics()
1719   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1720 };
1721 
1722 /// NoFree attribute for function return value.
1723 struct AANoFreeReturned final : AANoFreeFloating {
1724   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1725     llvm_unreachable("NoFree is not applicable to function returns!");
1726   }
1727 
1728   /// See AbstractAttribute::initialize(...).
1729   void initialize(Attributor &A) override {
1730     llvm_unreachable("NoFree is not applicable to function returns!");
1731   }
1732 
1733   /// See AbstractAttribute::updateImpl(...).
1734   ChangeStatus updateImpl(Attributor &A) override {
1735     llvm_unreachable("NoFree is not applicable to function returns!");
1736   }
1737 
1738   /// See AbstractAttribute::trackStatistics()
1739   void trackStatistics() const override {}
1740 };
1741 
1742 /// NoFree attribute deduction for a call site return value.
1743 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1744   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1745 
1746   ChangeStatus manifest(Attributor &A) override {
1747     return ChangeStatus::UNCHANGED;
1748   }
1749   /// See AbstractAttribute::trackStatistics()
1750   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1751 };
1752 
1753 /// ------------------------ NonNull Argument Attribute ------------------------
1754 static int64_t getKnownNonNullAndDerefBytesForUse(
1755     Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
1756     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1757   TrackUse = false;
1758 
1759   const Value *UseV = U->get();
1760   if (!UseV->getType()->isPointerTy())
1761     return 0;
1762 
1763   Type *PtrTy = UseV->getType();
1764   const Function *F = I->getFunction();
1765   bool NullPointerIsDefined =
1766       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1767   const DataLayout &DL = A.getInfoCache().getDL();
1768   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1769     if (ICS.isBundleOperand(U))
1770       return 0;
1771 
1772     if (ICS.isCallee(U)) {
1773       IsNonNull |= !NullPointerIsDefined;
1774       return 0;
1775     }
1776 
1777     unsigned ArgNo = ICS.getArgumentNo(U);
1778     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1779     // As long as we only use known information there is no need to track
1780     // dependences here.
1781     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1782                                                   /* TrackDependence */ false);
1783     IsNonNull |= DerefAA.isKnownNonNull();
1784     return DerefAA.getKnownDereferenceableBytes();
1785   }
1786 
1787   // We need to follow common pointer manipulation uses to the accesses they
1788   // feed into. We can try to be smart to avoid looking through things we do not
1789   // like for now, e.g., non-inbounds GEPs.
1790   if (isa<CastInst>(I)) {
1791     TrackUse = true;
1792     return 0;
1793   }
1794   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1795     if (GEP->hasAllConstantIndices()) {
1796       TrackUse = true;
1797       return 0;
1798     }
1799 
1800   int64_t Offset;
1801   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1802     if (Base == &AssociatedValue &&
1803         Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1804       int64_t DerefBytes =
1805           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1806 
1807       IsNonNull |= !NullPointerIsDefined;
1808       return std::max(int64_t(0), DerefBytes);
1809     }
1810   }
1811 
1812   /// Corner case when an offset is 0.
1813   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1814           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1815     if (Offset == 0 && Base == &AssociatedValue &&
1816         Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1817       int64_t DerefBytes =
1818           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1819       IsNonNull |= !NullPointerIsDefined;
1820       return std::max(int64_t(0), DerefBytes);
1821     }
1822   }
1823 
1824   return 0;
1825 }
1826 
1827 struct AANonNullImpl : AANonNull {
1828   AANonNullImpl(const IRPosition &IRP)
1829       : AANonNull(IRP),
1830         NullIsDefined(NullPointerIsDefined(
1831             getAnchorScope(),
1832             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1833 
1834   /// See AbstractAttribute::initialize(...).
1835   void initialize(Attributor &A) override {
1836     if (!NullIsDefined &&
1837         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1838       indicateOptimisticFixpoint();
1839     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1840       indicatePessimisticFixpoint();
1841     else
1842       AANonNull::initialize(A);
1843   }
1844 
1845   /// See AAFromMustBeExecutedContext
1846   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1847     bool IsNonNull = false;
1848     bool TrackUse = false;
1849     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1850                                        IsNonNull, TrackUse);
1851     setKnown(IsNonNull);
1852     return TrackUse;
1853   }
1854 
1855   /// See AbstractAttribute::getAsStr().
1856   const std::string getAsStr() const override {
1857     return getAssumed() ? "nonnull" : "may-null";
1858   }
1859 
1860   /// Flag to determine if the underlying value can be null and still allow
1861   /// valid accesses.
1862   const bool NullIsDefined;
1863 };
1864 
1865 /// NonNull attribute for a floating value.
1866 struct AANonNullFloating
1867     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1868   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1869   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1870 
1871   /// See AbstractAttribute::updateImpl(...).
1872   ChangeStatus updateImpl(Attributor &A) override {
1873     ChangeStatus Change = Base::updateImpl(A);
1874     if (isKnownNonNull())
1875       return Change;
1876 
1877     if (!NullIsDefined) {
1878       const auto &DerefAA =
1879           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1880       if (DerefAA.getAssumedDereferenceableBytes())
1881         return Change;
1882     }
1883 
1884     const DataLayout &DL = A.getDataLayout();
1885 
1886     DominatorTree *DT = nullptr;
1887     InformationCache &InfoCache = A.getInfoCache();
1888     if (const Function *Fn = getAnchorScope())
1889       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1890 
1891     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1892                             bool Stripped) -> bool {
1893       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1894       if (!Stripped && this == &AA) {
1895         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1896           T.indicatePessimisticFixpoint();
1897       } else {
1898         // Use abstract attribute information.
1899         const AANonNull::StateType &NS =
1900             static_cast<const AANonNull::StateType &>(AA.getState());
1901         T ^= NS;
1902       }
1903       return T.isValidState();
1904     };
1905 
1906     StateType T;
1907     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
1908                                                      T, VisitValueCB))
1909       return indicatePessimisticFixpoint();
1910 
1911     return clampStateAndIndicateChange(getState(), T);
1912   }
1913 
1914   /// See AbstractAttribute::trackStatistics()
1915   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1916 };
1917 
1918 /// NonNull attribute for function return value.
1919 struct AANonNullReturned final
1920     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1921   AANonNullReturned(const IRPosition &IRP)
1922       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1923 
1924   /// See AbstractAttribute::trackStatistics()
1925   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1926 };
1927 
1928 /// NonNull attribute for function argument.
1929 struct AANonNullArgument final
1930     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1931                                                               AANonNullImpl> {
1932   AANonNullArgument(const IRPosition &IRP)
1933       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1934                                                                 AANonNullImpl>(
1935             IRP) {}
1936 
1937   /// See AbstractAttribute::trackStatistics()
1938   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1939 };
1940 
1941 struct AANonNullCallSiteArgument final : AANonNullFloating {
1942   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1943 
1944   /// See AbstractAttribute::trackStatistics()
1945   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1946 };
1947 
1948 /// NonNull attribute for a call site return position.
1949 struct AANonNullCallSiteReturned final
1950     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1951                                                              AANonNullImpl> {
1952   AANonNullCallSiteReturned(const IRPosition &IRP)
1953       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1954                                                                AANonNullImpl>(
1955             IRP) {}
1956 
1957   /// See AbstractAttribute::trackStatistics()
1958   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1959 };
1960 
1961 /// ------------------------ No-Recurse Attributes ----------------------------
1962 
1963 struct AANoRecurseImpl : public AANoRecurse {
1964   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1965 
1966   /// See AbstractAttribute::getAsStr()
1967   const std::string getAsStr() const override {
1968     return getAssumed() ? "norecurse" : "may-recurse";
1969   }
1970 };
1971 
1972 struct AANoRecurseFunction final : AANoRecurseImpl {
1973   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1974 
1975   /// See AbstractAttribute::initialize(...).
1976   void initialize(Attributor &A) override {
1977     AANoRecurseImpl::initialize(A);
1978     if (const Function *F = getAnchorScope())
1979       if (A.getInfoCache().getSccSize(*F) == 1)
1980         return;
1981     indicatePessimisticFixpoint();
1982   }
1983 
1984   /// See AbstractAttribute::updateImpl(...).
1985   ChangeStatus updateImpl(Attributor &A) override {
1986 
1987     auto CheckForNoRecurse = [&](Instruction &I) {
1988       ImmutableCallSite ICS(&I);
1989       if (ICS.hasFnAttr(Attribute::NoRecurse))
1990         return true;
1991 
1992       const auto &NoRecurseAA =
1993           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1994       if (!NoRecurseAA.isAssumedNoRecurse())
1995         return false;
1996 
1997       // Recursion to the same function
1998       if (ICS.getCalledFunction() == getAnchorScope())
1999         return false;
2000 
2001       return true;
2002     };
2003 
2004     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
2005       return indicatePessimisticFixpoint();
2006     return ChangeStatus::UNCHANGED;
2007   }
2008 
2009   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2010 };
2011 
2012 /// NoRecurse attribute deduction for a call sites.
2013 struct AANoRecurseCallSite final : AANoRecurseImpl {
2014   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
2015 
2016   /// See AbstractAttribute::initialize(...).
2017   void initialize(Attributor &A) override {
2018     AANoRecurseImpl::initialize(A);
2019     Function *F = getAssociatedFunction();
2020     if (!F)
2021       indicatePessimisticFixpoint();
2022   }
2023 
2024   /// See AbstractAttribute::updateImpl(...).
2025   ChangeStatus updateImpl(Attributor &A) override {
2026     // TODO: Once we have call site specific value information we can provide
2027     //       call site specific liveness information and then it makes
2028     //       sense to specialize attributes for call sites arguments instead of
2029     //       redirecting requests to the callee argument.
2030     Function *F = getAssociatedFunction();
2031     const IRPosition &FnPos = IRPosition::function(*F);
2032     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
2033     return clampStateAndIndicateChange(
2034         getState(),
2035         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
2036   }
2037 
2038   /// See AbstractAttribute::trackStatistics()
2039   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2040 };
2041 
2042 /// -------------------- Undefined-Behavior Attributes ------------------------
2043 
2044 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2045   AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
2046 
2047   /// See AbstractAttribute::updateImpl(...).
2048   // through a pointer (i.e. also branches etc.)
2049   ChangeStatus updateImpl(Attributor &A) override {
2050     const size_t UBPrevSize = KnownUBInsts.size();
2051     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2052 
2053     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2054       // Skip instructions that are already saved.
2055       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2056         return true;
2057 
2058       // If we reach here, we know we have an instruction
2059       // that accesses memory through a pointer operand,
2060       // for which getPointerOperand() should give it to us.
2061       const Value *PtrOp =
2062           Attributor::getPointerOperand(&I, /* AllowVolatile */ true);
2063       assert(PtrOp &&
2064              "Expected pointer operand of memory accessing instruction");
2065 
2066       // A memory access through a pointer is considered UB
2067       // only if the pointer has constant null value.
2068       // TODO: Expand it to not only check constant values.
2069       if (!isa<ConstantPointerNull>(PtrOp)) {
2070         AssumedNoUBInsts.insert(&I);
2071         return true;
2072       }
2073       const Type *PtrTy = PtrOp->getType();
2074 
2075       // Because we only consider instructions inside functions,
2076       // assume that a parent function exists.
2077       const Function *F = I.getFunction();
2078 
2079       // A memory access using constant null pointer is only considered UB
2080       // if null pointer is _not_ defined for the target platform.
2081       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2082         AssumedNoUBInsts.insert(&I);
2083       else
2084         KnownUBInsts.insert(&I);
2085       return true;
2086     };
2087 
2088     auto InspectBrInstForUB = [&](Instruction &I) {
2089       // A conditional branch instruction is considered UB if it has `undef`
2090       // condition.
2091 
2092       // Skip instructions that are already saved.
2093       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2094         return true;
2095 
2096       // We know we have a branch instruction.
2097       auto BrInst = cast<BranchInst>(&I);
2098 
2099       // Unconditional branches are never considered UB.
2100       if (BrInst->isUnconditional())
2101         return true;
2102 
2103       // Either we stopped and the appropriate action was taken,
2104       // or we got back a simplified value to continue.
2105       Optional<Value *> SimplifiedCond =
2106           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2107       if (!SimplifiedCond.hasValue())
2108         return true;
2109       AssumedNoUBInsts.insert(&I);
2110       return true;
2111     };
2112 
2113     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2114                               {Instruction::Load, Instruction::Store,
2115                                Instruction::AtomicCmpXchg,
2116                                Instruction::AtomicRMW});
2117     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br});
2118     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2119         UBPrevSize != KnownUBInsts.size())
2120       return ChangeStatus::CHANGED;
2121     return ChangeStatus::UNCHANGED;
2122   }
2123 
2124   bool isKnownToCauseUB(Instruction *I) const override {
2125     return KnownUBInsts.count(I);
2126   }
2127 
2128   bool isAssumedToCauseUB(Instruction *I) const override {
2129     // In simple words, if an instruction is not in the assumed to _not_
2130     // cause UB, then it is assumed UB (that includes those
2131     // in the KnownUBInsts set). The rest is boilerplate
2132     // is to ensure that it is one of the instructions we test
2133     // for UB.
2134 
2135     switch (I->getOpcode()) {
2136     case Instruction::Load:
2137     case Instruction::Store:
2138     case Instruction::AtomicCmpXchg:
2139     case Instruction::AtomicRMW:
2140       return !AssumedNoUBInsts.count(I);
2141     case Instruction::Br: {
2142       auto BrInst = cast<BranchInst>(I);
2143       if (BrInst->isUnconditional())
2144         return false;
2145       return !AssumedNoUBInsts.count(I);
2146     } break;
2147     default:
2148       return false;
2149     }
2150     return false;
2151   }
2152 
2153   ChangeStatus manifest(Attributor &A) override {
2154     if (KnownUBInsts.empty())
2155       return ChangeStatus::UNCHANGED;
2156     for (Instruction *I : KnownUBInsts)
2157       A.changeToUnreachableAfterManifest(I);
2158     return ChangeStatus::CHANGED;
2159   }
2160 
2161   /// See AbstractAttribute::getAsStr()
2162   const std::string getAsStr() const override {
2163     return getAssumed() ? "undefined-behavior" : "no-ub";
2164   }
2165 
2166   /// Note: The correctness of this analysis depends on the fact that the
2167   /// following 2 sets will stop changing after some point.
2168   /// "Change" here means that their size changes.
2169   /// The size of each set is monotonically increasing
2170   /// (we only add items to them) and it is upper bounded by the number of
2171   /// instructions in the processed function (we can never save more
2172   /// elements in either set than this number). Hence, at some point,
2173   /// they will stop increasing.
2174   /// Consequently, at some point, both sets will have stopped
2175   /// changing, effectively making the analysis reach a fixpoint.
2176 
2177   /// Note: These 2 sets are disjoint and an instruction can be considered
2178   /// one of 3 things:
2179   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2180   ///    the KnownUBInsts set.
2181   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2182   ///    has a reason to assume it).
2183   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2184   ///    could not find a reason to assume or prove that it can cause UB,
2185   ///    hence it assumes it doesn't. We have a set for these instructions
2186   ///    so that we don't reprocess them in every update.
2187   ///    Note however that instructions in this set may cause UB.
2188 
2189 protected:
2190   /// A set of all live instructions _known_ to cause UB.
2191   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2192 
2193 private:
2194   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2195   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2196 
2197   // Should be called on updates in which if we're processing an instruction
2198   // \p I that depends on a value \p V, one of the following has to happen:
2199   // - If the value is assumed, then stop.
2200   // - If the value is known but undef, then consider it UB.
2201   // - Otherwise, do specific processing with the simplified value.
2202   // We return None in the first 2 cases to signify that an appropriate
2203   // action was taken and the caller should stop.
2204   // Otherwise, we return the simplified value that the caller should
2205   // use for specific processing.
2206   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2207                                          Instruction *I) {
2208     const auto &ValueSimplifyAA =
2209         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2210     Optional<Value *> SimplifiedV =
2211         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2212     if (!ValueSimplifyAA.isKnown()) {
2213       // Don't depend on assumed values.
2214       return llvm::None;
2215     }
2216     if (!SimplifiedV.hasValue()) {
2217       // If it is known (which we tested above) but it doesn't have a value,
2218       // then we can assume `undef` and hence the instruction is UB.
2219       KnownUBInsts.insert(I);
2220       return llvm::None;
2221     }
2222     Value *Val = SimplifiedV.getValue();
2223     if (isa<UndefValue>(Val)) {
2224       KnownUBInsts.insert(I);
2225       return llvm::None;
2226     }
2227     return Val;
2228   }
2229 };
2230 
2231 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2232   AAUndefinedBehaviorFunction(const IRPosition &IRP)
2233       : AAUndefinedBehaviorImpl(IRP) {}
2234 
2235   /// See AbstractAttribute::trackStatistics()
2236   void trackStatistics() const override {
2237     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2238                "Number of instructions known to have UB");
2239     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2240         KnownUBInsts.size();
2241   }
2242 };
2243 
2244 /// ------------------------ Will-Return Attributes ----------------------------
2245 
2246 // Helper function that checks whether a function has any cycle.
2247 // TODO: Replace with more efficent code
2248 static bool containsCycle(Function &F) {
2249   SmallPtrSet<BasicBlock *, 32> Visited;
2250 
2251   // Traverse BB by dfs and check whether successor is already visited.
2252   for (BasicBlock *BB : depth_first(&F)) {
2253     Visited.insert(BB);
2254     for (auto *SuccBB : successors(BB)) {
2255       if (Visited.count(SuccBB))
2256         return true;
2257     }
2258   }
2259   return false;
2260 }
2261 
2262 // Helper function that checks the function have a loop which might become an
2263 // endless loop
2264 // FIXME: Any cycle is regarded as endless loop for now.
2265 //        We have to allow some patterns.
2266 static bool containsPossiblyEndlessLoop(Function *F) {
2267   return !F || !F->hasExactDefinition() || containsCycle(*F);
2268 }
2269 
2270 struct AAWillReturnImpl : public AAWillReturn {
2271   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2272 
2273   /// See AbstractAttribute::initialize(...).
2274   void initialize(Attributor &A) override {
2275     AAWillReturn::initialize(A);
2276 
2277     Function *F = getAssociatedFunction();
2278     if (containsPossiblyEndlessLoop(F))
2279       indicatePessimisticFixpoint();
2280   }
2281 
2282   /// See AbstractAttribute::updateImpl(...).
2283   ChangeStatus updateImpl(Attributor &A) override {
2284     auto CheckForWillReturn = [&](Instruction &I) {
2285       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2286       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2287       if (WillReturnAA.isKnownWillReturn())
2288         return true;
2289       if (!WillReturnAA.isAssumedWillReturn())
2290         return false;
2291       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2292       return NoRecurseAA.isAssumedNoRecurse();
2293     };
2294 
2295     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2296       return indicatePessimisticFixpoint();
2297 
2298     return ChangeStatus::UNCHANGED;
2299   }
2300 
2301   /// See AbstractAttribute::getAsStr()
2302   const std::string getAsStr() const override {
2303     return getAssumed() ? "willreturn" : "may-noreturn";
2304   }
2305 };
2306 
2307 struct AAWillReturnFunction final : AAWillReturnImpl {
2308   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2309 
2310   /// See AbstractAttribute::trackStatistics()
2311   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2312 };
2313 
2314 /// WillReturn attribute deduction for a call sites.
2315 struct AAWillReturnCallSite final : AAWillReturnImpl {
2316   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2317 
2318   /// See AbstractAttribute::initialize(...).
2319   void initialize(Attributor &A) override {
2320     AAWillReturnImpl::initialize(A);
2321     Function *F = getAssociatedFunction();
2322     if (!F)
2323       indicatePessimisticFixpoint();
2324   }
2325 
2326   /// See AbstractAttribute::updateImpl(...).
2327   ChangeStatus updateImpl(Attributor &A) override {
2328     // TODO: Once we have call site specific value information we can provide
2329     //       call site specific liveness information and then it makes
2330     //       sense to specialize attributes for call sites arguments instead of
2331     //       redirecting requests to the callee argument.
2332     Function *F = getAssociatedFunction();
2333     const IRPosition &FnPos = IRPosition::function(*F);
2334     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2335     return clampStateAndIndicateChange(
2336         getState(),
2337         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2338   }
2339 
2340   /// See AbstractAttribute::trackStatistics()
2341   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2342 };
2343 
2344 /// -------------------AAReachability Attribute--------------------------
2345 
2346 struct AAReachabilityImpl : AAReachability {
2347   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2348 
2349   const std::string getAsStr() const override {
2350     // TODO: Return the number of reachable queries.
2351     return "reachable";
2352   }
2353 
2354   /// See AbstractAttribute::initialize(...).
2355   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2356 
2357   /// See AbstractAttribute::updateImpl(...).
2358   ChangeStatus updateImpl(Attributor &A) override {
2359     return indicatePessimisticFixpoint();
2360   }
2361 };
2362 
2363 struct AAReachabilityFunction final : public AAReachabilityImpl {
2364   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2365 
2366   /// See AbstractAttribute::trackStatistics()
2367   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2368 };
2369 
2370 /// ------------------------ NoAlias Argument Attribute ------------------------
2371 
2372 struct AANoAliasImpl : AANoAlias {
2373   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2374 
2375   const std::string getAsStr() const override {
2376     return getAssumed() ? "noalias" : "may-alias";
2377   }
2378 };
2379 
2380 /// NoAlias attribute for a floating value.
2381 struct AANoAliasFloating final : AANoAliasImpl {
2382   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2383 
2384   /// See AbstractAttribute::initialize(...).
2385   void initialize(Attributor &A) override {
2386     AANoAliasImpl::initialize(A);
2387     Value &Val = getAssociatedValue();
2388     if (isa<AllocaInst>(Val))
2389       indicateOptimisticFixpoint();
2390     if (isa<ConstantPointerNull>(Val) &&
2391         Val.getType()->getPointerAddressSpace() == 0)
2392       indicateOptimisticFixpoint();
2393   }
2394 
2395   /// See AbstractAttribute::updateImpl(...).
2396   ChangeStatus updateImpl(Attributor &A) override {
2397     // TODO: Implement this.
2398     return indicatePessimisticFixpoint();
2399   }
2400 
2401   /// See AbstractAttribute::trackStatistics()
2402   void trackStatistics() const override {
2403     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2404   }
2405 };
2406 
2407 /// NoAlias attribute for an argument.
2408 struct AANoAliasArgument final
2409     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2410   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2411   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2412 
2413   /// See AbstractAttribute::initialize(...).
2414   void initialize(Attributor &A) override {
2415     Base::initialize(A);
2416     // See callsite argument attribute and callee argument attribute.
2417     if (hasAttr({Attribute::ByVal}))
2418       indicateOptimisticFixpoint();
2419   }
2420 
2421   /// See AbstractAttribute::update(...).
2422   ChangeStatus updateImpl(Attributor &A) override {
2423     // We have to make sure no-alias on the argument does not break
2424     // synchronization when this is a callback argument, see also [1] below.
2425     // If synchronization cannot be affected, we delegate to the base updateImpl
2426     // function, otherwise we give up for now.
2427 
2428     // If the function is no-sync, no-alias cannot break synchronization.
2429     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2430         *this, IRPosition::function_scope(getIRPosition()));
2431     if (NoSyncAA.isAssumedNoSync())
2432       return Base::updateImpl(A);
2433 
2434     // If the argument is read-only, no-alias cannot break synchronization.
2435     const auto &MemBehaviorAA =
2436         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2437     if (MemBehaviorAA.isAssumedReadOnly())
2438       return Base::updateImpl(A);
2439 
2440     // If the argument is never passed through callbacks, no-alias cannot break
2441     // synchronization.
2442     if (A.checkForAllCallSites(
2443             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2444             true))
2445       return Base::updateImpl(A);
2446 
2447     // TODO: add no-alias but make sure it doesn't break synchronization by
2448     // introducing fake uses. See:
2449     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2450     //     International Workshop on OpenMP 2018,
2451     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2452 
2453     return indicatePessimisticFixpoint();
2454   }
2455 
2456   /// See AbstractAttribute::trackStatistics()
2457   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2458 };
2459 
2460 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2461   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2462 
2463   /// See AbstractAttribute::initialize(...).
2464   void initialize(Attributor &A) override {
2465     // See callsite argument attribute and callee argument attribute.
2466     ImmutableCallSite ICS(&getAnchorValue());
2467     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2468       indicateOptimisticFixpoint();
2469   }
2470 
2471   /// See AbstractAttribute::updateImpl(...).
2472   ChangeStatus updateImpl(Attributor &A) override {
2473     // We can deduce "noalias" if the following conditions hold.
2474     // (i)   Associated value is assumed to be noalias in the definition.
2475     // (ii)  Associated value is assumed to be no-capture in all the uses
2476     //       possibly executed before this callsite.
2477     // (iii) There is no other pointer argument which could alias with the
2478     //       value.
2479 
2480     const Value &V = getAssociatedValue();
2481     const IRPosition IRP = IRPosition::value(V);
2482 
2483     // (i) Check whether noalias holds in the definition.
2484 
2485     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
2486     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] check definition: " << V
2487                       << " :: " << NoAliasAA << "\n");
2488 
2489     if (!NoAliasAA.isAssumedNoAlias())
2490       return indicatePessimisticFixpoint();
2491 
2492     LLVM_DEBUG(dbgs() << "[Attributor][AANoAliasCSArg] " << V
2493                       << " is assumed NoAlias in the definition\n");
2494 
2495     // (ii) Check whether the value is captured in the scope using AANoCapture.
2496     //      FIXME: This is conservative though, it is better to look at CFG and
2497     //             check only uses possibly executed before this callsite.
2498 
2499     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
2500     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2501       LLVM_DEBUG(
2502           dbgs() << "[Attributor][AANoAliasCSArg] " << V
2503                  << " cannot be noalias as it is potentially captured\n");
2504       return indicatePessimisticFixpoint();
2505     }
2506 
2507     // (iii) Check there is no other pointer argument which could alias with the
2508     // value.
2509     // TODO: AbstractCallSite
2510     ImmutableCallSite ICS(&getAnchorValue());
2511     for (unsigned i = 0; i < ICS.getNumArgOperands(); i++) {
2512       if (getArgNo() == (int)i)
2513         continue;
2514       const Value *ArgOp = ICS.getArgOperand(i);
2515       if (!ArgOp->getType()->isPointerTy())
2516         continue;
2517 
2518       if (const Function *F = getAnchorScope()) {
2519         if (AAResults *AAR = A.getInfoCache().getAAResultsForFunction(*F)) {
2520           bool IsAliasing = !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2521           LLVM_DEBUG(dbgs()
2522                      << "[Attributor][NoAliasCSArg] Check alias between "
2523                         "callsite arguments "
2524                      << AAR->isNoAlias(&getAssociatedValue(), ArgOp) << " "
2525                      << getAssociatedValue() << " " << *ArgOp << " => "
2526                      << (IsAliasing ? "" : "no-") << "alias \n");
2527 
2528           if (!IsAliasing)
2529             continue;
2530         }
2531       }
2532       return indicatePessimisticFixpoint();
2533     }
2534 
2535     return ChangeStatus::UNCHANGED;
2536   }
2537 
2538   /// See AbstractAttribute::trackStatistics()
2539   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2540 };
2541 
2542 /// NoAlias attribute for function return value.
2543 struct AANoAliasReturned final : AANoAliasImpl {
2544   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2545 
2546   /// See AbstractAttribute::updateImpl(...).
2547   virtual ChangeStatus updateImpl(Attributor &A) override {
2548 
2549     auto CheckReturnValue = [&](Value &RV) -> bool {
2550       if (Constant *C = dyn_cast<Constant>(&RV))
2551         if (C->isNullValue() || isa<UndefValue>(C))
2552           return true;
2553 
2554       /// For now, we can only deduce noalias if we have call sites.
2555       /// FIXME: add more support.
2556       ImmutableCallSite ICS(&RV);
2557       if (!ICS)
2558         return false;
2559 
2560       const IRPosition &RVPos = IRPosition::value(RV);
2561       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2562       if (!NoAliasAA.isAssumedNoAlias())
2563         return false;
2564 
2565       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2566       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2567     };
2568 
2569     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2570       return indicatePessimisticFixpoint();
2571 
2572     return ChangeStatus::UNCHANGED;
2573   }
2574 
2575   /// See AbstractAttribute::trackStatistics()
2576   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2577 };
2578 
2579 /// NoAlias attribute deduction for a call site return value.
2580 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2581   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2582 
2583   /// See AbstractAttribute::initialize(...).
2584   void initialize(Attributor &A) override {
2585     AANoAliasImpl::initialize(A);
2586     Function *F = getAssociatedFunction();
2587     if (!F)
2588       indicatePessimisticFixpoint();
2589   }
2590 
2591   /// See AbstractAttribute::updateImpl(...).
2592   ChangeStatus updateImpl(Attributor &A) override {
2593     // TODO: Once we have call site specific value information we can provide
2594     //       call site specific liveness information and then it makes
2595     //       sense to specialize attributes for call sites arguments instead of
2596     //       redirecting requests to the callee argument.
2597     Function *F = getAssociatedFunction();
2598     const IRPosition &FnPos = IRPosition::returned(*F);
2599     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2600     return clampStateAndIndicateChange(
2601         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2602   }
2603 
2604   /// See AbstractAttribute::trackStatistics()
2605   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2606 };
2607 
2608 /// -------------------AAIsDead Function Attribute-----------------------
2609 
2610 struct AAIsDeadValueImpl : public AAIsDead {
2611   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2612 
2613   /// See AAIsDead::isAssumedDead().
2614   bool isAssumedDead() const override { return getAssumed(); }
2615 
2616   /// See AAIsDead::isAssumedDead(BasicBlock *).
2617   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2618 
2619   /// See AAIsDead::isKnownDead(BasicBlock *).
2620   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2621 
2622   /// See AAIsDead::isAssumedDead(Instruction *I).
2623   bool isAssumedDead(const Instruction *I) const override {
2624     return I == getCtxI() && isAssumedDead();
2625   }
2626 
2627   /// See AAIsDead::isKnownDead(Instruction *I).
2628   bool isKnownDead(const Instruction *I) const override {
2629     return I == getCtxI() && getKnown();
2630   }
2631 
2632   /// See AbstractAttribute::getAsStr().
2633   const std::string getAsStr() const override {
2634     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2635   }
2636 };
2637 
2638 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2639   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2640 
2641   /// See AbstractAttribute::initialize(...).
2642   void initialize(Attributor &A) override {
2643     if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2644       if (!wouldInstructionBeTriviallyDead(I))
2645         indicatePessimisticFixpoint();
2646     if (isa<UndefValue>(getAssociatedValue()))
2647       indicatePessimisticFixpoint();
2648   }
2649 
2650   /// See AbstractAttribute::updateImpl(...).
2651   ChangeStatus updateImpl(Attributor &A) override {
2652     auto UsePred = [&](const Use &U, bool &Follow) {
2653       Instruction *UserI = cast<Instruction>(U.getUser());
2654       if (CallSite CS = CallSite(UserI)) {
2655         if (!CS.isArgOperand(&U))
2656           return false;
2657         const IRPosition &CSArgPos =
2658             IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2659         const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2660         return CSArgIsDead.isAssumedDead();
2661       }
2662       if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2663         const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2664         const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2665         return RetIsDeadAA.isAssumedDead();
2666       }
2667       Follow = true;
2668       return wouldInstructionBeTriviallyDead(UserI);
2669     };
2670 
2671     if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2672       return indicatePessimisticFixpoint();
2673     return ChangeStatus::UNCHANGED;
2674   }
2675 
2676   /// See AbstractAttribute::manifest(...).
2677   ChangeStatus manifest(Attributor &A) override {
2678     Value &V = getAssociatedValue();
2679     if (auto *I = dyn_cast<Instruction>(&V))
2680       if (wouldInstructionBeTriviallyDead(I)) {
2681         A.deleteAfterManifest(*I);
2682         return ChangeStatus::CHANGED;
2683       }
2684 
2685     if (V.use_empty())
2686       return ChangeStatus::UNCHANGED;
2687 
2688     UndefValue &UV = *UndefValue::get(V.getType());
2689     bool AnyChange = A.changeValueAfterManifest(V, UV);
2690     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2691   }
2692 
2693   /// See AbstractAttribute::trackStatistics()
2694   void trackStatistics() const override {
2695     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2696   }
2697 };
2698 
2699 struct AAIsDeadArgument : public AAIsDeadFloating {
2700   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2701 
2702   /// See AbstractAttribute::initialize(...).
2703   void initialize(Attributor &A) override {
2704     if (!getAssociatedFunction()->hasExactDefinition())
2705       indicatePessimisticFixpoint();
2706   }
2707 
2708   /// See AbstractAttribute::manifest(...).
2709   ChangeStatus manifest(Attributor &A) override {
2710     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2711     Argument &Arg = *getAssociatedArgument();
2712     if (Arg.getParent()->hasLocalLinkage())
2713       if (A.registerFunctionSignatureRewrite(
2714               Arg, /* ReplacementTypes */ {},
2715               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2716               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
2717         return ChangeStatus::CHANGED;
2718     return Changed;
2719   }
2720 
2721   /// See AbstractAttribute::trackStatistics()
2722   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2723 };
2724 
2725 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2726   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2727 
2728   /// See AbstractAttribute::initialize(...).
2729   void initialize(Attributor &A) override {
2730     if (isa<UndefValue>(getAssociatedValue()))
2731       indicatePessimisticFixpoint();
2732   }
2733 
2734   /// See AbstractAttribute::updateImpl(...).
2735   ChangeStatus updateImpl(Attributor &A) override {
2736     // TODO: Once we have call site specific value information we can provide
2737     //       call site specific liveness information and then it makes
2738     //       sense to specialize attributes for call sites arguments instead of
2739     //       redirecting requests to the callee argument.
2740     Argument *Arg = getAssociatedArgument();
2741     if (!Arg)
2742       return indicatePessimisticFixpoint();
2743     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2744     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2745     return clampStateAndIndicateChange(
2746         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2747   }
2748 
2749   /// See AbstractAttribute::manifest(...).
2750   ChangeStatus manifest(Attributor &A) override {
2751     CallBase &CB = cast<CallBase>(getAnchorValue());
2752     Use &U = CB.getArgOperandUse(getArgNo());
2753     assert(!isa<UndefValue>(U.get()) &&
2754            "Expected undef values to be filtered out!");
2755     UndefValue &UV = *UndefValue::get(U->getType());
2756     if (A.changeUseAfterManifest(U, UV))
2757       return ChangeStatus::CHANGED;
2758     return ChangeStatus::UNCHANGED;
2759   }
2760 
2761   /// See AbstractAttribute::trackStatistics()
2762   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2763 };
2764 
2765 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2766   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2767 
2768   /// See AbstractAttribute::updateImpl(...).
2769   ChangeStatus updateImpl(Attributor &A) override {
2770 
2771     auto PredForCallSite = [&](AbstractCallSite ACS) {
2772       if (ACS.isCallbackCall())
2773         return false;
2774       const IRPosition &CSRetPos =
2775           IRPosition::callsite_returned(ACS.getCallSite());
2776       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2777       return RetIsDeadAA.isAssumedDead();
2778     };
2779 
2780     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
2781       return indicatePessimisticFixpoint();
2782 
2783     return ChangeStatus::UNCHANGED;
2784   }
2785 
2786   /// See AbstractAttribute::manifest(...).
2787   ChangeStatus manifest(Attributor &A) override {
2788     // TODO: Rewrite the signature to return void?
2789     bool AnyChange = false;
2790     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2791     auto RetInstPred = [&](Instruction &I) {
2792       ReturnInst &RI = cast<ReturnInst>(I);
2793       if (!isa<UndefValue>(RI.getReturnValue()))
2794         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2795       return true;
2796     };
2797     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2798     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2799   }
2800 
2801   /// See AbstractAttribute::trackStatistics()
2802   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2803 };
2804 
2805 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2806   AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2807 
2808   /// See AbstractAttribute::initialize(...).
2809   void initialize(Attributor &A) override {}
2810 
2811   /// See AbstractAttribute::trackStatistics()
2812   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
2813 };
2814 
2815 struct AAIsDeadFunction : public AAIsDead {
2816   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2817 
2818   /// See AbstractAttribute::initialize(...).
2819   void initialize(Attributor &A) override {
2820     const Function *F = getAssociatedFunction();
2821     if (F && !F->isDeclaration()) {
2822       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2823       assumeLive(A, F->getEntryBlock());
2824     }
2825   }
2826 
2827   /// See AbstractAttribute::getAsStr().
2828   const std::string getAsStr() const override {
2829     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2830            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
2831            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2832            std::to_string(KnownDeadEnds.size()) + "]";
2833   }
2834 
2835   /// See AbstractAttribute::manifest(...).
2836   ChangeStatus manifest(Attributor &A) override {
2837     assert(getState().isValidState() &&
2838            "Attempted to manifest an invalid state!");
2839 
2840     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2841     Function &F = *getAssociatedFunction();
2842 
2843     if (AssumedLiveBlocks.empty()) {
2844       A.deleteAfterManifest(F);
2845       return ChangeStatus::CHANGED;
2846     }
2847 
2848     // Flag to determine if we can change an invoke to a call assuming the
2849     // callee is nounwind. This is not possible if the personality of the
2850     // function allows to catch asynchronous exceptions.
2851     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2852 
2853     KnownDeadEnds.set_union(ToBeExploredFrom);
2854     for (const Instruction *DeadEndI : KnownDeadEnds) {
2855       auto *CB = dyn_cast<CallBase>(DeadEndI);
2856       if (!CB)
2857         continue;
2858       const auto &NoReturnAA =
2859           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2860       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2861       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2862         continue;
2863 
2864       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2865         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2866       else
2867         A.changeToUnreachableAfterManifest(
2868             const_cast<Instruction *>(DeadEndI->getNextNode()));
2869       HasChanged = ChangeStatus::CHANGED;
2870     }
2871 
2872     for (BasicBlock &BB : F)
2873       if (!AssumedLiveBlocks.count(&BB))
2874         A.deleteAfterManifest(BB);
2875 
2876     return HasChanged;
2877   }
2878 
2879   /// See AbstractAttribute::updateImpl(...).
2880   ChangeStatus updateImpl(Attributor &A) override;
2881 
2882   /// See AbstractAttribute::trackStatistics()
2883   void trackStatistics() const override {}
2884 
2885   /// Returns true if the function is assumed dead.
2886   bool isAssumedDead() const override { return false; }
2887 
2888   /// See AAIsDead::isAssumedDead(BasicBlock *).
2889   bool isAssumedDead(const BasicBlock *BB) const override {
2890     assert(BB->getParent() == getAssociatedFunction() &&
2891            "BB must be in the same anchor scope function.");
2892 
2893     if (!getAssumed())
2894       return false;
2895     return !AssumedLiveBlocks.count(BB);
2896   }
2897 
2898   /// See AAIsDead::isKnownDead(BasicBlock *).
2899   bool isKnownDead(const BasicBlock *BB) const override {
2900     return getKnown() && isAssumedDead(BB);
2901   }
2902 
2903   /// See AAIsDead::isAssumed(Instruction *I).
2904   bool isAssumedDead(const Instruction *I) const override {
2905     assert(I->getParent()->getParent() == getAssociatedFunction() &&
2906            "Instruction must be in the same anchor scope function.");
2907 
2908     if (!getAssumed())
2909       return false;
2910 
2911     // If it is not in AssumedLiveBlocks then it for sure dead.
2912     // Otherwise, it can still be after noreturn call in a live block.
2913     if (!AssumedLiveBlocks.count(I->getParent()))
2914       return true;
2915 
2916     // If it is not after a liveness barrier it is live.
2917     const Instruction *PrevI = I->getPrevNode();
2918     while (PrevI) {
2919       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2920         return true;
2921       PrevI = PrevI->getPrevNode();
2922     }
2923     return false;
2924   }
2925 
2926   /// See AAIsDead::isKnownDead(Instruction *I).
2927   bool isKnownDead(const Instruction *I) const override {
2928     return getKnown() && isAssumedDead(I);
2929   }
2930 
2931   /// Determine if \p F might catch asynchronous exceptions.
2932   static bool mayCatchAsynchronousExceptions(const Function &F) {
2933     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
2934   }
2935 
2936   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2937   /// that internal function called from \p BB should now be looked at.
2938   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2939     if (!AssumedLiveBlocks.insert(&BB).second)
2940       return false;
2941 
2942     // We assume that all of BB is (probably) live now and if there are calls to
2943     // internal functions we will assume that those are now live as well. This
2944     // is a performance optimization for blocks with calls to a lot of internal
2945     // functions. It can however cause dead functions to be treated as live.
2946     for (const Instruction &I : BB)
2947       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2948         if (const Function *F = ICS.getCalledFunction())
2949           if (F->hasLocalLinkage())
2950             A.markLiveInternalFunction(*F);
2951     return true;
2952   }
2953 
2954   /// Collection of instructions that need to be explored again, e.g., we
2955   /// did assume they do not transfer control to (one of their) successors.
2956   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2957 
2958   /// Collection of instructions that are known to not transfer control.
2959   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2960 
2961   /// Collection of all assumed live BasicBlocks.
2962   DenseSet<const BasicBlock *> AssumedLiveBlocks;
2963 };
2964 
2965 static bool
2966 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2967                         AbstractAttribute &AA,
2968                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2969   const IRPosition &IPos = IRPosition::callsite_function(CB);
2970 
2971   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2972   if (NoReturnAA.isAssumedNoReturn())
2973     return !NoReturnAA.isKnownNoReturn();
2974   if (CB.isTerminator())
2975     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
2976   else
2977     AliveSuccessors.push_back(CB.getNextNode());
2978   return false;
2979 }
2980 
2981 static bool
2982 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
2983                         AbstractAttribute &AA,
2984                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2985   bool UsedAssumedInformation =
2986       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
2987 
2988   // First, determine if we can change an invoke to a call assuming the
2989   // callee is nounwind. This is not possible if the personality of the
2990   // function allows to catch asynchronous exceptions.
2991   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
2992     AliveSuccessors.push_back(&II.getUnwindDest()->front());
2993   } else {
2994     const IRPosition &IPos = IRPosition::callsite_function(II);
2995     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
2996     if (AANoUnw.isAssumedNoUnwind()) {
2997       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
2998     } else {
2999       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3000     }
3001   }
3002   return UsedAssumedInformation;
3003 }
3004 
3005 static bool
3006 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3007                         AbstractAttribute &AA,
3008                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3009   bool UsedAssumedInformation = false;
3010   if (BI.getNumSuccessors() == 1) {
3011     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3012   } else {
3013     Optional<ConstantInt *> CI =
3014         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
3015     if (!CI.hasValue()) {
3016       // No value yet, assume both edges are dead.
3017     } else if (CI.getValue()) {
3018       const BasicBlock *SuccBB =
3019           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3020       AliveSuccessors.push_back(&SuccBB->front());
3021     } else {
3022       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3023       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3024       UsedAssumedInformation = false;
3025     }
3026   }
3027   return UsedAssumedInformation;
3028 }
3029 
3030 static bool
3031 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3032                         AbstractAttribute &AA,
3033                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3034   bool UsedAssumedInformation = false;
3035   Optional<ConstantInt *> CI =
3036       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
3037   if (!CI.hasValue()) {
3038     // No value yet, assume all edges are dead.
3039   } else if (CI.getValue()) {
3040     for (auto &CaseIt : SI.cases()) {
3041       if (CaseIt.getCaseValue() == CI.getValue()) {
3042         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3043         return UsedAssumedInformation;
3044       }
3045     }
3046     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3047     return UsedAssumedInformation;
3048   } else {
3049     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3050       AliveSuccessors.push_back(&SuccBB->front());
3051   }
3052   return UsedAssumedInformation;
3053 }
3054 
3055 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3056   ChangeStatus Change = ChangeStatus::UNCHANGED;
3057 
3058   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3059                     << getAssociatedFunction()->size() << "] BBs and "
3060                     << ToBeExploredFrom.size() << " exploration points and "
3061                     << KnownDeadEnds.size() << " known dead ends\n");
3062 
3063   // Copy and clear the list of instructions we need to explore from. It is
3064   // refilled with instructions the next update has to look at.
3065   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3066                                                ToBeExploredFrom.end());
3067   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3068 
3069   SmallVector<const Instruction *, 8> AliveSuccessors;
3070   while (!Worklist.empty()) {
3071     const Instruction *I = Worklist.pop_back_val();
3072     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3073 
3074     AliveSuccessors.clear();
3075 
3076     bool UsedAssumedInformation = false;
3077     switch (I->getOpcode()) {
3078     // TODO: look for (assumed) UB to backwards propagate "deadness".
3079     default:
3080       if (I->isTerminator()) {
3081         for (const BasicBlock *SuccBB : successors(I->getParent()))
3082           AliveSuccessors.push_back(&SuccBB->front());
3083       } else {
3084         AliveSuccessors.push_back(I->getNextNode());
3085       }
3086       break;
3087     case Instruction::Call:
3088       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3089                                                        *this, AliveSuccessors);
3090       break;
3091     case Instruction::Invoke:
3092       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3093                                                        *this, AliveSuccessors);
3094       break;
3095     case Instruction::Br:
3096       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3097                                                        *this, AliveSuccessors);
3098       break;
3099     case Instruction::Switch:
3100       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3101                                                        *this, AliveSuccessors);
3102       break;
3103     }
3104 
3105     if (UsedAssumedInformation) {
3106       NewToBeExploredFrom.insert(I);
3107     } else {
3108       Change = ChangeStatus::CHANGED;
3109       if (AliveSuccessors.empty() ||
3110           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3111         KnownDeadEnds.insert(I);
3112     }
3113 
3114     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3115                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3116                       << UsedAssumedInformation << "\n");
3117 
3118     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3119       if (!I->isTerminator()) {
3120         assert(AliveSuccessors.size() == 1 &&
3121                "Non-terminator expected to have a single successor!");
3122         Worklist.push_back(AliveSuccessor);
3123       } else {
3124         if (assumeLive(A, *AliveSuccessor->getParent()))
3125           Worklist.push_back(AliveSuccessor);
3126       }
3127     }
3128   }
3129 
3130   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3131 
3132   // If we know everything is live there is no need to query for liveness.
3133   // Instead, indicating a pessimistic fixpoint will cause the state to be
3134   // "invalid" and all queries to be answered conservatively without lookups.
3135   // To be in this state we have to (1) finished the exploration and (3) not
3136   // discovered any non-trivial dead end and (2) not ruled unreachable code
3137   // dead.
3138   if (ToBeExploredFrom.empty() &&
3139       getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
3140       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3141         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3142       }))
3143     return indicatePessimisticFixpoint();
3144   return Change;
3145 }
3146 
3147 /// Liveness information for a call sites.
3148 struct AAIsDeadCallSite final : AAIsDeadFunction {
3149   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3150 
3151   /// See AbstractAttribute::initialize(...).
3152   void initialize(Attributor &A) override {
3153     // TODO: Once we have call site specific value information we can provide
3154     //       call site specific liveness information and then it makes
3155     //       sense to specialize attributes for call sites instead of
3156     //       redirecting requests to the callee.
3157     llvm_unreachable("Abstract attributes for liveness are not "
3158                      "supported for call sites yet!");
3159   }
3160 
3161   /// See AbstractAttribute::updateImpl(...).
3162   ChangeStatus updateImpl(Attributor &A) override {
3163     return indicatePessimisticFixpoint();
3164   }
3165 
3166   /// See AbstractAttribute::trackStatistics()
3167   void trackStatistics() const override {}
3168 };
3169 
3170 /// -------------------- Dereferenceable Argument Attribute --------------------
3171 
3172 template <>
3173 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3174                                                      const DerefState &R) {
3175   ChangeStatus CS0 =
3176       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3177   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3178   return CS0 | CS1;
3179 }
3180 
3181 struct AADereferenceableImpl : AADereferenceable {
3182   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
3183   using StateType = DerefState;
3184 
3185   void initialize(Attributor &A) override {
3186     SmallVector<Attribute, 4> Attrs;
3187     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3188              Attrs);
3189     for (const Attribute &Attr : Attrs)
3190       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3191 
3192     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3193                                        /* TrackDependence */ false);
3194 
3195     const IRPosition &IRP = this->getIRPosition();
3196     bool IsFnInterface = IRP.isFnInterfaceKind();
3197     const Function *FnScope = IRP.getAnchorScope();
3198     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
3199       indicatePessimisticFixpoint();
3200   }
3201 
3202   /// See AbstractAttribute::getState()
3203   /// {
3204   StateType &getState() override { return *this; }
3205   const StateType &getState() const override { return *this; }
3206   /// }
3207 
3208   /// Helper function for collecting accessed bytes in must-be-executed-context
3209   void addAccessedBytesForUse(Attributor &A, const Use *U,
3210                               const Instruction *I) {
3211     const Value *UseV = U->get();
3212     if (!UseV->getType()->isPointerTy())
3213       return;
3214 
3215     Type *PtrTy = UseV->getType();
3216     const DataLayout &DL = A.getDataLayout();
3217     int64_t Offset;
3218     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3219             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3220       if (Base == &getAssociatedValue() &&
3221           Attributor::getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3222         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3223         addAccessedBytes(Offset, Size);
3224       }
3225     }
3226     return;
3227   }
3228 
3229   /// See AAFromMustBeExecutedContext
3230   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3231     bool IsNonNull = false;
3232     bool TrackUse = false;
3233     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3234         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3235 
3236     addAccessedBytesForUse(A, U, I);
3237     takeKnownDerefBytesMaximum(DerefBytes);
3238     return TrackUse;
3239   }
3240 
3241   /// See AbstractAttribute::manifest(...).
3242   ChangeStatus manifest(Attributor &A) override {
3243     ChangeStatus Change = AADereferenceable::manifest(A);
3244     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3245       removeAttrs({Attribute::DereferenceableOrNull});
3246       return ChangeStatus::CHANGED;
3247     }
3248     return Change;
3249   }
3250 
3251   void getDeducedAttributes(LLVMContext &Ctx,
3252                             SmallVectorImpl<Attribute> &Attrs) const override {
3253     // TODO: Add *_globally support
3254     if (isAssumedNonNull())
3255       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3256           Ctx, getAssumedDereferenceableBytes()));
3257     else
3258       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3259           Ctx, getAssumedDereferenceableBytes()));
3260   }
3261 
3262   /// See AbstractAttribute::getAsStr().
3263   const std::string getAsStr() const override {
3264     if (!getAssumedDereferenceableBytes())
3265       return "unknown-dereferenceable";
3266     return std::string("dereferenceable") +
3267            (isAssumedNonNull() ? "" : "_or_null") +
3268            (isAssumedGlobal() ? "_globally" : "") + "<" +
3269            std::to_string(getKnownDereferenceableBytes()) + "-" +
3270            std::to_string(getAssumedDereferenceableBytes()) + ">";
3271   }
3272 };
3273 
3274 /// Dereferenceable attribute for a floating value.
3275 struct AADereferenceableFloating
3276     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3277   using Base =
3278       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3279   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
3280 
3281   /// See AbstractAttribute::updateImpl(...).
3282   ChangeStatus updateImpl(Attributor &A) override {
3283     ChangeStatus Change = Base::updateImpl(A);
3284 
3285     const DataLayout &DL = A.getDataLayout();
3286 
3287     auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
3288       unsigned IdxWidth =
3289           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3290       APInt Offset(IdxWidth, 0);
3291       const Value *Base =
3292           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3293 
3294       const auto &AA =
3295           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3296       int64_t DerefBytes = 0;
3297       if (!Stripped && this == &AA) {
3298         // Use IR information if we did not strip anything.
3299         // TODO: track globally.
3300         bool CanBeNull;
3301         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3302         T.GlobalState.indicatePessimisticFixpoint();
3303       } else {
3304         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3305         DerefBytes = DS.DerefBytesState.getAssumed();
3306         T.GlobalState &= DS.GlobalState;
3307       }
3308 
3309       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3310 
3311       // For now we do not try to "increase" dereferenceability due to negative
3312       // indices as we first have to come up with code to deal with loops and
3313       // for overflows of the dereferenceable bytes.
3314       int64_t OffsetSExt = Offset.getSExtValue();
3315       if (OffsetSExt < 0)
3316         OffsetSExt = 0;
3317 
3318       T.takeAssumedDerefBytesMinimum(
3319           std::max(int64_t(0), DerefBytes - OffsetSExt));
3320 
3321       if (this == &AA) {
3322         if (!Stripped) {
3323           // If nothing was stripped IR information is all we got.
3324           T.takeKnownDerefBytesMaximum(
3325               std::max(int64_t(0), DerefBytes - OffsetSExt));
3326           T.indicatePessimisticFixpoint();
3327         } else if (OffsetSExt > 0) {
3328           // If something was stripped but there is circular reasoning we look
3329           // for the offset. If it is positive we basically decrease the
3330           // dereferenceable bytes in a circluar loop now, which will simply
3331           // drive them down to the known value in a very slow way which we
3332           // can accelerate.
3333           T.indicatePessimisticFixpoint();
3334         }
3335       }
3336 
3337       return T.isValidState();
3338     };
3339 
3340     DerefState T;
3341     if (!genericValueTraversal<AADereferenceable, DerefState>(
3342             A, getIRPosition(), *this, T, VisitValueCB))
3343       return indicatePessimisticFixpoint();
3344 
3345     return Change | clampStateAndIndicateChange(getState(), T);
3346   }
3347 
3348   /// See AbstractAttribute::trackStatistics()
3349   void trackStatistics() const override {
3350     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3351   }
3352 };
3353 
3354 /// Dereferenceable attribute for a return value.
3355 struct AADereferenceableReturned final
3356     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3357                                    DerefState> {
3358   AADereferenceableReturned(const IRPosition &IRP)
3359       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
3360                                      DerefState>(IRP) {}
3361 
3362   /// See AbstractAttribute::trackStatistics()
3363   void trackStatistics() const override {
3364     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3365   }
3366 };
3367 
3368 /// Dereferenceable attribute for an argument
3369 struct AADereferenceableArgument final
3370     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3371           AADereferenceable, AADereferenceableImpl, DerefState> {
3372   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3373       AADereferenceable, AADereferenceableImpl, DerefState>;
3374   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3375 
3376   /// See AbstractAttribute::trackStatistics()
3377   void trackStatistics() const override {
3378     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3379   }
3380 };
3381 
3382 /// Dereferenceable attribute for a call site argument.
3383 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3384   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3385       : AADereferenceableFloating(IRP) {}
3386 
3387   /// See AbstractAttribute::trackStatistics()
3388   void trackStatistics() const override {
3389     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3390   }
3391 };
3392 
3393 /// Dereferenceable attribute deduction for a call site return value.
3394 struct AADereferenceableCallSiteReturned final
3395     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3396           AADereferenceable, AADereferenceableImpl> {
3397   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3398       AADereferenceable, AADereferenceableImpl>;
3399   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3400 
3401   /// See AbstractAttribute::trackStatistics()
3402   void trackStatistics() const override {
3403     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3404   }
3405 };
3406 
3407 // ------------------------ Align Argument Attribute ------------------------
3408 
3409 static unsigned int getKnownAlignForUse(Attributor &A,
3410                                         AbstractAttribute &QueryingAA,
3411                                         Value &AssociatedValue, const Use *U,
3412                                         const Instruction *I, bool &TrackUse) {
3413   // We need to follow common pointer manipulation uses to the accesses they
3414   // feed into.
3415   if (isa<CastInst>(I)) {
3416     // Follow all but ptr2int casts.
3417     TrackUse = !isa<PtrToIntInst>(I);
3418     return 0;
3419   }
3420   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3421     if (GEP->hasAllConstantIndices()) {
3422       TrackUse = true;
3423       return 0;
3424     }
3425   }
3426 
3427   unsigned Alignment = 0;
3428   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3429     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3430       return 0;
3431 
3432     unsigned ArgNo = ICS.getArgumentNo(U);
3433     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3434     // As long as we only use known information there is no need to track
3435     // dependences here.
3436     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3437                                         /* TrackDependence */ false);
3438     Alignment = AlignAA.getKnownAlign();
3439   }
3440 
3441   const Value *UseV = U->get();
3442   if (auto *SI = dyn_cast<StoreInst>(I)) {
3443     if (SI->getPointerOperand() == UseV)
3444       Alignment = SI->getAlignment();
3445   } else if (auto *LI = dyn_cast<LoadInst>(I))
3446     Alignment = LI->getAlignment();
3447 
3448   if (Alignment <= 1)
3449     return 0;
3450 
3451   auto &DL = A.getDataLayout();
3452   int64_t Offset;
3453 
3454   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3455     if (Base == &AssociatedValue) {
3456       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3457       // So we can say that the maximum power of two which is a divisor of
3458       // gcd(Offset, Alignment) is an alignment.
3459 
3460       uint32_t gcd =
3461           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3462       Alignment = llvm::PowerOf2Floor(gcd);
3463     }
3464   }
3465 
3466   return Alignment;
3467 }
3468 struct AAAlignImpl : AAAlign {
3469   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3470 
3471   /// See AbstractAttribute::initialize(...).
3472   void initialize(Attributor &A) override {
3473     SmallVector<Attribute, 4> Attrs;
3474     getAttrs({Attribute::Alignment}, Attrs);
3475     for (const Attribute &Attr : Attrs)
3476       takeKnownMaximum(Attr.getValueAsInt());
3477 
3478     if (getIRPosition().isFnInterfaceKind() &&
3479         (!getAssociatedFunction() ||
3480          !getAssociatedFunction()->hasExactDefinition()))
3481       indicatePessimisticFixpoint();
3482   }
3483 
3484   /// See AbstractAttribute::manifest(...).
3485   ChangeStatus manifest(Attributor &A) override {
3486     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3487 
3488     // Check for users that allow alignment annotations.
3489     Value &AnchorVal = getIRPosition().getAnchorValue();
3490     for (const Use &U : AnchorVal.uses()) {
3491       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3492         if (SI->getPointerOperand() == &AnchorVal)
3493           if (SI->getAlignment() < getAssumedAlign()) {
3494             STATS_DECLTRACK(AAAlign, Store,
3495                             "Number of times alignment added to a store");
3496             SI->setAlignment(Align(getAssumedAlign()));
3497             LoadStoreChanged = ChangeStatus::CHANGED;
3498           }
3499       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3500         if (LI->getPointerOperand() == &AnchorVal)
3501           if (LI->getAlignment() < getAssumedAlign()) {
3502             LI->setAlignment(Align(getAssumedAlign()));
3503             STATS_DECLTRACK(AAAlign, Load,
3504                             "Number of times alignment added to a load");
3505             LoadStoreChanged = ChangeStatus::CHANGED;
3506           }
3507       }
3508     }
3509 
3510     ChangeStatus Changed = AAAlign::manifest(A);
3511 
3512     MaybeAlign InheritAlign =
3513         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3514     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3515       return LoadStoreChanged;
3516     return Changed | LoadStoreChanged;
3517   }
3518 
3519   // TODO: Provide a helper to determine the implied ABI alignment and check in
3520   //       the existing manifest method and a new one for AAAlignImpl that value
3521   //       to avoid making the alignment explicit if it did not improve.
3522 
3523   /// See AbstractAttribute::getDeducedAttributes
3524   virtual void
3525   getDeducedAttributes(LLVMContext &Ctx,
3526                        SmallVectorImpl<Attribute> &Attrs) const override {
3527     if (getAssumedAlign() > 1)
3528       Attrs.emplace_back(
3529           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3530   }
3531   /// See AAFromMustBeExecutedContext
3532   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3533     bool TrackUse = false;
3534 
3535     unsigned int KnownAlign =
3536         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3537     takeKnownMaximum(KnownAlign);
3538 
3539     return TrackUse;
3540   }
3541 
3542   /// See AbstractAttribute::getAsStr().
3543   const std::string getAsStr() const override {
3544     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3545                                 "-" + std::to_string(getAssumedAlign()) + ">")
3546                              : "unknown-align";
3547   }
3548 };
3549 
3550 /// Align attribute for a floating value.
3551 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3552   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3553   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3554 
3555   /// See AbstractAttribute::updateImpl(...).
3556   ChangeStatus updateImpl(Attributor &A) override {
3557     Base::updateImpl(A);
3558 
3559     const DataLayout &DL = A.getDataLayout();
3560 
3561     auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3562                             bool Stripped) -> bool {
3563       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3564       if (!Stripped && this == &AA) {
3565         // Use only IR information if we did not strip anything.
3566         const MaybeAlign PA = V.getPointerAlignment(DL);
3567         T.takeKnownMaximum(PA ? PA->value() : 0);
3568         T.indicatePessimisticFixpoint();
3569       } else {
3570         // Use abstract attribute information.
3571         const AAAlign::StateType &DS =
3572             static_cast<const AAAlign::StateType &>(AA.getState());
3573         T ^= DS;
3574       }
3575       return T.isValidState();
3576     };
3577 
3578     StateType T;
3579     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3580                                                    VisitValueCB))
3581       return indicatePessimisticFixpoint();
3582 
3583     // TODO: If we know we visited all incoming values, thus no are assumed
3584     // dead, we can take the known information from the state T.
3585     return clampStateAndIndicateChange(getState(), T);
3586   }
3587 
3588   /// See AbstractAttribute::trackStatistics()
3589   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3590 };
3591 
3592 /// Align attribute for function return value.
3593 struct AAAlignReturned final
3594     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3595   AAAlignReturned(const IRPosition &IRP)
3596       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3597 
3598   /// See AbstractAttribute::trackStatistics()
3599   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3600 };
3601 
3602 /// Align attribute for function argument.
3603 struct AAAlignArgument final
3604     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3605                                                               AAAlignImpl> {
3606   AAAlignArgument(const IRPosition &IRP)
3607       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3608                                                                 AAAlignImpl>(
3609             IRP) {}
3610 
3611   /// See AbstractAttribute::trackStatistics()
3612   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3613 };
3614 
3615 struct AAAlignCallSiteArgument final : AAAlignFloating {
3616   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3617 
3618   /// See AbstractAttribute::manifest(...).
3619   ChangeStatus manifest(Attributor &A) override {
3620     ChangeStatus Changed = AAAlignImpl::manifest(A);
3621     MaybeAlign InheritAlign =
3622         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3623     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3624       Changed = ChangeStatus::UNCHANGED;
3625     return Changed;
3626   }
3627 
3628   /// See AbstractAttribute::updateImpl(Attributor &A).
3629   ChangeStatus updateImpl(Attributor &A) override {
3630     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3631     if (Argument *Arg = getAssociatedArgument()) {
3632       // We only take known information from the argument
3633       // so we do not need to track a dependence.
3634       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3635           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3636       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3637     }
3638     return Changed;
3639   }
3640 
3641   /// See AbstractAttribute::trackStatistics()
3642   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3643 };
3644 
3645 /// Align attribute deduction for a call site return value.
3646 struct AAAlignCallSiteReturned final
3647     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3648                                                              AAAlignImpl> {
3649   using Base =
3650       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3651                                                              AAAlignImpl>;
3652   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3653 
3654   /// See AbstractAttribute::initialize(...).
3655   void initialize(Attributor &A) override {
3656     Base::initialize(A);
3657     Function *F = getAssociatedFunction();
3658     if (!F)
3659       indicatePessimisticFixpoint();
3660   }
3661 
3662   /// See AbstractAttribute::trackStatistics()
3663   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3664 };
3665 
3666 /// ------------------ Function No-Return Attribute ----------------------------
3667 struct AANoReturnImpl : public AANoReturn {
3668   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3669 
3670   /// See AbstractAttribute::initialize(...).
3671   void initialize(Attributor &A) override {
3672     AANoReturn::initialize(A);
3673     Function *F = getAssociatedFunction();
3674     if (!F)
3675       indicatePessimisticFixpoint();
3676   }
3677 
3678   /// See AbstractAttribute::getAsStr().
3679   const std::string getAsStr() const override {
3680     return getAssumed() ? "noreturn" : "may-return";
3681   }
3682 
3683   /// See AbstractAttribute::updateImpl(Attributor &A).
3684   virtual ChangeStatus updateImpl(Attributor &A) override {
3685     auto CheckForNoReturn = [](Instruction &) { return false; };
3686     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3687                                    {(unsigned)Instruction::Ret}))
3688       return indicatePessimisticFixpoint();
3689     return ChangeStatus::UNCHANGED;
3690   }
3691 };
3692 
3693 struct AANoReturnFunction final : AANoReturnImpl {
3694   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3695 
3696   /// See AbstractAttribute::trackStatistics()
3697   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3698 };
3699 
3700 /// NoReturn attribute deduction for a call sites.
3701 struct AANoReturnCallSite final : AANoReturnImpl {
3702   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3703 
3704   /// See AbstractAttribute::updateImpl(...).
3705   ChangeStatus updateImpl(Attributor &A) override {
3706     // TODO: Once we have call site specific value information we can provide
3707     //       call site specific liveness information and then it makes
3708     //       sense to specialize attributes for call sites arguments instead of
3709     //       redirecting requests to the callee argument.
3710     Function *F = getAssociatedFunction();
3711     const IRPosition &FnPos = IRPosition::function(*F);
3712     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3713     return clampStateAndIndicateChange(
3714         getState(),
3715         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3716   }
3717 
3718   /// See AbstractAttribute::trackStatistics()
3719   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3720 };
3721 
3722 /// ----------------------- Variable Capturing ---------------------------------
3723 
3724 /// A class to hold the state of for no-capture attributes.
3725 struct AANoCaptureImpl : public AANoCapture {
3726   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3727 
3728   /// See AbstractAttribute::initialize(...).
3729   void initialize(Attributor &A) override {
3730     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3731       indicateOptimisticFixpoint();
3732       return;
3733     }
3734     Function *AnchorScope = getAnchorScope();
3735     if (isFnInterfaceKind() &&
3736         (!AnchorScope || !AnchorScope->hasExactDefinition())) {
3737       indicatePessimisticFixpoint();
3738       return;
3739     }
3740 
3741     // You cannot "capture" null in the default address space.
3742     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3743         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3744       indicateOptimisticFixpoint();
3745       return;
3746     }
3747 
3748     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3749 
3750     // Check what state the associated function can actually capture.
3751     if (F)
3752       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3753     else
3754       indicatePessimisticFixpoint();
3755   }
3756 
3757   /// See AbstractAttribute::updateImpl(...).
3758   ChangeStatus updateImpl(Attributor &A) override;
3759 
3760   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3761   virtual void
3762   getDeducedAttributes(LLVMContext &Ctx,
3763                        SmallVectorImpl<Attribute> &Attrs) const override {
3764     if (!isAssumedNoCaptureMaybeReturned())
3765       return;
3766 
3767     if (getArgNo() >= 0) {
3768       if (isAssumedNoCapture())
3769         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3770       else if (ManifestInternal)
3771         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3772     }
3773   }
3774 
3775   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3776   /// depending on the ability of the function associated with \p IRP to capture
3777   /// state in memory and through "returning/throwing", respectively.
3778   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3779                                                    const Function &F,
3780                                                    BitIntegerState &State) {
3781     // TODO: Once we have memory behavior attributes we should use them here.
3782 
3783     // If we know we cannot communicate or write to memory, we do not care about
3784     // ptr2int anymore.
3785     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3786         F.getReturnType()->isVoidTy()) {
3787       State.addKnownBits(NO_CAPTURE);
3788       return;
3789     }
3790 
3791     // A function cannot capture state in memory if it only reads memory, it can
3792     // however return/throw state and the state might be influenced by the
3793     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3794     if (F.onlyReadsMemory())
3795       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3796 
3797     // A function cannot communicate state back if it does not through
3798     // exceptions and doesn not return values.
3799     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3800       State.addKnownBits(NOT_CAPTURED_IN_RET);
3801 
3802     // Check existing "returned" attributes.
3803     int ArgNo = IRP.getArgNo();
3804     if (F.doesNotThrow() && ArgNo >= 0) {
3805       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3806         if (F.hasParamAttribute(u, Attribute::Returned)) {
3807           if (u == unsigned(ArgNo))
3808             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3809           else if (F.onlyReadsMemory())
3810             State.addKnownBits(NO_CAPTURE);
3811           else
3812             State.addKnownBits(NOT_CAPTURED_IN_RET);
3813           break;
3814         }
3815     }
3816   }
3817 
3818   /// See AbstractState::getAsStr().
3819   const std::string getAsStr() const override {
3820     if (isKnownNoCapture())
3821       return "known not-captured";
3822     if (isAssumedNoCapture())
3823       return "assumed not-captured";
3824     if (isKnownNoCaptureMaybeReturned())
3825       return "known not-captured-maybe-returned";
3826     if (isAssumedNoCaptureMaybeReturned())
3827       return "assumed not-captured-maybe-returned";
3828     return "assumed-captured";
3829   }
3830 };
3831 
3832 /// Attributor-aware capture tracker.
3833 struct AACaptureUseTracker final : public CaptureTracker {
3834 
3835   /// Create a capture tracker that can lookup in-flight abstract attributes
3836   /// through the Attributor \p A.
3837   ///
3838   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3839   /// search is stopped. If a use leads to a return instruction,
3840   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3841   /// If a use leads to a ptr2int which may capture the value,
3842   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3843   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3844   /// set. All values in \p PotentialCopies are later tracked as well. For every
3845   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3846   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3847   /// conservatively set to true.
3848   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3849                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3850                       SmallVectorImpl<const Value *> &PotentialCopies,
3851                       unsigned &RemainingUsesToExplore)
3852       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3853         PotentialCopies(PotentialCopies),
3854         RemainingUsesToExplore(RemainingUsesToExplore) {}
3855 
3856   /// Determine if \p V maybe captured. *Also updates the state!*
3857   bool valueMayBeCaptured(const Value *V) {
3858     if (V->getType()->isPointerTy()) {
3859       PointerMayBeCaptured(V, this);
3860     } else {
3861       State.indicatePessimisticFixpoint();
3862     }
3863     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3864   }
3865 
3866   /// See CaptureTracker::tooManyUses().
3867   void tooManyUses() override {
3868     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3869   }
3870 
3871   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3872     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3873       return true;
3874     const auto &DerefAA = A.getAAFor<AADereferenceable>(
3875         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
3876         DepClassTy::OPTIONAL);
3877     return DerefAA.getAssumedDereferenceableBytes();
3878   }
3879 
3880   /// See CaptureTracker::captured(...).
3881   bool captured(const Use *U) override {
3882     Instruction *UInst = cast<Instruction>(U->getUser());
3883     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3884                       << "\n");
3885 
3886     // Because we may reuse the tracker multiple times we keep track of the
3887     // number of explored uses ourselves as well.
3888     if (RemainingUsesToExplore-- == 0) {
3889       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3890       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3891                           /* Return */ true);
3892     }
3893 
3894     // Deal with ptr2int by following uses.
3895     if (isa<PtrToIntInst>(UInst)) {
3896       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3897       return valueMayBeCaptured(UInst);
3898     }
3899 
3900     // Explicitly catch return instructions.
3901     if (isa<ReturnInst>(UInst))
3902       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3903                           /* Return */ true);
3904 
3905     // For now we only use special logic for call sites. However, the tracker
3906     // itself knows about a lot of other non-capturing cases already.
3907     CallSite CS(UInst);
3908     if (!CS || !CS.isArgOperand(U))
3909       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3910                           /* Return */ true);
3911 
3912     unsigned ArgNo = CS.getArgumentNo(U);
3913     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3914     // If we have a abstract no-capture attribute for the argument we can use
3915     // it to justify a non-capture attribute here. This allows recursion!
3916     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3917     if (ArgNoCaptureAA.isAssumedNoCapture())
3918       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3919                           /* Return */ false);
3920     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3921       addPotentialCopy(CS);
3922       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3923                           /* Return */ false);
3924     }
3925 
3926     // Lastly, we could not find a reason no-capture can be assumed so we don't.
3927     return isCapturedIn(/* Memory */ true, /* Integer */ true,
3928                         /* Return */ true);
3929   }
3930 
3931   /// Register \p CS as potential copy of the value we are checking.
3932   void addPotentialCopy(CallSite CS) {
3933     PotentialCopies.push_back(CS.getInstruction());
3934   }
3935 
3936   /// See CaptureTracker::shouldExplore(...).
3937   bool shouldExplore(const Use *U) override {
3938     // Check liveness, if it is used to stop exploring we need a dependence.
3939     if (IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()))) {
3940       A.recordDependence(IsDeadAA, NoCaptureAA, DepClassTy::OPTIONAL);
3941       return false;
3942     }
3943     return true;
3944   }
3945 
3946   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
3947   /// \p CapturedInRet, then return the appropriate value for use in the
3948   /// CaptureTracker::captured() interface.
3949   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
3950                     bool CapturedInRet) {
3951     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
3952                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
3953     if (CapturedInMem)
3954       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
3955     if (CapturedInInt)
3956       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
3957     if (CapturedInRet)
3958       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
3959     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3960   }
3961 
3962 private:
3963   /// The attributor providing in-flight abstract attributes.
3964   Attributor &A;
3965 
3966   /// The abstract attribute currently updated.
3967   AANoCapture &NoCaptureAA;
3968 
3969   /// The abstract liveness state.
3970   const AAIsDead &IsDeadAA;
3971 
3972   /// The state currently updated.
3973   AANoCapture::StateType &State;
3974 
3975   /// Set of potential copies of the tracked value.
3976   SmallVectorImpl<const Value *> &PotentialCopies;
3977 
3978   /// Global counter to limit the number of explored uses.
3979   unsigned &RemainingUsesToExplore;
3980 };
3981 
3982 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
3983   const IRPosition &IRP = getIRPosition();
3984   const Value *V =
3985       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
3986   if (!V)
3987     return indicatePessimisticFixpoint();
3988 
3989   const Function *F =
3990       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
3991   assert(F && "Expected a function!");
3992   const IRPosition &FnPos = IRPosition::function(*F);
3993   const auto &IsDeadAA =
3994       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
3995 
3996   AANoCapture::StateType T;
3997 
3998   // Readonly means we cannot capture through memory.
3999   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
4000       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4001   if (FnMemAA.isAssumedReadOnly()) {
4002     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4003     if (FnMemAA.isKnownReadOnly())
4004       addKnownBits(NOT_CAPTURED_IN_MEM);
4005   }
4006 
4007   // Make sure all returned values are different than the underlying value.
4008   // TODO: we could do this in a more sophisticated way inside
4009   //       AAReturnedValues, e.g., track all values that escape through returns
4010   //       directly somehow.
4011   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4012     bool SeenConstant = false;
4013     for (auto &It : RVAA.returned_values()) {
4014       if (isa<Constant>(It.first)) {
4015         if (SeenConstant)
4016           return false;
4017         SeenConstant = true;
4018       } else if (!isa<Argument>(It.first) ||
4019                  It.first == getAssociatedArgument())
4020         return false;
4021     }
4022     return true;
4023   };
4024 
4025   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4026       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4027   if (NoUnwindAA.isAssumedNoUnwind()) {
4028     bool IsVoidTy = F->getReturnType()->isVoidTy();
4029     const AAReturnedValues *RVAA =
4030         IsVoidTy ? nullptr
4031                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4032                                                  /* TrackDependence */ true,
4033                                                  DepClassTy::OPTIONAL);
4034     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4035       T.addKnownBits(NOT_CAPTURED_IN_RET);
4036       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4037         return ChangeStatus::UNCHANGED;
4038       if (NoUnwindAA.isKnownNoUnwind() &&
4039           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4040         addKnownBits(NOT_CAPTURED_IN_RET);
4041         if (isKnown(NOT_CAPTURED_IN_MEM))
4042           return indicateOptimisticFixpoint();
4043       }
4044     }
4045   }
4046 
4047   // Use the CaptureTracker interface and logic with the specialized tracker,
4048   // defined in AACaptureUseTracker, that can look at in-flight abstract
4049   // attributes and directly updates the assumed state.
4050   SmallVector<const Value *, 4> PotentialCopies;
4051   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
4052   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4053                               RemainingUsesToExplore);
4054 
4055   // Check all potential copies of the associated value until we can assume
4056   // none will be captured or we have to assume at least one might be.
4057   unsigned Idx = 0;
4058   PotentialCopies.push_back(V);
4059   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4060     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4061 
4062   AANoCapture::StateType &S = getState();
4063   auto Assumed = S.getAssumed();
4064   S.intersectAssumedBits(T.getAssumed());
4065   if (!isAssumedNoCaptureMaybeReturned())
4066     return indicatePessimisticFixpoint();
4067   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4068                                    : ChangeStatus::CHANGED;
4069 }
4070 
4071 /// NoCapture attribute for function arguments.
4072 struct AANoCaptureArgument final : AANoCaptureImpl {
4073   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4074 
4075   /// See AbstractAttribute::trackStatistics()
4076   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4077 };
4078 
4079 /// NoCapture attribute for call site arguments.
4080 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4081   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4082 
4083   /// See AbstractAttribute::initialize(...).
4084   void initialize(Attributor &A) override {
4085     if (Argument *Arg = getAssociatedArgument())
4086       if (Arg->hasByValAttr())
4087         indicateOptimisticFixpoint();
4088     AANoCaptureImpl::initialize(A);
4089   }
4090 
4091   /// See AbstractAttribute::updateImpl(...).
4092   ChangeStatus updateImpl(Attributor &A) override {
4093     // TODO: Once we have call site specific value information we can provide
4094     //       call site specific liveness information and then it makes
4095     //       sense to specialize attributes for call sites arguments instead of
4096     //       redirecting requests to the callee argument.
4097     Argument *Arg = getAssociatedArgument();
4098     if (!Arg)
4099       return indicatePessimisticFixpoint();
4100     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4101     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4102     return clampStateAndIndicateChange(
4103         getState(),
4104         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4105   }
4106 
4107   /// See AbstractAttribute::trackStatistics()
4108   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4109 };
4110 
4111 /// NoCapture attribute for floating values.
4112 struct AANoCaptureFloating final : AANoCaptureImpl {
4113   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4114 
4115   /// See AbstractAttribute::trackStatistics()
4116   void trackStatistics() const override {
4117     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4118   }
4119 };
4120 
4121 /// NoCapture attribute for function return value.
4122 struct AANoCaptureReturned final : AANoCaptureImpl {
4123   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4124     llvm_unreachable("NoCapture is not applicable to function returns!");
4125   }
4126 
4127   /// See AbstractAttribute::initialize(...).
4128   void initialize(Attributor &A) override {
4129     llvm_unreachable("NoCapture is not applicable to function returns!");
4130   }
4131 
4132   /// See AbstractAttribute::updateImpl(...).
4133   ChangeStatus updateImpl(Attributor &A) override {
4134     llvm_unreachable("NoCapture is not applicable to function returns!");
4135   }
4136 
4137   /// See AbstractAttribute::trackStatistics()
4138   void trackStatistics() const override {}
4139 };
4140 
4141 /// NoCapture attribute deduction for a call site return value.
4142 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4143   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4144 
4145   /// See AbstractAttribute::trackStatistics()
4146   void trackStatistics() const override {
4147     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4148   }
4149 };
4150 
4151 /// ------------------ Value Simplify Attribute ----------------------------
4152 struct AAValueSimplifyImpl : AAValueSimplify {
4153   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
4154 
4155   /// See AbstractAttribute::initialize(...).
4156   void initialize(Attributor &A) override {
4157     if (getAssociatedValue().getType()->isVoidTy())
4158       indicatePessimisticFixpoint();
4159   }
4160 
4161   /// See AbstractAttribute::getAsStr().
4162   const std::string getAsStr() const override {
4163     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4164                         : "not-simple";
4165   }
4166 
4167   /// See AbstractAttribute::trackStatistics()
4168   void trackStatistics() const override {}
4169 
4170   /// See AAValueSimplify::getAssumedSimplifiedValue()
4171   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4172     if (!getAssumed())
4173       return const_cast<Value *>(&getAssociatedValue());
4174     return SimplifiedAssociatedValue;
4175   }
4176 
4177   /// Helper function for querying AAValueSimplify and updating candicate.
4178   /// \param QueryingValue Value trying to unify with SimplifiedValue
4179   /// \param AccumulatedSimplifiedValue Current simplification result.
4180   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4181                              Value &QueryingValue,
4182                              Optional<Value *> &AccumulatedSimplifiedValue) {
4183     // FIXME: Add a typecast support.
4184 
4185     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4186         QueryingAA, IRPosition::value(QueryingValue));
4187 
4188     Optional<Value *> QueryingValueSimplified =
4189         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4190 
4191     if (!QueryingValueSimplified.hasValue())
4192       return true;
4193 
4194     if (!QueryingValueSimplified.getValue())
4195       return false;
4196 
4197     Value &QueryingValueSimplifiedUnwrapped =
4198         *QueryingValueSimplified.getValue();
4199 
4200     if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4201       return true;
4202 
4203     if (AccumulatedSimplifiedValue.hasValue())
4204       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4205 
4206     LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
4207                       << " is assumed to be "
4208                       << QueryingValueSimplifiedUnwrapped << "\n");
4209 
4210     AccumulatedSimplifiedValue = QueryingValueSimplified;
4211     return true;
4212   }
4213 
4214   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4215     if (!getAssociatedValue().getType()->isIntegerTy())
4216       return false;
4217 
4218     const auto &ValueConstantRangeAA =
4219         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4220 
4221     Optional<ConstantInt *> COpt =
4222         ValueConstantRangeAA.getAssumedConstantInt(A);
4223     if (COpt.hasValue()) {
4224       if (auto *C = COpt.getValue())
4225         SimplifiedAssociatedValue = C;
4226       else
4227         return false;
4228     } else {
4229       // FIXME: It should be llvm::None but if you set llvm::None,
4230       //        values are mistakenly infered as `undef` now.
4231       SimplifiedAssociatedValue = &getAssociatedValue();
4232     }
4233     return true;
4234   }
4235 
4236   /// See AbstractAttribute::manifest(...).
4237   ChangeStatus manifest(Attributor &A) override {
4238     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4239 
4240     if (!SimplifiedAssociatedValue.hasValue() ||
4241         !SimplifiedAssociatedValue.getValue())
4242       return Changed;
4243 
4244     if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
4245       // We can replace the AssociatedValue with the constant.
4246       Value &V = getAssociatedValue();
4247       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4248         LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
4249                           << "\n");
4250         A.changeValueAfterManifest(V, *C);
4251         Changed = ChangeStatus::CHANGED;
4252       }
4253     }
4254 
4255     return Changed | AAValueSimplify::manifest(A);
4256   }
4257 
4258   /// See AbstractState::indicatePessimisticFixpoint(...).
4259   ChangeStatus indicatePessimisticFixpoint() override {
4260     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4261     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4262     SimplifiedAssociatedValue = &getAssociatedValue();
4263     indicateOptimisticFixpoint();
4264     return ChangeStatus::CHANGED;
4265   }
4266 
4267 protected:
4268   // An assumed simplified value. Initially, it is set to Optional::None, which
4269   // means that the value is not clear under current assumption. If in the
4270   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4271   // returns orignal associated value.
4272   Optional<Value *> SimplifiedAssociatedValue;
4273 };
4274 
4275 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4276   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4277 
4278   void initialize(Attributor &A) override {
4279     AAValueSimplifyImpl::initialize(A);
4280     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
4281       indicatePessimisticFixpoint();
4282     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4283                 /* IgnoreSubsumingPositions */ true))
4284       indicatePessimisticFixpoint();
4285   }
4286 
4287   /// See AbstractAttribute::updateImpl(...).
4288   ChangeStatus updateImpl(Attributor &A) override {
4289     // Byval is only replacable if it is readonly otherwise we would write into
4290     // the replaced value and not the copy that byval creates implicitly.
4291     Argument *Arg = getAssociatedArgument();
4292     if (Arg->hasByValAttr()) {
4293       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4294       if (!MemAA.isAssumedReadOnly())
4295         return indicatePessimisticFixpoint();
4296     }
4297 
4298     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4299 
4300     auto PredForCallSite = [&](AbstractCallSite ACS) {
4301       // Check if we have an associated argument or not (which can happen for
4302       // callback calls).
4303       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
4304       if (!ArgOp)
4305         return false;
4306       // We can only propagate thread independent values through callbacks.
4307       // This is different to direct/indirect call sites because for them we
4308       // know the thread executing the caller and callee is the same. For
4309       // callbacks this is not guaranteed, thus a thread dependent value could
4310       // be different for the caller and callee, making it invalid to propagate.
4311       if (ACS.isCallbackCall())
4312         if (auto *C = dyn_cast<Constant>(ArgOp))
4313           if (C->isThreadDependent())
4314             return false;
4315       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
4316     };
4317 
4318     if (!A.checkForAllCallSites(PredForCallSite, *this, true))
4319       if (!askSimplifiedValueForAAValueConstantRange(A))
4320         return indicatePessimisticFixpoint();
4321 
4322     // If a candicate was found in this update, return CHANGED.
4323     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4324                ? ChangeStatus::UNCHANGED
4325                : ChangeStatus ::CHANGED;
4326   }
4327 
4328   /// See AbstractAttribute::trackStatistics()
4329   void trackStatistics() const override {
4330     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4331   }
4332 };
4333 
4334 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4335   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4336 
4337   /// See AbstractAttribute::updateImpl(...).
4338   ChangeStatus updateImpl(Attributor &A) override {
4339     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4340 
4341     auto PredForReturned = [&](Value &V) {
4342       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4343     };
4344 
4345     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4346       if (!askSimplifiedValueForAAValueConstantRange(A))
4347         return indicatePessimisticFixpoint();
4348 
4349     // If a candicate was found in this update, return CHANGED.
4350     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4351                ? ChangeStatus::UNCHANGED
4352                : ChangeStatus ::CHANGED;
4353   }
4354   /// See AbstractAttribute::trackStatistics()
4355   void trackStatistics() const override {
4356     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4357   }
4358 };
4359 
4360 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4361   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4362 
4363   /// See AbstractAttribute::initialize(...).
4364   void initialize(Attributor &A) override {
4365     Value &V = getAnchorValue();
4366 
4367     // TODO: add other stuffs
4368     if (isa<Constant>(V))
4369       indicatePessimisticFixpoint();
4370   }
4371 
4372   /// See AbstractAttribute::updateImpl(...).
4373   ChangeStatus updateImpl(Attributor &A) override {
4374     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4375 
4376     auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
4377       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4378       if (!Stripped && this == &AA) {
4379         // TODO: Look the instruction and check recursively.
4380 
4381         LLVM_DEBUG(
4382             dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
4383                    << V << "\n");
4384         return false;
4385       }
4386       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4387     };
4388 
4389     if (!genericValueTraversal<AAValueSimplify, BooleanState>(
4390             A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
4391             VisitValueCB))
4392       if (!askSimplifiedValueForAAValueConstantRange(A))
4393         return indicatePessimisticFixpoint();
4394 
4395     // If a candicate was found in this update, return CHANGED.
4396 
4397     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4398                ? ChangeStatus::UNCHANGED
4399                : ChangeStatus ::CHANGED;
4400   }
4401 
4402   /// See AbstractAttribute::trackStatistics()
4403   void trackStatistics() const override {
4404     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4405   }
4406 };
4407 
4408 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4409   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4410 
4411   /// See AbstractAttribute::initialize(...).
4412   void initialize(Attributor &A) override {
4413     SimplifiedAssociatedValue = &getAnchorValue();
4414     indicateOptimisticFixpoint();
4415   }
4416   /// See AbstractAttribute::initialize(...).
4417   ChangeStatus updateImpl(Attributor &A) override {
4418     llvm_unreachable(
4419         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4420   }
4421   /// See AbstractAttribute::trackStatistics()
4422   void trackStatistics() const override {
4423     STATS_DECLTRACK_FN_ATTR(value_simplify)
4424   }
4425 };
4426 
4427 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4428   AAValueSimplifyCallSite(const IRPosition &IRP)
4429       : AAValueSimplifyFunction(IRP) {}
4430   /// See AbstractAttribute::trackStatistics()
4431   void trackStatistics() const override {
4432     STATS_DECLTRACK_CS_ATTR(value_simplify)
4433   }
4434 };
4435 
4436 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4437   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4438       : AAValueSimplifyReturned(IRP) {}
4439 
4440   void trackStatistics() const override {
4441     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4442   }
4443 };
4444 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4445   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4446       : AAValueSimplifyFloating(IRP) {}
4447 
4448   void trackStatistics() const override {
4449     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4450   }
4451 };
4452 
4453 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4454 struct AAHeapToStackImpl : public AAHeapToStack {
4455   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4456 
4457   const std::string getAsStr() const override {
4458     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4459   }
4460 
4461   ChangeStatus manifest(Attributor &A) override {
4462     assert(getState().isValidState() &&
4463            "Attempted to manifest an invalid state!");
4464 
4465     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4466     Function *F = getAssociatedFunction();
4467     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4468 
4469     for (Instruction *MallocCall : MallocCalls) {
4470       // This malloc cannot be replaced.
4471       if (BadMallocCalls.count(MallocCall))
4472         continue;
4473 
4474       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4475         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4476         A.deleteAfterManifest(*FreeCall);
4477         HasChanged = ChangeStatus::CHANGED;
4478       }
4479 
4480       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4481                         << "\n");
4482 
4483       Constant *Size;
4484       if (isCallocLikeFn(MallocCall, TLI)) {
4485         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4486         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4487         APInt TotalSize = SizeT->getValue() * Num->getValue();
4488         Size =
4489             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4490       } else {
4491         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4492       }
4493 
4494       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4495       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4496                                        Size, "", MallocCall->getNextNode());
4497 
4498       if (AI->getType() != MallocCall->getType())
4499         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4500                              AI->getNextNode());
4501 
4502       replaceAllInstructionUsesWith(*MallocCall, *AI);
4503 
4504       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4505         auto *NBB = II->getNormalDest();
4506         BranchInst::Create(NBB, MallocCall->getParent());
4507         A.deleteAfterManifest(*MallocCall);
4508       } else {
4509         A.deleteAfterManifest(*MallocCall);
4510       }
4511 
4512       if (isCallocLikeFn(MallocCall, TLI)) {
4513         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4514                                    AI->getNextNode());
4515         Value *Ops[] = {
4516             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4517             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4518 
4519         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4520         Module *M = F->getParent();
4521         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4522         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4523       }
4524       HasChanged = ChangeStatus::CHANGED;
4525     }
4526 
4527     return HasChanged;
4528   }
4529 
4530   /// Collection of all malloc calls in a function.
4531   SmallSetVector<Instruction *, 4> MallocCalls;
4532 
4533   /// Collection of malloc calls that cannot be converted.
4534   DenseSet<const Instruction *> BadMallocCalls;
4535 
4536   /// A map for each malloc call to the set of associated free calls.
4537   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4538 
4539   ChangeStatus updateImpl(Attributor &A) override;
4540 };
4541 
4542 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4543   const Function *F = getAssociatedFunction();
4544   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4545 
4546   MustBeExecutedContextExplorer &Explorer =
4547       A.getInfoCache().getMustBeExecutedContextExplorer();
4548 
4549   auto FreeCheck = [&](Instruction &I) {
4550     const auto &Frees = FreesForMalloc.lookup(&I);
4551     if (Frees.size() != 1)
4552       return false;
4553     Instruction *UniqueFree = *Frees.begin();
4554     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4555   };
4556 
4557   auto UsesCheck = [&](Instruction &I) {
4558     bool ValidUsesOnly = true;
4559     bool MustUse = true;
4560     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4561       Instruction *UserI = cast<Instruction>(U.getUser());
4562       if (isa<LoadInst>(UserI))
4563         return true;
4564       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4565         if (SI->getValueOperand() == U.get()) {
4566           LLVM_DEBUG(dbgs()
4567                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4568           ValidUsesOnly = false;
4569         } else {
4570           // A store into the malloc'ed memory is fine.
4571         }
4572         return true;
4573       }
4574       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4575         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4576           return true;
4577         // Record malloc.
4578         if (isFreeCall(UserI, TLI)) {
4579           if (MustUse) {
4580             FreesForMalloc[&I].insert(UserI);
4581           } else {
4582             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4583                               << *UserI << "\n");
4584             ValidUsesOnly = false;
4585           }
4586           return true;
4587         }
4588 
4589         unsigned ArgNo = CB->getArgOperandNo(&U);
4590 
4591         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4592             *this, IRPosition::callsite_argument(*CB, ArgNo));
4593 
4594         // If a callsite argument use is nofree, we are fine.
4595         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4596             *this, IRPosition::callsite_argument(*CB, ArgNo));
4597 
4598         if (!NoCaptureAA.isAssumedNoCapture() ||
4599             !ArgNoFreeAA.isAssumedNoFree()) {
4600           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4601           ValidUsesOnly = false;
4602         }
4603         return true;
4604       }
4605 
4606       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4607           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4608         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4609         Follow = true;
4610         return true;
4611       }
4612       // Unknown user for which we can not track uses further (in a way that
4613       // makes sense).
4614       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4615       ValidUsesOnly = false;
4616       return true;
4617     };
4618     A.checkForAllUses(Pred, *this, I);
4619     return ValidUsesOnly;
4620   };
4621 
4622   auto MallocCallocCheck = [&](Instruction &I) {
4623     if (BadMallocCalls.count(&I))
4624       return true;
4625 
4626     bool IsMalloc = isMallocLikeFn(&I, TLI);
4627     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4628     if (!IsMalloc && !IsCalloc) {
4629       BadMallocCalls.insert(&I);
4630       return true;
4631     }
4632 
4633     if (IsMalloc) {
4634       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4635         if (Size->getValue().ule(MaxHeapToStackSize))
4636           if (UsesCheck(I) || FreeCheck(I)) {
4637             MallocCalls.insert(&I);
4638             return true;
4639           }
4640     } else if (IsCalloc) {
4641       bool Overflow = false;
4642       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4643         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4644           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4645                   .ule(MaxHeapToStackSize))
4646             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4647               MallocCalls.insert(&I);
4648               return true;
4649             }
4650     }
4651 
4652     BadMallocCalls.insert(&I);
4653     return true;
4654   };
4655 
4656   size_t NumBadMallocs = BadMallocCalls.size();
4657 
4658   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4659 
4660   if (NumBadMallocs != BadMallocCalls.size())
4661     return ChangeStatus::CHANGED;
4662 
4663   return ChangeStatus::UNCHANGED;
4664 }
4665 
4666 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4667   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4668 
4669   /// See AbstractAttribute::trackStatistics()
4670   void trackStatistics() const override {
4671     STATS_DECL(MallocCalls, Function,
4672                "Number of malloc calls converted to allocas");
4673     for (auto *C : MallocCalls)
4674       if (!BadMallocCalls.count(C))
4675         ++BUILD_STAT_NAME(MallocCalls, Function);
4676   }
4677 };
4678 
4679 /// -------------------- Memory Behavior Attributes ----------------------------
4680 /// Includes read-none, read-only, and write-only.
4681 /// ----------------------------------------------------------------------------
4682 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
4683   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
4684 
4685   /// See AbstractAttribute::initialize(...).
4686   void initialize(Attributor &A) override {
4687     intersectAssumedBits(BEST_STATE);
4688     getKnownStateFromValue(getIRPosition(), getState());
4689     IRAttribute::initialize(A);
4690   }
4691 
4692   /// Return the memory behavior information encoded in the IR for \p IRP.
4693   static void getKnownStateFromValue(const IRPosition &IRP,
4694                                      BitIntegerState &State,
4695                                      bool IgnoreSubsumingPositions = false) {
4696     SmallVector<Attribute, 2> Attrs;
4697     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
4698     for (const Attribute &Attr : Attrs) {
4699       switch (Attr.getKindAsEnum()) {
4700       case Attribute::ReadNone:
4701         State.addKnownBits(NO_ACCESSES);
4702         break;
4703       case Attribute::ReadOnly:
4704         State.addKnownBits(NO_WRITES);
4705         break;
4706       case Attribute::WriteOnly:
4707         State.addKnownBits(NO_READS);
4708         break;
4709       default:
4710         llvm_unreachable("Unexpcted attribute!");
4711       }
4712     }
4713 
4714     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
4715       if (!I->mayReadFromMemory())
4716         State.addKnownBits(NO_READS);
4717       if (!I->mayWriteToMemory())
4718         State.addKnownBits(NO_WRITES);
4719     }
4720   }
4721 
4722   /// See AbstractAttribute::getDeducedAttributes(...).
4723   void getDeducedAttributes(LLVMContext &Ctx,
4724                             SmallVectorImpl<Attribute> &Attrs) const override {
4725     assert(Attrs.size() == 0);
4726     if (isAssumedReadNone())
4727       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
4728     else if (isAssumedReadOnly())
4729       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
4730     else if (isAssumedWriteOnly())
4731       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
4732     assert(Attrs.size() <= 1);
4733   }
4734 
4735   /// See AbstractAttribute::manifest(...).
4736   ChangeStatus manifest(Attributor &A) override {
4737     const IRPosition &IRP = getIRPosition();
4738 
4739     // Check if we would improve the existing attributes first.
4740     SmallVector<Attribute, 4> DeducedAttrs;
4741     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
4742     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
4743           return IRP.hasAttr(Attr.getKindAsEnum(),
4744                              /* IgnoreSubsumingPositions */ true);
4745         }))
4746       return ChangeStatus::UNCHANGED;
4747 
4748     // Clear existing attributes.
4749     IRP.removeAttrs(AttrKinds);
4750 
4751     // Use the generic manifest method.
4752     return IRAttribute::manifest(A);
4753   }
4754 
4755   /// See AbstractState::getAsStr().
4756   const std::string getAsStr() const override {
4757     if (isAssumedReadNone())
4758       return "readnone";
4759     if (isAssumedReadOnly())
4760       return "readonly";
4761     if (isAssumedWriteOnly())
4762       return "writeonly";
4763     return "may-read/write";
4764   }
4765 
4766   /// The set of IR attributes AAMemoryBehavior deals with.
4767   static const Attribute::AttrKind AttrKinds[3];
4768 };
4769 
4770 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
4771     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
4772 
4773 /// Memory behavior attribute for a floating value.
4774 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
4775   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4776 
4777   /// See AbstractAttribute::initialize(...).
4778   void initialize(Attributor &A) override {
4779     AAMemoryBehaviorImpl::initialize(A);
4780     // Initialize the use vector with all direct uses of the associated value.
4781     for (const Use &U : getAssociatedValue().uses())
4782       Uses.insert(&U);
4783   }
4784 
4785   /// See AbstractAttribute::updateImpl(...).
4786   ChangeStatus updateImpl(Attributor &A) override;
4787 
4788   /// See AbstractAttribute::trackStatistics()
4789   void trackStatistics() const override {
4790     if (isAssumedReadNone())
4791       STATS_DECLTRACK_FLOATING_ATTR(readnone)
4792     else if (isAssumedReadOnly())
4793       STATS_DECLTRACK_FLOATING_ATTR(readonly)
4794     else if (isAssumedWriteOnly())
4795       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
4796   }
4797 
4798 private:
4799   /// Return true if users of \p UserI might access the underlying
4800   /// variable/location described by \p U and should therefore be analyzed.
4801   bool followUsersOfUseIn(Attributor &A, const Use *U,
4802                           const Instruction *UserI);
4803 
4804   /// Update the state according to the effect of use \p U in \p UserI.
4805   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
4806 
4807 protected:
4808   /// Container for (transitive) uses of the associated argument.
4809   SetVector<const Use *> Uses;
4810 };
4811 
4812 /// Memory behavior attribute for function argument.
4813 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
4814   AAMemoryBehaviorArgument(const IRPosition &IRP)
4815       : AAMemoryBehaviorFloating(IRP) {}
4816 
4817   /// See AbstractAttribute::initialize(...).
4818   void initialize(Attributor &A) override {
4819     intersectAssumedBits(BEST_STATE);
4820     const IRPosition &IRP = getIRPosition();
4821     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
4822     // can query it when we use has/getAttr. That would allow us to reuse the
4823     // initialize of the base class here.
4824     bool HasByVal =
4825         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
4826     getKnownStateFromValue(IRP, getState(),
4827                            /* IgnoreSubsumingPositions */ HasByVal);
4828 
4829     // Initialize the use vector with all direct uses of the associated value.
4830     Argument *Arg = getAssociatedArgument();
4831     if (!Arg || !Arg->getParent()->hasExactDefinition()) {
4832       indicatePessimisticFixpoint();
4833     } else {
4834       // Initialize the use vector with all direct uses of the associated value.
4835       for (const Use &U : Arg->uses())
4836         Uses.insert(&U);
4837     }
4838   }
4839 
4840   ChangeStatus manifest(Attributor &A) override {
4841     // TODO: From readattrs.ll: "inalloca parameters are always
4842     //                           considered written"
4843     if (hasAttr({Attribute::InAlloca})) {
4844       removeKnownBits(NO_WRITES);
4845       removeAssumedBits(NO_WRITES);
4846     }
4847     return AAMemoryBehaviorFloating::manifest(A);
4848   }
4849 
4850   /// See AbstractAttribute::trackStatistics()
4851   void trackStatistics() const override {
4852     if (isAssumedReadNone())
4853       STATS_DECLTRACK_ARG_ATTR(readnone)
4854     else if (isAssumedReadOnly())
4855       STATS_DECLTRACK_ARG_ATTR(readonly)
4856     else if (isAssumedWriteOnly())
4857       STATS_DECLTRACK_ARG_ATTR(writeonly)
4858   }
4859 };
4860 
4861 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
4862   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
4863       : AAMemoryBehaviorArgument(IRP) {}
4864 
4865   /// See AbstractAttribute::initialize(...).
4866   void initialize(Attributor &A) override {
4867     if (Argument *Arg = getAssociatedArgument()) {
4868       if (Arg->hasByValAttr()) {
4869         addKnownBits(NO_WRITES);
4870         removeKnownBits(NO_READS);
4871         removeAssumedBits(NO_READS);
4872       }
4873     } else {
4874     }
4875     AAMemoryBehaviorArgument::initialize(A);
4876   }
4877 
4878   /// See AbstractAttribute::updateImpl(...).
4879   ChangeStatus updateImpl(Attributor &A) override {
4880     // TODO: Once we have call site specific value information we can provide
4881     //       call site specific liveness liveness information and then it makes
4882     //       sense to specialize attributes for call sites arguments instead of
4883     //       redirecting requests to the callee argument.
4884     Argument *Arg = getAssociatedArgument();
4885     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4886     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
4887     return clampStateAndIndicateChange(
4888         getState(),
4889         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
4890   }
4891 
4892   /// See AbstractAttribute::trackStatistics()
4893   void trackStatistics() const override {
4894     if (isAssumedReadNone())
4895       STATS_DECLTRACK_CSARG_ATTR(readnone)
4896     else if (isAssumedReadOnly())
4897       STATS_DECLTRACK_CSARG_ATTR(readonly)
4898     else if (isAssumedWriteOnly())
4899       STATS_DECLTRACK_CSARG_ATTR(writeonly)
4900   }
4901 };
4902 
4903 /// Memory behavior attribute for a call site return position.
4904 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
4905   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
4906       : AAMemoryBehaviorFloating(IRP) {}
4907 
4908   /// See AbstractAttribute::manifest(...).
4909   ChangeStatus manifest(Attributor &A) override {
4910     // We do not annotate returned values.
4911     return ChangeStatus::UNCHANGED;
4912   }
4913 
4914   /// See AbstractAttribute::trackStatistics()
4915   void trackStatistics() const override {}
4916 };
4917 
4918 /// An AA to represent the memory behavior function attributes.
4919 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
4920   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4921 
4922   /// See AbstractAttribute::updateImpl(Attributor &A).
4923   virtual ChangeStatus updateImpl(Attributor &A) override;
4924 
4925   /// See AbstractAttribute::manifest(...).
4926   ChangeStatus manifest(Attributor &A) override {
4927     Function &F = cast<Function>(getAnchorValue());
4928     if (isAssumedReadNone()) {
4929       F.removeFnAttr(Attribute::ArgMemOnly);
4930       F.removeFnAttr(Attribute::InaccessibleMemOnly);
4931       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
4932     }
4933     return AAMemoryBehaviorImpl::manifest(A);
4934   }
4935 
4936   /// See AbstractAttribute::trackStatistics()
4937   void trackStatistics() const override {
4938     if (isAssumedReadNone())
4939       STATS_DECLTRACK_FN_ATTR(readnone)
4940     else if (isAssumedReadOnly())
4941       STATS_DECLTRACK_FN_ATTR(readonly)
4942     else if (isAssumedWriteOnly())
4943       STATS_DECLTRACK_FN_ATTR(writeonly)
4944   }
4945 };
4946 
4947 /// AAMemoryBehavior attribute for call sites.
4948 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
4949   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
4950 
4951   /// See AbstractAttribute::initialize(...).
4952   void initialize(Attributor &A) override {
4953     AAMemoryBehaviorImpl::initialize(A);
4954     Function *F = getAssociatedFunction();
4955     if (!F || !F->hasExactDefinition())
4956       indicatePessimisticFixpoint();
4957   }
4958 
4959   /// See AbstractAttribute::updateImpl(...).
4960   ChangeStatus updateImpl(Attributor &A) override {
4961     // TODO: Once we have call site specific value information we can provide
4962     //       call site specific liveness liveness information and then it makes
4963     //       sense to specialize attributes for call sites arguments instead of
4964     //       redirecting requests to the callee argument.
4965     Function *F = getAssociatedFunction();
4966     const IRPosition &FnPos = IRPosition::function(*F);
4967     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
4968     return clampStateAndIndicateChange(
4969         getState(),
4970         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
4971   }
4972 
4973   /// See AbstractAttribute::trackStatistics()
4974   void trackStatistics() const override {
4975     if (isAssumedReadNone())
4976       STATS_DECLTRACK_CS_ATTR(readnone)
4977     else if (isAssumedReadOnly())
4978       STATS_DECLTRACK_CS_ATTR(readonly)
4979     else if (isAssumedWriteOnly())
4980       STATS_DECLTRACK_CS_ATTR(writeonly)
4981   }
4982 };
4983 } // namespace
4984 
4985 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
4986 
4987   // The current assumed state used to determine a change.
4988   auto AssumedState = getAssumed();
4989 
4990   auto CheckRWInst = [&](Instruction &I) {
4991     // If the instruction has an own memory behavior state, use it to restrict
4992     // the local state. No further analysis is required as the other memory
4993     // state is as optimistic as it gets.
4994     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
4995       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
4996           *this, IRPosition::callsite_function(ICS));
4997       intersectAssumedBits(MemBehaviorAA.getAssumed());
4998       return !isAtFixpoint();
4999     }
5000 
5001     // Remove access kind modifiers if necessary.
5002     if (I.mayReadFromMemory())
5003       removeAssumedBits(NO_READS);
5004     if (I.mayWriteToMemory())
5005       removeAssumedBits(NO_WRITES);
5006     return !isAtFixpoint();
5007   };
5008 
5009   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5010     return indicatePessimisticFixpoint();
5011 
5012   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5013                                         : ChangeStatus::UNCHANGED;
5014 }
5015 
5016 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5017 
5018   const IRPosition &IRP = getIRPosition();
5019   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5020   AAMemoryBehavior::StateType &S = getState();
5021 
5022   // First, check the function scope. We take the known information and we avoid
5023   // work if the assumed information implies the current assumed information for
5024   // this attribute. This is a valid for all but byval arguments.
5025   Argument *Arg = IRP.getAssociatedArgument();
5026   AAMemoryBehavior::base_t FnMemAssumedState =
5027       AAMemoryBehavior::StateType::getWorstState();
5028   if (!Arg || !Arg->hasByValAttr()) {
5029     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5030         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5031     FnMemAssumedState = FnMemAA.getAssumed();
5032     S.addKnownBits(FnMemAA.getKnown());
5033     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5034       return ChangeStatus::UNCHANGED;
5035   }
5036 
5037   // Make sure the value is not captured (except through "return"), if
5038   // it is, any information derived would be irrelevant anyway as we cannot
5039   // check the potential aliases introduced by the capture. However, no need
5040   // to fall back to anythign less optimistic than the function state.
5041   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5042       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5043   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5044     S.intersectAssumedBits(FnMemAssumedState);
5045     return ChangeStatus::CHANGED;
5046   }
5047 
5048   // The current assumed state used to determine a change.
5049   auto AssumedState = S.getAssumed();
5050 
5051   // Liveness information to exclude dead users.
5052   // TODO: Take the FnPos once we have call site specific liveness information.
5053   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5054       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5055       /* TrackDependence */ false);
5056 
5057   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5058   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5059     const Use *U = Uses[i];
5060     Instruction *UserI = cast<Instruction>(U->getUser());
5061     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5062                       << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
5063                       << "]\n");
5064     if (LivenessAA.isAssumedDead(UserI)) {
5065       A.recordDependence(LivenessAA, *this, DepClassTy::OPTIONAL);
5066       continue;
5067     }
5068 
5069     // Check if the users of UserI should also be visited.
5070     if (followUsersOfUseIn(A, U, UserI))
5071       for (const Use &UserIUse : UserI->uses())
5072         Uses.insert(&UserIUse);
5073 
5074     // If UserI might touch memory we analyze the use in detail.
5075     if (UserI->mayReadOrWriteMemory())
5076       analyzeUseIn(A, U, UserI);
5077   }
5078 
5079   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5080                                         : ChangeStatus::UNCHANGED;
5081 }
5082 
5083 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5084                                                   const Instruction *UserI) {
5085   // The loaded value is unrelated to the pointer argument, no need to
5086   // follow the users of the load.
5087   if (isa<LoadInst>(UserI))
5088     return false;
5089 
5090   // By default we follow all uses assuming UserI might leak information on U,
5091   // we have special handling for call sites operands though.
5092   ImmutableCallSite ICS(UserI);
5093   if (!ICS || !ICS.isArgOperand(U))
5094     return true;
5095 
5096   // If the use is a call argument known not to be captured, the users of
5097   // the call do not need to be visited because they have to be unrelated to
5098   // the input. Note that this check is not trivial even though we disallow
5099   // general capturing of the underlying argument. The reason is that the
5100   // call might the argument "through return", which we allow and for which we
5101   // need to check call users.
5102   if (U->get()->getType()->isPointerTy()) {
5103     unsigned ArgNo = ICS.getArgumentNo(U);
5104     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5105         *this, IRPosition::callsite_argument(ICS, ArgNo),
5106         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5107     return !ArgNoCaptureAA.isAssumedNoCapture();
5108   }
5109 
5110   return true;
5111 }
5112 
5113 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5114                                             const Instruction *UserI) {
5115   assert(UserI->mayReadOrWriteMemory());
5116 
5117   switch (UserI->getOpcode()) {
5118   default:
5119     // TODO: Handle all atomics and other side-effect operations we know of.
5120     break;
5121   case Instruction::Load:
5122     // Loads cause the NO_READS property to disappear.
5123     removeAssumedBits(NO_READS);
5124     return;
5125 
5126   case Instruction::Store:
5127     // Stores cause the NO_WRITES property to disappear if the use is the
5128     // pointer operand. Note that we do assume that capturing was taken care of
5129     // somewhere else.
5130     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5131       removeAssumedBits(NO_WRITES);
5132     return;
5133 
5134   case Instruction::Call:
5135   case Instruction::CallBr:
5136   case Instruction::Invoke: {
5137     // For call sites we look at the argument memory behavior attribute (this
5138     // could be recursive!) in order to restrict our own state.
5139     ImmutableCallSite ICS(UserI);
5140 
5141     // Give up on operand bundles.
5142     if (ICS.isBundleOperand(U)) {
5143       indicatePessimisticFixpoint();
5144       return;
5145     }
5146 
5147     // Calling a function does read the function pointer, maybe write it if the
5148     // function is self-modifying.
5149     if (ICS.isCallee(U)) {
5150       removeAssumedBits(NO_READS);
5151       break;
5152     }
5153 
5154     // Adjust the possible access behavior based on the information on the
5155     // argument.
5156     IRPosition Pos;
5157     if (U->get()->getType()->isPointerTy())
5158       Pos = IRPosition::callsite_argument(ICS, ICS.getArgumentNo(U));
5159     else
5160       Pos = IRPosition::callsite_function(ICS);
5161     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5162         *this, Pos,
5163         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5164     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5165     // and at least "known".
5166     intersectAssumedBits(MemBehaviorAA.getAssumed());
5167     return;
5168   }
5169   };
5170 
5171   // Generally, look at the "may-properties" and adjust the assumed state if we
5172   // did not trigger special handling before.
5173   if (UserI->mayReadFromMemory())
5174     removeAssumedBits(NO_READS);
5175   if (UserI->mayWriteToMemory())
5176     removeAssumedBits(NO_WRITES);
5177 }
5178 /// ------------------ Value Constant Range Attribute -------------------------
5179 
5180 struct AAValueConstantRangeImpl : AAValueConstantRange {
5181   using StateType = IntegerRangeState;
5182   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
5183 
5184   /// See AbstractAttribute::getAsStr().
5185   const std::string getAsStr() const override {
5186     std::string Str;
5187     llvm::raw_string_ostream OS(Str);
5188     OS << "range(" << getBitWidth() << ")<";
5189     getKnown().print(OS);
5190     OS << " / ";
5191     getAssumed().print(OS);
5192     OS << ">";
5193     return OS.str();
5194   }
5195 
5196   /// Helper function to get a SCEV expr for the associated value at program
5197   /// point \p I.
5198   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
5199     if (!getAnchorScope())
5200       return nullptr;
5201 
5202     ScalarEvolution *SE =
5203         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
5204             *getAnchorScope());
5205 
5206     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
5207         *getAnchorScope());
5208 
5209     if (!SE || !LI)
5210       return nullptr;
5211 
5212     const SCEV *S = SE->getSCEV(&getAssociatedValue());
5213     if (!I)
5214       return S;
5215 
5216     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
5217   }
5218 
5219   /// Helper function to get a range from SCEV for the associated value at
5220   /// program point \p I.
5221   ConstantRange getConstantRangeFromSCEV(Attributor &A,
5222                                          const Instruction *I = nullptr) const {
5223     if (!getAnchorScope())
5224       return getWorstState(getBitWidth());
5225 
5226     ScalarEvolution *SE =
5227         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
5228             *getAnchorScope());
5229 
5230     const SCEV *S = getSCEV(A, I);
5231     if (!SE || !S)
5232       return getWorstState(getBitWidth());
5233 
5234     return SE->getUnsignedRange(S);
5235   }
5236 
5237   /// Helper function to get a range from LVI for the associated value at
5238   /// program point \p I.
5239   ConstantRange
5240   getConstantRangeFromLVI(Attributor &A,
5241                           const Instruction *CtxI = nullptr) const {
5242     if (!getAnchorScope())
5243       return getWorstState(getBitWidth());
5244 
5245     LazyValueInfo *LVI =
5246         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
5247             *getAnchorScope());
5248 
5249     if (!LVI || !CtxI)
5250       return getWorstState(getBitWidth());
5251     return LVI->getConstantRange(&getAssociatedValue(),
5252                                  const_cast<BasicBlock *>(CtxI->getParent()),
5253                                  const_cast<Instruction *>(CtxI));
5254   }
5255 
5256   /// See AAValueConstantRange::getKnownConstantRange(..).
5257   ConstantRange
5258   getKnownConstantRange(Attributor &A,
5259                         const Instruction *CtxI = nullptr) const override {
5260     if (!CtxI || CtxI == getCtxI())
5261       return getKnown();
5262 
5263     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
5264     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
5265     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
5266   }
5267 
5268   /// See AAValueConstantRange::getAssumedConstantRange(..).
5269   ConstantRange
5270   getAssumedConstantRange(Attributor &A,
5271                           const Instruction *CtxI = nullptr) const override {
5272     // TODO: Make SCEV use Attributor assumption.
5273     //       We may be able to bound a variable range via assumptions in
5274     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
5275     //       evolve to x^2 + x, then we can say that y is in [2, 12].
5276 
5277     if (!CtxI || CtxI == getCtxI())
5278       return getAssumed();
5279 
5280     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
5281     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
5282     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
5283   }
5284 
5285   /// See AbstractAttribute::initialize(..).
5286   void initialize(Attributor &A) override {
5287     // Intersect a range given by SCEV.
5288     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
5289 
5290     // Intersect a range given by LVI.
5291     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
5292   }
5293 
5294   /// Helper function to create MDNode for range metadata.
5295   static MDNode *
5296   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
5297                             const ConstantRange &AssumedConstantRange) {
5298     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
5299                                   Ty, AssumedConstantRange.getLower())),
5300                               ConstantAsMetadata::get(ConstantInt::get(
5301                                   Ty, AssumedConstantRange.getUpper()))};
5302     return MDNode::get(Ctx, LowAndHigh);
5303   }
5304 
5305   /// Return true if \p Assumed is included in \p KnownRanges.
5306   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
5307 
5308     if (Assumed.isFullSet())
5309       return false;
5310 
5311     if (!KnownRanges)
5312       return true;
5313 
5314     // If multiple ranges are annotated in IR, we give up to annotate assumed
5315     // range for now.
5316 
5317     // TODO:  If there exists a known range which containts assumed range, we
5318     // can say assumed range is better.
5319     if (KnownRanges->getNumOperands() > 2)
5320       return false;
5321 
5322     ConstantInt *Lower =
5323         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
5324     ConstantInt *Upper =
5325         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
5326 
5327     ConstantRange Known(Lower->getValue(), Upper->getValue());
5328     return Known.contains(Assumed) && Known != Assumed;
5329   }
5330 
5331   /// Helper function to set range metadata.
5332   static bool
5333   setRangeMetadataIfisBetterRange(Instruction *I,
5334                                   const ConstantRange &AssumedConstantRange) {
5335     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
5336     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
5337       if (!AssumedConstantRange.isEmptySet()) {
5338         I->setMetadata(LLVMContext::MD_range,
5339                        getMDNodeForConstantRange(I->getType(), I->getContext(),
5340                                                  AssumedConstantRange));
5341         return true;
5342       }
5343     }
5344     return false;
5345   }
5346 
5347   /// See AbstractAttribute::manifest()
5348   ChangeStatus manifest(Attributor &A) override {
5349     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5350     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
5351     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
5352 
5353     auto &V = getAssociatedValue();
5354     if (!AssumedConstantRange.isEmptySet() &&
5355         !AssumedConstantRange.isSingleElement()) {
5356       if (Instruction *I = dyn_cast<Instruction>(&V))
5357         if (isa<CallInst>(I) || isa<LoadInst>(I))
5358           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
5359             Changed = ChangeStatus::CHANGED;
5360     }
5361 
5362     return Changed;
5363   }
5364 };
5365 
5366 struct AAValueConstantRangeArgument final : public AAValueConstantRangeImpl {
5367 
5368   AAValueConstantRangeArgument(const IRPosition &IRP)
5369       : AAValueConstantRangeImpl(IRP) {}
5370 
5371   /// See AbstractAttribute::updateImpl(...).
5372   ChangeStatus updateImpl(Attributor &A) override {
5373     // TODO: Use AAArgumentFromCallSiteArguments
5374 
5375     IntegerRangeState S(getBitWidth());
5376     clampCallSiteArgumentStates<AAValueConstantRange, IntegerRangeState>(
5377         A, *this, S);
5378 
5379     // TODO: If we know we visited all incoming values, thus no are assumed
5380     // dead, we can take the known information from the state T.
5381     return clampStateAndIndicateChange<IntegerRangeState>(this->getState(), S);
5382   }
5383 
5384   /// See AbstractAttribute::trackStatistics()
5385   void trackStatistics() const override {
5386     STATS_DECLTRACK_ARG_ATTR(value_range)
5387   }
5388 };
5389 
5390 struct AAValueConstantRangeReturned : AAValueConstantRangeImpl {
5391   AAValueConstantRangeReturned(const IRPosition &IRP)
5392       : AAValueConstantRangeImpl(IRP) {}
5393 
5394   /// See AbstractAttribute::updateImpl(...).
5395   ChangeStatus updateImpl(Attributor &A) override {
5396     // TODO: Use AAReturnedFromReturnedValues
5397 
5398     // TODO: If we know we visited all returned values, thus no are assumed
5399     // dead, we can take the known information from the state T.
5400 
5401     IntegerRangeState S(getBitWidth());
5402 
5403     clampReturnedValueStates<AAValueConstantRange, IntegerRangeState>(A, *this,
5404                                                                       S);
5405     return clampStateAndIndicateChange<StateType>(this->getState(), S);
5406   }
5407 
5408   /// See AbstractAttribute::trackStatistics()
5409   void trackStatistics() const override {
5410     STATS_DECLTRACK_FNRET_ATTR(value_range)
5411   }
5412 };
5413 
5414 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
5415   AAValueConstantRangeFloating(const IRPosition &IRP)
5416       : AAValueConstantRangeImpl(IRP) {}
5417 
5418   /// See AbstractAttribute::initialize(...).
5419   void initialize(Attributor &A) override {
5420     AAValueConstantRange::initialize(A);
5421     Value &V = getAssociatedValue();
5422 
5423     if (auto *C = dyn_cast<ConstantInt>(&V)) {
5424       unionAssumed(ConstantRange(C->getValue()));
5425       indicateOptimisticFixpoint();
5426       return;
5427     }
5428 
5429     if (isa<UndefValue>(&V)) {
5430       indicateOptimisticFixpoint();
5431       return;
5432     }
5433 
5434     if (auto *I = dyn_cast<Instruction>(&V))
5435       if (isa<BinaryOperator>(I) || isa<CmpInst>(I)) {
5436         Value *LHS = I->getOperand(0);
5437         Value *RHS = I->getOperand(1);
5438 
5439         if (LHS->getType()->isIntegerTy() && RHS->getType()->isIntegerTy())
5440           return;
5441       }
5442 
5443     // If it is a load instruction with range metadata, use it.
5444     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
5445       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
5446         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
5447         return;
5448       }
5449 
5450     // Otherwise we give up.
5451     indicatePessimisticFixpoint();
5452 
5453     LLVM_DEBUG(dbgs() << "[Attributor][AAValueConstantRange] We give up: "
5454                       << getAssociatedValue());
5455   }
5456 
5457   bool calculateBinaryOperator(Attributor &A, BinaryOperator *BinOp,
5458                                IntegerRangeState &T, Instruction *CtxI) {
5459     Value *LHS = BinOp->getOperand(0);
5460     Value *RHS = BinOp->getOperand(1);
5461 
5462     auto &LHSAA =
5463         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
5464     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
5465 
5466     auto &RHSAA =
5467         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
5468     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
5469 
5470     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
5471 
5472     T.unionAssumed(AssumedRange);
5473 
5474     // TODO: Track a known state too.
5475 
5476     return T.isValidState();
5477   }
5478 
5479   bool calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
5480                         Instruction *CtxI) {
5481     Value *LHS = CmpI->getOperand(0);
5482     Value *RHS = CmpI->getOperand(1);
5483 
5484     auto &LHSAA =
5485         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
5486     auto &RHSAA =
5487         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
5488 
5489     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
5490     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
5491 
5492     // If one of them is empty set, we can't decide.
5493     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
5494       return true;
5495 
5496     bool MustTrue = false, MustFalse = false;
5497 
5498     auto AllowedRegion =
5499         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
5500 
5501     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
5502         CmpI->getPredicate(), RHSAARange);
5503 
5504     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
5505       MustFalse = true;
5506 
5507     if (SatisfyingRegion.contains(LHSAARange))
5508       MustTrue = true;
5509 
5510     assert((!MustTrue || !MustFalse) &&
5511            "Either MustTrue or MustFalse should be false!");
5512 
5513     if (MustTrue)
5514       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
5515     else if (MustFalse)
5516       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
5517     else
5518       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
5519 
5520     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
5521                       << " " << RHSAA << "\n");
5522 
5523     // TODO: Track a known state too.
5524     return T.isValidState();
5525   }
5526 
5527   /// See AbstractAttribute::updateImpl(...).
5528   ChangeStatus updateImpl(Attributor &A) override {
5529     Instruction *CtxI = getCtxI();
5530     auto VisitValueCB = [&](Value &V, IntegerRangeState &T,
5531                             bool Stripped) -> bool {
5532       Instruction *I = dyn_cast<Instruction>(&V);
5533       if (!I) {
5534 
5535         // If the value is not instruction, we query AA to Attributor.
5536         const auto &AA =
5537             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
5538 
5539         // Clamp operator is not used to utilize a program point CtxI.
5540         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
5541 
5542         return T.isValidState();
5543       }
5544 
5545       if (auto *BinOp = dyn_cast<BinaryOperator>(I))
5546         return calculateBinaryOperator(A, BinOp, T, CtxI);
5547       else if (auto *CmpI = dyn_cast<CmpInst>(I))
5548         return calculateCmpInst(A, CmpI, T, CtxI);
5549       else {
5550         // Give up with other instructions.
5551         // TODO: Add other instructions
5552 
5553         T.indicatePessimisticFixpoint();
5554         return false;
5555       }
5556     };
5557 
5558     IntegerRangeState T(getBitWidth());
5559 
5560     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
5561             A, getIRPosition(), *this, T, VisitValueCB))
5562       return indicatePessimisticFixpoint();
5563 
5564     return clampStateAndIndicateChange(getState(), T);
5565   }
5566 
5567   /// See AbstractAttribute::trackStatistics()
5568   void trackStatistics() const override {
5569     STATS_DECLTRACK_FLOATING_ATTR(value_range)
5570   }
5571 };
5572 
5573 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
5574   AAValueConstantRangeFunction(const IRPosition &IRP)
5575       : AAValueConstantRangeImpl(IRP) {}
5576 
5577   /// See AbstractAttribute::initialize(...).
5578   ChangeStatus updateImpl(Attributor &A) override {
5579     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
5580                      "not be called");
5581   }
5582 
5583   /// See AbstractAttribute::trackStatistics()
5584   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
5585 };
5586 
5587 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
5588   AAValueConstantRangeCallSite(const IRPosition &IRP)
5589       : AAValueConstantRangeFunction(IRP) {}
5590 
5591   /// See AbstractAttribute::trackStatistics()
5592   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
5593 };
5594 
5595 struct AAValueConstantRangeCallSiteReturned : AAValueConstantRangeReturned {
5596   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
5597       : AAValueConstantRangeReturned(IRP) {}
5598 
5599   /// See AbstractAttribute::initialize(...).
5600   void initialize(Attributor &A) override {
5601     // If it is a load instruction with range metadata, use the metadata.
5602     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
5603       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
5604         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
5605 
5606     AAValueConstantRangeReturned::initialize(A);
5607   }
5608 
5609   /// See AbstractAttribute::trackStatistics()
5610   void trackStatistics() const override {
5611     STATS_DECLTRACK_CSRET_ATTR(value_range)
5612   }
5613 };
5614 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
5615   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
5616       : AAValueConstantRangeFloating(IRP) {}
5617 
5618   /// See AbstractAttribute::trackStatistics()
5619   void trackStatistics() const override {
5620     STATS_DECLTRACK_CSARG_ATTR(value_range)
5621   }
5622 };
5623 /// ----------------------------------------------------------------------------
5624 ///                               Attributor
5625 /// ----------------------------------------------------------------------------
5626 
5627 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
5628                                const AAIsDead *LivenessAA) {
5629   const Instruction *CtxI = AA.getIRPosition().getCtxI();
5630   if (!CtxI)
5631     return false;
5632 
5633   // TODO: Find a good way to utilize fine and coarse grained liveness
5634   // information.
5635   if (!LivenessAA)
5636     LivenessAA =
5637         &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
5638                             /* TrackDependence */ false);
5639 
5640   // Don't check liveness for AAIsDead.
5641   if (&AA == LivenessAA)
5642     return false;
5643 
5644   if (!LivenessAA->isAssumedDead(CtxI))
5645     return false;
5646 
5647   // We actually used liveness information so we have to record a dependence.
5648   recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
5649 
5650   return true;
5651 }
5652 
5653 bool Attributor::checkForAllUses(
5654     const function_ref<bool(const Use &, bool &)> &Pred,
5655     const AbstractAttribute &QueryingAA, const Value &V) {
5656   const IRPosition &IRP = QueryingAA.getIRPosition();
5657   SmallVector<const Use *, 16> Worklist;
5658   SmallPtrSet<const Use *, 16> Visited;
5659 
5660   for (const Use &U : V.uses())
5661     Worklist.push_back(&U);
5662 
5663   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
5664                     << " initial uses to check\n");
5665 
5666   if (Worklist.empty())
5667     return true;
5668 
5669   bool AnyDead = false;
5670   const Function *ScopeFn = IRP.getAnchorScope();
5671   const auto *LivenessAA =
5672       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
5673                                     /* TrackDependence */ false)
5674               : nullptr;
5675 
5676   while (!Worklist.empty()) {
5677     const Use *U = Worklist.pop_back_val();
5678     if (!Visited.insert(U).second)
5679       continue;
5680     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
5681     if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
5682       if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
5683         LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
5684                           << *LivenessAA << "\n");
5685         AnyDead = true;
5686         continue;
5687       }
5688 
5689     bool Follow = false;
5690     if (!Pred(*U, Follow))
5691       return false;
5692     if (!Follow)
5693       continue;
5694     for (const Use &UU : U->getUser()->uses())
5695       Worklist.push_back(&UU);
5696   }
5697 
5698   if (AnyDead)
5699     recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5700 
5701   return true;
5702 }
5703 
5704 bool Attributor::checkForAllCallSites(
5705     const function_ref<bool(AbstractCallSite)> &Pred,
5706     const AbstractAttribute &QueryingAA, bool RequireAllCallSites) {
5707   // We can try to determine information from
5708   // the call sites. However, this is only possible all call sites are known,
5709   // hence the function has internal linkage.
5710   const IRPosition &IRP = QueryingAA.getIRPosition();
5711   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5712   if (!AssociatedFunction) {
5713     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
5714                       << "\n");
5715     return false;
5716   }
5717 
5718   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
5719                               &QueryingAA);
5720 }
5721 
5722 bool Attributor::checkForAllCallSites(
5723     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
5724     bool RequireAllCallSites, const AbstractAttribute *QueryingAA) {
5725   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
5726     LLVM_DEBUG(
5727         dbgs()
5728         << "[Attributor] Function " << Fn.getName()
5729         << " has no internal linkage, hence not all call sites are known\n");
5730     return false;
5731   }
5732 
5733   for (const Use &U : Fn.uses()) {
5734     AbstractCallSite ACS(&U);
5735     if (!ACS) {
5736       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
5737                         << " has non call site use " << *U.get() << " in "
5738                         << *U.getUser() << "\n");
5739       // BlockAddress users are allowed.
5740       if (isa<BlockAddress>(U.getUser()))
5741         continue;
5742       return false;
5743     }
5744 
5745     Instruction *I = ACS.getInstruction();
5746     Function *Caller = I->getFunction();
5747 
5748     const auto *LivenessAA =
5749         lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
5750                               /* TrackDependence */ false);
5751 
5752     // Skip dead calls.
5753     if (LivenessAA && LivenessAA->isAssumedDead(I)) {
5754       // We actually used liveness information so we have to record a
5755       // dependence.
5756       if (QueryingAA)
5757         recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
5758       continue;
5759     }
5760 
5761     const Use *EffectiveUse =
5762         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
5763     if (!ACS.isCallee(EffectiveUse)) {
5764       if (!RequireAllCallSites)
5765         continue;
5766       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
5767                         << " is an invalid use of " << Fn.getName() << "\n");
5768       return false;
5769     }
5770 
5771     if (Pred(ACS))
5772       continue;
5773 
5774     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
5775                       << *ACS.getInstruction() << "\n");
5776     return false;
5777   }
5778 
5779   return true;
5780 }
5781 
5782 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
5783     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
5784         &Pred,
5785     const AbstractAttribute &QueryingAA) {
5786 
5787   const IRPosition &IRP = QueryingAA.getIRPosition();
5788   // Since we need to provide return instructions we have to have an exact
5789   // definition.
5790   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5791   if (!AssociatedFunction)
5792     return false;
5793 
5794   // If this is a call site query we use the call site specific return values
5795   // and liveness information.
5796   // TODO: use the function scope once we have call site AAReturnedValues.
5797   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5798   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
5799   if (!AARetVal.getState().isValidState())
5800     return false;
5801 
5802   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
5803 }
5804 
5805 bool Attributor::checkForAllReturnedValues(
5806     const function_ref<bool(Value &)> &Pred,
5807     const AbstractAttribute &QueryingAA) {
5808 
5809   const IRPosition &IRP = QueryingAA.getIRPosition();
5810   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5811   if (!AssociatedFunction)
5812     return false;
5813 
5814   // TODO: use the function scope once we have call site AAReturnedValues.
5815   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5816   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
5817   if (!AARetVal.getState().isValidState())
5818     return false;
5819 
5820   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
5821       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
5822         return Pred(RV);
5823       });
5824 }
5825 
5826 static bool
5827 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
5828                             const function_ref<bool(Instruction &)> &Pred,
5829                             const AAIsDead *LivenessAA, bool &AnyDead,
5830                             const ArrayRef<unsigned> &Opcodes) {
5831   for (unsigned Opcode : Opcodes) {
5832     for (Instruction *I : OpcodeInstMap[Opcode]) {
5833       // Skip dead instructions.
5834       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
5835         AnyDead = true;
5836         continue;
5837       }
5838 
5839       if (!Pred(*I))
5840         return false;
5841     }
5842   }
5843   return true;
5844 }
5845 
5846 bool Attributor::checkForAllInstructions(
5847     const llvm::function_ref<bool(Instruction &)> &Pred,
5848     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
5849 
5850   const IRPosition &IRP = QueryingAA.getIRPosition();
5851   // Since we need to provide instructions we have to have an exact definition.
5852   const Function *AssociatedFunction = IRP.getAssociatedFunction();
5853   if (!AssociatedFunction)
5854     return false;
5855 
5856   // TODO: use the function scope once we have call site AAReturnedValues.
5857   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5858   const auto &LivenessAA =
5859       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5860   bool AnyDead = false;
5861 
5862   auto &OpcodeInstMap =
5863       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
5864   if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
5865                                    Opcodes))
5866     return false;
5867 
5868   // If we actually used liveness information so we have to record a dependence.
5869   if (AnyDead)
5870     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5871 
5872   return true;
5873 }
5874 
5875 bool Attributor::checkForAllReadWriteInstructions(
5876     const llvm::function_ref<bool(Instruction &)> &Pred,
5877     AbstractAttribute &QueryingAA) {
5878 
5879   const Function *AssociatedFunction =
5880       QueryingAA.getIRPosition().getAssociatedFunction();
5881   if (!AssociatedFunction)
5882     return false;
5883 
5884   // TODO: use the function scope once we have call site AAReturnedValues.
5885   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
5886   const auto &LivenessAA =
5887       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
5888   bool AnyDead = false;
5889 
5890   for (Instruction *I :
5891        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
5892     // Skip dead instructions.
5893     if (LivenessAA.isAssumedDead(I)) {
5894       AnyDead = true;
5895       continue;
5896     }
5897 
5898     if (!Pred(*I))
5899       return false;
5900   }
5901 
5902   // If we actually used liveness information so we have to record a dependence.
5903   if (AnyDead)
5904     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
5905 
5906   return true;
5907 }
5908 
5909 ChangeStatus Attributor::run(Module &M) {
5910   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
5911                     << AllAbstractAttributes.size()
5912                     << " abstract attributes.\n");
5913 
5914   // Now that all abstract attributes are collected and initialized we start
5915   // the abstract analysis.
5916 
5917   unsigned IterationCounter = 1;
5918 
5919   SmallVector<AbstractAttribute *, 64> ChangedAAs;
5920   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
5921   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
5922 
5923   bool RecomputeDependences = false;
5924 
5925   do {
5926     // Remember the size to determine new attributes.
5927     size_t NumAAs = AllAbstractAttributes.size();
5928     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
5929                       << ", Worklist size: " << Worklist.size() << "\n");
5930 
5931     // For invalid AAs we can fix dependent AAs that have a required dependence,
5932     // thereby folding long dependence chains in a single step without the need
5933     // to run updates.
5934     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
5935       AbstractAttribute *InvalidAA = InvalidAAs[u];
5936       auto &QuerriedAAs = QueryMap[InvalidAA];
5937       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
5938                         << QuerriedAAs.RequiredAAs.size() << "/"
5939                         << QuerriedAAs.OptionalAAs.size()
5940                         << " required/optional dependences\n");
5941       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
5942         AbstractState &DOIAAState = DepOnInvalidAA->getState();
5943         DOIAAState.indicatePessimisticFixpoint();
5944         ++NumAttributesFixedDueToRequiredDependences;
5945         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
5946         if (!DOIAAState.isValidState())
5947           InvalidAAs.insert(DepOnInvalidAA);
5948       }
5949       if (!RecomputeDependences)
5950         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5951                         QuerriedAAs.OptionalAAs.end());
5952     }
5953 
5954     // If dependences (=QueryMap) are recomputed we have to look at all abstract
5955     // attributes again, regardless of what changed in the last iteration.
5956     if (RecomputeDependences) {
5957       LLVM_DEBUG(
5958           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
5959       QueryMap.clear();
5960       ChangedAAs.clear();
5961       Worklist.insert(AllAbstractAttributes.begin(),
5962                       AllAbstractAttributes.end());
5963     }
5964 
5965     // Add all abstract attributes that are potentially dependent on one that
5966     // changed to the work list.
5967     for (AbstractAttribute *ChangedAA : ChangedAAs) {
5968       auto &QuerriedAAs = QueryMap[ChangedAA];
5969       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
5970                       QuerriedAAs.OptionalAAs.end());
5971       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
5972                       QuerriedAAs.RequiredAAs.end());
5973     }
5974 
5975     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
5976                       << ", Worklist+Dependent size: " << Worklist.size()
5977                       << "\n");
5978 
5979     // Reset the changed and invalid set.
5980     ChangedAAs.clear();
5981     InvalidAAs.clear();
5982 
5983     // Update all abstract attribute in the work list and record the ones that
5984     // changed.
5985     for (AbstractAttribute *AA : Worklist)
5986       if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
5987         QueriedNonFixAA = false;
5988         if (AA->update(*this) == ChangeStatus::CHANGED) {
5989           ChangedAAs.push_back(AA);
5990           if (!AA->getState().isValidState())
5991             InvalidAAs.insert(AA);
5992         } else if (!QueriedNonFixAA) {
5993           // If the attribute did not query any non-fix information, the state
5994           // will not change and we can indicate that right away.
5995           AA->getState().indicateOptimisticFixpoint();
5996         }
5997       }
5998 
5999     // Check if we recompute the dependences in the next iteration.
6000     RecomputeDependences = (DepRecomputeInterval > 0 &&
6001                             IterationCounter % DepRecomputeInterval == 0);
6002 
6003     // Add attributes to the changed set if they have been created in the last
6004     // iteration.
6005     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
6006                       AllAbstractAttributes.end());
6007 
6008     // Reset the work list and repopulate with the changed abstract attributes.
6009     // Note that dependent ones are added above.
6010     Worklist.clear();
6011     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
6012 
6013   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
6014                                  VerifyMaxFixpointIterations));
6015 
6016   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
6017                     << IterationCounter << "/" << MaxFixpointIterations
6018                     << " iterations\n");
6019 
6020   size_t NumFinalAAs = AllAbstractAttributes.size();
6021 
6022   // Reset abstract arguments not settled in a sound fixpoint by now. This
6023   // happens when we stopped the fixpoint iteration early. Note that only the
6024   // ones marked as "changed" *and* the ones transitively depending on them
6025   // need to be reverted to a pessimistic state. Others might not be in a
6026   // fixpoint state but we can use the optimistic results for them anyway.
6027   SmallPtrSet<AbstractAttribute *, 32> Visited;
6028   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
6029     AbstractAttribute *ChangedAA = ChangedAAs[u];
6030     if (!Visited.insert(ChangedAA).second)
6031       continue;
6032 
6033     AbstractState &State = ChangedAA->getState();
6034     if (!State.isAtFixpoint()) {
6035       State.indicatePessimisticFixpoint();
6036 
6037       NumAttributesTimedOut++;
6038     }
6039 
6040     auto &QuerriedAAs = QueryMap[ChangedAA];
6041     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
6042                       QuerriedAAs.OptionalAAs.end());
6043     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
6044                       QuerriedAAs.RequiredAAs.end());
6045   }
6046 
6047   LLVM_DEBUG({
6048     if (!Visited.empty())
6049       dbgs() << "\n[Attributor] Finalized " << Visited.size()
6050              << " abstract attributes.\n";
6051   });
6052 
6053   unsigned NumManifested = 0;
6054   unsigned NumAtFixpoint = 0;
6055   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
6056   for (AbstractAttribute *AA : AllAbstractAttributes) {
6057     AbstractState &State = AA->getState();
6058 
6059     // If there is not already a fixpoint reached, we can now take the
6060     // optimistic state. This is correct because we enforced a pessimistic one
6061     // on abstract attributes that were transitively dependent on a changed one
6062     // already above.
6063     if (!State.isAtFixpoint())
6064       State.indicateOptimisticFixpoint();
6065 
6066     // If the state is invalid, we do not try to manifest it.
6067     if (!State.isValidState())
6068       continue;
6069 
6070     // Skip dead code.
6071     if (isAssumedDead(*AA, nullptr))
6072       continue;
6073     // Manifest the state and record if we changed the IR.
6074     ChangeStatus LocalChange = AA->manifest(*this);
6075     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
6076       AA->trackStatistics();
6077 
6078     ManifestChange = ManifestChange | LocalChange;
6079 
6080     NumAtFixpoint++;
6081     NumManifested += (LocalChange == ChangeStatus::CHANGED);
6082   }
6083 
6084   (void)NumManifested;
6085   (void)NumAtFixpoint;
6086   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
6087                     << " arguments while " << NumAtFixpoint
6088                     << " were in a valid fixpoint state\n");
6089 
6090   NumAttributesManifested += NumManifested;
6091   NumAttributesValidFixpoint += NumAtFixpoint;
6092 
6093   (void)NumFinalAAs;
6094   assert(
6095       NumFinalAAs == AllAbstractAttributes.size() &&
6096       "Expected the final number of abstract attributes to remain unchanged!");
6097 
6098   // Delete stuff at the end to avoid invalid references and a nice order.
6099   {
6100     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
6101                       << ToBeDeletedFunctions.size() << " functions and "
6102                       << ToBeDeletedBlocks.size() << " blocks and "
6103                       << ToBeDeletedInsts.size() << " instructions and "
6104                       << ToBeChangedUses.size() << " uses\n");
6105 
6106     SmallVector<WeakTrackingVH, 32> DeadInsts;
6107     SmallVector<Instruction *, 32> TerminatorsToFold;
6108 
6109     for (auto &It : ToBeChangedUses) {
6110       Use *U = It.first;
6111       Value *NewV = It.second;
6112       Value *OldV = U->get();
6113       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
6114                         << " instead of " << *OldV << "\n");
6115       U->set(NewV);
6116       if (Instruction *I = dyn_cast<Instruction>(OldV))
6117         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
6118             isInstructionTriviallyDead(I)) {
6119           DeadInsts.push_back(I);
6120         }
6121       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
6122         Instruction *UserI = cast<Instruction>(U->getUser());
6123         if (isa<UndefValue>(NewV)) {
6124           ToBeChangedToUnreachableInsts.insert(UserI);
6125         } else {
6126           TerminatorsToFold.push_back(UserI);
6127         }
6128       }
6129     }
6130     for (auto &V : InvokeWithDeadSuccessor)
6131       if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
6132         bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
6133         bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
6134         bool Invoke2CallAllowed =
6135             !AAIsDeadFunction::mayCatchAsynchronousExceptions(
6136                 *II->getFunction());
6137         assert((UnwindBBIsDead || NormalBBIsDead) &&
6138                "Invoke does not have dead successors!");
6139         BasicBlock *BB = II->getParent();
6140         BasicBlock *NormalDestBB = II->getNormalDest();
6141         if (UnwindBBIsDead) {
6142           Instruction *NormalNextIP = &NormalDestBB->front();
6143           if (Invoke2CallAllowed) {
6144             changeToCall(II);
6145             NormalNextIP = BB->getTerminator();
6146           }
6147           if (NormalBBIsDead)
6148             ToBeChangedToUnreachableInsts.insert(NormalNextIP);
6149         } else {
6150           assert(NormalBBIsDead && "Broken invariant!");
6151           if (!NormalDestBB->getUniquePredecessor())
6152             NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
6153           ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
6154         }
6155       }
6156     for (auto &V : ToBeChangedToUnreachableInsts)
6157       if (Instruction *I = dyn_cast_or_null<Instruction>(V))
6158         changeToUnreachable(I, /* UseLLVMTrap */ false);
6159     for (Instruction *I : TerminatorsToFold)
6160       ConstantFoldTerminator(I->getParent());
6161 
6162     for (auto &V : ToBeDeletedInsts) {
6163       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
6164         I->replaceAllUsesWith(UndefValue::get(I->getType()));
6165         if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
6166           DeadInsts.push_back(I);
6167         else
6168           I->eraseFromParent();
6169       }
6170     }
6171 
6172     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
6173 
6174     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
6175       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
6176       ToBeDeletedBBs.reserve(NumDeadBlocks);
6177       ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
6178       // Actually we do not delete the blocks but squash them into a single
6179       // unreachable but untangling branches that jump here is something we need
6180       // to do in a more generic way.
6181       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
6182       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
6183       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
6184     }
6185 
6186     // Identify dead internal functions and delete them. This happens outside
6187     // the other fixpoint analysis as we might treat potentially dead functions
6188     // as live to lower the number of iterations. If they happen to be dead, the
6189     // below fixpoint loop will identify and eliminate them.
6190     SmallVector<Function *, 8> InternalFns;
6191     for (Function &F : M)
6192       if (F.hasLocalLinkage())
6193         InternalFns.push_back(&F);
6194 
6195     bool FoundDeadFn = true;
6196     while (FoundDeadFn) {
6197       FoundDeadFn = false;
6198       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
6199         Function *F = InternalFns[u];
6200         if (!F)
6201           continue;
6202 
6203         if (!checkForAllCallSites(
6204                 [this](AbstractCallSite ACS) {
6205                   return ToBeDeletedFunctions.count(
6206                       ACS.getInstruction()->getFunction());
6207                 },
6208                 *F, true, nullptr))
6209           continue;
6210 
6211         ToBeDeletedFunctions.insert(F);
6212         InternalFns[u] = nullptr;
6213         FoundDeadFn = true;
6214       }
6215     }
6216   }
6217 
6218   STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
6219   BUILD_STAT_NAME(AAIsDead, Function) += ToBeDeletedFunctions.size();
6220 
6221   // Rewrite the functions as requested during manifest.
6222   ManifestChange = ManifestChange | rewriteFunctionSignatures();
6223 
6224   for (Function *Fn : ToBeDeletedFunctions) {
6225     Fn->deleteBody();
6226     Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
6227     Fn->eraseFromParent();
6228   }
6229 
6230   if (VerifyMaxFixpointIterations &&
6231       IterationCounter != MaxFixpointIterations) {
6232     errs() << "\n[Attributor] Fixpoint iteration done after: "
6233            << IterationCounter << "/" << MaxFixpointIterations
6234            << " iterations\n";
6235     llvm_unreachable("The fixpoint was not reached with exactly the number of "
6236                      "specified iterations!");
6237   }
6238 
6239   return ManifestChange;
6240 }
6241 
6242 bool Attributor::registerFunctionSignatureRewrite(
6243     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
6244     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
6245     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
6246 
6247   auto CallSiteCanBeChanged = [](AbstractCallSite ACS) {
6248     // Forbid must-tail calls for now.
6249     return !ACS.isCallbackCall() && !ACS.getCallSite().isMustTailCall();
6250   };
6251 
6252   Function *Fn = Arg.getParent();
6253   // Avoid var-arg functions for now.
6254   if (Fn->isVarArg()) {
6255     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
6256     return false;
6257   }
6258 
6259   // Avoid functions with complicated argument passing semantics.
6260   AttributeList FnAttributeList = Fn->getAttributes();
6261   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
6262       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
6263       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca)) {
6264     LLVM_DEBUG(
6265         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
6266     return false;
6267   }
6268 
6269   // Avoid callbacks for now.
6270   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr)) {
6271     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
6272     return false;
6273   }
6274 
6275   auto InstPred = [](Instruction &I) {
6276     if (auto *CI = dyn_cast<CallInst>(&I))
6277       return !CI->isMustTailCall();
6278     return true;
6279   };
6280 
6281   // Forbid must-tail calls for now.
6282   // TODO:
6283   bool AnyDead;
6284   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
6285   if (!checkForAllInstructionsImpl(OpcodeInstMap, InstPred, nullptr, AnyDead,
6286                                    {Instruction::Call})) {
6287     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
6288     return false;
6289   }
6290 
6291   SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = ArgumentReplacementMap[Fn];
6292   if (ARIs.size() == 0)
6293     ARIs.resize(Fn->arg_size());
6294 
6295   // If we have a replacement already with less than or equal new arguments,
6296   // ignore this request.
6297   ArgumentReplacementInfo *&ARI = ARIs[Arg.getArgNo()];
6298   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
6299     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
6300     return false;
6301   }
6302 
6303   // If we have a replacement already but we like the new one better, delete
6304   // the old.
6305   if (ARI)
6306     delete ARI;
6307 
6308   // Remember the replacement.
6309   ARI = new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
6310                                     std::move(CalleeRepairCB),
6311                                     std::move(ACSRepairCB));
6312 
6313   return true;
6314 }
6315 
6316 ChangeStatus Attributor::rewriteFunctionSignatures() {
6317   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6318 
6319   for (auto &It : ArgumentReplacementMap) {
6320     Function *OldFn = It.getFirst();
6321 
6322     // Deleted functions do not require rewrites.
6323     if (ToBeDeletedFunctions.count(OldFn))
6324       continue;
6325 
6326     const SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = It.getSecond();
6327     assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
6328 
6329     SmallVector<Type *, 16> NewArgumentTypes;
6330     SmallVector<AttributeSet, 16> NewArgumentAttributes;
6331 
6332     // Collect replacement argument types and copy over existing attributes.
6333     AttributeList OldFnAttributeList = OldFn->getAttributes();
6334     for (Argument &Arg : OldFn->args()) {
6335       if (ArgumentReplacementInfo *ARI = ARIs[Arg.getArgNo()]) {
6336         NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
6337                                 ARI->ReplacementTypes.end());
6338         NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
6339                                      AttributeSet());
6340       } else {
6341         NewArgumentTypes.push_back(Arg.getType());
6342         NewArgumentAttributes.push_back(
6343             OldFnAttributeList.getParamAttributes(Arg.getArgNo()));
6344       }
6345     }
6346 
6347     FunctionType *OldFnTy = OldFn->getFunctionType();
6348     Type *RetTy = OldFnTy->getReturnType();
6349 
6350     // Construct the new function type using the new arguments types.
6351     FunctionType *NewFnTy =
6352         FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
6353 
6354     LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
6355                       << "' from " << *OldFn->getFunctionType() << " to "
6356                       << *NewFnTy << "\n");
6357 
6358     // Create the new function body and insert it into the module.
6359     Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
6360                                        OldFn->getAddressSpace(), "");
6361     OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
6362     NewFn->takeName(OldFn);
6363     NewFn->copyAttributesFrom(OldFn);
6364 
6365     // Patch the pointer to LLVM function in debug info descriptor.
6366     NewFn->setSubprogram(OldFn->getSubprogram());
6367     OldFn->setSubprogram(nullptr);
6368 
6369     // Recompute the parameter attributes list based on the new arguments for
6370     // the function.
6371     LLVMContext &Ctx = OldFn->getContext();
6372     NewFn->setAttributes(AttributeList::get(
6373         Ctx, OldFnAttributeList.getFnAttributes(),
6374         OldFnAttributeList.getRetAttributes(), NewArgumentAttributes));
6375 
6376     // Since we have now created the new function, splice the body of the old
6377     // function right into the new function, leaving the old rotting hulk of the
6378     // function empty.
6379     NewFn->getBasicBlockList().splice(NewFn->begin(),
6380                                       OldFn->getBasicBlockList());
6381 
6382     // Set of all "call-like" instructions that invoke the old function mapped
6383     // to their new replacements.
6384     SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
6385 
6386     // Callback to create a new "call-like" instruction for a given one.
6387     auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
6388       CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
6389       const AttributeList &OldCallAttributeList = OldCB->getAttributes();
6390 
6391       // Collect the new argument operands for the replacement call site.
6392       SmallVector<Value *, 16> NewArgOperands;
6393       SmallVector<AttributeSet, 16> NewArgOperandAttributes;
6394       for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
6395         unsigned NewFirstArgNum = NewArgOperands.size();
6396         (void)NewFirstArgNum; // only used inside assert.
6397         if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
6398           if (ARI->ACSRepairCB)
6399             ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
6400           assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
6401                      NewArgOperands.size() &&
6402                  "ACS repair callback did not provide as many operand as new "
6403                  "types were registered!");
6404           // TODO: Exose the attribute set to the ACS repair callback
6405           NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
6406                                          AttributeSet());
6407         } else {
6408           NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
6409           NewArgOperandAttributes.push_back(
6410               OldCallAttributeList.getParamAttributes(OldArgNum));
6411         }
6412       }
6413 
6414       assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
6415              "Mismatch # argument operands vs. # argument operand attributes!");
6416       assert(NewArgOperands.size() == NewFn->arg_size() &&
6417              "Mismatch # argument operands vs. # function arguments!");
6418 
6419       SmallVector<OperandBundleDef, 4> OperandBundleDefs;
6420       OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
6421 
6422       // Create a new call or invoke instruction to replace the old one.
6423       CallBase *NewCB;
6424       if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
6425         NewCB =
6426             InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
6427                                NewArgOperands, OperandBundleDefs, "", OldCB);
6428       } else {
6429         auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
6430                                        "", OldCB);
6431         NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
6432         NewCB = NewCI;
6433       }
6434 
6435       // Copy over various properties and the new attributes.
6436       uint64_t W;
6437       if (OldCB->extractProfTotalWeight(W))
6438         NewCB->setProfWeight(W);
6439       NewCB->setCallingConv(OldCB->getCallingConv());
6440       NewCB->setDebugLoc(OldCB->getDebugLoc());
6441       NewCB->takeName(OldCB);
6442       NewCB->setAttributes(AttributeList::get(
6443           Ctx, OldCallAttributeList.getFnAttributes(),
6444           OldCallAttributeList.getRetAttributes(), NewArgOperandAttributes));
6445 
6446       CallSitePairs.push_back({OldCB, NewCB});
6447       return true;
6448     };
6449 
6450     // Use the CallSiteReplacementCreator to create replacement call sites.
6451     bool Success =
6452         checkForAllCallSites(CallSiteReplacementCreator, *OldFn, true, nullptr);
6453     (void)Success;
6454     assert(Success && "Assumed call site replacement to succeed!");
6455 
6456     // Rewire the arguments.
6457     auto OldFnArgIt = OldFn->arg_begin();
6458     auto NewFnArgIt = NewFn->arg_begin();
6459     for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
6460          ++OldArgNum, ++OldFnArgIt) {
6461       if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
6462         if (ARI->CalleeRepairCB)
6463           ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
6464         NewFnArgIt += ARI->ReplacementTypes.size();
6465       } else {
6466         NewFnArgIt->takeName(&*OldFnArgIt);
6467         OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
6468         ++NewFnArgIt;
6469       }
6470     }
6471 
6472     // Eliminate the instructions *after* we visited all of them.
6473     for (auto &CallSitePair : CallSitePairs) {
6474       CallBase &OldCB = *CallSitePair.first;
6475       CallBase &NewCB = *CallSitePair.second;
6476       OldCB.replaceAllUsesWith(&NewCB);
6477       OldCB.eraseFromParent();
6478     }
6479 
6480     ToBeDeletedFunctions.insert(OldFn);
6481 
6482     Changed = ChangeStatus::CHANGED;
6483   }
6484 
6485   return Changed;
6486 }
6487 
6488 void Attributor::initializeInformationCache(Function &F) {
6489 
6490   // Walk all instructions to find interesting instructions that might be
6491   // queried by abstract attributes during their initialization or update.
6492   // This has to happen before we create attributes.
6493   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
6494   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
6495 
6496   for (Instruction &I : instructions(&F)) {
6497     bool IsInterestingOpcode = false;
6498 
6499     // To allow easy access to all instructions in a function with a given
6500     // opcode we store them in the InfoCache. As not all opcodes are interesting
6501     // to concrete attributes we only cache the ones that are as identified in
6502     // the following switch.
6503     // Note: There are no concrete attributes now so this is initially empty.
6504     switch (I.getOpcode()) {
6505     default:
6506       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
6507              "New call site/base instruction type needs to be known int the "
6508              "Attributor.");
6509       break;
6510     case Instruction::Load:
6511       // The alignment of a pointer is interesting for loads.
6512     case Instruction::Store:
6513       // The alignment of a pointer is interesting for stores.
6514     case Instruction::Call:
6515     case Instruction::CallBr:
6516     case Instruction::Invoke:
6517     case Instruction::CleanupRet:
6518     case Instruction::CatchSwitch:
6519     case Instruction::AtomicRMW:
6520     case Instruction::AtomicCmpXchg:
6521     case Instruction::Br:
6522     case Instruction::Resume:
6523     case Instruction::Ret:
6524       IsInterestingOpcode = true;
6525     }
6526     if (IsInterestingOpcode)
6527       InstOpcodeMap[I.getOpcode()].push_back(&I);
6528     if (I.mayReadOrWriteMemory())
6529       ReadOrWriteInsts.push_back(&I);
6530   }
6531 }
6532 
6533 void Attributor::recordDependence(const AbstractAttribute &FromAA,
6534                                   const AbstractAttribute &ToAA,
6535                                   DepClassTy DepClass) {
6536   if (FromAA.getState().isAtFixpoint())
6537     return;
6538 
6539   if (DepClass == DepClassTy::REQUIRED)
6540     QueryMap[&FromAA].RequiredAAs.insert(
6541         const_cast<AbstractAttribute *>(&ToAA));
6542   else
6543     QueryMap[&FromAA].OptionalAAs.insert(
6544         const_cast<AbstractAttribute *>(&ToAA));
6545   QueriedNonFixAA = true;
6546 }
6547 
6548 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
6549   if (!VisitedFunctions.insert(&F).second)
6550     return;
6551   if (F.isDeclaration())
6552     return;
6553 
6554   IRPosition FPos = IRPosition::function(F);
6555 
6556   // Check for dead BasicBlocks in every function.
6557   // We need dead instruction detection because we do not want to deal with
6558   // broken IR in which SSA rules do not apply.
6559   getOrCreateAAFor<AAIsDead>(FPos);
6560 
6561   // Every function might be "will-return".
6562   getOrCreateAAFor<AAWillReturn>(FPos);
6563 
6564   // Every function might contain instructions that cause "undefined behavior".
6565   getOrCreateAAFor<AAUndefinedBehavior>(FPos);
6566 
6567   // Every function can be nounwind.
6568   getOrCreateAAFor<AANoUnwind>(FPos);
6569 
6570   // Every function might be marked "nosync"
6571   getOrCreateAAFor<AANoSync>(FPos);
6572 
6573   // Every function might be "no-free".
6574   getOrCreateAAFor<AANoFree>(FPos);
6575 
6576   // Every function might be "no-return".
6577   getOrCreateAAFor<AANoReturn>(FPos);
6578 
6579   // Every function might be "no-recurse".
6580   getOrCreateAAFor<AANoRecurse>(FPos);
6581 
6582   // Every function might be "readnone/readonly/writeonly/...".
6583   getOrCreateAAFor<AAMemoryBehavior>(FPos);
6584 
6585   // Every function might be applicable for Heap-To-Stack conversion.
6586   if (EnableHeapToStack)
6587     getOrCreateAAFor<AAHeapToStack>(FPos);
6588 
6589   // Return attributes are only appropriate if the return type is non void.
6590   Type *ReturnType = F.getReturnType();
6591   if (!ReturnType->isVoidTy()) {
6592     // Argument attribute "returned" --- Create only one per function even
6593     // though it is an argument attribute.
6594     getOrCreateAAFor<AAReturnedValues>(FPos);
6595 
6596     IRPosition RetPos = IRPosition::returned(F);
6597 
6598     // Every returned value might be dead.
6599     getOrCreateAAFor<AAIsDead>(RetPos);
6600 
6601     // Every function might be simplified.
6602     getOrCreateAAFor<AAValueSimplify>(RetPos);
6603 
6604     if (ReturnType->isPointerTy()) {
6605 
6606       // Every function with pointer return type might be marked align.
6607       getOrCreateAAFor<AAAlign>(RetPos);
6608 
6609       // Every function with pointer return type might be marked nonnull.
6610       getOrCreateAAFor<AANonNull>(RetPos);
6611 
6612       // Every function with pointer return type might be marked noalias.
6613       getOrCreateAAFor<AANoAlias>(RetPos);
6614 
6615       // Every function with pointer return type might be marked
6616       // dereferenceable.
6617       getOrCreateAAFor<AADereferenceable>(RetPos);
6618     }
6619   }
6620 
6621   for (Argument &Arg : F.args()) {
6622     IRPosition ArgPos = IRPosition::argument(Arg);
6623 
6624     // Every argument might be simplified.
6625     getOrCreateAAFor<AAValueSimplify>(ArgPos);
6626 
6627     if (Arg.getType()->isPointerTy()) {
6628       // Every argument with pointer type might be marked nonnull.
6629       getOrCreateAAFor<AANonNull>(ArgPos);
6630 
6631       // Every argument with pointer type might be marked noalias.
6632       getOrCreateAAFor<AANoAlias>(ArgPos);
6633 
6634       // Every argument with pointer type might be marked dereferenceable.
6635       getOrCreateAAFor<AADereferenceable>(ArgPos);
6636 
6637       // Every argument with pointer type might be marked align.
6638       getOrCreateAAFor<AAAlign>(ArgPos);
6639 
6640       // Every argument with pointer type might be marked nocapture.
6641       getOrCreateAAFor<AANoCapture>(ArgPos);
6642 
6643       // Every argument with pointer type might be marked
6644       // "readnone/readonly/writeonly/..."
6645       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
6646 
6647       // Every argument with pointer type might be marked nofree.
6648       getOrCreateAAFor<AANoFree>(ArgPos);
6649     }
6650   }
6651 
6652   auto CallSitePred = [&](Instruction &I) -> bool {
6653     CallSite CS(&I);
6654     if (Function *Callee = CS.getCalledFunction()) {
6655       // Skip declerations except if annotations on their call sites were
6656       // explicitly requested.
6657       if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
6658           !Callee->hasMetadata(LLVMContext::MD_callback))
6659         return true;
6660 
6661       if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
6662 
6663         IRPosition CSRetPos = IRPosition::callsite_returned(CS);
6664 
6665         // Call site return values might be dead.
6666         getOrCreateAAFor<AAIsDead>(CSRetPos);
6667 
6668         // Call site return integer values might be limited by a constant range.
6669         if (Callee->getReturnType()->isIntegerTy()) {
6670           getOrCreateAAFor<AAValueConstantRange>(CSRetPos);
6671         }
6672       }
6673 
6674       for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {
6675 
6676         IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
6677 
6678         // Every call site argument might be dead.
6679         getOrCreateAAFor<AAIsDead>(CSArgPos);
6680 
6681         // Call site argument might be simplified.
6682         getOrCreateAAFor<AAValueSimplify>(CSArgPos);
6683 
6684         if (!CS.getArgument(i)->getType()->isPointerTy())
6685           continue;
6686 
6687         // Call site argument attribute "non-null".
6688         getOrCreateAAFor<AANonNull>(CSArgPos);
6689 
6690         // Call site argument attribute "no-alias".
6691         getOrCreateAAFor<AANoAlias>(CSArgPos);
6692 
6693         // Call site argument attribute "dereferenceable".
6694         getOrCreateAAFor<AADereferenceable>(CSArgPos);
6695 
6696         // Call site argument attribute "align".
6697         getOrCreateAAFor<AAAlign>(CSArgPos);
6698 
6699         // Call site argument attribute
6700         // "readnone/readonly/writeonly/..."
6701         getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);
6702 
6703         // Call site argument attribute "nofree".
6704         getOrCreateAAFor<AANoFree>(CSArgPos);
6705       }
6706     }
6707     return true;
6708   };
6709 
6710   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
6711   bool Success, AnyDead = false;
6712   Success = checkForAllInstructionsImpl(
6713       OpcodeInstMap, CallSitePred, nullptr, AnyDead,
6714       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
6715        (unsigned)Instruction::Call});
6716   (void)Success;
6717   assert(Success && !AnyDead && "Expected the check call to be successful!");
6718 
6719   auto LoadStorePred = [&](Instruction &I) -> bool {
6720     if (isa<LoadInst>(I))
6721       getOrCreateAAFor<AAAlign>(
6722           IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
6723     else
6724       getOrCreateAAFor<AAAlign>(
6725           IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
6726     return true;
6727   };
6728   Success = checkForAllInstructionsImpl(
6729       OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
6730       {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
6731   (void)Success;
6732   assert(Success && !AnyDead && "Expected the check call to be successful!");
6733 }
6734 
6735 /// Helpers to ease debugging through output streams and print calls.
6736 ///
6737 ///{
6738 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
6739   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
6740 }
6741 
6742 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
6743   switch (AP) {
6744   case IRPosition::IRP_INVALID:
6745     return OS << "inv";
6746   case IRPosition::IRP_FLOAT:
6747     return OS << "flt";
6748   case IRPosition::IRP_RETURNED:
6749     return OS << "fn_ret";
6750   case IRPosition::IRP_CALL_SITE_RETURNED:
6751     return OS << "cs_ret";
6752   case IRPosition::IRP_FUNCTION:
6753     return OS << "fn";
6754   case IRPosition::IRP_CALL_SITE:
6755     return OS << "cs";
6756   case IRPosition::IRP_ARGUMENT:
6757     return OS << "arg";
6758   case IRPosition::IRP_CALL_SITE_ARGUMENT:
6759     return OS << "cs_arg";
6760   }
6761   llvm_unreachable("Unknown attribute position!");
6762 }
6763 
6764 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
6765   const Value &AV = Pos.getAssociatedValue();
6766   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
6767             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
6768 }
6769 
6770 template <typename base_ty, base_ty BestState, base_ty WorstState>
6771 raw_ostream &
6772 llvm::operator<<(raw_ostream &OS,
6773                  const IntegerStateBase<base_ty, BestState, WorstState> &S) {
6774   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
6775             << static_cast<const AbstractState &>(S);
6776 }
6777 
6778 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
6779   OS << "range-state(" << S.getBitWidth() << ")<";
6780   S.getKnown().print(OS);
6781   OS << " / ";
6782   S.getAssumed().print(OS);
6783   OS << ">";
6784 
6785   return OS << static_cast<const AbstractState &>(S);
6786 }
6787 
6788 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
6789   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
6790 }
6791 
6792 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
6793   AA.print(OS);
6794   return OS;
6795 }
6796 
6797 void AbstractAttribute::print(raw_ostream &OS) const {
6798   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
6799      << "]";
6800 }
6801 ///}
6802 
6803 /// ----------------------------------------------------------------------------
6804 ///                       Pass (Manager) Boilerplate
6805 /// ----------------------------------------------------------------------------
6806 
6807 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
6808   if (DisableAttributor)
6809     return false;
6810 
6811   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
6812                     << " functions.\n");
6813 
6814   // Create an Attributor and initially empty information cache that is filled
6815   // while we identify default attribute opportunities.
6816   InformationCache InfoCache(M, AG);
6817   Attributor A(InfoCache, DepRecInterval);
6818 
6819   for (Function &F : M)
6820     A.initializeInformationCache(F);
6821 
6822   for (Function &F : M) {
6823     if (F.hasExactDefinition())
6824       NumFnWithExactDefinition++;
6825     else
6826       NumFnWithoutExactDefinition++;
6827 
6828     // We look at internal functions only on-demand but if any use is not a
6829     // direct call, we have to do it eagerly.
6830     if (F.hasLocalLinkage()) {
6831       if (llvm::all_of(F.uses(), [](const Use &U) {
6832             return ImmutableCallSite(U.getUser()) &&
6833                    ImmutableCallSite(U.getUser()).isCallee(&U);
6834           }))
6835         continue;
6836     }
6837 
6838     // Populate the Attributor with abstract attribute opportunities in the
6839     // function and the information cache with IR information.
6840     A.identifyDefaultAbstractAttributes(F);
6841   }
6842 
6843   bool Changed = A.run(M) == ChangeStatus::CHANGED;
6844   assert(!verifyModule(M, &errs()) && "Module verification failed!");
6845   return Changed;
6846 }
6847 
6848 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
6849   AnalysisGetter AG(AM);
6850   if (runAttributorOnModule(M, AG)) {
6851     // FIXME: Think about passes we will preserve and add them here.
6852     return PreservedAnalyses::none();
6853   }
6854   return PreservedAnalyses::all();
6855 }
6856 
6857 namespace {
6858 
6859 struct AttributorLegacyPass : public ModulePass {
6860   static char ID;
6861 
6862   AttributorLegacyPass() : ModulePass(ID) {
6863     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
6864   }
6865 
6866   bool runOnModule(Module &M) override {
6867     if (skipModule(M))
6868       return false;
6869 
6870     AnalysisGetter AG;
6871     return runAttributorOnModule(M, AG);
6872   }
6873 
6874   void getAnalysisUsage(AnalysisUsage &AU) const override {
6875     // FIXME: Think about passes we will preserve and add them here.
6876     AU.addRequired<TargetLibraryInfoWrapperPass>();
6877   }
6878 };
6879 
6880 } // end anonymous namespace
6881 
6882 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
6883 
6884 char AttributorLegacyPass::ID = 0;
6885 
6886 const char AAReturnedValues::ID = 0;
6887 const char AANoUnwind::ID = 0;
6888 const char AANoSync::ID = 0;
6889 const char AANoFree::ID = 0;
6890 const char AANonNull::ID = 0;
6891 const char AANoRecurse::ID = 0;
6892 const char AAWillReturn::ID = 0;
6893 const char AAUndefinedBehavior::ID = 0;
6894 const char AANoAlias::ID = 0;
6895 const char AAReachability::ID = 0;
6896 const char AANoReturn::ID = 0;
6897 const char AAIsDead::ID = 0;
6898 const char AADereferenceable::ID = 0;
6899 const char AAAlign::ID = 0;
6900 const char AANoCapture::ID = 0;
6901 const char AAValueSimplify::ID = 0;
6902 const char AAHeapToStack::ID = 0;
6903 const char AAMemoryBehavior::ID = 0;
6904 const char AAValueConstantRange::ID = 0;
6905 
6906 // Macro magic to create the static generator function for attributes that
6907 // follow the naming scheme.
6908 
6909 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
6910   case IRPosition::PK:                                                         \
6911     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
6912 
6913 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
6914   case IRPosition::PK:                                                         \
6915     AA = new CLASS##SUFFIX(IRP);                                               \
6916     break;
6917 
6918 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
6919   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6920     CLASS *AA = nullptr;                                                       \
6921     switch (IRP.getPositionKind()) {                                           \
6922       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6923       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6924       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6925       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6926       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6927       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6928       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6929       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6930     }                                                                          \
6931     return *AA;                                                                \
6932   }
6933 
6934 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
6935   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6936     CLASS *AA = nullptr;                                                       \
6937     switch (IRP.getPositionKind()) {                                           \
6938       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6939       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
6940       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
6941       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6942       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6943       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
6944       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6945       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6946     }                                                                          \
6947     return *AA;                                                                \
6948   }
6949 
6950 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
6951   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6952     CLASS *AA = nullptr;                                                       \
6953     switch (IRP.getPositionKind()) {                                           \
6954       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6955       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6956       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6957       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6958       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6959       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
6960       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6961       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6962     }                                                                          \
6963     return *AA;                                                                \
6964   }
6965 
6966 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
6967   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6968     CLASS *AA = nullptr;                                                       \
6969     switch (IRP.getPositionKind()) {                                           \
6970       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6971       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6972       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6973       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6974       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6975       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6976       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
6977       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6978     }                                                                          \
6979     return *AA;                                                                \
6980   }
6981 
6982 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
6983   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6984     CLASS *AA = nullptr;                                                       \
6985     switch (IRP.getPositionKind()) {                                           \
6986       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6987       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6988       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6989       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6990       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6991       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6992       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6993       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6994     }                                                                          \
6995     return *AA;                                                                \
6996   }
6997 
6998 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
6999 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7000 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7001 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7002 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7003 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7004 
7005 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7006 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7007 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7008 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7009 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7010 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7011 
7012 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7013 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7014 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7015 
7016 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7017 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7018 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7019 
7020 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7021 
7022 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7023 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7024 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7025 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7026 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7027 #undef SWITCH_PK_CREATE
7028 #undef SWITCH_PK_INV
7029 
7030 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
7031                       "Deduce and propagate attributes", false, false)
7032 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
7033 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
7034                     "Deduce and propagate attributes", false, false)
7035