1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an inter procedural pass that deduces and/or propagating
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/Argument.h"
29 #include "llvm/IR/Attributes.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/InstIterator.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
37 #include "llvm/Transforms/Utils/Local.h"
38 
39 #include <cassert>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "attributor"
44 
// Statistics about the overall deduction process.
STATISTIC(NumFnWithExactDefinition,
          "Number of function with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of function without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
STATISTIC(NumFnNoUnwind, "Number of functions marked nounwind");

// Per-attribute-kind statistics, bumped by bookkeeping() when an attribute is
// actually manifested in the IR.
STATISTIC(NumFnUniqueReturned, "Number of function with unique return");
STATISTIC(NumFnKnownReturns, "Number of function with known return values");
STATISTIC(NumFnArgumentReturned,
          "Number of function arguments marked returned");
STATISTIC(NumFnNoSync, "Number of functions marked nosync");
STATISTIC(NumFnNoFree, "Number of functions marked nofree");
STATISTIC(NumFnReturnedNonNull,
          "Number of function return values marked nonnull");
STATISTIC(NumFnArgumentNonNull, "Number of function arguments marked nonnull");
STATISTIC(NumCSArgumentNonNull, "Number of call site arguments marked nonnull");
STATISTIC(NumFnWillReturn, "Number of functions marked willreturn");
STATISTIC(NumFnArgumentNoAlias, "Number of function arguments marked noalias");
STATISTIC(NumFnReturnedDereferenceable,
          "Number of function return values marked dereferenceable");
STATISTIC(NumFnArgumentDereferenceable,
          "Number of function arguments marked dereferenceable");
STATISTIC(NumCSArgumentDereferenceable,
          "Number of call site arguments marked dereferenceable");
STATISTIC(NumFnReturnedAlign, "Number of function return values marked align");
STATISTIC(NumFnArgumentAlign, "Number of function arguments marked align");
STATISTIC(NumCSArgumentAlign, "Number of call site arguments marked align");

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more evolved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

// NOTE: the pass is disabled by default (cl::init(true)) and must be enabled
// explicitly via -attributor-disable=false.
static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> VerifyAttributor(
    "attributor-verify", cl::Hidden,
    cl::desc("Verify the Attributor deduction and "
             "manifestation of attributes -- may issue false-positive errors"),
    cl::init(false));
102 
103 /// Logic operators for the change status enum class.
104 ///
105 ///{
106 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
107   return l == ChangeStatus::CHANGED ? l : r;
108 }
109 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
110   return l == ChangeStatus::UNCHANGED ? l : r;
111 }
112 ///}
113 
114 /// Helper to adjust the statistics.
115 static void bookkeeping(AbstractAttribute::ManifestPosition MP,
116                         const Attribute &Attr) {
117   if (!AreStatisticsEnabled())
118     return;
119 
120   switch (Attr.getKindAsEnum()) {
121   case Attribute::Alignment:
122     switch (MP) {
123     case AbstractAttribute::MP_RETURNED:
124       NumFnReturnedAlign++;
125       break;
126     case AbstractAttribute::MP_ARGUMENT:
127       NumFnArgumentAlign++;
128       break;
129     case AbstractAttribute::MP_CALL_SITE_ARGUMENT:
130       NumCSArgumentAlign++;
131       break;
132     default:
133       break;
134     }
135     break;
136   case Attribute::Dereferenceable:
137     switch (MP) {
138     case AbstractAttribute::MP_RETURNED:
139       NumFnReturnedDereferenceable++;
140       break;
141     case AbstractAttribute::MP_ARGUMENT:
142       NumFnArgumentDereferenceable++;
143       break;
144     case AbstractAttribute::MP_CALL_SITE_ARGUMENT:
145       NumCSArgumentDereferenceable++;
146       break;
147     default:
148       break;
149     }
150     break;
151   case Attribute::NoUnwind:
152     NumFnNoUnwind++;
153     return;
154   case Attribute::Returned:
155     NumFnArgumentReturned++;
156     return;
157   case Attribute::NoSync:
158     NumFnNoSync++;
159     break;
160   case Attribute::NoFree:
161     NumFnNoFree++;
162     break;
163   case Attribute::NonNull:
164     switch (MP) {
165     case AbstractAttribute::MP_RETURNED:
166       NumFnReturnedNonNull++;
167       break;
168     case AbstractAttribute::MP_ARGUMENT:
169       NumFnArgumentNonNull++;
170       break;
171     case AbstractAttribute::MP_CALL_SITE_ARGUMENT:
172       NumCSArgumentNonNull++;
173       break;
174     default:
175       break;
176     }
177     break;
178   case Attribute::WillReturn:
179     NumFnWillReturn++;
180     break;
181   case Attribute::NoAlias:
182     NumFnArgumentNoAlias++;
183     return;
184   default:
185     return;
186   }
187 }
188 
/// Callback asked before a potential origin value is followed during a
/// traversal; returning false stops exploration of that value.
template <typename StateTy>
using followValueCB_t = std::function<bool(Value *, StateTy &State)>;
/// Callback invoked on every leaf value a traversal reaches.
template <typename StateTy>
using visitValueCB_t = std::function<void(Value *, StateTy &State)>;
193 
194 /// Recursively visit all values that might become \p InitV at some point. This
195 /// will be done by looking through cast instructions, selects, phis, and calls
196 /// with the "returned" attribute. The callback \p FollowValueCB is asked before
197 /// a potential origin value is looked at. If no \p FollowValueCB is passed, a
198 /// default one is used that will make sure we visit every value only once. Once
199 /// we cannot look through the value any further, the callback \p VisitValueCB
200 /// is invoked and passed the current value and the \p State. To limit how much
201 /// effort is invested, we will never visit more than \p MaxValues values.
202 template <typename StateTy>
203 static bool genericValueTraversal(
204     Value *InitV, StateTy &State, visitValueCB_t<StateTy> &VisitValueCB,
205     followValueCB_t<StateTy> *FollowValueCB = nullptr, int MaxValues = 8) {
206 
207   SmallPtrSet<Value *, 16> Visited;
208   followValueCB_t<bool> DefaultFollowValueCB = [&](Value *Val, bool &) {
209     return Visited.insert(Val).second;
210   };
211 
212   if (!FollowValueCB)
213     FollowValueCB = &DefaultFollowValueCB;
214 
215   SmallVector<Value *, 16> Worklist;
216   Worklist.push_back(InitV);
217 
218   int Iteration = 0;
219   do {
220     Value *V = Worklist.pop_back_val();
221 
222     // Check if we should process the current value. To prevent endless
223     // recursion keep a record of the values we followed!
224     if (!(*FollowValueCB)(V, State))
225       continue;
226 
227     // Make sure we limit the compile time for complex expressions.
228     if (Iteration++ >= MaxValues)
229       return false;
230 
231     // Explicitly look through calls with a "returned" attribute if we do
232     // not have a pointer as stripPointerCasts only works on them.
233     if (V->getType()->isPointerTy()) {
234       V = V->stripPointerCasts();
235     } else {
236       CallSite CS(V);
237       if (CS && CS.getCalledFunction()) {
238         Value *NewV = nullptr;
239         for (Argument &Arg : CS.getCalledFunction()->args())
240           if (Arg.hasReturnedAttr()) {
241             NewV = CS.getArgOperand(Arg.getArgNo());
242             break;
243           }
244         if (NewV) {
245           Worklist.push_back(NewV);
246           continue;
247         }
248       }
249     }
250 
251     // Look through select instructions, visit both potential values.
252     if (auto *SI = dyn_cast<SelectInst>(V)) {
253       Worklist.push_back(SI->getTrueValue());
254       Worklist.push_back(SI->getFalseValue());
255       continue;
256     }
257 
258     // Look through phi nodes, visit all operands.
259     if (auto *PHI = dyn_cast<PHINode>(V)) {
260       Worklist.append(PHI->op_begin(), PHI->op_end());
261       continue;
262     }
263 
264     // Once a leaf is reached we inform the user through the callback.
265     VisitValueCB(V, State);
266   } while (!Worklist.empty());
267 
268   // All values have been visited.
269   return true;
270 }
271 
272 /// Helper to identify the correct offset into an attribute list.
273 static unsigned getAttrIndex(AbstractAttribute::ManifestPosition MP,
274                              unsigned ArgNo = 0) {
275   switch (MP) {
276   case AbstractAttribute::MP_ARGUMENT:
277   case AbstractAttribute::MP_CALL_SITE_ARGUMENT:
278     return ArgNo + AttributeList::FirstArgIndex;
279   case AbstractAttribute::MP_FUNCTION:
280     return AttributeList::FunctionIndex;
281   case AbstractAttribute::MP_RETURNED:
282     return AttributeList::ReturnIndex;
283   }
284   llvm_unreachable("Unknown manifest position!");
285 }
286 
287 /// Return true if \p New is equal or worse than \p Old.
288 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
289   if (!Old.isIntAttribute())
290     return true;
291 
292   return Old.getValueAsInt() >= New.getValueAsInt();
293 }
294 
295 /// Return true if the information provided by \p Attr was added to the
296 /// attribute list \p Attrs. This is only the case if it was not already present
297 /// in \p Attrs at the position describe by \p MP and \p ArgNo.
298 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
299                              AttributeList &Attrs,
300                              AbstractAttribute::ManifestPosition MP,
301                              unsigned ArgNo = 0) {
302   unsigned AttrIdx = getAttrIndex(MP, ArgNo);
303 
304   if (Attr.isEnumAttribute()) {
305     Attribute::AttrKind Kind = Attr.getKindAsEnum();
306     if (Attrs.hasAttribute(AttrIdx, Kind))
307       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
308         return false;
309     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
310     return true;
311   }
312   if (Attr.isStringAttribute()) {
313     StringRef Kind = Attr.getKindAsString();
314     if (Attrs.hasAttribute(AttrIdx, Kind))
315       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
316         return false;
317     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
318     return true;
319   }
320   if (Attr.isIntAttribute()) {
321     Attribute::AttrKind Kind = Attr.getKindAsEnum();
322     if (Attrs.hasAttribute(AttrIdx, Kind))
323       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
324         return false;
325     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
326     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
327     return true;
328   }
329 
330   llvm_unreachable("Expected enum or string attribute!");
331 }
332 
333 ChangeStatus AbstractAttribute::update(Attributor &A) {
334   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
335   if (getState().isAtFixpoint())
336     return HasChanged;
337 
338   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
339 
340   HasChanged = updateImpl(A);
341 
342   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
343                     << "\n");
344 
345   return HasChanged;
346 }
347 
/// Manifest the deduced attributes of this abstract attribute in the IR.
///
/// Collects the deduced attributes, merges them into the appropriate
/// AttributeList (function, return, argument, or call-site argument position)
/// and writes the list back only if something actually improved.
ChangeStatus AbstractAttribute::manifest(Attributor &A) {
  assert(getState().isValidState() &&
         "Attempted to manifest an invalid state!");
  assert(getAssociatedValue() &&
         "Attempted to manifest an attribute without associated value!");

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  SmallVector<Attribute, 4> DeducedAttrs;
  getDeducedAttributes(DeducedAttrs);

  Function &ScopeFn = getAnchorScope();
  LLVMContext &Ctx = ScopeFn.getContext();
  ManifestPosition MP = getManifestPosition();

  AttributeList Attrs;
  SmallVector<unsigned, 4> ArgNos;

  // In the following some generic code that will manifest attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.
  // Note that MP_CALL_SITE_ARGUMENT can annotate multiple locations.

  switch (MP) {
  case MP_ARGUMENT:
    ArgNos.push_back(cast<Argument>(getAssociatedValue())->getArgNo());
    Attrs = ScopeFn.getAttributes();
    break;
  case MP_FUNCTION:
  case MP_RETURNED:
    // The argument number is unused for these positions (getAttrIndex maps
    // them to sentinel indices), so a single dummy entry suffices.
    ArgNos.push_back(0);
    Attrs = ScopeFn.getAttributes();
    break;
  case MP_CALL_SITE_ARGUMENT: {
    CallSite CS(&getAnchoredValue());
    // The associated value may be passed more than once; annotate every
    // matching call-site argument position.
    for (unsigned u = 0, e = CS.getNumArgOperands(); u != e; u++)
      if (CS.getArgOperand(u) == getAssociatedValue())
        ArgNos.push_back(u);
    Attrs = CS.getAttributes();
  }
  }

  // Merge each deduced attribute into every collected position, recording
  // whether anything new was actually added.
  for (const Attribute &Attr : DeducedAttrs) {
    for (unsigned ArgNo : ArgNos) {
      if (!addIfNotExistent(Ctx, Attr, Attrs, MP, ArgNo))
        continue;

      HasChanged = ChangeStatus::CHANGED;
      bookkeeping(MP, Attr);
    }
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  // Write the (changed) attribute list back to the IR.
  switch (MP) {
  case MP_ARGUMENT:
  case MP_FUNCTION:
  case MP_RETURNED:
    ScopeFn.setAttributes(Attrs);
    break;
  case MP_CALL_SITE_ARGUMENT:
    CallSite(&getAnchoredValue()).setAttributes(Attrs);
  }

  return HasChanged;
}
414 
415 Function &AbstractAttribute::getAnchorScope() {
416   Value &V = getAnchoredValue();
417   if (isa<Function>(V))
418     return cast<Function>(V);
419   if (isa<Argument>(V))
420     return *cast<Argument>(V).getParent();
421   if (isa<Instruction>(V))
422     return *cast<Instruction>(V).getFunction();
423   llvm_unreachable("No scope for anchored value found!");
424 }
425 
/// Const overload, implemented via the non-const variant above.
const Function &AbstractAttribute::getAnchorScope() const {
  return const_cast<AbstractAttribute *>(this)->getAnchorScope();
}
429 
430 // Helper function that returns argument index of value.
431 // If the value is not an argument, this returns -1.
432 static int getArgNo(Value &V) {
433   if (auto *Arg = dyn_cast<Argument>(&V))
434     return Arg->getArgNo();
435   return -1;
436 }
437 
/// -----------------------NoUnwind Function Attribute--------------------------

/// Abstract attribute deducing the "nounwind" function attribute. Uses a
/// simple boolean lattice: assumed nounwind until an instruction that may
/// unwind (and is not itself known nounwind) is found.
struct AANoUnwindFunction : AANoUnwind, BooleanState {

  AANoUnwindFunction(Function &F, InformationCache &InfoCache)
      : AANoUnwind(F, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_FUNCTION; }

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AANoUnwind::isAssumedNoUnwind().
  bool isAssumedNoUnwind() const override { return getAssumed(); }

  /// See AANoUnwind::isKnownNoUnwind().
  bool isKnownNoUnwind() const override { return getKnown(); }
};
467 
468 ChangeStatus AANoUnwindFunction::updateImpl(Attributor &A) {
469   Function &F = getAnchorScope();
470 
471   // The map from instruction opcodes to those instructions in the function.
472   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
473   auto Opcodes = {
474       (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
475       (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
476       (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
477 
478   auto *LivenessAA = A.getAAFor<AAIsDead>(*this, F);
479 
480   for (unsigned Opcode : Opcodes) {
481     for (Instruction *I : OpcodeInstMap[Opcode]) {
482       // Skip dead instructions.
483       if (LivenessAA && LivenessAA->isAssumedDead(I))
484         continue;
485 
486       if (!I->mayThrow())
487         continue;
488 
489       auto *NoUnwindAA = A.getAAFor<AANoUnwind>(*this, *I);
490 
491       if (!NoUnwindAA || !NoUnwindAA->isAssumedNoUnwind())
492         return indicatePessimisticFixpoint();
493     }
494   }
495   return ChangeStatus::UNCHANGED;
496 }
497 
/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl final : public AAReturnedValues, AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  DenseMap<Value *, SmallPtrSet<ReturnInst *, 2>> ReturnedValues;

  /// State flags
  ///
  ///{
  // True once the state can no longer change (optimistic or pessimistic
  // fixpoint reached).
  bool IsFixed;
  // False once we gave up and declared the state overdefined.
  bool IsValidState;
  // True once a returned call site could not be resolved to a unique value;
  // from then on call sites are treated as ordinary returned values.
  bool HasOverdefinedReturnedCalls;
  ///}

  /// Collect values that could become \p V in the set \p Values, each mapped to
  /// \p ReturnInsts.
  void collectValuesRecursively(
      Attributor &A, Value *V, SmallPtrSetImpl<ReturnInst *> &ReturnInsts,
      DenseMap<Value *, SmallPtrSet<ReturnInst *, 2>> &Values) {

    // Record every leaf value the traversal reaches together with the return
    // instructions it may flow out of.
    visitValueCB_t<bool> VisitValueCB = [&](Value *Val, bool &) {
      assert(!isa<Instruction>(Val) ||
             &getAnchorScope() == cast<Instruction>(Val)->getFunction());
      Values[Val].insert(ReturnInsts.begin(), ReturnInsts.end());
    };

    bool UnusedBool;
    bool Success = genericValueTraversal(V, UnusedBool, VisitValueCB);

    // If we did abort the above traversal we haven't see all the values.
    // Consequently, we cannot know if the information we would derive is
    // accurate so we give up early.
    if (!Success)
      indicatePessimisticFixpoint();
  }

public:
  /// See AbstractAttribute::AbstractAttribute(...).
  AAReturnedValuesImpl(Function &F, InformationCache &InfoCache)
      : AAReturnedValues(F, InfoCache) {
    // We do not have an associated argument yet.
    AssociatedVal = nullptr;
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    AssociatedVal = nullptr;
    IsFixed = false;
    IsValidState = true;
    HasOverdefinedReturnedCalls = false;
    ReturnedValues.clear();

    Function &F = cast<Function>(getAnchoredValue());

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F.args()) {
      if (Arg.hasReturnedAttr()) {

        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    // If no argument was marked as returned we look at all return instructions
    // and collect potentially returned values.
    for (Instruction *RI : OpcodeInstMap[Instruction::Ret]) {
      SmallPtrSet<ReturnInst *, 1> RISet({cast<ReturnInst>(RI)});
      collectValuesRecursively(A, cast<ReturnInst>(RI)->getReturnValue(), RISet,
                               ReturnedValues);
    }
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_ARGUMENT; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Return the number of potential return values, -1 if unknown.
  /// NOTE(review): the return type is size_t, so -1 wraps to SIZE_MAX.
  size_t getNumReturnValues() const {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *>
  getAssumedUniqueReturnValue(const AAIsDead *LivenessAA) const;

  /// See AbstractState::checkForallReturnedValues(...).
  bool checkForallReturnedValues(
      std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> &Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    IsValidState &= true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};
638 
639 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
640   ChangeStatus Changed = ChangeStatus::UNCHANGED;
641 
642   // Bookkeeping.
643   assert(isValidState());
644   NumFnKnownReturns++;
645 
646   auto *LivenessAA = A.getAAFor<AAIsDead>(*this, getAnchorScope());
647 
648   // Check if we have an assumed unique return value that we could manifest.
649   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(LivenessAA);
650 
651   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
652     return Changed;
653 
654   // Bookkeeping.
655   NumFnUniqueReturned++;
656 
657   // If the assumed unique return value is an argument, annotate it.
658   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
659     AssociatedVal = UniqueRVArg;
660     Changed = AbstractAttribute::manifest(A) | Changed;
661   }
662 
663   return Changed;
664 }
665 
666 const std::string AAReturnedValuesImpl::getAsStr() const {
667   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
668          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
669          ")[OD: " + std::to_string(HasOverdefinedReturnedCalls) + "]";
670 }
671 
Optional<Value *> AAReturnedValuesImpl::getAssumedUniqueReturnValue(
    const AAIsDead *LivenessAA) const {
  // If checkForallReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  //
  // Tri-state result: None   = no candidate seen yet,
  //                   nullptr = provably not unique,
  //                   V       = the assumed unique returned value.
  Optional<Value *> UniqueRV;

  std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> Pred =
      [&](Value &RV, const SmallPtrSetImpl<ReturnInst *> &RetInsts) -> bool {
    // If all ReturnInsts are dead, then ReturnValue is dead as well
    // and can be ignored.
    if (LivenessAA &&
        !LivenessAA->isLiveInstSet(RetInsts.begin(), RetInsts.end()))
      return true;

    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  // A failed traversal (invalid state) also means there is no unique value.
  if (!checkForallReturnedValues(Pred))
    UniqueRV = nullptr;

  return UniqueRV;
}
710 
711 bool AAReturnedValuesImpl::checkForallReturnedValues(
712     std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> &Pred)
713     const {
714   if (!isValidState())
715     return false;
716 
717   // Check all returned values but ignore call sites as long as we have not
718   // encountered an overdefined one during an update.
719   for (auto &It : ReturnedValues) {
720     Value *RV = It.first;
721     const SmallPtrSetImpl<ReturnInst *> &RetInsts = It.second;
722 
723     ImmutableCallSite ICS(RV);
724     if (ICS && !HasOverdefinedReturnedCalls)
725       continue;
726 
727     if (!Pred(*RV, RetInsts))
728       return false;
729   }
730 
731   return true;
732 }
733 
/// Fixpoint update step: try to resolve returned call sites into the values
/// their callees (transitively) return.
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {

  // Check if we know of any values returned by the associated function,
  // if not, we are done.
  if (getNumReturnValues() == 0) {
    indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  // Check if any of the returned values is a call site we can refine.
  // New values are staged in AddRVs so that ReturnedValues is not mutated
  // while we iterate over it.
  decltype(ReturnedValues) AddRVs;
  bool HasCallSite = false;

  // Keep track of any change to trigger updates on dependent attributes.
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto *LivenessAA = A.getAAFor<AAIsDead>(*this, getAnchorScope());

  // Look at all returned call sites.
  for (auto &It : ReturnedValues) {
    SmallPtrSet<ReturnInst *, 2> &ReturnInsts = It.second;
    Value *RV = It.first;

    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Potentially returned value " << *RV
                      << "\n");

    // Only call sites can change during an update, ignore the rest.
    CallSite RetCS(RV);
    if (!RetCS)
      continue;

    // For now, any call site we see will prevent us from directly fixing the
    // state. However, if the information on the callees is fixed, the call
    // sites will be removed and we will fix the information for this state.
    HasCallSite = true;

    // Ignore dead ReturnValues.
    if (LivenessAA &&
        !LivenessAA->isLiveInstSet(ReturnInsts.begin(), ReturnInsts.end())) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] all returns are assumed dead, "
                           "skip it for now\n");
      continue;
    }

    // Try to find a assumed unique return value for the called function.
    auto *RetCSAA = A.getAAFor<AAReturnedValuesImpl>(*this, *RV);
    if (!RetCSAA) {
      // No state for the callee: the returned call is overdefined. Record the
      // transition only once so dependent attributes are re-triggered.
      if (!HasOverdefinedReturnedCalls)
        Changed = ChangeStatus::CHANGED;
      HasOverdefinedReturnedCalls = true;
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned call site (" << *RV
                        << ") with " << (RetCSAA ? "invalid" : "no")
                        << " associated state\n");
      continue;
    }

    // Liveness is queried in the callee's scope, not ours.
    auto *LivenessCSAA = A.getAAFor<AAIsDead>(*this, RetCSAA->getAnchorScope());

    // Try to find a assumed unique return value for the called function.
    Optional<Value *> AssumedUniqueRV =
        RetCSAA->getAssumedUniqueReturnValue(LivenessCSAA);

    // If no assumed unique return value was found due to the lack of
    // candidates, we may need to resolve more calls (through more update
    // iterations) or the called function will not return. Either way, we simply
    // stick with the call sites as return values. Because there were not
    // multiple possibilities, we do not treat it as overdefined.
    if (!AssumedUniqueRV.hasValue())
      continue;

    // If multiple, non-refinable values were found, there cannot be a unique
    // return value for the called function. The returned call is overdefined!
    if (!AssumedUniqueRV.getValue()) {
      if (!HasOverdefinedReturnedCalls)
        Changed = ChangeStatus::CHANGED;
      HasOverdefinedReturnedCalls = true;
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned call site has multiple "
                           "potentially returned values\n");
      continue;
    }

    LLVM_DEBUG({
      bool UniqueRVIsKnown = RetCSAA->isAtFixpoint();
      dbgs() << "[AAReturnedValues] Returned call site "
             << (UniqueRVIsKnown ? "known" : "assumed")
             << " unique return value: " << *AssumedUniqueRV << "\n";
    });

    // The assumed unique return value.
    Value *AssumedRetVal = AssumedUniqueRV.getValue();

    // If the assumed unique return value is an argument, lookup the matching
    // call site operand and recursively collect new returned values.
    // If it is not an argument, it is just put into the set of returned values
    // as we would have already looked through casts, phis, and similar values.
    if (Argument *AssumedRetArg = dyn_cast<Argument>(AssumedRetVal))
      collectValuesRecursively(A,
                               RetCS.getArgOperand(AssumedRetArg->getArgNo()),
                               ReturnInsts, AddRVs);
    else
      AddRVs[AssumedRetVal].insert(ReturnInsts.begin(), ReturnInsts.end());
  }

  // Merge the staged values into ReturnedValues, reporting a change for every
  // genuinely new (value, return-instruction) pair.
  for (auto &It : AddRVs) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI).second) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = ChangeStatus::CHANGED;
      }
  }

  // If there is no call site in the returned values we are done.
  if (!HasCallSite) {
    indicateOptimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  return Changed;
}
856 
857 /// ------------------------ NoSync Function Attribute -------------------------
858 
/// An abstract attribute deducing the "nosync" function attribute: the
/// function is assumed not to synchronize with other threads (no non-relaxed
/// atomics, no volatile accesses, no synchronizing calls).
struct AANoSyncFunction : AANoSync, BooleanState {

  AANoSyncFunction(Function &F, InformationCache &InfoCache)
      : AANoSync(F, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_FUNCTION; }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AANoSync::isAssumedNoSync()
  bool isAssumedNoSync() const override { return getAssumed(); }

  /// See AANoSync::isKnownNoSync()
  bool isKnownNoSync() const override { return getKnown(); }

  /// Helper function used to determine whether an instruction is a
  /// non-relaxed atomic, i.e., an atomic instruction whose ordering is
  /// stronger than unordered or monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  /// Must not be called on call sites (see the assertion in the definition).
  static bool isVolatile(Instruction *I);

  /// Helper function used to check whether an intrinsic is nosync (currently
  /// only the mem* intrinsics: memcpy, memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};
898 
899 bool AANoSyncFunction::isNonRelaxedAtomic(Instruction *I) {
900   if (!I->isAtomic())
901     return false;
902 
903   AtomicOrdering Ordering;
904   switch (I->getOpcode()) {
905   case Instruction::AtomicRMW:
906     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
907     break;
908   case Instruction::Store:
909     Ordering = cast<StoreInst>(I)->getOrdering();
910     break;
911   case Instruction::Load:
912     Ordering = cast<LoadInst>(I)->getOrdering();
913     break;
914   case Instruction::Fence: {
915     auto *FI = cast<FenceInst>(I);
916     if (FI->getSyncScopeID() == SyncScope::SingleThread)
917       return false;
918     Ordering = FI->getOrdering();
919     break;
920   }
921   case Instruction::AtomicCmpXchg: {
922     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
923     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
924     // Only if both are relaxed, than it can be treated as relaxed.
925     // Otherwise it is non-relaxed.
926     if (Success != AtomicOrdering::Unordered &&
927         Success != AtomicOrdering::Monotonic)
928       return true;
929     if (Failure != AtomicOrdering::Unordered &&
930         Failure != AtomicOrdering::Monotonic)
931       return true;
932     return false;
933   }
934   default:
935     llvm_unreachable(
936         "New atomic operations need to be known in the attributor.");
937   }
938 
939   // Relaxed.
940   if (Ordering == AtomicOrdering::Unordered ||
941       Ordering == AtomicOrdering::Monotonic)
942     return false;
943   return true;
944 }
945 
946 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
947 /// FIXME: We should ipmrove the handling of intrinsics.
948 bool AANoSyncFunction::isNoSyncIntrinsic(Instruction *I) {
949   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
950     switch (II->getIntrinsicID()) {
951     /// Element wise atomic memory intrinsics are can only be unordered,
952     /// therefore nosync.
953     case Intrinsic::memset_element_unordered_atomic:
954     case Intrinsic::memmove_element_unordered_atomic:
955     case Intrinsic::memcpy_element_unordered_atomic:
956       return true;
957     case Intrinsic::memset:
958     case Intrinsic::memmove:
959     case Intrinsic::memcpy:
960       if (!cast<MemIntrinsic>(II)->isVolatile())
961         return true;
962       return false;
963     default:
964       return false;
965     }
966   }
967   return false;
968 }
969 
970 bool AANoSyncFunction::isVolatile(Instruction *I) {
971   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
972          "Calls should not be checked here");
973 
974   switch (I->getOpcode()) {
975   case Instruction::AtomicRMW:
976     return cast<AtomicRMWInst>(I)->isVolatile();
977   case Instruction::Store:
978     return cast<StoreInst>(I)->isVolatile();
979   case Instruction::Load:
980     return cast<LoadInst>(I)->isVolatile();
981   case Instruction::AtomicCmpXchg:
982     return cast<AtomicCmpXchgInst>(I)->isVolatile();
983   default:
984     return false;
985   }
986 }
987 
ChangeStatus AANoSyncFunction::updateImpl(Attributor &A) {
  Function &F = getAnchorScope();

  // Liveness information, if available, lets us skip assumed dead code.
  auto *LivenessAA = A.getAAFor<AAIsDead>(*this, F);

  /// We are looking for volatile instructions or Non-Relaxed atomics.
  /// FIXME: We should improve the handling of intrinsics.
  for (Instruction *I : InfoCache.getReadOrWriteInstsForFunction(F)) {
    // Skip assumed dead instructions.
    if (LivenessAA && LivenessAA->isAssumedDead(I))
      continue;

    ImmutableCallSite ICS(I);
    auto *NoSyncAA = A.getAAFor<AANoSyncFunction>(*this, *I);

    // Known nosync intrinsics (e.g., non-volatile mem* calls) are fine.
    if (isa<IntrinsicInst>(I) && isNoSyncIntrinsic(I))
      continue;

    // A call site may synchronize unless the callee is assumed nosync or the
    // call site itself carries the nosync attribute.
    if (ICS && (!NoSyncAA || !NoSyncAA->isAssumedNoSync()) &&
        !ICS.hasFnAttr(Attribute::NoSync))
      return indicatePessimisticFixpoint();

    // Call sites that passed the check above are handled; only non-call
    // read/write instructions remain to be inspected below.
    if (ICS)
      continue;

    // Non-call accesses only synchronize if they are volatile or
    // non-relaxed atomic.
    if (!isVolatile(I) && !isNonRelaxedAtomic(I))
      continue;

    return indicatePessimisticFixpoint();
  }

  // Now inspect call-like instructions that do not access memory, as
  // convergent operations can still synchronize.
  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  auto Opcodes = {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
                  (unsigned)Instruction::Call};

  for (unsigned Opcode : Opcodes) {
    for (Instruction *I : OpcodeInstMap[Opcode]) {
      // Skip assumed dead instructions.
      if (LivenessAA && LivenessAA->isAssumedDead(I))
        continue;
      // At this point we handled all read/write effects and they are all
      // nosync, so they can be skipped.
      if (I->mayReadOrWriteMemory())
        continue;

      ImmutableCallSite ICS(I);

      // non-convergent and readnone imply nosync.
      if (!ICS.isConvergent())
        continue;

      return indicatePessimisticFixpoint();
    }
  }

  return ChangeStatus::UNCHANGED;
}
1045 
1046 /// ------------------------ No-Free Attributes ----------------------------
1047 
/// An abstract attribute deducing the "nofree" function attribute: the
/// function is assumed not to free memory, directly or via its callees.
struct AANoFreeFunction : AbstractAttribute, BooleanState {

  /// See AbstractAttribute::AbstractAttribute(...).
  AANoFreeFunction(Function &F, InformationCache &InfoCache)
      : AbstractAttribute(F, InfoCache) {}

  /// See AbstractAttribute::getState()
  ///{
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  ///}

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_FUNCTION; }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getAttrKind().
  Attribute::AttrKind getAttrKind() const override { return ID; }

  /// Return true if "nofree" is assumed.
  bool isAssumedNoFree() const { return getAssumed(); }

  /// Return true if "nofree" is known.
  bool isKnownNoFree() const { return getKnown(); }

  /// The identifier used by the Attributor for this class of attributes.
  static constexpr Attribute::AttrKind ID = Attribute::NoFree;
};
1083 
1084 ChangeStatus AANoFreeFunction::updateImpl(Attributor &A) {
1085   Function &F = getAnchorScope();
1086 
1087   auto *LivenessAA = A.getAAFor<AAIsDead>(*this, F);
1088 
1089   // The map from instruction opcodes to those instructions in the function.
1090   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
1091 
1092   for (unsigned Opcode :
1093        {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
1094         (unsigned)Instruction::Call}) {
1095     for (Instruction *I : OpcodeInstMap[Opcode]) {
1096       // Skip assumed dead instructions.
1097       if (LivenessAA && LivenessAA->isAssumedDead(I))
1098         continue;
1099       auto ICS = ImmutableCallSite(I);
1100       auto *NoFreeAA = A.getAAFor<AANoFreeFunction>(*this, *I);
1101 
1102       if ((!NoFreeAA || !NoFreeAA->isAssumedNoFree()) &&
1103           !ICS.hasFnAttr(Attribute::NoFree))
1104         return indicatePessimisticFixpoint();
1105     }
1106   }
1107   return ChangeStatus::UNCHANGED;
1108 }
1109 
1110 /// ------------------------ NonNull Argument Attribute ------------------------
/// Shared implementation for all "nonnull" abstract attributes.
struct AANonNullImpl : AANonNull, BooleanState {

  AANonNullImpl(Value &V, InformationCache &InfoCache)
      : AANonNull(V, InfoCache) {}

  AANonNullImpl(Value *AssociatedVal, Value &AnchoredValue,
                InformationCache &InfoCache)
      : AANonNull(AssociatedVal, AnchoredValue, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AANonNull::isAssumedNonNull().
  bool isAssumedNonNull() const override { return getAssumed(); }

  /// See AANonNull::isKnownNonNull().
  bool isKnownNonNull() const override { return getKnown(); }

  /// Generate a predicate that checks if a given value is assumed nonnull.
  /// The generated function returns true if a value satisfies any of the
  /// following conditions:
  /// (i) The value is known nonZero (= nonnull) via ValueTracking.
  /// (ii) The value is associated with an AANonNull whose isAssumedNonNull()
  ///      is true, or it is a call site with a nonnull return attribute.
  std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)>
  generatePredicate(Attributor &);
};
1146 
1147 std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)>
1148 AANonNullImpl::generatePredicate(Attributor &A) {
1149   // FIXME: The `AAReturnedValues` should provide the predicate with the
1150   // `ReturnInst` vector as well such that we can use the control flow sensitive
1151   // version of `isKnownNonZero`. This should fix `test11` in
1152   // `test/Transforms/FunctionAttrs/nonnull.ll`
1153 
1154   std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> Pred =
1155       [&](Value &RV, const SmallPtrSetImpl<ReturnInst *> &RetInsts) -> bool {
1156     Function &F = getAnchorScope();
1157 
1158     if (isKnownNonZero(&RV, F.getParent()->getDataLayout()))
1159       return true;
1160 
1161     auto *NonNullAA = A.getAAFor<AANonNull>(*this, RV);
1162 
1163     ImmutableCallSite ICS(&RV);
1164 
1165     if ((!NonNullAA || !NonNullAA->isAssumedNonNull()) &&
1166         (!ICS || !ICS.hasRetAttr(Attribute::NonNull)))
1167       return false;
1168 
1169     return true;
1170   };
1171 
1172   return Pred;
1173 }
1174 
/// NonNull attribute for function return value.
struct AANonNullReturned : AANonNullImpl {

  AANonNullReturned(Function &F, InformationCache &InfoCache)
      : AANonNullImpl(F, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_RETURNED; }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Function &F = getAnchorScope();

    // If the return position already carries the nonnull or dereferenceable
    // attribute, take the optimistic fixpoint right away.
    if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                       Attribute::NonNull) ||
        F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                       Attribute::Dereferenceable))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};
1199 
1200 ChangeStatus AANonNullReturned::updateImpl(Attributor &A) {
1201   Function &F = getAnchorScope();
1202 
1203   auto *AARetVal = A.getAAFor<AAReturnedValues>(*this, F);
1204   if (!AARetVal)
1205     return indicatePessimisticFixpoint();
1206 
1207   std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> Pred =
1208       this->generatePredicate(A);
1209 
1210   if (!AARetVal->checkForallReturnedValues(Pred))
1211     return indicatePessimisticFixpoint();
1212   return ChangeStatus::UNCHANGED;
1213 }
1214 
/// NonNull attribute for function argument.
struct AANonNullArgument : AANonNullImpl {

  AANonNullArgument(Argument &A, InformationCache &InfoCache)
      : AANonNullImpl(A, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_ARGUMENT; }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If the argument is already marked nonnull there is nothing to deduce.
    Argument *Arg = cast<Argument>(getAssociatedValue());
    if (Arg->hasNonNullAttr())
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};
1234 
1235 /// NonNull attribute for a call site argument.
1236 struct AANonNullCallSiteArgument : AANonNullImpl {
1237 
1238   /// See AANonNullImpl::AANonNullImpl(...).
1239   AANonNullCallSiteArgument(CallSite CS, unsigned ArgNo,
1240                             InformationCache &InfoCache)
1241       : AANonNullImpl(CS.getArgOperand(ArgNo), *CS.getInstruction(), InfoCache),
1242         ArgNo(ArgNo) {}
1243 
1244   /// See AbstractAttribute::initialize(...).
1245   void initialize(Attributor &A) override {
1246     CallSite CS(&getAnchoredValue());
1247     if (CS.paramHasAttr(ArgNo, getAttrKind()) ||
1248         CS.paramHasAttr(ArgNo, Attribute::Dereferenceable) ||
1249         isKnownNonZero(getAssociatedValue(),
1250                        getAnchorScope().getParent()->getDataLayout()))
1251       indicateOptimisticFixpoint();
1252   }
1253 
1254   /// See AbstractAttribute::updateImpl(Attributor &A).
1255   ChangeStatus updateImpl(Attributor &A) override;
1256 
1257   /// See AbstractAttribute::getManifestPosition().
1258   ManifestPosition getManifestPosition() const override {
1259     return MP_CALL_SITE_ARGUMENT;
1260   };
1261 
1262   // Return argument index of associated value.
1263   int getArgNo() const { return ArgNo; }
1264 
1265 private:
1266   unsigned ArgNo;
1267 };
ChangeStatus AANonNullArgument::updateImpl(Attributor &A) {
  Function &F = getAnchorScope();
  Argument &Arg = cast<Argument>(getAnchoredValue());

  unsigned ArgNo = Arg.getArgNo();

  // Callback function that checks whether the argument is nonnull at one
  // call site; the argument is nonnull only if this holds for all call sites.
  std::function<bool(CallSite)> CallSiteCheck = [&](CallSite CS) {
    assert(CS && "Sanity check: Call site was not initialized properly!");

    auto *NonNullAA = A.getAAFor<AANonNull>(*this, *CS.getInstruction(), ArgNo);

    // Check that NonNullAA is AANonNullCallSiteArgument.
    if (NonNullAA) {
      ImmutableCallSite ICS(&NonNullAA->getAnchoredValue());
      if (ICS && CS.getInstruction() == ICS.getInstruction())
        return NonNullAA->isAssumedNonNull();
      return false;
    }

    // No abstract attribute available: fall back to a nonnull call site
    // attribute ...
    if (CS.paramHasAttr(ArgNo, Attribute::NonNull))
      return true;

    // ... or to ValueTracking on the passed operand.
    Value *V = CS.getArgOperand(ArgNo);
    if (isKnownNonZero(V, getAnchorScope().getParent()->getDataLayout()))
      return true;

    return false;
  };
  // Ask the Attributor to visit all (known) call sites and require the
  // property at each of them.
  if (!A.checkForAllCallSites(F, CallSiteCheck, true, *this))
    return indicatePessimisticFixpoint();
  return ChangeStatus::UNCHANGED;
}
1301 
1302 ChangeStatus AANonNullCallSiteArgument::updateImpl(Attributor &A) {
1303   // NOTE: Never look at the argument of the callee in this method.
1304   //       If we do this, "nonnull" is always deduced because of the assumption.
1305 
1306   Value &V = *getAssociatedValue();
1307 
1308   auto *NonNullAA = A.getAAFor<AANonNull>(*this, V);
1309 
1310   if (!NonNullAA || !NonNullAA->isAssumedNonNull())
1311     return indicatePessimisticFixpoint();
1312 
1313   return ChangeStatus::UNCHANGED;
1314 }
1315 
1316 /// ------------------------ Will-Return Attributes ----------------------------
1317 
/// Shared implementation for all "willreturn" abstract attributes.
struct AAWillReturnImpl : public AAWillReturn, BooleanState {

  /// See AbstractAttribute::AbstractAttribute(...).
  AAWillReturnImpl(Function &F, InformationCache &InfoCache)
      : AAWillReturn(F, InfoCache) {}

  /// See AAWillReturn::isKnownWillReturn().
  bool isKnownWillReturn() const override { return getKnown(); }

  /// See AAWillReturn::isAssumedWillReturn().
  bool isAssumedWillReturn() const override { return getAssumed(); }

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};
1341 
/// The "willreturn" attribute deduction for a function.
struct AAWillReturnFunction final : AAWillReturnImpl {

  /// See AbstractAttribute::AbstractAttribute(...).
  AAWillReturnFunction(Function &F, InformationCache &InfoCache)
      : AAWillReturnImpl(F, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_FUNCTION; }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override;

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};
1357 
1358 // Helper function that checks whether a function has any cycle.
1359 // TODO: Replace with more efficent code
1360 bool containsCycle(Function &F) {
1361   SmallPtrSet<BasicBlock *, 32> Visited;
1362 
1363   // Traverse BB by dfs and check whether successor is already visited.
1364   for (BasicBlock *BB : depth_first(&F)) {
1365     Visited.insert(BB);
1366     for (auto *SuccBB : successors(BB)) {
1367       if (Visited.count(SuccBB))
1368         return true;
1369     }
1370   }
1371   return false;
1372 }
1373 
// Helper function that checks whether the function has a loop which might
// become an endless loop.
// FIXME: Any cycle is regarded as endless loop for now.
//        We have to allow some patterns.
bool containsPossiblyEndlessLoop(Function &F) { return containsCycle(F); }
1379 
void AAWillReturnFunction::initialize(Attributor &A) {
  Function &F = getAnchorScope();

  // A function containing a potentially endless loop cannot be assumed to
  // return, so go to the pessimistic fixpoint right away.
  if (containsPossiblyEndlessLoop(F))
    indicatePessimisticFixpoint();
}
1386 
ChangeStatus AAWillReturnFunction::updateImpl(Attributor &A) {
  Function &F = getAnchorScope();

  // The map from instruction opcodes to those instructions in the function.
  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);

  // Liveness information, if available, lets us skip assumed dead call sites.
  auto *LivenessAA = A.getAAFor<AAIsDead>(*this, F);

  for (unsigned Opcode :
       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call}) {
    for (Instruction *I : OpcodeInstMap[Opcode]) {
      // Skip assumed dead instructions.
      if (LivenessAA && LivenessAA->isAssumedDead(I))
        continue;

      auto ICS = ImmutableCallSite(I);

      // Call sites already marked willreturn are fine.
      if (ICS.hasFnAttr(Attribute::WillReturn))
        continue;

      // Otherwise, the callee must be assumed to return.
      auto *WillReturnAA = A.getAAFor<AAWillReturn>(*this, *I);
      if (!WillReturnAA || !WillReturnAA->isAssumedWillReturn())
        return indicatePessimisticFixpoint();

      auto *NoRecurseAA = A.getAAFor<AANoRecurse>(*this, *I);

      // FIXME: (i) Prohibit any recursion for now.
      //        (ii) AANoRecurse isn't implemented yet so currently any call is
      //        regarded as having recursion.
      //       Code below should be
      //       if ((!NoRecurseAA || !NoRecurseAA->isAssumedNoRecurse()) &&
      if (!NoRecurseAA && !ICS.hasFnAttr(Attribute::NoRecurse))
        return indicatePessimisticFixpoint();
    }
  }

  return ChangeStatus::UNCHANGED;
}
1426 
1427 /// ------------------------ NoAlias Argument Attribute ------------------------
1428 
/// Shared implementation for all "noalias" abstract attributes.
struct AANoAliasImpl : AANoAlias, BooleanState {

  AANoAliasImpl(Value &V, InformationCache &InfoCache)
      : AANoAlias(V, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }

  /// See AANoAlias::isAssumedNoAlias().
  bool isAssumedNoAlias() const override { return getAssumed(); }

  /// See AANoAlias::isKnownNoAlias().
  bool isKnownNoAlias() const override { return getKnown(); }
};
1450 
1451 /// NoAlias attribute for function return value.
1452 struct AANoAliasReturned : AANoAliasImpl {
1453 
1454   AANoAliasReturned(Function &F, InformationCache &InfoCache)
1455       : AANoAliasImpl(F, InfoCache) {}
1456 
1457   /// See AbstractAttribute::getManifestPosition().
1458   virtual ManifestPosition getManifestPosition() const override {
1459     return MP_RETURNED;
1460   }
1461 
1462   /// See AbstractAttriubute::initialize(...).
1463   void initialize(Attributor &A) override {
1464     Function &F = getAnchorScope();
1465 
1466     // Already noalias.
1467     if (F.returnDoesNotAlias()) {
1468       indicateOptimisticFixpoint();
1469       return;
1470     }
1471   }
1472 
1473   /// See AbstractAttribute::updateImpl(...).
1474   virtual ChangeStatus updateImpl(Attributor &A) override;
1475 };
1476 
1477 ChangeStatus AANoAliasReturned::updateImpl(Attributor &A) {
1478   Function &F = getAnchorScope();
1479 
1480   auto *AARetValImpl = A.getAAFor<AAReturnedValuesImpl>(*this, F);
1481   if (!AARetValImpl)
1482     return indicatePessimisticFixpoint();
1483 
1484   std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> Pred =
1485       [&](Value &RV, const SmallPtrSetImpl<ReturnInst *> &RetInsts) -> bool {
1486     if (Constant *C = dyn_cast<Constant>(&RV))
1487       if (C->isNullValue() || isa<UndefValue>(C))
1488         return true;
1489 
1490     /// For now, we can only deduce noalias if we have call sites.
1491     /// FIXME: add more support.
1492     ImmutableCallSite ICS(&RV);
1493     if (!ICS)
1494       return false;
1495 
1496     auto *NoAliasAA = A.getAAFor<AANoAlias>(*this, RV);
1497 
1498     if (!ICS.returnDoesNotAlias() &&
1499         (!NoAliasAA || !NoAliasAA->isAssumedNoAlias()))
1500       return false;
1501 
1502     /// FIXME: We can improve capture check in two ways:
1503     /// 1. Use the AANoCapture facilities.
1504     /// 2. Use the location of return insts for escape queries.
1505     if (PointerMayBeCaptured(&RV, /* ReturnCaptures */ false,
1506                              /* StoreCaptures */ true))
1507       return false;
1508 
1509     return true;
1510   };
1511 
1512   if (!AARetValImpl->checkForallReturnedValues(Pred))
1513     return indicatePessimisticFixpoint();
1514 
1515   return ChangeStatus::UNCHANGED;
1516 }
1517 
1518 /// -------------------AAIsDead Function Attribute-----------------------
1519 
/// An abstract attribute tracking which blocks and instructions of a function
/// are assumed dead. Liveness is explored forward from the entry instruction;
/// everything behind an (assumed) noreturn call site is considered dead.
struct AAIsDeadFunction : AAIsDead, BooleanState {

  AAIsDeadFunction(Function &F, InformationCache &InfoCache)
      : AAIsDead(F, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_FUNCTION; }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Function &F = getAnchorScope();

    // Seed the exploration with the entry instruction. Note that
    // findNextNoReturn(...) grows ToBeExploredPaths as it discovers new
    // reachable blocks, hence the index-based loop below.
    ToBeExploredPaths.insert(&(F.getEntryBlock().front()));
    AssumedLiveBlocks.insert(&(F.getEntryBlock()));
    for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[i]))
        NoReturnCalls.insert(NextNoReturnI);
  }

  /// Find the next assumed noreturn instruction in the block of \p I starting
  /// from, thus including, \p I.
  ///
  /// The caller is responsible to monitor the ToBeExploredPaths set as new
  /// instructions discovered in other basic block will be placed in there.
  ///
  /// \returns The next assumed noreturn instructions in the block of \p I
  ///          starting from, thus including, \p I.
  const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "LiveBBs(" + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAnchorScope().size()) + ")";
  }

  /// See AbstractAttribute::manifest(...).
  ///
  /// Places an `unreachable` after each (assumed) noreturn call site so the
  /// dead code behind it is cut off.
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;

    for (const Instruction *NRC : NoReturnCalls) {
      Instruction *I = const_cast<Instruction *>(NRC);
      BasicBlock *BB = I->getParent();
      Instruction *SplitPos = I->getNextNode();

      if (auto *II = dyn_cast<InvokeInst>(I)) {
        /// Invoke is replaced with a call and unreachable is placed after it if
        /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
        /// and only place an unreachable in the normal successor.
        if (Function *Callee = II->getCalledFunction()) {
          auto *AANoUnw = A.getAAFor<AANoUnwind>(*this, *Callee);
          if (Callee->hasFnAttribute(Attribute::NoUnwind) ||
              (AANoUnw && AANoUnw->isAssumedNoUnwind())) {
            LLVM_DEBUG(dbgs() << "[AAIsDead] Replace invoke with call inst\n");
            changeToCall(II);
            changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
            continue;
          }
        }

        BB = II->getNormalDest();
        SplitPos = &BB->front();
      }

      SplitBlock(BB, SplitPos);
      changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
      HasChanged = ChangeStatus::CHANGED;
    }

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == &getAnchorScope() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumed(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == &getAnchorScope() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // In a live block, the instruction is dead only if it comes after a
    // noreturn call in the same block.
    return isAfterNoReturn(I);
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Check if instruction is after noreturn call, in other words, assumed dead.
  bool isAfterNoReturn(const Instruction *I) const;

  /// Collection of to be explored paths.
  SmallSetVector<const Instruction *, 8> ToBeExploredPaths;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;

  /// Collection of calls with noreturn attribute, assumed or known.
  SmallSetVector<const Instruction *, 4> NoReturnCalls;
};
1651 
1652 bool AAIsDeadFunction::isAfterNoReturn(const Instruction *I) const {
1653   const Instruction *PrevI = I->getPrevNode();
1654   while (PrevI) {
1655     if (NoReturnCalls.count(PrevI))
1656       return true;
1657     PrevI = PrevI->getPrevNode();
1658   }
1659   return false;
1660 }
1661 
const Instruction *AAIsDeadFunction::findNextNoReturn(Attributor &A,
                                                      const Instruction *I) {
  const BasicBlock *BB = I->getParent();

  // TODO: We should have a function that determines if an "edge" is dead.
  //       Edges could be from an instruction to the next or from a terminator
  //       to the successor. For now, we need to special case the unwind block
  //       of InvokeInst below.

  // Walk forward through the block and stop at the first (assumed) noreturn
  // call site.
  while (I) {
    ImmutableCallSite ICS(I);

    if (ICS) {
      // Regardless of the no-return property of an invoke instruction we only
      // learn that the regular successor is not reachable through this
      // instruction but the unwind block might still be.
      if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
        // Use nounwind to justify the unwind block is dead as well.
        auto *AANoUnw = A.getAAFor<AANoUnwind>(*this, *Invoke);
        if (!AANoUnw || !AANoUnw->isAssumedNoUnwind()) {
          AssumedLiveBlocks.insert(Invoke->getUnwindDest());
          ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
        }
      }

      auto *NoReturnAA = A.getAAFor<AANoReturn>(*this, *I);
      if (ICS.hasFnAttr(Attribute::NoReturn) ||
          (NoReturnAA && NoReturnAA->isAssumedNoReturn()))
        return I;
    }

    I = I->getNextNode();
  }

  // The terminator was reached, so all successors are (assumed) live; queue
  // them for exploration.
  for (const BasicBlock *SuccBB : successors(BB)) {
    AssumedLiveBlocks.insert(SuccBB);
    ToBeExploredPaths.insert(&SuccBB->front());
  }

  // No noreturn instruction found.
  return nullptr;
}
1705 
1706 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
1707   // Temporary collection to iterate over existing noreturn instructions. This
1708   // will alow easier modification of NoReturnCalls collection
1709   SmallVector<const Instruction *, 8> NoReturnChanged;
1710   ChangeStatus Status = ChangeStatus::UNCHANGED;
1711 
1712   for (const Instruction *I : NoReturnCalls)
1713     NoReturnChanged.push_back(I);
1714 
1715   for (const Instruction *I : NoReturnChanged) {
1716     size_t Size = ToBeExploredPaths.size();
1717 
1718     const Instruction *NextNoReturnI = findNextNoReturn(A, I);
1719     if (NextNoReturnI != I) {
1720       Status = ChangeStatus::CHANGED;
1721       NoReturnCalls.remove(I);
1722       if (NextNoReturnI)
1723         NoReturnCalls.insert(NextNoReturnI);
1724     }
1725 
1726     // Explore new paths.
1727     while (Size != ToBeExploredPaths.size()) {
1728       Status = ChangeStatus::CHANGED;
1729       if (const Instruction *NextNoReturnI =
1730               findNextNoReturn(A, ToBeExploredPaths[Size++]))
1731         NoReturnCalls.insert(NextNoReturnI);
1732     }
1733   }
1734 
1735   LLVM_DEBUG(
1736       dbgs() << "[AAIsDead] AssumedLiveBlocks: " << AssumedLiveBlocks.size()
1737              << " Total number of blocks: " << getAnchorScope().size() << "\n");
1738 
1739   return Status;
1740 }
1741 
1742 /// -------------------- Dereferenceable Argument Attribute --------------------
1743 
1744 struct DerefState : AbstractState {
1745 
1746   /// State representing for dereferenceable bytes.
1747   IntegerState DerefBytesState;
1748 
1749   /// State representing that whether the value is nonnull or global.
1750   IntegerState NonNullGlobalState;
1751 
1752   /// Bits encoding for NonNullGlobalState.
1753   enum {
1754     DEREF_NONNULL = 1 << 0,
1755     DEREF_GLOBAL = 1 << 1,
1756   };
1757 
1758   /// See AbstractState::isValidState()
1759   bool isValidState() const override { return DerefBytesState.isValidState(); }
1760 
1761   /// See AbstractState::isAtFixpoint()
1762   bool isAtFixpoint() const override {
1763     return !isValidState() || (DerefBytesState.isAtFixpoint() &&
1764                                NonNullGlobalState.isAtFixpoint());
1765   }
1766 
1767   /// See AbstractState::indicateOptimisticFixpoint(...)
1768   ChangeStatus indicateOptimisticFixpoint() override {
1769     DerefBytesState.indicateOptimisticFixpoint();
1770     NonNullGlobalState.indicateOptimisticFixpoint();
1771     return ChangeStatus::UNCHANGED;
1772   }
1773 
1774   /// See AbstractState::indicatePessimisticFixpoint(...)
1775   ChangeStatus indicatePessimisticFixpoint() override {
1776     DerefBytesState.indicatePessimisticFixpoint();
1777     NonNullGlobalState.indicatePessimisticFixpoint();
1778     return ChangeStatus::CHANGED;
1779   }
1780 
1781   /// Update known dereferenceable bytes.
1782   void takeKnownDerefBytesMaximum(uint64_t Bytes) {
1783     DerefBytesState.takeKnownMaximum(Bytes);
1784   }
1785 
1786   /// Update assumed dereferenceable bytes.
1787   void takeAssumedDerefBytesMinimum(uint64_t Bytes) {
1788     DerefBytesState.takeAssumedMinimum(Bytes);
1789   }
1790 
1791   /// Update assumed NonNullGlobalState
1792   void updateAssumedNonNullGlobalState(bool IsNonNull, bool IsGlobal) {
1793     if (!IsNonNull)
1794       NonNullGlobalState.removeAssumedBits(DEREF_NONNULL);
1795     if (!IsGlobal)
1796       NonNullGlobalState.removeAssumedBits(DEREF_GLOBAL);
1797   }
1798 
1799   /// Equality for DerefState.
1800   bool operator==(const DerefState &R) {
1801     return this->DerefBytesState == R.DerefBytesState &&
1802            this->NonNullGlobalState == R.NonNullGlobalState;
1803   }
1804 };
/// Shared implementation for all "dereferenceable" abstract attributes. It
/// couples the AADereferenceable interface with the DerefState that tracks
/// the byte count and the nonnull/global bits.
struct AADereferenceableImpl : AADereferenceable, DerefState {

  AADereferenceableImpl(Value &V, InformationCache &InfoCache)
      : AADereferenceable(V, InfoCache) {}

  AADereferenceableImpl(Value *AssociatedVal, Value &AnchoredValue,
                        InformationCache &InfoCache)
      : AADereferenceable(AssociatedVal, AnchoredValue, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AADereferenceable::getAssumedDereferenceableBytes().
  uint32_t getAssumedDereferenceableBytes() const override {
    return DerefBytesState.getAssumed();
  }

  /// See AADereferenceable::getKnownDereferenceableBytes().
  uint32_t getKnownDereferenceableBytes() const override {
    return DerefBytesState.getKnown();
  }

  // Helper function for syncing nonnull state: mirror the state of
  // \p NonNullAA into the DEREF_NONNULL bit. A missing attribute clears the
  // assumed nonnull bit.
  void syncNonNull(const AANonNull *NonNullAA) {
    if (!NonNullAA) {
      NonNullGlobalState.removeAssumedBits(DEREF_NONNULL);
      return;
    }

    if (NonNullAA->isKnownNonNull())
      NonNullGlobalState.addKnownBits(DEREF_NONNULL);

    if (!NonNullAA->isAssumedNonNull())
      NonNullGlobalState.removeAssumedBits(DEREF_NONNULL);
  }

  /// See AADereferenceable::isAssumedGlobal().
  bool isAssumedGlobal() const override {
    return NonNullGlobalState.isAssumed(DEREF_GLOBAL);
  }

  /// See AADereferenceable::isKnownGlobal().
  bool isKnownGlobal() const override {
    return NonNullGlobalState.isKnown(DEREF_GLOBAL);
  }

  /// See AADereferenceable::isAssumedNonNull().
  bool isAssumedNonNull() const override {
    return NonNullGlobalState.isAssumed(DEREF_NONNULL);
  }

  /// See AADereferenceable::isKnownNonNull().
  bool isKnownNonNull() const override {
    return NonNullGlobalState.isKnown(DEREF_NONNULL);
  }

  /// See AbstractAttribute::getDeducedAttributes().
  ///
  /// Emits dereferenceable(<n>) if the value is assumed nonnull and
  /// dereferenceable_or_null(<n>) otherwise.
  void getDeducedAttributes(SmallVectorImpl<Attribute> &Attrs) const override {
    LLVMContext &Ctx = AnchoredVal.getContext();

    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// Compute the assumed dereferenceable bytes for \p V, updating \p IsNonNull
  /// and \p IsGlobal along the way (defined out-of-line below).
  uint64_t computeAssumedDerefenceableBytes(Attributor &A, Value &V,
                                            bool &IsNonNull, bool &IsGlobal);

  /// See AbstractAttribute::initialize(...).
  ///
  /// Seed the known byte count from dereferenceable/dereferenceable_or_null
  /// attributes already present in the IR at this position.
  void initialize(Attributor &A) override {
    Function &F = getAnchorScope();
    unsigned AttrIdx =
        getAttrIndex(getManifestPosition(), getArgNo(getAnchoredValue()));

    for (Attribute::AttrKind AK :
         {Attribute::Dereferenceable, Attribute::DereferenceableOrNull})
      if (F.getAttributes().hasAttribute(AttrIdx, AK))
        takeKnownDerefBytesMaximum(F.getAttribute(AttrIdx, AK).getValueAsInt());
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
1900 
/// Dereferenceable attribute deduction for a function return value.
struct AADereferenceableReturned : AADereferenceableImpl {
  AADereferenceableReturned(Function &F, InformationCache &InfoCache)
      : AADereferenceableImpl(F, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_RETURNED; }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};
1911 
// Helper function that returns dereferenceable bytes derived from a base
// pointer: a (possibly) null base yields nothing, otherwise the bytes known
// for the base shrink by the accumulated offset, clamped at zero.
static uint64_t calcDifferenceIfBaseIsNonNull(int64_t DerefBytes,
                                              int64_t Offset, bool IsNonNull) {
  if (!IsNonNull)
    return 0;
  int64_t Remaining = DerefBytes - Offset;
  return Remaining > 0 ? static_cast<uint64_t>(Remaining) : 0;
}
1919 
uint64_t AADereferenceableImpl::computeAssumedDerefenceableBytes(
    Attributor &A, Value &V, bool &IsNonNull, bool &IsGlobal) {
  // TODO: Tracking the globally flag.
  IsGlobal = false;

  // First, we try to get information about V from Attributor.
  if (auto *DerefAA = A.getAAFor<AADereferenceable>(*this, V)) {
    IsNonNull &= DerefAA->isAssumedNonNull();
    return DerefAA->getAssumedDereferenceableBytes();
  }

  // Otherwise, we try to compute assumed bytes from base pointer.
  const DataLayout &DL = getAnchorScope().getParent()->getDataLayout();
  unsigned IdxWidth =
      DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
  APInt Offset(IdxWidth, 0);
  Value *Base = V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

  if (auto *BaseDerefAA = A.getAAFor<AADereferenceable>(*this, *Base)) {
    // NOTE(review): IsNonNull is kept only for a non-zero inbounds offset;
    // when the offset is zero V equals Base, so Base's assumed nonnull-ness
    // would seem usable here as well (as it is in the call just below) —
    // confirm the asymmetry is intentional.
    IsNonNull &= Offset != 0;
    return calcDifferenceIfBaseIsNonNull(
        BaseDerefAA->getAssumedDereferenceableBytes(), Offset.getSExtValue(),
        Offset != 0 || BaseDerefAA->isAssumedNonNull());
  }

  // Then, use IR information: if the base is dereferenceable, derive the byte
  // count from the pointee store size minus the accumulated offset. NonNull
  // is only justified when null is not a valid pointer in this address space.
  if (isDereferenceablePointer(Base, Base->getType(), DL))
    return calcDifferenceIfBaseIsNonNull(
        DL.getTypeStoreSize(Base->getType()->getPointerElementType()),
        Offset.getSExtValue(),
        !NullPointerIsDefined(&getAnchorScope(),
                              V.getType()->getPointerAddressSpace()));

  // Nothing is known: no bytes and no nonnull.
  IsNonNull = false;
  return 0;
}
1957 ChangeStatus AADereferenceableReturned::updateImpl(Attributor &A) {
1958   Function &F = getAnchorScope();
1959   auto BeforeState = static_cast<DerefState>(*this);
1960 
1961   syncNonNull(A.getAAFor<AANonNull>(*this, F));
1962 
1963   auto *AARetVal = A.getAAFor<AAReturnedValues>(*this, F);
1964   if (!AARetVal)
1965     return indicatePessimisticFixpoint();
1966 
1967   bool IsNonNull = isAssumedNonNull();
1968   bool IsGlobal = isAssumedGlobal();
1969 
1970   std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> Pred =
1971       [&](Value &RV, const SmallPtrSetImpl<ReturnInst *> &RetInsts) -> bool {
1972     takeAssumedDerefBytesMinimum(
1973         computeAssumedDerefenceableBytes(A, RV, IsNonNull, IsGlobal));
1974     return isValidState();
1975   };
1976 
1977   if (AARetVal->checkForallReturnedValues(Pred)) {
1978     updateAssumedNonNullGlobalState(IsNonNull, IsGlobal);
1979     return BeforeState == static_cast<DerefState>(*this)
1980                ? ChangeStatus::UNCHANGED
1981                : ChangeStatus::CHANGED;
1982   }
1983   return indicatePessimisticFixpoint();
1984 }
1985 
/// Dereferenceable attribute deduction for a function argument.
struct AADereferenceableArgument : AADereferenceableImpl {
  AADereferenceableArgument(Argument &A, InformationCache &InfoCache)
      : AADereferenceableImpl(A, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override { return MP_ARGUMENT; }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};
1996 
1997 ChangeStatus AADereferenceableArgument::updateImpl(Attributor &A) {
1998   Function &F = getAnchorScope();
1999   Argument &Arg = cast<Argument>(getAnchoredValue());
2000 
2001   auto BeforeState = static_cast<DerefState>(*this);
2002 
2003   unsigned ArgNo = Arg.getArgNo();
2004 
2005   syncNonNull(A.getAAFor<AANonNull>(*this, F, ArgNo));
2006 
2007   bool IsNonNull = isAssumedNonNull();
2008   bool IsGlobal = isAssumedGlobal();
2009 
2010   // Callback function
2011   std::function<bool(CallSite)> CallSiteCheck = [&](CallSite CS) -> bool {
2012     assert(CS && "Sanity check: Call site was not initialized properly!");
2013 
2014     // Check that DereferenceableAA is AADereferenceableCallSiteArgument.
2015     if (auto *DereferenceableAA =
2016             A.getAAFor<AADereferenceable>(*this, *CS.getInstruction(), ArgNo)) {
2017       ImmutableCallSite ICS(&DereferenceableAA->getAnchoredValue());
2018       if (ICS && CS.getInstruction() == ICS.getInstruction()) {
2019         takeAssumedDerefBytesMinimum(
2020             DereferenceableAA->getAssumedDereferenceableBytes());
2021         IsNonNull &= DereferenceableAA->isAssumedNonNull();
2022         IsGlobal &= DereferenceableAA->isAssumedGlobal();
2023         return isValidState();
2024       }
2025     }
2026 
2027     takeAssumedDerefBytesMinimum(computeAssumedDerefenceableBytes(
2028         A, *CS.getArgOperand(ArgNo), IsNonNull, IsGlobal));
2029 
2030     return isValidState();
2031   };
2032 
2033   if (!A.checkForAllCallSites(F, CallSiteCheck, true, *this))
2034     return indicatePessimisticFixpoint();
2035 
2036   updateAssumedNonNullGlobalState(IsNonNull, IsGlobal);
2037 
2038   return BeforeState == static_cast<DerefState>(*this) ? ChangeStatus::UNCHANGED
2039                                                        : ChangeStatus::CHANGED;
2040 }
2041 
/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument : AADereferenceableImpl {

  /// See AADereferenceableImpl::AADereferenceableImpl(...).
  AADereferenceableCallSiteArgument(CallSite CS, unsigned ArgNo,
                                    InformationCache &InfoCache)
      : AADereferenceableImpl(CS.getArgOperand(ArgNo), *CS.getInstruction(),
                              InfoCache),
        ArgNo(ArgNo) {}

  /// See AbstractAttribute::initialize(...).
  ///
  /// Seed the known byte count from dereferenceable/dereferenceable_or_null
  /// attributes already present at the call site.
  void initialize(Attributor &A) override {
    CallSite CS(&getAnchoredValue());
    // paramHasAttr takes the plain argument number while the byte accessors
    // expect an attribute-list index, hence the FirstArgIndex offset below.
    if (CS.paramHasAttr(ArgNo, Attribute::Dereferenceable))
      takeKnownDerefBytesMaximum(
          CS.getDereferenceableBytes(ArgNo + AttributeList::FirstArgIndex));

    if (CS.paramHasAttr(ArgNo, Attribute::DereferenceableOrNull))
      takeKnownDerefBytesMaximum(CS.getDereferenceableOrNullBytes(
          ArgNo + AttributeList::FirstArgIndex));
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getManifestPosition().
  ManifestPosition getManifestPosition() const override {
    return MP_CALL_SITE_ARGUMENT;
  };

  // Return argument index of associated value.
  int getArgNo() const { return ArgNo; }

private:
  // Argument number of the associated value at the anchored call site.
  unsigned ArgNo;
};
2078 
2079 ChangeStatus AADereferenceableCallSiteArgument::updateImpl(Attributor &A) {
2080   // NOTE: Never look at the argument of the callee in this method.
2081   //       If we do this, "dereferenceable" is always deduced because of the
2082   //       assumption.
2083 
2084   Value &V = *getAssociatedValue();
2085 
2086   auto BeforeState = static_cast<DerefState>(*this);
2087 
2088   syncNonNull(A.getAAFor<AANonNull>(*this, getAnchoredValue(), ArgNo));
2089   bool IsNonNull = isAssumedNonNull();
2090   bool IsGlobal = isKnownGlobal();
2091 
2092   takeAssumedDerefBytesMinimum(
2093       computeAssumedDerefenceableBytes(A, V, IsNonNull, IsGlobal));
2094   updateAssumedNonNullGlobalState(IsNonNull, IsGlobal);
2095 
2096   return BeforeState == static_cast<DerefState>(*this) ? ChangeStatus::UNCHANGED
2097                                                        : ChangeStatus::CHANGED;
2098 }
2099 
2100 // ------------------------ Align Argument Attribute ------------------------
2101 
/// Shared implementation for all "align" abstract attributes. The alignment
/// is tracked directly in the inherited IntegerState.
struct AAAlignImpl : AAAlign, IntegerState {

  // Max alignment value allowed in IR (1 << 29); also the initial optimistic
  // assumption.
  static const unsigned MAX_ALIGN = 1U << 29;

  AAAlignImpl(Value *AssociatedVal, Value &AnchoredValue,
              InformationCache &InfoCache)
      : AAAlign(AssociatedVal, AnchoredValue, InfoCache),
        IntegerState(MAX_ALIGN) {}

  AAAlignImpl(Value &V, InformationCache &InfoCache)
      : AAAlignImpl(&V, V, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AbstractAttribute::getAsStr().
  virtual const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }

  /// See AAAlign::getAssumedAlign().
  unsigned getAssumedAlign() const override { return getAssumed(); }

  /// See AAAlign::getKnownAlign().
  unsigned getKnownAlign() const override { return getKnown(); }

  /// See AbstractAttribute::initialize(...).
  ///
  /// Seed the known alignment from an align attribute already present in the
  /// IR at this position.
  void initialize(Attributor &A) override {
    Function &F = getAnchorScope();

    unsigned AttrIdx =
        getAttrIndex(getManifestPosition(), getArgNo(getAnchoredValue()));

    // Already the function has align attribute on return value or argument.
    if (F.getAttributes().hasAttribute(AttrIdx, ID))
      addKnownBits(F.getAttribute(AttrIdx, ID).getAlignment());
  }

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(SmallVectorImpl<Attribute> &Attrs) const override {
    LLVMContext &Ctx = AnchoredVal.getContext();

    Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
  }
};
2153 
/// Align attribute deduction for a function return value.
struct AAAlignReturned : AAAlignImpl {

  AAAlignReturned(Function &F, InformationCache &InfoCache)
      : AAAlignImpl(F, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  virtual ManifestPosition getManifestPosition() const override {
    return MP_RETURNED;
  }

  /// See AbstractAttribute::updateImpl(...).
  virtual ChangeStatus updateImpl(Attributor &A) override;
};
2168 
2169 ChangeStatus AAAlignReturned::updateImpl(Attributor &A) {
2170   Function &F = getAnchorScope();
2171   auto *AARetValImpl = A.getAAFor<AAReturnedValuesImpl>(*this, F);
2172   if (!AARetValImpl)
2173     return indicatePessimisticFixpoint();
2174 
2175   // Currently, align<n> is deduced if alignments in return values are assumed
2176   // as greater than n. We reach pessimistic fixpoint if any of the return value
2177   // wouldn't have align. If no assumed state was used for reasoning, an
2178   // optimistic fixpoint is reached earlier.
2179 
2180   base_t BeforeState = getAssumed();
2181   std::function<bool(Value &, const SmallPtrSetImpl<ReturnInst *> &)> Pred =
2182       [&](Value &RV, const SmallPtrSetImpl<ReturnInst *> &RetInsts) -> bool {
2183     auto *AlignAA = A.getAAFor<AAAlign>(*this, RV);
2184 
2185     if (AlignAA)
2186       takeAssumedMinimum(AlignAA->getAssumedAlign());
2187     else
2188       // Use IR information.
2189       takeAssumedMinimum(RV.getPointerAlignment(
2190           getAnchorScope().getParent()->getDataLayout()));
2191 
2192     return isValidState();
2193   };
2194 
2195   if (!AARetValImpl->checkForallReturnedValues(Pred))
2196     return indicatePessimisticFixpoint();
2197 
2198   return (getAssumed() != BeforeState) ? ChangeStatus::CHANGED
2199                                        : ChangeStatus::UNCHANGED;
2200 }
2201 
/// Align attribute deduction for a function argument.
struct AAAlignArgument : AAAlignImpl {

  AAAlignArgument(Argument &A, InformationCache &InfoCache)
      : AAAlignImpl(A, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  virtual ManifestPosition getManifestPosition() const override {
    return MP_ARGUMENT;
  }

  /// See AbstractAttribute::updateImpl(...).
  virtual ChangeStatus updateImpl(Attributor &A) override;
};
2216 
2217 ChangeStatus AAAlignArgument::updateImpl(Attributor &A) {
2218 
2219   Function &F = getAnchorScope();
2220   Argument &Arg = cast<Argument>(getAnchoredValue());
2221 
2222   unsigned ArgNo = Arg.getArgNo();
2223   const DataLayout &DL = F.getParent()->getDataLayout();
2224 
2225   auto BeforeState = getAssumed();
2226 
2227   // Callback function
2228   std::function<bool(CallSite)> CallSiteCheck = [&](CallSite CS) {
2229     assert(CS && "Sanity check: Call site was not initialized properly!");
2230 
2231     auto *AlignAA = A.getAAFor<AAAlign>(*this, *CS.getInstruction(), ArgNo);
2232 
2233     // Check that AlignAA is AAAlignCallSiteArgument.
2234     if (AlignAA) {
2235       ImmutableCallSite ICS(&AlignAA->getAnchoredValue());
2236       if (ICS && CS.getInstruction() == ICS.getInstruction()) {
2237         takeAssumedMinimum(AlignAA->getAssumedAlign());
2238         return isValidState();
2239       }
2240     }
2241 
2242     Value *V = CS.getArgOperand(ArgNo);
2243     takeAssumedMinimum(V->getPointerAlignment(DL));
2244     return isValidState();
2245   };
2246 
2247   if (!A.checkForAllCallSites(F, CallSiteCheck, true, *this))
2248     indicatePessimisticFixpoint();
2249 
2250   return BeforeState == getAssumed() ? ChangeStatus::UNCHANGED
2251                                      : ChangeStatus ::CHANGED;
2252 }
2253 
2254 struct AAAlignCallSiteArgument : AAAlignImpl {
2255 
2256   /// See AANonNullImpl::AANonNullImpl(...).
2257   AAAlignCallSiteArgument(CallSite CS, unsigned ArgNo,
2258                           InformationCache &InfoCache)
2259       : AAAlignImpl(CS.getArgOperand(ArgNo), *CS.getInstruction(), InfoCache),
2260         ArgNo(ArgNo) {}
2261 
2262   /// See AbstractAttribute::initialize(...).
2263   void initialize(Attributor &A) override {
2264     CallSite CS(&getAnchoredValue());
2265     takeKnownMaximum(getAssociatedValue()->getPointerAlignment(
2266         getAnchorScope().getParent()->getDataLayout()));
2267   }
2268 
2269   /// See AbstractAttribute::updateImpl(Attributor &A).
2270   ChangeStatus updateImpl(Attributor &A) override;
2271 
2272   /// See AbstractAttribute::getManifestPosition().
2273   ManifestPosition getManifestPosition() const override {
2274     return MP_CALL_SITE_ARGUMENT;
2275   };
2276 
2277   // Return argument index of associated value.
2278   int getArgNo() const { return ArgNo; }
2279 
2280 private:
2281   unsigned ArgNo;
2282 };
2283 
2284 ChangeStatus AAAlignCallSiteArgument::updateImpl(Attributor &A) {
2285   // NOTE: Never look at the argument of the callee in this method.
2286   //       If we do this, "align" is always deduced because of the assumption.
2287 
2288   auto BeforeState = getAssumed();
2289 
2290   Value &V = *getAssociatedValue();
2291 
2292   auto *AlignAA = A.getAAFor<AAAlign>(*this, V);
2293 
2294   if (AlignAA)
2295     takeAssumedMinimum(AlignAA->getAssumedAlign());
2296   else
2297     indicatePessimisticFixpoint();
2298 
2299   return BeforeState == getAssumed() ? ChangeStatus::UNCHANGED
2300                                      : ChangeStatus::CHANGED;
2301 }
2302 
2303 /// ----------------------------------------------------------------------------
2304 ///                               Attributor
2305 /// ----------------------------------------------------------------------------
2306 
bool Attributor::checkForAllCallSites(Function &F,
                                      std::function<bool(CallSite)> &Pred,
                                      bool RequireAllCallSites,
                                      AbstractAttribute &AA) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function has internal
  // linkage.
  if (RequireAllCallSites && !F.hasInternalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "Attributor: Function " << F.getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }

  for (const Use &U : F.uses()) {
    Instruction *I = cast<Instruction>(U.getUser());
    Function *AnchorValue = I->getParent()->getParent();

    // Liveness information for the caller allows skipping call sites that are
    // assumed dead.
    auto *LivenessAA = getAAFor<AAIsDead>(AA, *AnchorValue);

    // Skip dead calls.
    if (LivenessAA && LivenessAA->isAssumedDead(I))
      continue;

    CallSite CS(U.getUser());
    // The use has to be a direct callee use in a caller with an exact
    // definition; otherwise we cannot soundly reason about this call site.
    if (!CS || !CS.isCallee(&U) || !CS.getCaller()->hasExactDefinition()) {
      if (!RequireAllCallSites)
        continue;

      LLVM_DEBUG(dbgs() << "Attributor: User " << *U.getUser()
                        << " is an invalid use of " << F.getName() << "\n");
      return false;
    }

    if (Pred(CS))
      continue;

    LLVM_DEBUG(dbgs() << "Attributor: Call site callback failed for "
                      << *CS.getInstruction() << "\n");
    return false;
  }

  return true;
}
2352 
/// Drive all registered abstract attributes to a fixpoint (or the iteration
/// limit), revert unsound results, and manifest the deduced information into
/// the IR. Returns whether the IR was changed.
ChangeStatus Attributor::run() {
  // Initialize all abstract attributes.
  for (AbstractAttribute *AA : AllAbstractAttributes)
    AA->initialize(*this);

  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
                    << AllAbstractAttributes.size()
                    << " abstract attributes.\n");

  // Now that all abstract attributes are collected and initialized we start
  // the abstract analysis.

  unsigned IterationCounter = 1;

  SmallVector<AbstractAttribute *, 64> ChangedAAs;
  SetVector<AbstractAttribute *> Worklist;
  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());

  do {
    LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist size: " << Worklist.size() << "\n");

    // Add all abstract attributes that are potentially dependent on one that
    // changed to the work list.
    for (AbstractAttribute *ChangedAA : ChangedAAs) {
      auto &QuerriedAAs = QueryMap[ChangedAA];
      Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
    }

    // Reset the changed set.
    ChangedAAs.clear();

    // Update all abstract attribute in the work list and record the ones that
    // changed.
    for (AbstractAttribute *AA : Worklist)
      if (AA->update(*this) == ChangeStatus::CHANGED)
        ChangedAAs.push_back(AA);

    // Reset the work list and repopulate with the changed abstract attributes.
    // Note that dependent ones are added above.
    Worklist.clear();
    Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());

  } while (!Worklist.empty() && ++IterationCounter < MaxFixpointIterations);

  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
                    << IterationCounter << "/" << MaxFixpointIterations
                    << " iterations\n");

  bool FinishedAtFixpoint = Worklist.empty();

  // Reset abstract arguments not settled in a sound fixpoint by now. This
  // happens when we stopped the fixpoint iteration early. Note that only the
  // ones marked as "changed" *and* the ones transitively depending on them
  // need to be reverted to a pessimistic state. Others might not be in a
  // fixpoint state but we can use the optimistic results for them anyway.
  SmallPtrSet<AbstractAttribute *, 32> Visited;
  // Deliberate index loop: ChangedAAs grows inside the loop as dependents are
  // appended, so iterators would be invalidated.
  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
    AbstractAttribute *ChangedAA = ChangedAAs[u];
    if (!Visited.insert(ChangedAA).second)
      continue;

    AbstractState &State = ChangedAA->getState();
    if (!State.isAtFixpoint()) {
      State.indicatePessimisticFixpoint();

      NumAttributesTimedOut++;
    }

    // Propagate: everything that queried this attribute may be unsound too.
    auto &QuerriedAAs = QueryMap[ChangedAA];
    ChangedAAs.append(QuerriedAAs.begin(), QuerriedAAs.end());
  }

  LLVM_DEBUG({
    if (!Visited.empty())
      dbgs() << "\n[Attributor] Finalized " << Visited.size()
             << " abstract attributes.\n";
  });

  unsigned NumManifested = 0;
  unsigned NumAtFixpoint = 0;
  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
  for (AbstractAttribute *AA : AllAbstractAttributes) {
    AbstractState &State = AA->getState();

    // If there is not already a fixpoint reached, we can now take the
    // optimistic state. This is correct because we enforced a pessimistic one
    // on abstract attributes that were transitively dependent on a changed one
    // already above.
    if (!State.isAtFixpoint())
      State.indicateOptimisticFixpoint();

    // If the state is invalid, we do not try to manifest it.
    if (!State.isValidState())
      continue;

    // Manifest the state and record if we changed the IR.
    ChangeStatus LocalChange = AA->manifest(*this);
    ManifestChange = ManifestChange | LocalChange;

    NumAtFixpoint++;
    NumManifested += (LocalChange == ChangeStatus::CHANGED);
  }

  (void)NumManifested;
  (void)NumAtFixpoint;
  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
                    << " arguments while " << NumAtFixpoint
                    << " were in a valid fixpoint state\n");

  // If verification is requested, we finished this run at a fixpoint, and the
  // IR was changed, we re-run the whole fixpoint analysis, starting at
  // re-initialization of the arguments. This re-run should not result in an IR
  // change. Though, the (virtual) state of attributes at the end of the re-run
  // might be more optimistic than the known state or the IR state if the better
  // state cannot be manifested.
  if (VerifyAttributor && FinishedAtFixpoint &&
      ManifestChange == ChangeStatus::CHANGED) {
    VerifyAttributor = false;
    ChangeStatus VerifyStatus = run();
    if (VerifyStatus != ChangeStatus::UNCHANGED)
      llvm_unreachable(
          "Attributor verification failed, re-run did result in an IR change "
          "even after a fixpoint was reached in the original run. (False "
          "positives possible!)");
    VerifyAttributor = true;
  }

  NumAttributesManifested += NumManifested;
  NumAttributesValidFixpoint += NumAtFixpoint;

  return ManifestChange;
}
2486 
2487 void Attributor::identifyDefaultAbstractAttributes(
2488     Function &F, InformationCache &InfoCache,
2489     DenseSet</* Attribute::AttrKind */ unsigned> *Whitelist) {
2490 
2491   // Check for dead BasicBlocks in every function.
2492   registerAA(*new AAIsDeadFunction(F, InfoCache));
2493 
2494   // Every function might be "will-return".
2495   registerAA(*new AAWillReturnFunction(F, InfoCache));
2496 
2497   // Every function can be nounwind.
2498   registerAA(*new AANoUnwindFunction(F, InfoCache));
2499 
2500   // Every function might be marked "nosync"
2501   registerAA(*new AANoSyncFunction(F, InfoCache));
2502 
2503   // Every function might be "no-free".
2504   registerAA(*new AANoFreeFunction(F, InfoCache));
2505 
2506   // Return attributes are only appropriate if the return type is non void.
2507   Type *ReturnType = F.getReturnType();
2508   if (!ReturnType->isVoidTy()) {
2509     // Argument attribute "returned" --- Create only one per function even
2510     // though it is an argument attribute.
2511     if (!Whitelist || Whitelist->count(AAReturnedValues::ID))
2512       registerAA(*new AAReturnedValuesImpl(F, InfoCache));
2513 
2514     if (ReturnType->isPointerTy()) {
2515       // Every function with pointer return type might be marked align.
2516       if (!Whitelist || Whitelist->count(AAAlignReturned::ID))
2517         registerAA(*new AAAlignReturned(F, InfoCache));
2518 
2519       // Every function with pointer return type might be marked nonnull.
2520       if (!Whitelist || Whitelist->count(AANonNullReturned::ID))
2521         registerAA(*new AANonNullReturned(F, InfoCache));
2522 
2523       // Every function with pointer return type might be marked noalias.
2524       if (!Whitelist || Whitelist->count(AANoAliasReturned::ID))
2525         registerAA(*new AANoAliasReturned(F, InfoCache));
2526 
2527       // Every function with pointer return type might be marked
2528       // dereferenceable.
2529       if (ReturnType->isPointerTy() &&
2530           (!Whitelist || Whitelist->count(AADereferenceableReturned::ID)))
2531         registerAA(*new AADereferenceableReturned(F, InfoCache));
2532     }
2533   }
2534 
2535   for (Argument &Arg : F.args()) {
2536     if (Arg.getType()->isPointerTy()) {
2537       // Every argument with pointer type might be marked nonnull.
2538       if (!Whitelist || Whitelist->count(AANonNullArgument::ID))
2539         registerAA(*new AANonNullArgument(Arg, InfoCache));
2540 
2541       // Every argument with pointer type might be marked dereferenceable.
2542       if (!Whitelist || Whitelist->count(AADereferenceableArgument::ID))
2543         registerAA(*new AADereferenceableArgument(Arg, InfoCache));
2544 
2545       // Every argument with pointer type might be marked align.
2546       if (!Whitelist || Whitelist->count(AAAlignArgument::ID))
2547         registerAA(*new AAAlignArgument(Arg, InfoCache));
2548     }
2549   }
2550 
2551   // Walk all instructions to find more attribute opportunities and also
2552   // interesting instructions that might be queried by abstract attributes
2553   // during their initialization or update.
2554   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
2555   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
2556 
2557   for (Instruction &I : instructions(&F)) {
2558     bool IsInterestingOpcode = false;
2559 
2560     // To allow easy access to all instructions in a function with a given
2561     // opcode we store them in the InfoCache. As not all opcodes are interesting
2562     // to concrete attributes we only cache the ones that are as identified in
2563     // the following switch.
2564     // Note: There are no concrete attributes now so this is initially empty.
2565     switch (I.getOpcode()) {
2566     default:
2567       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
2568              "New call site/base instruction type needs to be known int the "
2569              "attributor.");
2570       break;
2571     case Instruction::Call:
2572     case Instruction::CallBr:
2573     case Instruction::Invoke:
2574     case Instruction::CleanupRet:
2575     case Instruction::CatchSwitch:
2576     case Instruction::Resume:
2577     case Instruction::Ret:
2578       IsInterestingOpcode = true;
2579     }
2580     if (IsInterestingOpcode)
2581       InstOpcodeMap[I.getOpcode()].push_back(&I);
2582     if (I.mayReadOrWriteMemory())
2583       ReadOrWriteInsts.push_back(&I);
2584 
2585     CallSite CS(&I);
2586     if (CS && CS.getCalledFunction()) {
2587       for (int i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
2588         if (!CS.getArgument(i)->getType()->isPointerTy())
2589           continue;
2590 
2591         // Call site argument attribute "non-null".
2592         if (!Whitelist || Whitelist->count(AANonNullCallSiteArgument::ID))
2593           registerAA(*new AANonNullCallSiteArgument(CS, i, InfoCache), i);
2594 
2595         // Call site argument attribute "dereferenceable".
2596         if (!Whitelist ||
2597             Whitelist->count(AADereferenceableCallSiteArgument::ID))
2598           registerAA(*new AADereferenceableCallSiteArgument(CS, i, InfoCache),
2599                      i);
2600 
2601         // Call site argument attribute "align".
2602         if (!Whitelist || Whitelist->count(AAAlignCallSiteArgument::ID))
2603           registerAA(*new AAAlignCallSiteArgument(CS, i, InfoCache), i);
2604       }
2605     }
2606   }
2607 }
2608 
2609 /// Helpers to ease debugging through output streams and print calls.
2610 ///
2611 ///{
2612 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
2613   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
2614 }
2615 
2616 raw_ostream &llvm::operator<<(raw_ostream &OS,
2617                               AbstractAttribute::ManifestPosition AP) {
2618   switch (AP) {
2619   case AbstractAttribute::MP_ARGUMENT:
2620     return OS << "arg";
2621   case AbstractAttribute::MP_CALL_SITE_ARGUMENT:
2622     return OS << "cs_arg";
2623   case AbstractAttribute::MP_FUNCTION:
2624     return OS << "fn";
2625   case AbstractAttribute::MP_RETURNED:
2626     return OS << "fn_ret";
2627   }
2628   llvm_unreachable("Unknown attribute position!");
2629 }
2630 
2631 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
2632   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
2633 }
2634 
// Stream insertion for abstract attributes; delegates to the virtual
// AbstractAttribute::print so derived classes control the output.
raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}
2639 
2640 void AbstractAttribute::print(raw_ostream &OS) const {
2641   OS << "[" << getManifestPosition() << "][" << getAsStr() << "]["
2642      << AnchoredVal.getName() << "]";
2643 }
2644 ///}
2645 
2646 /// ----------------------------------------------------------------------------
2647 ///                       Pass (Manager) Boilerplate
2648 /// ----------------------------------------------------------------------------
2649 
2650 static bool runAttributorOnModule(Module &M) {
2651   if (DisableAttributor)
2652     return false;
2653 
2654   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
2655                     << " functions.\n");
2656 
2657   // Create an Attributor and initially empty information cache that is filled
2658   // while we identify default attribute opportunities.
2659   Attributor A;
2660   InformationCache InfoCache;
2661 
2662   for (Function &F : M) {
2663     // TODO: Not all attributes require an exact definition. Find a way to
2664     //       enable deduction for some but not all attributes in case the
2665     //       definition might be changed at runtime, see also
2666     //       http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html.
2667     // TODO: We could always determine abstract attributes and if sufficient
2668     //       information was found we could duplicate the functions that do not
2669     //       have an exact definition.
2670     if (!F.hasExactDefinition()) {
2671       NumFnWithoutExactDefinition++;
2672       continue;
2673     }
2674 
2675     // For now we ignore naked and optnone functions.
2676     if (F.hasFnAttribute(Attribute::Naked) ||
2677         F.hasFnAttribute(Attribute::OptimizeNone))
2678       continue;
2679 
2680     NumFnWithExactDefinition++;
2681 
2682     // Populate the Attributor with abstract attribute opportunities in the
2683     // function and the information cache with IR information.
2684     A.identifyDefaultAbstractAttributes(F, InfoCache);
2685   }
2686 
2687   return A.run() == ChangeStatus::CHANGED;
2688 }
2689 
2690 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
2691   if (runAttributorOnModule(M)) {
2692     // FIXME: Think about passes we will preserve and add them here.
2693     return PreservedAnalyses::none();
2694   }
2695   return PreservedAnalyses::all();
2696 }
2697 
2698 namespace {
2699 
2700 struct AttributorLegacyPass : public ModulePass {
2701   static char ID;
2702 
2703   AttributorLegacyPass() : ModulePass(ID) {
2704     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
2705   }
2706 
2707   bool runOnModule(Module &M) override {
2708     if (skipModule(M))
2709       return false;
2710     return runAttributorOnModule(M);
2711   }
2712 
2713   void getAnalysisUsage(AnalysisUsage &AU) const override {
2714     // FIXME: Think about passes we will preserve and add them here.
2715     AU.setPreservesCFG();
2716   }
2717 };
2718 
2719 } // end anonymous namespace
2720 
// Factory for the legacy pass; ownership passes to the legacy pass manager.
Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
2722 
// Pass identification token; its address (not value) identifies the pass.
char AttributorLegacyPass::ID = 0;
// Register the legacy pass under the command line name "attributor". No
// required analyses are declared between the BEGIN and END macros.
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)
2728