1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This file implements an inter procedural pass that deduces and/or propagates
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CallGraph.h"
24 #include "llvm/Analysis/CallGraphSCCPass.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/EHPersonalities.h"
27 #include "llvm/Analysis/GlobalsModRef.h"
28 #include "llvm/Analysis/LazyValueInfo.h"
29 #include "llvm/Analysis/Loads.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/ScalarEvolution.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Argument.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/CFG.h"
36 #include "llvm/IR/IRBuilder.h"
37 #include "llvm/IR/InstIterator.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Verifier.h"
40 #include "llvm/InitializePasses.h"
41 #include "llvm/IR/NoFolder.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
46 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
47 #include "llvm/Transforms/Utils/Local.h"
48 
49 #include <cassert>
50 
51 using namespace llvm;
52 
53 #define DEBUG_TYPE "attributor"
54 
// Pass-wide statistics, printed with -stats. Each STATISTIC expands to a
// global counter incremented at the corresponding point in this file.
STATISTIC(NumFnWithExactDefinition,
          "Number of function with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of function without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
STATISTIC(NumAttributesFixedDueToRequiredDependences,
          "Number of abstract attributes fixed due to required dependences");
67 
// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
//
// Build the human-readable statistic message for IR attribute NAME at
// position kind TYPE, e.g., "Number of arguments marked 'returned'".
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
// Mangle a unique counter variable name from the attribute and position kind.
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
// Declare the counter for NAME/TYPE without incrementing it.
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
// Increment a previously declared counter.
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
// Declare and increment in one statement (braces keep the STATISTIC local).
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
// Convenience wrappers, one per IR position kind.
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
111 
112 // Specialization of the operator<< for abstract attributes subclasses. This
113 // disambiguates situations where multiple operators are applicable.
// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
// Each PIPE_OPERATOR(CLASS) defines operator<< for CLASS by forwarding to the
// generic AbstractAttribute printer.
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)

#undef PIPE_OPERATOR
} // namespace llvm
142 
// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
// Developer knob: assert that the iteration budget above is tight, i.e., the
// fixpoint is reached in exactly that many iterations.
static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

// The pass is off by default (cl::init(true)); pass
// -attributor-disable=false to enable it.
static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

// When set, internal (string) attributes are written into the IR as well,
// which is mainly useful for testing/debugging.
static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));

// Controls for the heap-to-stack conversion performed by AAHeapToStack.
static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);
184 
185 /// Logic operators for the change status enum class.
186 ///
187 ///{
188 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
189   return l == ChangeStatus::CHANGED ? l : r;
190 }
191 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
192   return l == ChangeStatus::UNCHANGED ? l : r;
193 }
194 ///}
195 
/// Return the argument this position is associated with, preferring a unique
/// callback callee argument over the direct callee argument for call site
/// argument positions. Returns nullptr if no argument can be determined.
Argument *IRPosition::getAssociatedArgument() const {
  // An argument position is its own associated argument.
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  Optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CBUses;
  ImmutableCallSite ICS(&getAnchorValue());
  AbstractCallSite::getCallbackUses(ICS, CBUses);
  for (const Use *U : CBUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      // A second match means the callback argument is not unique; remember
      // the ambiguity by storing nullptr (hasValue() stays true).
      if (CBCandidateArg.hasValue()) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
    return CBCandidateArg.getValue();

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  const Function *Callee = ICS.getCalledFunction();
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}
249 
/// Return the ConstantInt the value \p V is assumed to simplify to, as
/// determined by the AAValueSimplify attribute queried on behalf of \p AA.
/// \p UsedAssumedInformation is set if the result rests on assumed (not yet
/// known) information. Returns llvm::None while simplification is still
/// pending or the value simplifies to undef; otherwise the ConstantInt, or
/// nullptr if the simplified value is not a constant integer.
static Optional<ConstantInt *>
getAssumedConstant(Attributor &A, const Value &V, const AbstractAttribute &AA,
                   bool &UsedAssumedInformation) {
  const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
      AA, IRPosition::value(V), /* TrackDependence */ false);
  Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
  bool IsKnown = ValueSimplifyAA.isKnown();
  UsedAssumedInformation |= !IsKnown;
  if (!SimplifiedV.hasValue()) {
    // No answer yet; record an optional dependence so \p AA is updated when
    // the simplification attribute changes.
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
    // Treat undef like "no answer" but keep watching the simplification.
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  ConstantInt *CI = dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
  if (CI)
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
  return CI;
}
271 
272 /// Get pointer operand of memory accessing instruction. If \p I is
273 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
274 /// is set to false and the instruction is volatile, return nullptr.
275 static const Value *getPointerOperand(const Instruction *I,
276                                       bool AllowVolatile) {
277   if (auto *LI = dyn_cast<LoadInst>(I)) {
278     if (!AllowVolatile && LI->isVolatile())
279       return nullptr;
280     return LI->getPointerOperand();
281   }
282 
283   if (auto *SI = dyn_cast<StoreInst>(I)) {
284     if (!AllowVolatile && SI->isVolatile())
285       return nullptr;
286     return SI->getPointerOperand();
287   }
288 
289   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
290     if (!AllowVolatile && CXI->isVolatile())
291       return nullptr;
292     return CXI->getPointerOperand();
293   }
294 
295   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
296     if (!AllowVolatile && RMWI->isVolatile())
297       return nullptr;
298     return RMWI->getPointerOperand();
299   }
300 
301   return nullptr;
302 }
303 
304 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
305 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
306 /// getelement pointer instructions that traverse the natural type of \p Ptr if
307 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
308 /// through a cast to i8*.
309 ///
310 /// TODO: This could probably live somewhere more prominantly if it doesn't
311 ///       already exist.
312 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
313                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
314   assert(Offset >= 0 && "Negative offset not supported yet!");
315   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
316                     << "-bytes as " << *ResTy << "\n");
317 
318   // The initial type we are trying to traverse to get nice GEPs.
319   Type *Ty = Ptr->getType();
320 
321   SmallVector<Value *, 4> Indices;
322   std::string GEPName = Ptr->getName().str();
323   while (Offset) {
324     uint64_t Idx, Rem;
325 
326     if (auto *STy = dyn_cast<StructType>(Ty)) {
327       const StructLayout *SL = DL.getStructLayout(STy);
328       if (int64_t(SL->getSizeInBytes()) < Offset)
329         break;
330       Idx = SL->getElementContainingOffset(Offset);
331       assert(Idx < STy->getNumElements() && "Offset calculation error!");
332       Rem = Offset - SL->getElementOffset(Idx);
333       Ty = STy->getElementType(Idx);
334     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
335       Ty = PTy->getElementType();
336       if (!Ty->isSized())
337         break;
338       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
339       assert(ElementSize && "Expected type with size!");
340       Idx = Offset / ElementSize;
341       Rem = Offset % ElementSize;
342     } else {
343       // Non-aggregate type, we cast and make byte-wise progress now.
344       break;
345     }
346 
347     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
348                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
349 
350     GEPName += "." + std::to_string(Idx);
351     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
352     Offset = Rem;
353   }
354 
355   // Create a GEP if we collected indices above.
356   if (Indices.size())
357     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
358 
359   // If an offset is left we use byte-wise adjustment.
360   if (Offset) {
361     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
362     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
363                         GEPName + ".b" + Twine(Offset));
364   }
365 
366   // Ensure the result has the requested type.
367   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
368 
369   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
370   return Ptr;
371 }
372 
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
///
/// \returns true if all reachable leaf values were visited (and accepted by
/// the callback), false if the budget was exhausted or the callback refused.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  // Liveness is only available when the position is anchored in a function;
  // it lets us skip PHI operands arriving over dead incoming edges.
  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    // If stripping made progress, requeue the stripped value instead.
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          // Skip the incoming value but remember we relied on liveness.
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, record a dependence on it.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
465 
466 /// Return true if \p New is equal or worse than \p Old.
467 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
468   if (!Old.isIntAttribute())
469     return true;
470 
471   return Old.getValueAsInt() >= New.getValueAsInt();
472 }
473 
474 /// Return true if the information provided by \p Attr was added to the
475 /// attribute list \p Attrs. This is only the case if it was not already present
476 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
477 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
478                              AttributeList &Attrs, int AttrIdx) {
479 
480   if (Attr.isEnumAttribute()) {
481     Attribute::AttrKind Kind = Attr.getKindAsEnum();
482     if (Attrs.hasAttribute(AttrIdx, Kind))
483       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
484         return false;
485     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
486     return true;
487   }
488   if (Attr.isStringAttribute()) {
489     StringRef Kind = Attr.getKindAsString();
490     if (Attrs.hasAttribute(AttrIdx, Kind))
491       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
492         return false;
493     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
494     return true;
495   }
496   if (Attr.isIntAttribute()) {
497     Attribute::AttrKind Kind = Attr.getKindAsEnum();
498     if (Attrs.hasAttribute(AttrIdx, Kind))
499       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
500         return false;
501     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
502     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
503     return true;
504   }
505 
506   llvm_unreachable("Expected enum or string attribute!");
507 }
508 
509 static const Value *
510 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
511                                      const DataLayout &DL,
512                                      bool AllowNonInbounds = false) {
513   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
514   if (!Ptr)
515     return nullptr;
516 
517   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
518                                           AllowNonInbounds);
519 }
520 
521 ChangeStatus AbstractAttribute::update(Attributor &A) {
522   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
523   if (getState().isAtFixpoint())
524     return HasChanged;
525 
526   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
527 
528   HasChanged = updateImpl(A);
529 
530   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
531                     << "\n");
532 
533   return HasChanged;
534 }
535 
/// Write the deduced attributes \p DeducedAttrs into the IR at position
/// \p IRP, returning CHANGED iff at least one attribute improved the IR.
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following some generic code that will manifest attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.

  // Read the attribute list the position kind maps to: function-anchored
  // positions use the function's list, call-site-anchored ones the call's.
  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    // Floating and invalid positions have no IR attribute list to write to.
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    // Only attributes that are new or better than the existing ones count.
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  // Write the (modified) attribute list back, mirroring the switch above.
  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}
593 
// Sentinel positions built from out-of-range kind encodings (255/256) that
// can never describe a real position; presumably used as the DenseMap
// empty/tombstone keys for IRPosition -- see the corresponding DenseMapInfo.
const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);
596 
/// Collect all positions whose attributes also apply at \p IRP ("subsuming"
/// positions). The position itself is always placed first; depending on the
/// kind, the associated function, the callee, its returned position, or its
/// matching argument are appended.
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    // Nothing subsumes these beyond the position itself.
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    // Function attributes also apply to arguments and returned values.
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      // Guard against var-arg call sites where ArgNo exceeds the callee's
      // declared arguments.
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
649 
650 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
651                          bool IgnoreSubsumingPositions) const {
652   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
653     for (Attribute::AttrKind AK : AKs)
654       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
655         return true;
656     // The first position returned by the SubsumingPositionIterator is
657     // always the position itself. If we ignore subsuming positions we
658     // are done after the first iteration.
659     if (IgnoreSubsumingPositions)
660       break;
661   }
662   return false;
663 }
664 
665 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
666                           SmallVectorImpl<Attribute> &Attrs,
667                           bool IgnoreSubsumingPositions) const {
668   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
669     for (Attribute::AttrKind AK : AKs) {
670       const Attribute &Attr = EquivIRP.getAttr(AK);
671       if (Attr.getKindAsEnum() == AK)
672         Attrs.push_back(Attr);
673     }
674     // The first position returned by the SubsumingPositionIterator is
675     // always the position itself. If we ignore subsuming positions we
676     // are done after the first iteration.
677     if (IgnoreSubsumingPositions)
678       break;
679   }
680 }
681 
/// Assert (debug builds only) that the position's kind encoding, anchor
/// value, and associated value are mutually consistent.
void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    // A non-negative KindOrArgNo encodes an argument number, i.e., an
    // argument or call site argument position.
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (isa<Argument>(AnchorVal)) {
      assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
             "Associated value mismatch!");
    } else {
      assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
                 &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    // Call bases and arguments must use their dedicated kinds, never FLOAT.
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}
731 
732 namespace {
733 /// Helper function to clamp a state \p S of type \p StateType with the
734 /// information in \p R and indicate/return if \p S did change (as-in update is
735 /// required to be run again).
736 template <typename StateType>
737 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
738   auto Assumed = S.getAssumed();
739   S ^= R;
740   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
741                                    : ChangeStatus::CHANGED;
742 }
743 
/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S. If any returned value has an
/// invalid state, or not all returned values could be inspected, \p S is
/// moved to a pessimistic fixpoint.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    // Join with the accumulated state, or seed it on the first value.
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    // Abort the traversal as soon as the joined state becomes invalid.
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
784 
/// Helper class to compose two generic deduction schemes \p F and \p G.
///
/// The resulting attribute inherits F<..., G<..., Base>> so both deduction
/// templates are stacked on top of \p Base; updateImpl runs both deductions
/// and reports a change if either did.
template <typename AAType, typename Base, typename StateType,
          template <typename...> class F, template <typename...> class G>
struct AAComposeTwoGenericDeduction
    : public F<AAType, G<AAType, Base, StateType>, StateType> {
  AAComposeTwoGenericDeduction(const IRPosition &IRP)
      : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Run both deductions; the combined status is CHANGED if either one
    // changed the state.
    ChangeStatus ChangedF =
        F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
    ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
    return ChangedF | ChangedG;
  }
};
801 
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename Base::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Start from the best possible state and clamp it with the states of all
    // (assumed live) returned values.
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
817 
/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    // Join the call site argument state into the accumulator T.
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  // If not all call sites are known (or one of them was invalid) we must
  // assume the worst.
  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
864 
/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Start from the best possible state and clamp it with the states at all
    // (assumed live) call sites.
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
880 
881 /// Helper class for generic replication: function returned -> cs returned.
882 template <typename AAType, typename Base,
883           typename StateType = typename Base::StateType>
884 struct AACallSiteReturnedFromReturned : public Base {
885   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
886 
887   /// See AbstractAttribute::updateImpl(...).
888   ChangeStatus updateImpl(Attributor &A) override {
889     assert(this->getIRPosition().getPositionKind() ==
890                IRPosition::IRP_CALL_SITE_RETURNED &&
891            "Can only wrap function returned positions for call site returned "
892            "positions!");
893     auto &S = this->getState();
894 
895     const Function *AssociatedFunction =
896         this->getIRPosition().getAssociatedFunction();
897     if (!AssociatedFunction)
898       return S.indicatePessimisticFixpoint();
899 
900     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
901     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
902     return clampStateAndIndicateChange(
903         S, static_cast<const StateType &>(AA.getState()));
904   }
905 };
906 
/// Helper class for generic deduction using must-be-executed-context
/// Base class is required to have `followUse` method.

/// bool followUse(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// `followUse` returns true if the value should be tracked transitively.

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    const IRPosition &IRP = this->getIRPosition();
    Instruction *CtxI = IRP.getCtxI();

    // Without a context instruction there is no must-be-executed context to
    // explore.
    if (!CtxI)
      return;

    // Seed the worklist with the direct uses of the associated value.
    for (const Use &U : IRP.getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Take a copy of the state so we can detect changes at the end.
    auto BeforeState = this->getState();
    auto &S = this->getState();
    Instruction *CtxI = this->getIRPosition().getCtxI();
    if (!CtxI)
      return ChangeStatus::UNCHANGED;

    MustBeExecutedContextExplorer &Explorer =
        A.getInfoCache().getMustBeExecutedContextExplorer();

    // Iterate by index, not iterator: Uses may grow while we process it, and
    // a SetVector appends newly inserted elements at the end (and dedups).
    auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
    for (unsigned u = 0; u < Uses.size(); ++u) {
      const Use *U = Uses[u];
      if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
        // Only follow users that must be executed in the context of CtxI; if
        // followUse agrees, track the user's uses transitively as well.
        bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
        if (Found && Base::followUse(A, U, UserI))
          for (const Use &Us : UserI->uses())
            Uses.insert(&Us);
      }
    }

    return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

private:
  /// Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
};
961 
/// Compose the must-be-executed-context deduction with the call site
/// argument -> argument deduction.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AAArgumentFromCallSiteArguments>;

/// Compose the must-be-executed-context deduction with the function
/// returned -> call site returned deduction.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AACallSiteReturnedFromReturned>;
975 
976 /// -----------------------NoUnwind Function Attribute--------------------------
977 
978 struct AANoUnwindImpl : AANoUnwind {
979   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
980 
981   const std::string getAsStr() const override {
982     return getAssumed() ? "nounwind" : "may-unwind";
983   }
984 
985   /// See AbstractAttribute::updateImpl(...).
986   ChangeStatus updateImpl(Attributor &A) override {
987     auto Opcodes = {
988         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
989         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
990         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
991 
992     auto CheckForNoUnwind = [&](Instruction &I) {
993       if (!I.mayThrow())
994         return true;
995 
996       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
997         const auto &NoUnwindAA =
998             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
999         return NoUnwindAA.isAssumedNoUnwind();
1000       }
1001       return false;
1002     };
1003 
1004     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
1005       return indicatePessimisticFixpoint();
1006 
1007     return ChangeStatus::UNCHANGED;
1008   }
1009 };
1010 
/// NoUnwind attribute deduction for a function position.
struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};
1017 
/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    // Without a known callee nothing can be deduced for this call site.
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    // Clamp this call site's state with the callee's function-level state.
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};
1047 
1048 /// --------------------- Function Return Values -------------------------------
1049 
/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  // Whether the state reached a fixpoint (no further updates needed).
  bool IsFixed = false;
  // Whether the collected information is still usable.
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      // Without an associated function nothing can be deduced.
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        // A "returned" argument is returned by every return instruction, so
        // associate all of them with the argument and fix the state.
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    // If the definition is not exact the deduced values may not hold for all
    // (re)definitions of the function; give up.
    if (!F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Iterate over the collected (value -> return instructions) pairs.
  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  /// Note: the return type is unsigned, so -1 is reported as SIZE_MAX.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};
1172 
1173 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1174   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1175 
1176   // Bookkeeping.
1177   assert(isValidState());
1178   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1179                   "Number of function with known return values");
1180 
1181   // Check if we have an assumed unique return value that we could manifest.
1182   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1183 
1184   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1185     return Changed;
1186 
1187   // Bookkeeping.
1188   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1189                   "Number of function with unique return");
1190 
1191   // Callback to replace the uses of CB with the constant C.
1192   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
1193     if (CB.getNumUses() == 0 || CB.isMustTailCall())
1194       return ChangeStatus::UNCHANGED;
1195     A.replaceAllUsesWith(CB, C);
1196     return ChangeStatus::CHANGED;
1197   };
1198 
1199   // If the assumed unique return value is an argument, annotate it.
1200   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1201     // TODO: This should be handled differently!
1202     this->AnchorVal = UniqueRVArg;
1203     this->KindOrArgNo = UniqueRVArg->getArgNo();
1204     Changed = IRAttribute::manifest(A);
1205   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
1206     // We can replace the returned value with the unique returned constant.
1207     Value &AnchorValue = getAnchorValue();
1208     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1209       for (const Use &U : F->uses())
1210         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1211           if (CB->isCallee(&U)) {
1212             Constant *RVCCast =
1213                 CB->getType() == RVC->getType()
1214                     ? RVC
1215                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1216             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1217           }
1218     } else {
1219       assert(isa<CallBase>(AnchorValue) &&
1220              "Expcected a function or call base anchor!");
1221       Constant *RVCCast =
1222           AnchorValue.getType() == RVC->getType()
1223               ? RVC
1224               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1225       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1226     }
1227     if (Changed == ChangeStatus::CHANGED)
1228       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1229                       "Number of function returns replaced by constant return");
1230   }
1231 
1232   return Changed;
1233 }
1234 
1235 const std::string AAReturnedValuesImpl::getAsStr() const {
1236   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1237          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1238          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1239 }
1240 
1241 Optional<Value *>
1242 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1243   // If checkForAllReturnedValues provides a unique value, ignoring potential
1244   // undef values that can also be present, it is assumed to be the actual
1245   // return value and forwarded to the caller of this method. If there are
1246   // multiple, a nullptr is returned indicating there cannot be a unique
1247   // returned value.
1248   Optional<Value *> UniqueRV;
1249 
1250   auto Pred = [&](Value &RV) -> bool {
1251     // If we found a second returned value and neither the current nor the saved
1252     // one is an undef, there is no unique returned value. Undefs are special
1253     // since we can pretend they have any value.
1254     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1255         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1256       UniqueRV = nullptr;
1257       return false;
1258     }
1259 
1260     // Do not overwrite a value with an undef.
1261     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1262       UniqueRV = &RV;
1263 
1264     return true;
1265   };
1266 
1267   if (!A.checkForAllReturnedValues(Pred, *this))
1268     UniqueRV = nullptr;
1269 
1270   return UniqueRV;
1271 }
1272 
1273 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1274     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1275         &Pred) const {
1276   if (!isValidState())
1277     return false;
1278 
1279   // Check all returned values but ignore call sites as long as we have not
1280   // encountered an overdefined one during an update.
1281   for (auto &It : ReturnedValues) {
1282     Value *RV = It.first;
1283 
1284     CallBase *CB = dyn_cast<CallBase>(RV);
1285     if (CB && !UnresolvedCalls.count(CB))
1286       continue;
1287 
1288     if (!Pred(*RV, It.second))
1289       return false;
1290   }
1291 
1292   return true;
1293 }
1294 
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  // Remember how many calls were unresolved before this round so we can
  // report a change if that number grows.
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    // Associate the current return instructions with the value; set growth
    // indicates new information was learned.
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live returned instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    // Calls without a known callee cannot be resolved.
    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible, thus
    // if all return value can be represented in the current scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      // Arguments, calls, and constants can be translated into the caller's
      // scope (below); everything else cannot.
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    // Skip callees whose number of returned values did not change since we
    // looked at them last; nothing new could be learned.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the traversal
        // again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need to
        // do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  // A change in the number of unresolved calls is also a state change.
  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}
1450 
/// Returned values deduction for a function position.
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  // Tracked as an argument attribute because "returned" is manifested on
  // arguments (see AAReturnedValuesImpl::manifest).
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
1457 
/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Unreachable in practice (initialize aborts), but keep the state sound.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1480 
1481 /// ------------------------ NoSync Function Attribute -------------------------
1482 
/// Shared implementation for nosync deduction.
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check whether an intrinsic is nosync (currently
  /// only the mem* intrinsics: memcpy, memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};
1505 
// An atomic instruction is "relaxed" here if its ordering is unordered or
// monotonic; anything stronger may synchronize with other threads.
bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    // A single-thread fence does not synchronize with other threads.
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both are relaxed, then it can be treated as relaxed.
    // Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}
1552 
1553 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1554 /// FIXME: We should ipmrove the handling of intrinsics.
1555 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1556   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1557     switch (II->getIntrinsicID()) {
1558     /// Element wise atomic memory intrinsics are can only be unordered,
1559     /// therefore nosync.
1560     case Intrinsic::memset_element_unordered_atomic:
1561     case Intrinsic::memmove_element_unordered_atomic:
1562     case Intrinsic::memcpy_element_unordered_atomic:
1563       return true;
1564     case Intrinsic::memset:
1565     case Intrinsic::memmove:
1566     case Intrinsic::memcpy:
1567       if (!cast<MemIntrinsic>(II)->isVolatile())
1568         return true;
1569       return false;
1570     default:
1571       return false;
1572     }
1573   }
1574   return false;
1575 }
1576 
1577 bool AANoSyncImpl::isVolatile(Instruction *I) {
1578   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1579          "Calls should not be checked here");
1580 
1581   switch (I->getOpcode()) {
1582   case Instruction::AtomicRMW:
1583     return cast<AtomicRMWInst>(I)->isVolatile();
1584   case Instruction::Store:
1585     return cast<StoreInst>(I)->isVolatile();
1586   case Instruction::Load:
1587     return cast<LoadInst>(I)->isVolatile();
1588   case Instruction::AtomicCmpXchg:
1589     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1590   default:
1591     return false;
1592   }
1593 }
1594 
1595 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1596 
1597   auto CheckRWInstForNoSync = [&](Instruction &I) {
1598     /// We are looking for volatile instructions or Non-Relaxed atomics.
1599     /// FIXME: We should improve the handling of intrinsics.
1600 
1601     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1602       return true;
1603 
1604     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1605       if (ICS.hasFnAttr(Attribute::NoSync))
1606         return true;
1607 
1608       const auto &NoSyncAA =
1609           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1610       if (NoSyncAA.isAssumedNoSync())
1611         return true;
1612       return false;
1613     }
1614 
1615     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1616       return true;
1617 
1618     return false;
1619   };
1620 
1621   auto CheckForNoSync = [&](Instruction &I) {
1622     // At this point we handled all read/write effects and they are all
1623     // nosync, so they can be skipped.
1624     if (I.mayReadOrWriteMemory())
1625       return true;
1626 
1627     // non-convergent and readnone imply nosync.
1628     return !ImmutableCallSite(&I).isConvergent();
1629   };
1630 
1631   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1632       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1633     return indicatePessimisticFixpoint();
1634 
1635   return ChangeStatus::UNCHANGED;
1636 }
1637 
/// NoSync attribute deduction for a function position; the shared
/// AANoSyncImpl logic is used unchanged.
struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
1644 
/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    // Without a known callee there is nothing to query; give up immediately.
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
1673 
1674 /// ------------------------ No-Free Attributes ----------------------------
1675 
1676 struct AANoFreeImpl : public AANoFree {
1677   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1678 
1679   /// See AbstractAttribute::updateImpl(...).
1680   ChangeStatus updateImpl(Attributor &A) override {
1681     auto CheckForNoFree = [&](Instruction &I) {
1682       ImmutableCallSite ICS(&I);
1683       if (ICS.hasFnAttr(Attribute::NoFree))
1684         return true;
1685 
1686       const auto &NoFreeAA =
1687           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1688       return NoFreeAA.isAssumedNoFree();
1689     };
1690 
1691     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1692       return indicatePessimisticFixpoint();
1693     return ChangeStatus::UNCHANGED;
1694   }
1695 
1696   /// See AbstractAttribute::getAsStr().
1697   const std::string getAsStr() const override {
1698     return getAssumed() ? "nofree" : "may-free";
1699   }
1700 };
1701 
/// NoFree attribute deduction for a function position; the shared
/// AANoFreeImpl logic is used unchanged.
struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
1708 
/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    // Without a known callee there is nothing to query; give up immediately.
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};
1737 
/// NoFree attribute for floating values. Deduces nofree for a value by
/// inspecting all its (transitive) uses.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}

  /// See Abstract Attribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    // If the enclosing function scope is assumed nofree, no use of this
    // value can free it either; no use inspection needed.
    const auto &NoFreeAA =
        A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        // Uses in operand bundles have unknown semantics; be conservative.
        if (CB->isBundleOperand(&U))
          return false;
        // Non-argument operands (e.g. the callee itself) cannot cause the
        // value to be freed.
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        // Ask the nofree deduction of the matching call site argument.
        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));
        return NoFreeArg.isAssumedNoFree();
      }

      // Look through common pointer-producing instructions and inspect
      // their users as well.
      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
1784 
/// NoFree attribute for a function argument; uses the floating-value
/// use-inspection logic unchanged.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};
1792 
/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    // Without a matching callee argument there is nothing to query.
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
};
1815 
/// NoFree attribute for function return value.
/// This position is never meaningful for nofree; every entry point traps
/// via llvm_unreachable to catch accidental instantiation.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1835 
/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // The deduction is used internally only; nothing is written back to the
    // IR for call site return positions.
    return ChangeStatus::UNCHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};
1846 
1847 /// ------------------------ NonNull Argument Attribute ------------------------
/// Determine what the use \p U (inside instruction \p I) implies about the
/// non-null-ness and dereferenceable bytes of \p AssociatedValue.
/// \p IsNonNull is or-ed with true when the use proves non-null-ness (never
/// cleared here) and \p TrackUse is set when the users of \p I should be
/// inspected as well. \returns the number of bytes known dereferenceable.
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  // Only pointer values can be non-null / dereferenceable.
  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  // If null is a valid address in this address space, a use does not imply
  // non-null-ness. Without an enclosing function, be conservative.
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    // Operand-bundle uses have unknown semantics; deduce nothing.
    if (ICS.isBundleOperand(U))
      return 0;

    // A called pointer must be non-null (unless null is a valid address).
    if (ICS.isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
                                                  /* TrackDependence */ false);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart to avoid looking through things we do not
  // like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllConstantIndices()) {
      TrackUse = true;
      return 0;
    }

  int64_t Offset;
  if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
    // An access at a constant offset from the associated value proves the
    // first Offset + store-size bytes dereferenceable, and non-null-ness
    // (unless null is a valid address).
    if (Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;

      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  /// Corner case when an offset is 0.
  if (const Value *Base = getBasePointerOfAccessPointerOperand(
          I, Offset, DL, /*AllowNonInbounds*/ true)) {
    // Even through non-inbounds pointer arithmetic, an access at offset 0
    // still proves dereferenceability of the base itself.
    if (Offset == 0 && Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  return 0;
}
1920 
/// Common implementation shared by all "nonnull" deductions.
struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP)
      : AANonNull(IRP),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Existing nonnull/dereferenceable attributes are conclusive when null
    // is not a valid address; a constant null value can never be nonnull.
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
      indicateOptimisticFixpoint();
    else if (isa<ConstantPointerNull>(getAssociatedValue()))
      indicatePessimisticFixpoint();
    else
      AANonNull::initialize(A);
  }

  /// See AAFromMustBeExecutedContext
  /// Fold what \p U (in instruction \p I) proves about non-null-ness into
  /// the known state; returns whether to keep following the users of \p I.
  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};
1958 
1959 /// NonNull attribute for a floating value.
1960 struct AANonNullFloating
1961     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1962   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1963   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1964 
1965   /// See AbstractAttribute::updateImpl(...).
1966   ChangeStatus updateImpl(Attributor &A) override {
1967     ChangeStatus Change = Base::updateImpl(A);
1968     if (isKnownNonNull())
1969       return Change;
1970 
1971     if (!NullIsDefined) {
1972       const auto &DerefAA =
1973           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1974       if (DerefAA.getAssumedDereferenceableBytes())
1975         return Change;
1976     }
1977 
1978     const DataLayout &DL = A.getDataLayout();
1979 
1980     DominatorTree *DT = nullptr;
1981     InformationCache &InfoCache = A.getInfoCache();
1982     if (const Function *Fn = getAnchorScope())
1983       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1984 
1985     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1986                             bool Stripped) -> bool {
1987       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1988       if (!Stripped && this == &AA) {
1989         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1990           T.indicatePessimisticFixpoint();
1991       } else {
1992         // Use abstract attribute information.
1993         const AANonNull::StateType &NS =
1994             static_cast<const AANonNull::StateType &>(AA.getState());
1995         T ^= NS;
1996       }
1997       return T.isValidState();
1998     };
1999 
2000     StateType T;
2001     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
2002                                                      T, VisitValueCB))
2003       return indicatePessimisticFixpoint();
2004 
2005     return clampStateAndIndicateChange(getState(), T);
2006   }
2007 
2008   /// See AbstractAttribute::trackStatistics()
2009   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2010 };
2011 
/// NonNull attribute for function return value; derived entirely from the
/// returned values via AAReturnedFromReturnedValues.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
  AANonNullReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
2021 
/// NonNull attribute for function argument; combines the states of all call
/// site arguments with must-be-executed-context reasoning.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
                                                              AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
                                                                AANonNullImpl>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};
2034 
/// NonNull attribute for a call site argument; treated like a floating
/// value of the call site.
struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
2041 
/// NonNull attribute for a call site return position; derived from the
/// callee's returned values plus must-be-executed-context reasoning.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
                                                             AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
                                                               AANonNullImpl>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
2054 
2055 /// ------------------------ No-Recurse Attributes ----------------------------
2056 
/// Common implementation shared by all "norecurse" deductions.
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};
2065 
/// NoRecurse attribute deduction for a function.
struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    // A function in an SCC with more than one member is (potentially)
    // mutually recursive; give up right away.
    if (const Function *F = getAnchorScope())
      if (A.getInfoCache().getSccSize(*F) != 1)
        indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          /* TrackDependence */ false, DepClassTy::OPTIONAL);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (AllCallSitesKnown)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    // If the above check does not hold anymore we look at the calls.
    auto CheckForNoRecurse = [&](Instruction &I) {
      ImmutableCallSite ICS(&I);
      // A norecurse attribute on the call site is trusted directly.
      if (ICS.hasFnAttr(Attribute::NoRecurse))
        return true;

      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
      if (!NoRecurseAA.isAssumedNoRecurse())
        return false;

      // Recursion to the same function
      if (ICS.getCalledFunction() == getAnchorScope())
        return false;

      return true;
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};
2124 
/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    // Without a known callee there is nothing to query; give up immediately.
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
2154 
2155 /// -------------------- Undefined-Behavior Attributes ------------------------
2156 
2157 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  /// Construct for the given position; no extra state beyond the base class.
  AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
2159 
  /// See AbstractAttribute::updateImpl(...).
  /// Inspects instructions that access memory through a pointer (and also
  /// conditional branches) for undefined behavior.
  ChangeStatus updateImpl(Attributor &A) override {
    // Remember the set sizes so we can detect whether this update changed
    // anything.
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOp)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOp->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    // Classify all memory-accessing and conditional-branch instructions; the
    // callbacks above sort them into the known-UB / assumed-no-UB sets.
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW});
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br});
    // Report a change iff either set grew during this update.
    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }
2235 
  /// See AAUndefinedBehavior::isKnownToCauseUB(...).
  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }
2239 
2240   bool isAssumedToCauseUB(Instruction *I) const override {
2241     // In simple words, if an instruction is not in the assumed to _not_
2242     // cause UB, then it is assumed UB (that includes those
2243     // in the KnownUBInsts set). The rest is boilerplate
2244     // is to ensure that it is one of the instructions we test
2245     // for UB.
2246 
2247     switch (I->getOpcode()) {
2248     case Instruction::Load:
2249     case Instruction::Store:
2250     case Instruction::AtomicCmpXchg:
2251     case Instruction::AtomicRMW:
2252       return !AssumedNoUBInsts.count(I);
2253     case Instruction::Br: {
2254       auto BrInst = cast<BranchInst>(I);
2255       if (BrInst->isUnconditional())
2256         return false;
2257       return !AssumedNoUBInsts.count(I);
2258     } break;
2259     default:
2260       return false;
2261     }
2262     return false;
2263   }
2264 
  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Every instruction known to cause UB is handed to the Attributor to be
    // replaced by an unreachable after manifestation.
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }
2272 
  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    // Pretty-print the assumed state for debugging output.
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }
2277 
2278   /// Note: The correctness of this analysis depends on the fact that the
2279   /// following 2 sets will stop changing after some point.
2280   /// "Change" here means that their size changes.
2281   /// The size of each set is monotonically increasing
2282   /// (we only add items to them) and it is upper bounded by the number of
2283   /// instructions in the processed function (we can never save more
2284   /// elements in either set than this number). Hence, at some point,
2285   /// they will stop increasing.
2286   /// Consequently, at some point, both sets will have stopped
2287   /// changing, effectively making the analysis reach a fixpoint.
2288 
2289   /// Note: These 2 sets are disjoint and an instruction can be considered
2290   /// one of 3 things:
2291   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2292   ///    the KnownUBInsts set.
2293   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2294   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2296   ///    could not find a reason to assume or prove that it can cause UB,
2297   ///    hence it assumes it doesn't. We have a set for these instructions
2298   ///    so that we don't reprocess them in every update.
2299   ///    Note however that instructions in this set may cause UB.
2300 
2301 protected:
2302   /// A set of all live instructions _known_ to cause UB.
2303   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2304 
2305 private:
2306   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2307   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2308 
2309   // Should be called on updates in which if we're processing an instruction
2310   // \p I that depends on a value \p V, one of the following has to happen:
2311   // - If the value is assumed, then stop.
2312   // - If the value is known but undef, then consider it UB.
2313   // - Otherwise, do specific processing with the simplified value.
2314   // We return None in the first 2 cases to signify that an appropriate
2315   // action was taken and the caller should stop.
2316   // Otherwise, we return the simplified value that the caller should
2317   // use for specific processing.
2318   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2319                                          Instruction *I) {
2320     const auto &ValueSimplifyAA =
2321         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2322     Optional<Value *> SimplifiedV =
2323         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2324     if (!ValueSimplifyAA.isKnown()) {
2325       // Don't depend on assumed values.
2326       return llvm::None;
2327     }
2328     if (!SimplifiedV.hasValue()) {
2329       // If it is known (which we tested above) but it doesn't have a value,
2330       // then we can assume `undef` and hence the instruction is UB.
2331       KnownUBInsts.insert(I);
2332       return llvm::None;
2333     }
2334     Value *Val = SimplifiedV.getValue();
2335     if (isa<UndefValue>(Val)) {
2336       KnownUBInsts.insert(I);
2337       return llvm::None;
2338     }
2339     return Val;
2340   }
2341 };
2342 
/// UndefinedBehavior attribute deduction for a function.
struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP)
      : AAUndefinedBehaviorImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    // Accumulate the number of instructions we proved to cause UB.
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
2355 
2356 /// ------------------------ Will-Return Attributes ----------------------------
2357 
2358 // Helper function that checks whether a function has any cycle.
2359 // TODO: Replace with more efficent code
2360 static bool containsCycle(Function &F) {
2361   SmallPtrSet<BasicBlock *, 32> Visited;
2362 
2363   // Traverse BB by dfs and check whether successor is already visited.
2364   for (BasicBlock *BB : depth_first(&F)) {
2365     Visited.insert(BB);
2366     for (auto *SuccBB : successors(BB)) {
2367       if (Visited.count(SuccBB))
2368         return true;
2369     }
2370   }
2371   return false;
2372 }
2373 
2374 // Helper function that checks the function have a loop which might become an
2375 // endless loop
2376 // FIXME: Any cycle is regarded as endless loop for now.
2377 //        We have to allow some patterns.
2378 static bool containsPossiblyEndlessLoop(Function *F) {
2379   return !F || !F->hasExactDefinition() || containsCycle(*F);
2380 }
2381 
2382 struct AAWillReturnImpl : public AAWillReturn {
2383   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2384 
2385   /// See AbstractAttribute::initialize(...).
2386   void initialize(Attributor &A) override {
2387     AAWillReturn::initialize(A);
2388 
2389     Function *F = getAssociatedFunction();
2390     if (containsPossiblyEndlessLoop(F))
2391       indicatePessimisticFixpoint();
2392   }
2393 
2394   /// See AbstractAttribute::updateImpl(...).
2395   ChangeStatus updateImpl(Attributor &A) override {
2396     auto CheckForWillReturn = [&](Instruction &I) {
2397       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2398       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2399       if (WillReturnAA.isKnownWillReturn())
2400         return true;
2401       if (!WillReturnAA.isAssumedWillReturn())
2402         return false;
2403       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2404       return NoRecurseAA.isAssumedNoRecurse();
2405     };
2406 
2407     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2408       return indicatePessimisticFixpoint();
2409 
2410     return ChangeStatus::UNCHANGED;
2411   }
2412 
2413   /// See AbstractAttribute::getAsStr()
2414   const std::string getAsStr() const override {
2415     return getAssumed() ? "willreturn" : "may-noreturn";
2416   }
2417 };
2418 
/// WillReturn attribute deduction for a function.
struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};
2425 
2426 /// WillReturn attribute deduction for a call sites.
2427 struct AAWillReturnCallSite final : AAWillReturnImpl {
2428   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2429 
2430   /// See AbstractAttribute::initialize(...).
2431   void initialize(Attributor &A) override {
2432     AAWillReturnImpl::initialize(A);
2433     Function *F = getAssociatedFunction();
2434     if (!F)
2435       indicatePessimisticFixpoint();
2436   }
2437 
2438   /// See AbstractAttribute::updateImpl(...).
2439   ChangeStatus updateImpl(Attributor &A) override {
2440     // TODO: Once we have call site specific value information we can provide
2441     //       call site specific liveness information and then it makes
2442     //       sense to specialize attributes for call sites arguments instead of
2443     //       redirecting requests to the callee argument.
2444     Function *F = getAssociatedFunction();
2445     const IRPosition &FnPos = IRPosition::function(*F);
2446     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2447     return clampStateAndIndicateChange(
2448         getState(),
2449         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2450   }
2451 
2452   /// See AbstractAttribute::trackStatistics()
2453   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2454 };
2455 
2456 /// -------------------AAReachability Attribute--------------------------
2457 
struct AAReachabilityImpl : AAReachability {
  AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}

  const std::string getAsStr() const override {
    // TODO: Return the number of reachable queries.
    return "reachable";
  }

  /// See AbstractAttribute::initialize(...).
  // Stub implementation: immediately give up, which conservatively treats
  // everything as reachable.
  void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }
};
2474 
/// Reachability attribute deduction for a function.
struct AAReachabilityFunction final : public AAReachabilityImpl {
  AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
};
2481 
2482 /// ------------------------ NoAlias Argument Attribute ------------------------
2483 
2484 struct AANoAliasImpl : AANoAlias {
2485   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2486 
2487   const std::string getAsStr() const override {
2488     return getAssumed() ? "noalias" : "may-alias";
2489   }
2490 };
2491 
2492 /// NoAlias attribute for a floating value.
2493 struct AANoAliasFloating final : AANoAliasImpl {
2494   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2495 
2496   /// See AbstractAttribute::initialize(...).
2497   void initialize(Attributor &A) override {
2498     AANoAliasImpl::initialize(A);
2499     Value *Val = &getAssociatedValue();
2500     do {
2501       CastInst *CI = dyn_cast<CastInst>(Val);
2502       if (!CI)
2503         break;
2504       Value *Base = CI->getOperand(0);
2505       if (Base->getNumUses() != 1)
2506         break;
2507       Val = Base;
2508     } while (true);
2509 
2510     if (isa<AllocaInst>(Val))
2511       indicateOptimisticFixpoint();
2512     else if (isa<ConstantPointerNull>(Val) &&
2513              !NullPointerIsDefined(getAnchorScope(),
2514                                    Val->getType()->getPointerAddressSpace()))
2515       indicateOptimisticFixpoint();
2516     else if (Val != &getAssociatedValue()) {
2517       const auto &ValNoAliasAA =
2518           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2519       if (ValNoAliasAA.isKnownNoAlias())
2520         indicateOptimisticFixpoint();
2521     }
2522   }
2523 
2524   /// See AbstractAttribute::updateImpl(...).
2525   ChangeStatus updateImpl(Attributor &A) override {
2526     // TODO: Implement this.
2527     return indicatePessimisticFixpoint();
2528   }
2529 
2530   /// See AbstractAttribute::trackStatistics()
2531   void trackStatistics() const override {
2532     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2533   }
2534 };
2535 
2536 /// NoAlias attribute for an argument.
2537 struct AANoAliasArgument final
2538     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2539   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2540   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2541 
2542   /// See AbstractAttribute::initialize(...).
2543   void initialize(Attributor &A) override {
2544     Base::initialize(A);
2545     // See callsite argument attribute and callee argument attribute.
2546     if (hasAttr({Attribute::ByVal}))
2547       indicateOptimisticFixpoint();
2548   }
2549 
2550   /// See AbstractAttribute::update(...).
2551   ChangeStatus updateImpl(Attributor &A) override {
2552     // We have to make sure no-alias on the argument does not break
2553     // synchronization when this is a callback argument, see also [1] below.
2554     // If synchronization cannot be affected, we delegate to the base updateImpl
2555     // function, otherwise we give up for now.
2556 
2557     // If the function is no-sync, no-alias cannot break synchronization.
2558     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2559         *this, IRPosition::function_scope(getIRPosition()));
2560     if (NoSyncAA.isAssumedNoSync())
2561       return Base::updateImpl(A);
2562 
2563     // If the argument is read-only, no-alias cannot break synchronization.
2564     const auto &MemBehaviorAA =
2565         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2566     if (MemBehaviorAA.isAssumedReadOnly())
2567       return Base::updateImpl(A);
2568 
2569     // If the argument is never passed through callbacks, no-alias cannot break
2570     // synchronization.
2571     bool AllCallSitesKnown;
2572     if (A.checkForAllCallSites(
2573             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2574             true, AllCallSitesKnown))
2575       return Base::updateImpl(A);
2576 
2577     // TODO: add no-alias but make sure it doesn't break synchronization by
2578     // introducing fake uses. See:
2579     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2580     //     International Workshop on OpenMP 2018,
2581     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2582 
2583     return indicatePessimisticFixpoint();
2584   }
2585 
2586   /// See AbstractAttribute::trackStatistics()
2587   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2588 };
2589 
/// NoAlias attribute for a call site argument.
struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    ImmutableCallSite ICS(&getAnchorValue());
    // An explicit noalias attribute on the call site argument settles it.
    if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    Value &Val = getAssociatedValue();
    // A null pointer is noalias if null is not a defined address in the
    // pointer's address space.
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p ICS (= the underlying call site).
  /// \p AAR is created lazily and cached across calls via the reference.
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            ImmutableCallSite ICS, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = ICS.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    auto &ICSArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(ICS, OtherArgNo),
        /* TrackDependence */ false);

    // If the argument is readnone, there is no read-write aliasing.
    if (ICSArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (ICSArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site. Without an AAResults object we
    // conservatively report "may alias".
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias \n");

    return IsAliasing;
  }

  bool
  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
                                         const AAMemoryBehavior &MemBehaviorAA,
                                         const AANoAlias &NoAliasAA) {
    // We can deduce "noalias" if the following conditions hold.
    // (i)   Associated value is assumed to be noalias in the definition.
    // (ii)  Associated value is assumed to be no-capture in all the uses
    //       possibly executed before this callsite.
    // (iii) There is no other pointer argument which could alias with the
    //       value.

    // Condition (i): noalias at the definition.
    bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
    if (!AssociatedValueIsNoAliasAtDef) {
      LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
                        << " is not no-alias at the definition\n");
      return false;
    }

    // Condition (ii): not captured (except possibly via the return value).
    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    auto &NoCaptureAA =
        A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
    // Check whether the value is captured in the scope using AANoCapture.
    // FIXME: This is conservative though, it is better to look at CFG and
    //        check only uses possibly executed before this callsite.
    if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      LLVM_DEBUG(
          dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
                 << " cannot be noalias as it is potentially captured\n");
      return false;
    }
    A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);

    // Condition (iii): check there is no other pointer argument which could
    // alias with the value passed at this call site.
    // TODO: AbstractCallSite
    ImmutableCallSite ICS(&getAnchorValue());
    for (unsigned OtherArgNo = 0; OtherArgNo < ICS.getNumArgOperands();
         OtherArgNo++)
      if (mayAliasWithArgument(A, AAR, MemBehaviorAA, ICS, OtherArgNo))
        return false;

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // If the argument is readnone we are done as there are no accesses via the
    // argument.
    auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
                                     /* TrackDependence */ false);
    if (MemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
                                                  /* TrackDependence */ false);

    // AAResults is created lazily inside the helper, only if needed.
    AAResults *AAR = nullptr;
    if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
                                               NoAliasAA)) {
      LLVM_DEBUG(
          dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
      return ChangeStatus::UNCHANGED;
    }

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};
2727 
2728 /// NoAlias attribute for function return value.
2729 struct AANoAliasReturned final : AANoAliasImpl {
2730   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2731 
2732   /// See AbstractAttribute::updateImpl(...).
2733   virtual ChangeStatus updateImpl(Attributor &A) override {
2734 
2735     auto CheckReturnValue = [&](Value &RV) -> bool {
2736       if (Constant *C = dyn_cast<Constant>(&RV))
2737         if (C->isNullValue() || isa<UndefValue>(C))
2738           return true;
2739 
2740       /// For now, we can only deduce noalias if we have call sites.
2741       /// FIXME: add more support.
2742       ImmutableCallSite ICS(&RV);
2743       if (!ICS)
2744         return false;
2745 
2746       const IRPosition &RVPos = IRPosition::value(RV);
2747       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2748       if (!NoAliasAA.isAssumedNoAlias())
2749         return false;
2750 
2751       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2752       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2753     };
2754 
2755     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2756       return indicatePessimisticFixpoint();
2757 
2758     return ChangeStatus::UNCHANGED;
2759   }
2760 
2761   /// See AbstractAttribute::trackStatistics()
2762   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2763 };
2764 
2765 /// NoAlias attribute deduction for a call site return value.
2766 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2767   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2768 
2769   /// See AbstractAttribute::initialize(...).
2770   void initialize(Attributor &A) override {
2771     AANoAliasImpl::initialize(A);
2772     Function *F = getAssociatedFunction();
2773     if (!F)
2774       indicatePessimisticFixpoint();
2775   }
2776 
2777   /// See AbstractAttribute::updateImpl(...).
2778   ChangeStatus updateImpl(Attributor &A) override {
2779     // TODO: Once we have call site specific value information we can provide
2780     //       call site specific liveness information and then it makes
2781     //       sense to specialize attributes for call sites arguments instead of
2782     //       redirecting requests to the callee argument.
2783     Function *F = getAssociatedFunction();
2784     const IRPosition &FnPos = IRPosition::returned(*F);
2785     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2786     return clampStateAndIndicateChange(
2787         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2788   }
2789 
2790   /// See AbstractAttribute::trackStatistics()
2791   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2792 };
2793 
2794 /// -------------------AAIsDead Function Attribute-----------------------
2795 
struct AAIsDeadValueImpl : public AAIsDead {
  AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override { return getAssumed(); }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return getKnown(); }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  // Value positions do not reason about whole blocks.
  bool isAssumedDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  // Only the position's own context instruction can be assumed dead here.
  bool isAssumedDead(const Instruction *I) const override {
    return I == getCtxI() && isAssumedDead();
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return I == getCtxI() && getKnown();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return isAssumedDead() ? "assumed-dead" : "assumed-live";
  }
};
2826 
struct AAIsDeadFloating : public AAIsDeadValueImpl {
  AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Instructions that are not trivially removable (e.g. ones with side
    // effects) cannot be deleted, so do not even assume deadness.
    if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
      if (!wouldInstructionBeTriviallyDead(I))
        indicatePessimisticFixpoint();
    // Undef values are not interesting to track.
    if (isa<UndefValue>(getAssociatedValue()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // A value is assumed dead if all its uses are: call site argument and
    // returned-value uses are checked through their own AAIsDead, every
    // other user must itself be trivially dead and is followed transitively.
    auto UsePred = [&](const Use &U, bool &Follow) {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (CallSite CS = CallSite(UserI)) {
        // A use as callee or in an operand bundle is not an argument use;
        // give up on those.
        if (!CS.isArgOperand(&U))
          return false;
        const IRPosition &CSArgPos =
            IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
        const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
        return CSArgIsDead.isAssumedDead();
      }
      if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
        const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
        const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
        return RetIsDeadAA.isAssumedDead();
      }
      Follow = true;
      return wouldInstructionBeTriviallyDead(UserI);
    };

    if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Value &V = getAssociatedValue();
    // Delete the instruction outright if it is trivially removable.
    if (auto *I = dyn_cast<Instruction>(&V))
      if (wouldInstructionBeTriviallyDead(I)) {
        A.deleteAfterManifest(*I);
        return ChangeStatus::CHANGED;
      }

    if (V.use_empty())
      return ChangeStatus::UNCHANGED;

    // Otherwise replace the (dead) value with undef.
    UndefValue &UV = *UndefValue::get(V.getType());
    bool AnyChange = A.changeValueAfterManifest(V, UV);
    return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(IsDead)
  }
};
2887 
2888 struct AAIsDeadArgument : public AAIsDeadFloating {
2889   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2890 
2891   /// See AbstractAttribute::initialize(...).
2892   void initialize(Attributor &A) override {
2893     if (!getAssociatedFunction()->hasExactDefinition())
2894       indicatePessimisticFixpoint();
2895   }
2896 
2897   /// See AbstractAttribute::manifest(...).
2898   ChangeStatus manifest(Attributor &A) override {
2899     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2900     Argument &Arg = *getAssociatedArgument();
2901     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2902       if (A.registerFunctionSignatureRewrite(
2903               Arg, /* ReplacementTypes */ {},
2904               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2905               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
2906         return ChangeStatus::CHANGED;
2907     return Changed;
2908   }
2909 
2910   /// See AbstractAttribute::trackStatistics()
2911   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2912 };
2913 
2914 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2915   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2916 
2917   /// See AbstractAttribute::initialize(...).
2918   void initialize(Attributor &A) override {
2919     if (isa<UndefValue>(getAssociatedValue()))
2920       indicatePessimisticFixpoint();
2921   }
2922 
2923   /// See AbstractAttribute::updateImpl(...).
2924   ChangeStatus updateImpl(Attributor &A) override {
2925     // TODO: Once we have call site specific value information we can provide
2926     //       call site specific liveness information and then it makes
2927     //       sense to specialize attributes for call sites arguments instead of
2928     //       redirecting requests to the callee argument.
2929     Argument *Arg = getAssociatedArgument();
2930     if (!Arg)
2931       return indicatePessimisticFixpoint();
2932     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2933     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2934     return clampStateAndIndicateChange(
2935         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2936   }
2937 
2938   /// See AbstractAttribute::manifest(...).
2939   ChangeStatus manifest(Attributor &A) override {
2940     CallBase &CB = cast<CallBase>(getAnchorValue());
2941     Use &U = CB.getArgOperandUse(getArgNo());
2942     assert(!isa<UndefValue>(U.get()) &&
2943            "Expected undef values to be filtered out!");
2944     UndefValue &UV = *UndefValue::get(U->getType());
2945     if (A.changeUseAfterManifest(U, UV))
2946       return ChangeStatus::CHANGED;
2947     return ChangeStatus::UNCHANGED;
2948   }
2949 
2950   /// See AbstractAttribute::trackStatistics()
2951   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2952 };
2953 
2954 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2955   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2956 
2957   /// See AbstractAttribute::updateImpl(...).
2958   ChangeStatus updateImpl(Attributor &A) override {
2959 
2960     bool AllKnownDead = true;
2961     auto PredForCallSite = [&](AbstractCallSite ACS) {
2962       if (ACS.isCallbackCall())
2963         return false;
2964       const IRPosition &CSRetPos =
2965           IRPosition::callsite_returned(ACS.getCallSite());
2966       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2967       AllKnownDead &= RetIsDeadAA.isKnownDead();
2968       return RetIsDeadAA.isAssumedDead();
2969     };
2970 
2971     bool AllCallSitesKnown;
2972     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2973                                 AllCallSitesKnown))
2974       return indicatePessimisticFixpoint();
2975 
2976     if (AllCallSitesKnown && AllKnownDead)
2977       indicateOptimisticFixpoint();
2978 
2979     return ChangeStatus::UNCHANGED;
2980   }
2981 
2982   /// See AbstractAttribute::manifest(...).
2983   ChangeStatus manifest(Attributor &A) override {
2984     // TODO: Rewrite the signature to return void?
2985     bool AnyChange = false;
2986     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2987     auto RetInstPred = [&](Instruction &I) {
2988       ReturnInst &RI = cast<ReturnInst>(I);
2989       if (!isa<UndefValue>(RI.getReturnValue()))
2990         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2991       return true;
2992     };
2993     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2994     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2995   }
2996 
2997   /// See AbstractAttribute::trackStatistics()
2998   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2999 };
3000 
struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
  AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  // No-op override: the trivially-dead/undef checks done in
  // AAIsDeadFloating::initialize are not applied for call site returns.
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
};
3010 
3011 struct AAIsDeadFunction : public AAIsDead {
3012   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
3013 
3014   /// See AbstractAttribute::initialize(...).
3015   void initialize(Attributor &A) override {
3016     const Function *F = getAssociatedFunction();
3017     if (F && !F->isDeclaration()) {
3018       ToBeExploredFrom.insert(&F->getEntryBlock().front());
3019       assumeLive(A, F->getEntryBlock());
3020     }
3021   }
3022 
3023   /// See AbstractAttribute::getAsStr().
3024   const std::string getAsStr() const override {
3025     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3026            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
3027            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3028            std::to_string(KnownDeadEnds.size()) + "]";
3029   }
3030 
  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAssociatedFunction();

    // If not even the entry block is assumed live, the whole function is dead.
    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows to catch asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    // Known dead ends are call-like instructions after which control never
    // continues; cut off the code that follows them.
    KnownDeadEnds.set_union(ToBeExploredFrom);
    for (const Instruction *DeadEndI : KnownDeadEnds) {
      auto *CB = dyn_cast<CallBase>(DeadEndI);
      if (!CB)
        continue;
      const auto &NoReturnAA =
          A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
      bool MayReturn = !NoReturnAA.isAssumedNoReturn();
      // Only act on calls that never return, or on invokes we may rewrite
      // into calls.
      if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
        A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
      else
        A.changeToUnreachableAfterManifest(
            const_cast<Instruction *>(DeadEndI->getNextNode()));
      HasChanged = ChangeStatus::CHANGED;
    }

    // Finally remove all blocks that were never assumed live.
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB))
        A.deleteAfterManifest(BB);

    return HasChanged;
  }
3074 
3075   /// See AbstractAttribute::updateImpl(...).
3076   ChangeStatus updateImpl(Attributor &A) override;
3077 
3078   /// See AbstractAttribute::trackStatistics()
3079   void trackStatistics() const override {}
3080 
3081   /// Returns true if the function is assumed dead.
3082   bool isAssumedDead() const override { return false; }
3083 
3084   /// See AAIsDead::isKnownDead().
3085   bool isKnownDead() const override { return false; }
3086 
3087   /// See AAIsDead::isAssumedDead(BasicBlock *).
3088   bool isAssumedDead(const BasicBlock *BB) const override {
3089     assert(BB->getParent() == getAssociatedFunction() &&
3090            "BB must be in the same anchor scope function.");
3091 
3092     if (!getAssumed())
3093       return false;
3094     return !AssumedLiveBlocks.count(BB);
3095   }
3096 
3097   /// See AAIsDead::isKnownDead(BasicBlock *).
3098   bool isKnownDead(const BasicBlock *BB) const override {
3099     return getKnown() && isAssumedDead(BB);
3100   }
3101 
3102   /// See AAIsDead::isAssumed(Instruction *I).
3103   bool isAssumedDead(const Instruction *I) const override {
3104     assert(I->getParent()->getParent() == getAssociatedFunction() &&
3105            "Instruction must be in the same anchor scope function.");
3106 
3107     if (!getAssumed())
3108       return false;
3109 
3110     // If it is not in AssumedLiveBlocks then it for sure dead.
3111     // Otherwise, it can still be after noreturn call in a live block.
3112     if (!AssumedLiveBlocks.count(I->getParent()))
3113       return true;
3114 
3115     // If it is not after a liveness barrier it is live.
3116     const Instruction *PrevI = I->getPrevNode();
3117     while (PrevI) {
3118       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3119         return true;
3120       PrevI = PrevI->getPrevNode();
3121     }
3122     return false;
3123   }
3124 
3125   /// See AAIsDead::isKnownDead(Instruction *I).
3126   bool isKnownDead(const Instruction *I) const override {
3127     return getKnown() && isAssumedDead(I);
3128   }
3129 
3130   /// Determine if \p F might catch asynchronous exceptions.
3131   static bool mayCatchAsynchronousExceptions(const Function &F) {
3132     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
3133   }
3134 
3135   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3136   /// that internal function called from \p BB should now be looked at.
3137   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3138     if (!AssumedLiveBlocks.insert(&BB).second)
3139       return false;
3140 
3141     // We assume that all of BB is (probably) live now and if there are calls to
3142     // internal functions we will assume that those are now live as well. This
3143     // is a performance optimization for blocks with calls to a lot of internal
3144     // functions. It can however cause dead functions to be treated as live.
3145     for (const Instruction &I : BB)
3146       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
3147         if (const Function *F = ICS.getCalledFunction())
3148           if (F->hasLocalLinkage())
3149             A.markLiveInternalFunction(*F);
3150     return true;
3151   }
3152 
3153   /// Collection of instructions that need to be explored again, e.g., we
3154   /// did assume they do not transfer control to (one of their) successors.
3155   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3156 
3157   /// Collection of instructions that are known to not transfer control.
3158   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3159 
3160   /// Collection of all assumed live BasicBlocks.
3161   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3162 };
3163 
3164 static bool
3165 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3166                         AbstractAttribute &AA,
3167                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3168   const IRPosition &IPos = IRPosition::callsite_function(CB);
3169 
3170   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3171   if (NoReturnAA.isAssumedNoReturn())
3172     return !NoReturnAA.isKnownNoReturn();
3173   if (CB.isTerminator())
3174     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3175   else
3176     AliveSuccessors.push_back(CB.getNextNode());
3177   return false;
3178 }
3179 
3180 static bool
3181 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3182                         AbstractAttribute &AA,
3183                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3184   bool UsedAssumedInformation =
3185       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3186 
3187   // First, determine if we can change an invoke to a call assuming the
3188   // callee is nounwind. This is not possible if the personality of the
3189   // function allows to catch asynchronous exceptions.
3190   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3191     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3192   } else {
3193     const IRPosition &IPos = IRPosition::callsite_function(II);
3194     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3195     if (AANoUnw.isAssumedNoUnwind()) {
3196       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3197     } else {
3198       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3199     }
3200   }
3201   return UsedAssumedInformation;
3202 }
3203 
/// Determine which instruction(s) execution can reach after the branch \p BI.
/// Returns true if the verdict relies on assumed-only information.
static bool
identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  if (BI.getNumSuccessors() == 1) {
    // Unconditional branch: the single successor is trivially alive.
    AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
  } else {
    // The Optional has three states: None (no value known yet), a non-null
    // ConstantInt (the condition folds to a constant), or a null pointer
    // (the condition is known not to be a constant).
    Optional<ConstantInt *> CI =
        getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
    if (!CI.hasValue()) {
      // No value yet, assume both edges are dead.
    } else if (CI.getValue()) {
      // Constant condition: exactly one edge is alive. A true (=1) condition
      // selects successor 0, a false one successor 1.
      const BasicBlock *SuccBB =
          BI.getSuccessor(1 - CI.getValue()->getZExtValue());
      AliveSuccessors.push_back(&SuccBB->front());
    } else {
      // Non-constant condition: both successors may be reached, and this
      // conclusion no longer depends on assumed information.
      AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
      AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
      UsedAssumedInformation = false;
    }
  }
  return UsedAssumedInformation;
}
3228 
3229 static bool
3230 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3231                         AbstractAttribute &AA,
3232                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3233   bool UsedAssumedInformation = false;
3234   Optional<ConstantInt *> CI =
3235       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
3236   if (!CI.hasValue()) {
3237     // No value yet, assume all edges are dead.
3238   } else if (CI.getValue()) {
3239     for (auto &CaseIt : SI.cases()) {
3240       if (CaseIt.getCaseValue() == CI.getValue()) {
3241         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3242         return UsedAssumedInformation;
3243       }
3244     }
3245     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3246     return UsedAssumedInformation;
3247   } else {
3248     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3249       AliveSuccessors.push_back(&SuccBB->front());
3250   }
3251   return UsedAssumedInformation;
3252 }
3253 
/// Forward exploration step: drain the exploration worklist, classify each
/// instruction's alive successors, and record new dead ends or points that
/// must be revisited once assumed information stabilizes.
ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAssociatedFunction()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    AliveSuccessors.clear();

    // Determine, per opcode, which instructions execution can continue at and
    // whether that conclusion rests on assumed-only information.
    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
    default:
      if (I->isTerminator()) {
        for (const BasicBlock *SuccBB : successors(I->getParent()))
          AliveSuccessors.push_back(&SuccBB->front());
      } else {
        AliveSuccessors.push_back(I->getNextNode());
      }
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      // The verdict rests on assumed information; revisit this instruction in
      // the next update.
      NewToBeExploredFrom.insert(I);
    } else {
      Change = ChangeStatus::CHANGED;
      // Remember instructions that (may) end execution: either no alive
      // successor at all, or a terminator with at least one dead successor.
      if (AliveSuccessors.empty() ||
          (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
        KnownDeadEnds.insert(I);
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Only enqueue successors whose block was not already assumed live to
        // avoid exploring the same block twice.
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  ToBeExploredFrom = std::move(NewToBeExploredFrom);

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) have
  // found every block live, and (3) have discovered only trivial dead ends
  // (terminators without successors).
  if (ToBeExploredFrom.empty() &&
      getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}
3345 
3346 /// Liveness information for a call sites.
3347 struct AAIsDeadCallSite final : AAIsDeadFunction {
3348   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3349 
3350   /// See AbstractAttribute::initialize(...).
3351   void initialize(Attributor &A) override {
3352     // TODO: Once we have call site specific value information we can provide
3353     //       call site specific liveness information and then it makes
3354     //       sense to specialize attributes for call sites instead of
3355     //       redirecting requests to the callee.
3356     llvm_unreachable("Abstract attributes for liveness are not "
3357                      "supported for call sites yet!");
3358   }
3359 
3360   /// See AbstractAttribute::updateImpl(...).
3361   ChangeStatus updateImpl(Attributor &A) override {
3362     return indicatePessimisticFixpoint();
3363   }
3364 
3365   /// See AbstractAttribute::trackStatistics()
3366   void trackStatistics() const override {}
3367 };
3368 
3369 /// -------------------- Dereferenceable Argument Attribute --------------------
3370 
3371 template <>
3372 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3373                                                      const DerefState &R) {
3374   ChangeStatus CS0 =
3375       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3376   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3377   return CS0 | CS1;
3378 }
3379 
/// A common base for dereferenceable attribute deduction; combines existing IR
/// attribute information, non-nullness, and byte counts gathered from uses.
struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Seed the known bytes from existing dereferenceable(_or_null) IR
    // attributes.
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    // Cache the non-null AA; no dependence is tracked here.
    NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
                                       /* TrackDependence */ false);

    // For function-interface positions without an exact definition we cannot
    // improve on the IR information.
    const IRPosition &IRP = this->getIRPosition();
    bool IsFnInterface = IRP.isFnInterfaceKind();
    const Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// Helper function for collecting accessed bytes in must-be-executed-context
  void addAccessedBytesForUse(Attributor &A, const Use *U,
                              const Instruction *I) {
    const Value *UseV = U->get();
    if (!UseV->getType()->isPointerTy())
      return;

    Type *PtrTy = UseV->getType();
    const DataLayout &DL = A.getDataLayout();
    int64_t Offset;
    // Record the accessed byte range only if the access is based on the
    // associated value and the use is the instruction's pointer operand.
    if (const Value *Base = getBasePointerOfAccessPointerOperand(
            I, Offset, DL, /*AllowNonInbounds*/ true)) {
      if (Base == &getAssociatedValue() &&
          getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
        uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
        addAccessedBytes(Offset, Size);
      }
    }
    return;
  }

  /// See AAFromMustBeExecutedContext
  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
    bool IsNonNull = false;
    bool TrackUse = false;
    int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
        A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);

    addAccessedBytesForUse(A, U, I);
    takeKnownDerefBytesMaximum(DerefBytes);
    return TrackUse;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    // An existing dereferenceable_or_null attribute is subsumed by the
    // dereferenceable attribute emitted for a non-null pointer; drop it.
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  /// Emit dereferenceable for non-null pointers, dereferenceable_or_null
  /// otherwise.
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
3472 
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating
    : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
  using Base =
      AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = Base::updateImpl(A);

    const DataLayout &DL = A.getDataLayout();

    // Visit each underlying value of the position; \p Stripped indicates that
    // \p V is not the associated value itself (casts/GEPs were looked
    // through by the traversal).
    auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        // Combine with the state deduced for the underlying value.
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // TODO: Use `AAConstantRange` to infer dereferenceable bytes.

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // for overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // for the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return Change | clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
3552 
3553 /// Dereferenceable attribute for a return value.
3554 struct AADereferenceableReturned final
3555     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3556   AADereferenceableReturned(const IRPosition &IRP)
3557       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3558             IRP) {}
3559 
3560   /// See AbstractAttribute::trackStatistics()
3561   void trackStatistics() const override {
3562     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3563   }
3564 };
3565 
3566 /// Dereferenceable attribute for an argument
3567 struct AADereferenceableArgument final
3568     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3569           AADereferenceable, AADereferenceableImpl> {
3570   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3571       AADereferenceable, AADereferenceableImpl>;
3572   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3573 
3574   /// See AbstractAttribute::trackStatistics()
3575   void trackStatistics() const override {
3576     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3577   }
3578 };
3579 
3580 /// Dereferenceable attribute for a call site argument.
3581 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3582   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3583       : AADereferenceableFloating(IRP) {}
3584 
3585   /// See AbstractAttribute::trackStatistics()
3586   void trackStatistics() const override {
3587     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3588   }
3589 };
3590 
3591 /// Dereferenceable attribute deduction for a call site return value.
3592 struct AADereferenceableCallSiteReturned final
3593     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3594           AADereferenceable, AADereferenceableImpl> {
3595   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3596       AADereferenceable, AADereferenceableImpl>;
3597   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3598 
3599   /// See AbstractAttribute::trackStatistics()
3600   void trackStatistics() const override {
3601     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3602   }
3603 };
3604 
3605 // ------------------------ Align Argument Attribute ------------------------
3606 
/// Determine the alignment of \p AssociatedValue that is known to hold at the
/// use \p U in instruction \p I. \p TrackUse is set if following the users of
/// \p I could reveal more alignment information; returns 0 if nothing is
/// known at this use.
static unsigned int getKnownAlignForUse(Attributor &A,
                                        AbstractAttribute &QueryingAA,
                                        Value &AssociatedValue, const Use *U,
                                        const Instruction *I, bool &TrackUse) {
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into.
  if (isa<CastInst>(I)) {
    // Follow all but ptr2int casts.
    TrackUse = !isa<PtrToIntInst>(I);
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    if (GEP->hasAllConstantIndices()) {
      TrackUse = true;
      return 0;
    }
  }

  unsigned Alignment = 0;
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    // Uses as bundle operands or as the callee carry no alignment
    // information for the pointer itself.
    if (ICS.isBundleOperand(U) || ICS.isCallee(U))
      return 0;

    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
                                        /* TrackDependence */ false);
    Alignment = AlignAA.getKnownAlign();
  }

  // Loads and stores carry an explicit alignment for their pointer operand.
  const Value *UseV = U->get();
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (SI->getPointerOperand() == UseV)
      Alignment = SI->getAlignment();
  } else if (auto *LI = dyn_cast<LoadInst>(I))
    Alignment = LI->getAlignment();

  // An alignment of 0 or 1 carries no information.
  if (Alignment <= 1)
    return 0;

  auto &DL = A.getDataLayout();
  int64_t Offset;

  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
    if (Base == &AssociatedValue) {
      // BasePointerAddr + Offset = Alignment * Q for some integer Q.
      // So we can say that the maximum power of two which is a divisor of
      // gcd(Offset, Alignment) is an alignment.

      // NOTE(review): Offset is an int64_t but is truncated to 32 bits below;
      // offsets outside the int32 range could yield a too-large alignment —
      // TODO confirm this is acceptable for the expected inputs.
      uint32_t gcd =
          greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
      Alignment = llvm::PowerOf2Floor(gcd);
    }
  }

  return Alignment;
}
/// A common base for alignment attribute deduction.
struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Seed the known alignment from existing align IR attributes.
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());

    // For function-interface positions without an exact definition we cannot
    // improve on the IR information.
    if (getIRPosition().isFnInterfaceKind() &&
        (!getAssociatedFunction() ||
         !getAssociatedFunction()->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AnchorVal = getIRPosition().getAnchorValue();
    for (const Use &U : AnchorVal.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        // Raise the store's alignment if the assumed alignment is larger.
        if (SI->getPointerOperand() == &AnchorVal)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(Align(getAssumedAlign()));
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        // Raise the load's alignment if the assumed alignment is larger.
        if (LI->getPointerOperand() == &AnchorVal)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(Align(getAssumedAlign()));
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      }
    }

    ChangeStatus Changed = AAAlign::manifest(A);

    // If the IR already implies at least the assumed alignment, only report
    // the load/store annotation changes.
    MaybeAlign InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign.valueOrOne() >= getAssumedAlign())
      return LoadStoreChanged;
    return Changed | LoadStoreChanged;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check in
  //       the existing manifest method and a new one for AAAlignImpl that value
  //       to avoid making the alignment explicit if it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    // An alignment of 1 is the default and carries no information.
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(
          Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
  }
  /// See AAFromMustBeExecutedContext
  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
    bool TrackUse = false;

    unsigned int KnownAlign =
        getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
    takeKnownMaximum(KnownAlign);

    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};
3747 
/// Align attribute for a floating value.
struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
  using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
  AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Base::updateImpl(A);

    const DataLayout &DL = A.getDataLayout();

    // Visit each underlying value of the position; \p Stripped indicates that
    // \p V is not the associated value itself.
    auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // Use only IR information if we did not strip anything.
        const MaybeAlign PA = V.getPointerAlignment(DL);
        T.takeKnownMaximum(PA ? PA->value() : 0);
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS =
            static_cast<const AAAlign::StateType &>(AA.getState());
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
                                                   VisitValueCB))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, thus no are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};
3789 
3790 /// Align attribute for function return value.
3791 struct AAAlignReturned final
3792     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3793   AAAlignReturned(const IRPosition &IRP)
3794       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3795 
3796   /// See AbstractAttribute::trackStatistics()
3797   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3798 };
3799 
3800 /// Align attribute for function argument.
3801 struct AAAlignArgument final
3802     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3803                                                               AAAlignImpl> {
3804   AAAlignArgument(const IRPosition &IRP)
3805       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3806                                                                 AAAlignImpl>(
3807             IRP) {}
3808 
3809   /// See AbstractAttribute::trackStatistics()
3810   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3811 };
3812 
/// Align attribute for a call site argument.
struct AAAlignCallSiteArgument final : AAAlignFloating {
  AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = AAAlignImpl::manifest(A);
    // If the IR of the call site operand already guarantees at least the
    // assumed alignment, adding the attribute changed nothing observable.
    MaybeAlign InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign.valueOrOne() >= getAssumedAlign())
      Changed = ChangeStatus::UNCHANGED;
    return Changed;
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = AAAlignFloating::updateImpl(A);
    if (Argument *Arg = getAssociatedArgument()) {
      // We only take known information from the argument
      // so we do not need to track a dependence.
      const auto &ArgAlignAA = A.getAAFor<AAAlign>(
          *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
      takeKnownMaximum(ArgAlignAA.getKnownAlign());
    }
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
};
3842 
3843 /// Align attribute deduction for a call site return value.
3844 struct AAAlignCallSiteReturned final
3845     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3846                                                              AAAlignImpl> {
3847   using Base =
3848       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3849                                                              AAAlignImpl>;
3850   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3851 
3852   /// See AbstractAttribute::initialize(...).
3853   void initialize(Attributor &A) override {
3854     Base::initialize(A);
3855     Function *F = getAssociatedFunction();
3856     if (!F)
3857       indicatePessimisticFixpoint();
3858   }
3859 
3860   /// See AbstractAttribute::trackStatistics()
3861   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3862 };
3863 
3864 /// ------------------ Function No-Return Attribute ----------------------------
3865 struct AANoReturnImpl : public AANoReturn {
3866   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3867 
3868   /// See AbstractAttribute::initialize(...).
3869   void initialize(Attributor &A) override {
3870     AANoReturn::initialize(A);
3871     Function *F = getAssociatedFunction();
3872     if (!F)
3873       indicatePessimisticFixpoint();
3874   }
3875 
3876   /// See AbstractAttribute::getAsStr().
3877   const std::string getAsStr() const override {
3878     return getAssumed() ? "noreturn" : "may-return";
3879   }
3880 
3881   /// See AbstractAttribute::updateImpl(Attributor &A).
3882   virtual ChangeStatus updateImpl(Attributor &A) override {
3883     auto CheckForNoReturn = [](Instruction &) { return false; };
3884     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3885                                    {(unsigned)Instruction::Ret}))
3886       return indicatePessimisticFixpoint();
3887     return ChangeStatus::UNCHANGED;
3888   }
3889 };
3890 
3891 struct AANoReturnFunction final : AANoReturnImpl {
3892   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3893 
3894   /// See AbstractAttribute::trackStatistics()
3895   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3896 };
3897 
3898 /// NoReturn attribute deduction for a call sites.
3899 struct AANoReturnCallSite final : AANoReturnImpl {
3900   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3901 
3902   /// See AbstractAttribute::updateImpl(...).
3903   ChangeStatus updateImpl(Attributor &A) override {
3904     // TODO: Once we have call site specific value information we can provide
3905     //       call site specific liveness information and then it makes
3906     //       sense to specialize attributes for call sites arguments instead of
3907     //       redirecting requests to the callee argument.
3908     Function *F = getAssociatedFunction();
3909     const IRPosition &FnPos = IRPosition::function(*F);
3910     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3911     return clampStateAndIndicateChange(
3912         getState(),
3913         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3914   }
3915 
3916   /// See AbstractAttribute::trackStatistics()
3917   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3918 };
3919 
/// ----------------------- Variable Capturing ---------------------------------

/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If this exact position already carries the attribute we are done.
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    // Function interface positions require an exact definition, otherwise
    // code we do not see could invalidate the deduction.
    if (isFnInterfaceKind() &&
        (!AnchorScope || !AnchorScope->hasExactDefinition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    // For arguments look at the function they belong to, otherwise at the
    // anchor scope.
    const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getDeducedAttributes(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    // Attributes are only manifested on argument positions (getArgNo() >= 0).
    if (getArgNo() >= 0) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care about
    // ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            // The tracked argument itself is returned, it may escape that way.
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            // Another argument is the returned value and memory is read-only.
            State.addKnownBits(NO_CAPTURE);
          else
            // Another argument is the returned value, so this one cannot
            // escape through the return.
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};
4029 
/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V maybe captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      // Only pointers can be tracked by the capture tracker; for anything
      // else be conservative.
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    // Too many uses to look at, be conservative.
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  /// See CaptureTracker::isDereferenceableOrNull(...).
  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    // Also consult the in-flight dereferenceability attribute; an optional
    // dependence suffices since it only refines the default answer.
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
        DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    CallSite CS(UInst);
    if (!CS || !CS.isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CS.getArgumentNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    // If we have a abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      // The value might flow back through the call result, remember the call
      // so its result is explored as a potential copy later on.
      addPotentialCopy(CS);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed so we don't.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
  void addPotentialCopy(CallSite CS) {
    PotentialCopies.push_back(CS.getInstruction());
  }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness, if it is used to stop exploring we need a dependence.
    if (IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()))) {
      A.recordDependence(IsDeadAA, NoCaptureAA, DepClassTy::OPTIONAL);
      return false;
    }
    return true;
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    // Returning true aborts the use exploration, which we do as soon as
    // no-capture-maybe-returned is no longer assumed.
    return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  AANoCapture::StateType &State;

  /// Set of potential copies of the tracked value.
  SmallVectorImpl<const Value *> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};
4179 
ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  // For argument positions track the IR argument, otherwise the associated
  // value itself.
  const Value *V =
      getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const IRPosition &FnPos = IRPosition::function(*F);
  // Liveness is queried lazily in the tracker; a dependence is recorded there
  // only when liveness actually prunes a use.
  const auto &IsDeadAA =
      A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);

  AANoCapture::StateType T;

  // Readonly means we cannot capture through memory.
  const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
      *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (FnMemAA.isAssumedReadOnly()) {
    T.addKnownBits(NOT_CAPTURED_IN_MEM);
    if (FnMemAA.isKnownReadOnly())
      // Known read-only promotes the bit on this attribute, not only on the
      // temporary state T.
      addKnownBits(NOT_CAPTURED_IN_MEM);
  }

  // Make sure all returned values are different than the underlying value.
  // TODO: we could do this in a more sophisticated way inside
  //       AAReturnedValues, e.g., track all values that escape through returns
  //       directly somehow.
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    bool SeenConstant = false;
    for (auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        // Tolerate at most one returned constant.
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        // Any returned non-argument, or the tracked argument itself, may
        // expose the value through the return.
        return false;
    }
    return true;
  };

  const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
      *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 /* TrackDependence */ true,
                                                 DepClassTy::OPTIONAL);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      // Promote to known bits on this attribute only if the supporting
      // attributes are at a fixpoint themselves.
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly updates the assumed state.
  SmallVector<const Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.push_back(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
4268 
4269 /// NoCapture attribute for function arguments.
4270 struct AANoCaptureArgument final : AANoCaptureImpl {
4271   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4272 
4273   /// See AbstractAttribute::trackStatistics()
4274   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4275 };
4276 
4277 /// NoCapture attribute for call site arguments.
4278 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4279   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4280 
4281   /// See AbstractAttribute::initialize(...).
4282   void initialize(Attributor &A) override {
4283     if (Argument *Arg = getAssociatedArgument())
4284       if (Arg->hasByValAttr())
4285         indicateOptimisticFixpoint();
4286     AANoCaptureImpl::initialize(A);
4287   }
4288 
4289   /// See AbstractAttribute::updateImpl(...).
4290   ChangeStatus updateImpl(Attributor &A) override {
4291     // TODO: Once we have call site specific value information we can provide
4292     //       call site specific liveness information and then it makes
4293     //       sense to specialize attributes for call sites arguments instead of
4294     //       redirecting requests to the callee argument.
4295     Argument *Arg = getAssociatedArgument();
4296     if (!Arg)
4297       return indicatePessimisticFixpoint();
4298     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4299     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4300     return clampStateAndIndicateChange(
4301         getState(),
4302         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4303   }
4304 
4305   /// See AbstractAttribute::trackStatistics()
4306   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4307 };
4308 
4309 /// NoCapture attribute for floating values.
4310 struct AANoCaptureFloating final : AANoCaptureImpl {
4311   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4312 
4313   /// See AbstractAttribute::trackStatistics()
4314   void trackStatistics() const override {
4315     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4316   }
4317 };
4318 
/// NoCapture attribute for function return value.
///
/// NoCapture is a property of pointers passed into a function; it is never
/// deduced for the function return position, so every member here traps.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
4338 
4339 /// NoCapture attribute deduction for a call site return value.
4340 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4341   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4342 
4343   /// See AbstractAttribute::trackStatistics()
4344   void trackStatistics() const override {
4345     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4346   }
4347 };
4348 
/// ------------------ Value Simplify Attribute ----------------------------
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Void values have nothing to simplify.
    if (getAssociatedValue().getType()->isVoidTy())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
                        : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    // In the pessimistic state the associated value itself is the (trivial)
    // simplification.
    if (!getAssumed())
      return const_cast<Value *>(&getAssociatedValue());
    return SimplifiedAssociatedValue;
  }

  /// Helper function for querying AAValueSimplify and updating candidate.
  /// \param QueryingValue Value trying to unify with SimplifiedValue
  /// \param AccumulatedSimplifiedValue Current simplification result.
  static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                             Value &QueryingValue,
                             Optional<Value *> &AccumulatedSimplifiedValue) {
    // FIXME: Add a typecast support.

    auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
        QueryingAA, IRPosition::value(QueryingValue));

    Optional<Value *> QueryingValueSimplified =
        ValueSimplifyAA.getAssumedSimplifiedValue(A);

    // "None" means no candidate yet, nothing to unify against.
    if (!QueryingValueSimplified.hasValue())
      return true;

    // A null value signals "not simplifiable".
    if (!QueryingValueSimplified.getValue())
      return false;

    Value &QueryingValueSimplifiedUnwrapped =
        *QueryingValueSimplified.getValue();

    // Undef unifies with any candidate.
    if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
      return true;

    // If a candidate exists already, both simplifications must agree.
    if (AccumulatedSimplifiedValue.hasValue())
      return AccumulatedSimplifiedValue == QueryingValueSimplified;

    LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
                      << " is assumed to be "
                      << QueryingValueSimplifiedUnwrapped << "\n");

    AccumulatedSimplifiedValue = QueryingValueSimplified;
    return true;
  }

  /// Try to derive a simplified (constant) value for the associated integer
  /// value from its value constant range attribute.
  bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    const auto &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(*this, getIRPosition());

    Optional<ConstantInt *> COpt =
        ValueConstantRangeAA.getAssumedConstantInt(A);
    if (COpt.hasValue()) {
      if (auto *C = COpt.getValue())
        SimplifiedAssociatedValue = C;
      else
        // The range attribute proved there is no unique constant.
        return false;
    } else {
      SimplifiedAssociatedValue = llvm::None;
    }
    return true;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (!SimplifiedAssociatedValue.hasValue() ||
        !SimplifiedAssociatedValue.getValue())
      return Changed;

    if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
      // We can replace the AssociatedValue with the constant.
      Value &V = getAssociatedValue();
      if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
        LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
                          << " :: " << *this << "\n");
        A.changeValueAfterManifest(V, *C);
        Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
    SimplifiedAssociatedValue = &getAssociatedValue();
    indicateOptimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
  Optional<Value *> SimplifiedAssociatedValue;
};
4470 
4471 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4472   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4473 
4474   void initialize(Attributor &A) override {
4475     AAValueSimplifyImpl::initialize(A);
4476     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
4477       indicatePessimisticFixpoint();
4478     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4479                 /* IgnoreSubsumingPositions */ true))
4480       indicatePessimisticFixpoint();
4481   }
4482 
4483   /// See AbstractAttribute::updateImpl(...).
4484   ChangeStatus updateImpl(Attributor &A) override {
4485     // Byval is only replacable if it is readonly otherwise we would write into
4486     // the replaced value and not the copy that byval creates implicitly.
4487     Argument *Arg = getAssociatedArgument();
4488     if (Arg->hasByValAttr()) {
4489       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4490       if (!MemAA.isAssumedReadOnly())
4491         return indicatePessimisticFixpoint();
4492     }
4493 
4494     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4495 
4496     auto PredForCallSite = [&](AbstractCallSite ACS) {
4497       // Check if we have an associated argument or not (which can happen for
4498       // callback calls).
4499       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
4500       if (!ArgOp)
4501         return false;
4502       // We can only propagate thread independent values through callbacks.
4503       // This is different to direct/indirect call sites because for them we
4504       // know the thread executing the caller and callee is the same. For
4505       // callbacks this is not guaranteed, thus a thread dependent value could
4506       // be different for the caller and callee, making it invalid to propagate.
4507       if (ACS.isCallbackCall())
4508         if (auto *C = dyn_cast<Constant>(ArgOp))
4509           if (C->isThreadDependent())
4510             return false;
4511       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
4512     };
4513 
4514     bool AllCallSitesKnown;
4515     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4516                                 AllCallSitesKnown))
4517       if (!askSimplifiedValueForAAValueConstantRange(A))
4518         return indicatePessimisticFixpoint();
4519 
4520     // If a candicate was found in this update, return CHANGED.
4521     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4522                ? ChangeStatus::UNCHANGED
4523                : ChangeStatus ::CHANGED;
4524   }
4525 
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Records the value_simplify statistic for this argument position.
    STATS_DECLTRACK_ARG_ATTR(value_simplify)
  }
4530 };
4531 
4532 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4533   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4534 
4535   /// See AbstractAttribute::updateImpl(...).
4536   ChangeStatus updateImpl(Attributor &A) override {
4537     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4538 
4539     auto PredForReturned = [&](Value &V) {
4540       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4541     };
4542 
4543     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4544       if (!askSimplifiedValueForAAValueConstantRange(A))
4545         return indicatePessimisticFixpoint();
4546 
4547     // If a candicate was found in this update, return CHANGED.
4548     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4549                ? ChangeStatus::UNCHANGED
4550                : ChangeStatus ::CHANGED;
4551   }
4552   /// See AbstractAttribute::trackStatistics()
4553   void trackStatistics() const override {
4554     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4555   }
4556 };
4557 
4558 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4559   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4560 
4561   /// See AbstractAttribute::initialize(...).
4562   void initialize(Attributor &A) override {
4563     Value &V = getAnchorValue();
4564 
4565     // TODO: add other stuffs
4566     if (isa<Constant>(V))
4567       indicatePessimisticFixpoint();
4568   }
4569 
4570   /// See AbstractAttribute::updateImpl(...).
4571   ChangeStatus updateImpl(Attributor &A) override {
4572     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4573 
4574     auto VisitValueCB = [&](Value &V, bool &, bool Stripped) -> bool {
4575       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4576       if (!Stripped && this == &AA) {
4577         // TODO: Look the instruction and check recursively.
4578 
4579         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4580                           << "\n");
4581         return false;
4582       }
4583       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4584     };
4585 
4586     bool Dummy = false;
4587     if (!genericValueTraversal<AAValueSimplify, bool>(A, getIRPosition(), *this,
4588                                                       Dummy, VisitValueCB))
4589       if (!askSimplifiedValueForAAValueConstantRange(A))
4590         return indicatePessimisticFixpoint();
4591 
4592     // If a candicate was found in this update, return CHANGED.
4593 
4594     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4595                ? ChangeStatus::UNCHANGED
4596                : ChangeStatus ::CHANGED;
4597   }
4598 
4599   /// See AbstractAttribute::trackStatistics()
4600   void trackStatistics() const override {
4601     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4602   }
4603 };
4604 
/// AAValueSimplify for function positions; a function cannot be simplified
/// to anything but itself, so the result is fixed at initialization.
struct AAValueSimplifyFunction : AAValueSimplifyImpl {
  AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // The simplified value is the function itself; fix it optimistically so
    // updateImpl is never invoked.
    SimplifiedAssociatedValue = &getAnchorValue();
    indicateOptimisticFixpoint();
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable(
        "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(value_simplify)
  }
};
4623 
/// AAValueSimplify for call site positions; inherits the fixed behavior of
/// the function case and only differs in the tracked statistic.
struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP)
      : AAValueSimplifyFunction(IRP) {}
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};
4632 
/// AAValueSimplify for call site return positions; reuses the returned-value
/// logic and only differs in the tracked statistic.
struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
      : AAValueSimplifyReturned(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};
/// AAValueSimplify for call site argument positions; reuses the floating
/// value logic and only differs in the tracked statistic.
struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
      : AAValueSimplifyFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};
4649 
4650 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4651 struct AAHeapToStackImpl : public AAHeapToStack {
4652   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4653 
4654   const std::string getAsStr() const override {
4655     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4656   }
4657 
4658   ChangeStatus manifest(Attributor &A) override {
4659     assert(getState().isValidState() &&
4660            "Attempted to manifest an invalid state!");
4661 
4662     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4663     Function *F = getAssociatedFunction();
4664     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4665 
4666     for (Instruction *MallocCall : MallocCalls) {
4667       // This malloc cannot be replaced.
4668       if (BadMallocCalls.count(MallocCall))
4669         continue;
4670 
4671       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4672         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4673         A.deleteAfterManifest(*FreeCall);
4674         HasChanged = ChangeStatus::CHANGED;
4675       }
4676 
4677       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4678                         << "\n");
4679 
4680       Constant *Size;
4681       if (isCallocLikeFn(MallocCall, TLI)) {
4682         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4683         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4684         APInt TotalSize = SizeT->getValue() * Num->getValue();
4685         Size =
4686             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4687       } else {
4688         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4689       }
4690 
4691       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4692       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4693                                        Size, "", MallocCall->getNextNode());
4694 
4695       if (AI->getType() != MallocCall->getType())
4696         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4697                              AI->getNextNode());
4698 
4699       A.replaceAllUsesWith(*MallocCall, *AI);
4700 
4701       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4702         auto *NBB = II->getNormalDest();
4703         BranchInst::Create(NBB, MallocCall->getParent());
4704         A.deleteAfterManifest(*MallocCall);
4705       } else {
4706         A.deleteAfterManifest(*MallocCall);
4707       }
4708 
4709       if (isCallocLikeFn(MallocCall, TLI)) {
4710         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4711                                    AI->getNextNode());
4712         Value *Ops[] = {
4713             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4714             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4715 
4716         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4717         Module *M = F->getParent();
4718         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4719         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4720       }
4721       HasChanged = ChangeStatus::CHANGED;
4722     }
4723 
4724     return HasChanged;
4725   }
4726 
4727   /// Collection of all malloc calls in a function.
4728   SmallSetVector<Instruction *, 4> MallocCalls;
4729 
4730   /// Collection of malloc calls that cannot be converted.
4731   DenseSet<const Instruction *> BadMallocCalls;
4732 
4733   /// A map for each malloc call to the set of associated free calls.
4734   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4735 
4736   ChangeStatus updateImpl(Attributor &A) override;
4737 };
4738 
4739 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4740   const Function *F = getAssociatedFunction();
4741   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4742 
4743   MustBeExecutedContextExplorer &Explorer =
4744       A.getInfoCache().getMustBeExecutedContextExplorer();
4745 
4746   auto FreeCheck = [&](Instruction &I) {
4747     const auto &Frees = FreesForMalloc.lookup(&I);
4748     if (Frees.size() != 1)
4749       return false;
4750     Instruction *UniqueFree = *Frees.begin();
4751     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4752   };
4753 
4754   auto UsesCheck = [&](Instruction &I) {
4755     bool ValidUsesOnly = true;
4756     bool MustUse = true;
4757     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4758       Instruction *UserI = cast<Instruction>(U.getUser());
4759       if (isa<LoadInst>(UserI))
4760         return true;
4761       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4762         if (SI->getValueOperand() == U.get()) {
4763           LLVM_DEBUG(dbgs()
4764                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4765           ValidUsesOnly = false;
4766         } else {
4767           // A store into the malloc'ed memory is fine.
4768         }
4769         return true;
4770       }
4771       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4772         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4773           return true;
4774         // Record malloc.
4775         if (isFreeCall(UserI, TLI)) {
4776           if (MustUse) {
4777             FreesForMalloc[&I].insert(UserI);
4778           } else {
4779             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4780                               << *UserI << "\n");
4781             ValidUsesOnly = false;
4782           }
4783           return true;
4784         }
4785 
4786         unsigned ArgNo = CB->getArgOperandNo(&U);
4787 
4788         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4789             *this, IRPosition::callsite_argument(*CB, ArgNo));
4790 
4791         // If a callsite argument use is nofree, we are fine.
4792         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4793             *this, IRPosition::callsite_argument(*CB, ArgNo));
4794 
4795         if (!NoCaptureAA.isAssumedNoCapture() ||
4796             !ArgNoFreeAA.isAssumedNoFree()) {
4797           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4798           ValidUsesOnly = false;
4799         }
4800         return true;
4801       }
4802 
4803       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4804           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4805         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4806         Follow = true;
4807         return true;
4808       }
4809       // Unknown user for which we can not track uses further (in a way that
4810       // makes sense).
4811       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4812       ValidUsesOnly = false;
4813       return true;
4814     };
4815     A.checkForAllUses(Pred, *this, I);
4816     return ValidUsesOnly;
4817   };
4818 
4819   auto MallocCallocCheck = [&](Instruction &I) {
4820     if (BadMallocCalls.count(&I))
4821       return true;
4822 
4823     bool IsMalloc = isMallocLikeFn(&I, TLI);
4824     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4825     if (!IsMalloc && !IsCalloc) {
4826       BadMallocCalls.insert(&I);
4827       return true;
4828     }
4829 
4830     if (IsMalloc) {
4831       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4832         if (Size->getValue().ule(MaxHeapToStackSize))
4833           if (UsesCheck(I) || FreeCheck(I)) {
4834             MallocCalls.insert(&I);
4835             return true;
4836           }
4837     } else if (IsCalloc) {
4838       bool Overflow = false;
4839       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4840         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4841           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4842                   .ule(MaxHeapToStackSize))
4843             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4844               MallocCalls.insert(&I);
4845               return true;
4846             }
4847     }
4848 
4849     BadMallocCalls.insert(&I);
4850     return true;
4851   };
4852 
4853   size_t NumBadMallocs = BadMallocCalls.size();
4854 
4855   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4856 
4857   if (NumBadMallocs != BadMallocCalls.size())
4858     return ChangeStatus::CHANGED;
4859 
4860   return ChangeStatus::UNCHANGED;
4861 }
4862 
/// Heap-to-stack conversion attribute for function positions.
struct AAHeapToStackFunction final : public AAHeapToStackImpl {
  AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(MallocCalls, Function,
               "Number of malloc calls converted to allocas");
    // Only count calls that were not later identified as unconvertible.
    for (auto *C : MallocCalls)
      if (!BadMallocCalls.count(C))
        ++BUILD_STAT_NAME(MallocCalls, Function);
  }
};
4875 
4876 /// ----------------------- Privatizable Pointers ------------------------------
4877 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4878   AAPrivatizablePtrImpl(const IRPosition &IRP)
4879       : AAPrivatizablePtr(IRP), PrivatizableType(llvm::None) {}
4880 
4881   ChangeStatus indicatePessimisticFixpoint() override {
4882     AAPrivatizablePtr::indicatePessimisticFixpoint();
4883     PrivatizableType = nullptr;
4884     return ChangeStatus::CHANGED;
4885   }
4886 
4887   /// Identify the type we can chose for a private copy of the underlying
4888   /// argument. None means it is not clear yet, nullptr means there is none.
4889   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4890 
4891   /// Return a privatizable type that encloses both T0 and T1.
4892   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4893   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4894     if (!T0.hasValue())
4895       return T1;
4896     if (!T1.hasValue())
4897       return T0;
4898     if (T0 == T1)
4899       return T0;
4900     return nullptr;
4901   }
4902 
4903   Optional<Type *> getPrivatizableType() const override {
4904     return PrivatizableType;
4905   }
4906 
4907   const std::string getAsStr() const override {
4908     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4909   }
4910 
4911 protected:
4912   Optional<Type *> PrivatizableType;
4913 };
4914 
4915 // TODO: Do this for call site arguments (probably also other values) as well.
4916 
4917 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
  /// Trivial constructor, forwards the position to the base class.
  AAPrivatizablePtrArgument(const IRPosition &IRP)
      : AAPrivatizablePtrImpl(IRP) {}
4920 
  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  Optional<Type *> identifyPrivatizableType(Attributor &A) override {
    // If this is a byval argument and we know all the call sites (so we can
    // rewrite them), there is no need to check them explicitly.
    bool AllCallSitesKnown;
    if (getIRPosition().hasAttr(Attribute::ByVal) &&
        A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
                               true, AllCallSitesKnown))
      return getAssociatedValue().getType()->getPointerElementType();

    Optional<Type *> Ty;
    unsigned ArgNo = getIRPosition().getArgNo();

    // Make sure the associated call site argument has the same type at all call
    // sites and it is an allocation we know is safe to privatize, for now that
    // means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow up patch.
    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Check that all call sites agree on a type.
      auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
      Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
        if (CSTy.hasValue() && CSTy.getValue())
          CSTy.getValue()->print(dbgs());
        else if (CSTy.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
      });

      // Merge this call site's type into the running combination; a nullptr
      // value afterwards means the types were incompatible.
      Ty = combineTypes(Ty, CSTy);

      LLVM_DEBUG({
        dbgs() << " : New Type: ";
        if (Ty.hasValue() && Ty.getValue())
          Ty.getValue()->print(dbgs());
        else if (Ty.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
        dbgs() << "\n";
      });

      // Abort the traversal once the combination became impossible (nullptr).
      return !Ty.hasValue() || Ty.getValue();
    };

    if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
      return nullptr;
    return Ty;
  }
4981 
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // None means the type is not known yet (stay optimistic), nullptr means
    // privatization is known to be impossible.
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    // Avoid arguments with padding for now.
    if (!getIRPosition().hasAttr(Attribute::ByVal) &&
        !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
                                                A.getInfoCache().getDL())) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
      return indicatePessimisticFixpoint();
    }

    // Verify callee and caller agree on how the promoted argument would be
    // passed.
    // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
    // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
    // which doesn't require the arguments ArgumentPromotion wanted to pass.
    Function &Fn = *getIRPosition().getAnchorScope();
    SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
    ArgsToPromote.insert(getAssociatedArgument());
    const auto *TTI =
        A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
    if (!TTI ||
        !ArgumentPromotionPass::areFunctionArgsABICompatible(
            Fn, *TTI, ArgsToPromote, Dummy) ||
        ArgsToPromote.empty()) {
      LLVM_DEBUG(
          dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
                 << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Register a rewrite of the argument.
    Argument *Arg = getAssociatedArgument();
    if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
      return indicatePessimisticFixpoint();
    }

    unsigned ArgNo = Arg->getArgNo();

    // Helper to check if for the given call site the associated argument is
    // passed to a callback where the privatization would be different.
    auto IsCompatiblePrivArgOfCallback = [&](CallSite CS) {
      SmallVector<const Use *, 4> CBUses;
      AbstractCallSite::getCallbackUses(CS, CBUses);
      for (const Use *U : CBUses) {
        AbstractCallSite CBACS(U);
        assert(CBACS && CBACS.isCallbackCall());
        for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
          int CBArgNo = CBACS.getCallArgOperandNo(CBArg);

          LLVM_DEBUG({
            dbgs()
                << "[AAPrivatizablePtr] Argument " << *Arg
                << "check if can be privatized in the context of its parent ("
                << Arg->getParent()->getName()
                << ")\n[AAPrivatizablePtr] because it is an argument in a "
                   "callback ("
                << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                << ")\n[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperand(CBArg) << " vs "
                << CS.getArgOperand(ArgNo) << "\n"
                << "[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
          });

          if (CBArgNo != int(ArgNo))
            continue;
          // The callback argument must be privatizable with the same type,
          // otherwise the rewrites would disagree.
          const auto &CBArgPrivAA =
              A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
          if (CBArgPrivAA.isValidState()) {
            auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
            if (!CBArgPrivTy.hasValue())
              continue;
            if (CBArgPrivTy.getValue() == PrivatizableType)
              continue;
          }

          LLVM_DEBUG({
            dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
                   << " cannot be privatized in the context of its parent ("
                   << Arg->getParent()->getName()
                   << ")\n[AAPrivatizablePtr] because it is an argument in a "
                      "callback ("
                   << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ").\n[AAPrivatizablePtr] for which the argument "
                      "privatization is not compatible.\n";
          });
          return false;
        }
      }
      return true;
    };

    // Helper to check if for the given call site the associated argument is
    // passed to a direct call where the privatization would be different.
    auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
      CallBase *DC = cast<CallBase>(ACS.getInstruction());
      int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
      assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
             "Expected a direct call operand for callback call operand");

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << DCArgNo << "@" << DC->getCalledFunction()->getName()
               << ").\n";
      });

      Function *DCCallee = DC->getCalledFunction();
      if (unsigned(DCArgNo) < DCCallee->arg_size()) {
        // The argument of the direct callee must be privatizable with the
        // same type, otherwise the rewrites would disagree.
        const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
            *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
        if (DCArgPrivAA.isValidState()) {
          auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
          if (!DCArgPrivTy.hasValue())
            return true;
          if (DCArgPrivTy.getValue() == PrivatizableType)
            return true;
        }
      }

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " cannot be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << ACS.getCallSite().getCalledFunction()->getName()
               << ").\n[AAPrivatizablePtr] for which the argument "
                  "privatization is not compatible.\n";
      });
      return false;
    };

    // Helper to check if the associated argument is used at the given abstract
    // call site in a way that is incompatible with the privatization assumed
    // here.
    // NOTE(review): the dispatch looks crossed (direct call -> callback check
    // and vice versa); presumably a direct call can still forward the argument
    // into a callback and a callback is backed by a direct broker call --
    // confirm before changing.
    auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(ACS.getCallSite());
      if (ACS.isCallbackCall())
        return IsCompatiblePrivArgOfDirectCS(ACS);
      return false;
    };

    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
5148 
5149   /// Given a type to private \p PrivType, collect the constituates (which are
5150   /// used) in \p ReplacementTypes.
5151   static void
5152   identifyReplacementTypes(Type *PrivType,
5153                            SmallVectorImpl<Type *> &ReplacementTypes) {
5154     // TODO: For now we expand the privatization type to the fullest which can
5155     //       lead to dead arguments that need to be removed later.
5156     assert(PrivType && "Expected privatizable type!");
5157 
5158     // Traverse the type, extract constituate types on the outermost level.
5159     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5160       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5161         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5162     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5163       ReplacementTypes.append(PrivArrayType->getNumElements(),
5164                               PrivArrayType->getElementType());
5165     } else {
5166       ReplacementTypes.push_back(PrivType);
5167     }
5168   }
5169 
5170   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5171   /// The values needed are taken from the arguments of \p F starting at
5172   /// position \p ArgNo.
5173   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5174                                    unsigned ArgNo, Instruction &IP) {
5175     assert(PrivType && "Expected privatizable type!");
5176 
5177     IRBuilder<NoFolder> IRB(&IP);
5178     const DataLayout &DL = F.getParent()->getDataLayout();
5179 
5180     // Traverse the type, build GEPs and stores.
5181     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5182       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5183       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5184         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5185         Value *Ptr = constructPointer(
5186             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5187         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5188       }
5189     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5190       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5191       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5192       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5193         Value *Ptr =
5194             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5195         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5196       }
5197     } else {
5198       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5199     }
5200   }
5201 
5202   /// Extract values from \p Base according to the type \p PrivType at the
5203   /// call position \p ACS. The values are appended to \p ReplacementValues.
5204   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5205                                Value *Base,
5206                                SmallVectorImpl<Value *> &ReplacementValues) {
5207     assert(Base && "Expected base value!");
5208     assert(PrivType && "Expected privatizable type!");
5209     Instruction *IP = ACS.getInstruction();
5210 
5211     IRBuilder<NoFolder> IRB(IP);
5212     const DataLayout &DL = IP->getModule()->getDataLayout();
5213 
5214     if (Base->getType()->getPointerElementType() != PrivType)
5215       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5216                                                  "", ACS.getInstruction());
5217 
5218     // TODO: Improve the alignment of the loads.
5219     // Traverse the type, build GEPs and loads.
5220     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5221       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5222       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5223         Type *PointeeTy = PrivStructType->getElementType(u);
5224         Value *Ptr =
5225             constructPointer(PointeeTy->getPointerTo(), Base,
5226                              PrivStructLayout->getElementOffset(u), IRB, DL);
5227         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5228         L->setAlignment(MaybeAlign(1));
5229         ReplacementValues.push_back(L);
5230       }
5231     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5232       Type *PointeeTy = PrivArrayType->getElementType();
5233       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5234       Type *PointeePtrTy = PointeeTy->getPointerTo();
5235       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5236         Value *Ptr =
5237             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5238         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5239         L->setAlignment(MaybeAlign(1));
5240         ReplacementValues.push_back(L);
5241       }
5242     } else {
5243       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5244       L->setAlignment(MaybeAlign(1));
5245       ReplacementValues.push_back(L);
5246     }
5247   }
5248 
  /// See AbstractAttribute::manifest(...)
  ///
  /// Rewrite the signature of the associated function so the privatizable
  /// pointer argument is replaced by the values it points to. Two callbacks
  /// are registered: one repairs the callee (creates a private alloca and
  /// initializes it from the new arguments), the other repairs each call site
  /// (loads the replacement values and passes them instead of the pointer).
  ChangeStatus manifest(Attributor &A) override {
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    assert(PrivatizableType.getValue() && "Expected privatizable type!");

    // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail recursion.
    // TODO: Be smarter about new allocas escaping into tail calls.
    SmallVector<CallInst *, 16> TailCalls;
    if (!A.checkForAllInstructions(
            [&](Instruction &I) {
              CallInst &CI = cast<CallInst>(I);
              if (CI.isTailCall())
                TailCalls.push_back(&CI);
              return true;
            },
            *this, {Instruction::Call}))
      return ChangeStatus::UNCHANGED;

    Argument *Arg = getAssociatedArgument();

    // Callback to repair the associated function. A new alloca is placed at the
    // beginning and initialized with the values passed through arguments. The
    // new alloca replaces the use of the old pointer argument.
    // NOTE: [=] copies TailCalls (and the Arg pointer) into the closure so the
    // callback remains valid after this method returns; `this` is captured by
    // pointer for the PrivatizableType accesses.
    Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            Function &ReplacementFn, Function::arg_iterator ArgIt) {
          BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
          Instruction *IP = &*EntryBB.getFirstInsertionPt();
          auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
                                    Arg->getName() + ".priv", IP);
          createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
                               ArgIt->getArgNo(), *IP);
          Arg->replaceAllUsesWith(AI);

          // The new alloca must not escape into a tail call; clear the tail
          // markers collected above.
          for (CallInst *CI : TailCalls)
            CI->setTailCall(false);
        };

    // Callback to repair a call site of the associated function. The elements
    // of the privatizable type are loaded prior to the call and passed to the
    // new function version.
    Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
          createReplacementValues(
              PrivatizableType.getValue(), ACS,
              ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
                                  NewArgOperands);
        };

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Register a rewrite of the argument.
    if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
                                           std::move(FnRepairCB),
                                           std::move(ACSRepairCB)))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }
5313 
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Count arguments for which a privatizable pointer was deduced.
    STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
  }
5318 };
5319 
5320 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5321   AAPrivatizablePtrFloating(const IRPosition &IRP)
5322       : AAPrivatizablePtrImpl(IRP) {}
5323 
5324   /// See AbstractAttribute::initialize(...).
5325   virtual void initialize(Attributor &A) override {
5326     // TODO: We can privatize more than arguments.
5327     indicatePessimisticFixpoint();
5328   }
5329 
5330   ChangeStatus updateImpl(Attributor &A) override {
5331     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5332                      "updateImpl will not be called");
5333   }
5334 
5335   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5336   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5337     Value *Obj =
5338         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5339     if (!Obj) {
5340       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5341       return nullptr;
5342     }
5343 
5344     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5345       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5346         if (CI->isOne())
5347           return Obj->getType()->getPointerElementType();
5348     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5349       auto &PrivArgAA =
5350           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5351       if (PrivArgAA.isAssumedPrivatizablePtr())
5352         return Obj->getType()->getPointerElementType();
5353     }
5354 
5355     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5356                          "alloca nor privatizable argument: "
5357                       << *Obj << "!\n");
5358     return nullptr;
5359   }
5360 
5361   /// See AbstractAttribute::trackStatistics()
5362   void trackStatistics() const override {
5363     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5364   }
5365 };
5366 
/// Privatizable-pointer attribute for a call site argument.
struct AAPrivatizablePtrCallSiteArgument final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP)
      : AAPrivatizablePtrFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // A byval call site argument is privatizable by definition.
    if (getIRPosition().hasAttr(Attribute::ByVal))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    // Privatization is only sound if the pointer is not captured, ...
    const IRPosition &IRP = getIRPosition();
    auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
    if (!NoCaptureAA.isAssumedNoCapture()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
      return indicatePessimisticFixpoint();
    }

    // ... does not alias other reachable memory, ...
    auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
    if (!NoAliasAA.isAssumedNoAlias()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
      return indicatePessimisticFixpoint();
    }

    // ... and is only read through this argument.
    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
    if (!MemBehaviorAA.isAssumedReadOnly()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
      return indicatePessimisticFixpoint();
    }

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
  }
};
5413 
/// Privatizable-pointer attribute for a call site return position. Always
/// pessimistic; only arguments are privatized at the moment.
struct AAPrivatizablePtrCallSiteReturned final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP)
      : AAPrivatizablePtrFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
  }
};
5430 
/// Privatizable-pointer attribute for a function return position. Always
/// pessimistic; only arguments are privatized at the moment.
struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrReturned(const IRPosition &IRP)
      : AAPrivatizablePtrFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
  }
};
5446 
5447 /// -------------------- Memory Behavior Attributes ----------------------------
5448 /// Includes read-none, read-only, and write-only.
5449 /// ----------------------------------------------------------------------------
5450 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5451   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
5452 
5453   /// See AbstractAttribute::initialize(...).
5454   void initialize(Attributor &A) override {
5455     intersectAssumedBits(BEST_STATE);
5456     getKnownStateFromValue(getIRPosition(), getState());
5457     IRAttribute::initialize(A);
5458   }
5459 
5460   /// Return the memory behavior information encoded in the IR for \p IRP.
5461   static void getKnownStateFromValue(const IRPosition &IRP,
5462                                      BitIntegerState &State,
5463                                      bool IgnoreSubsumingPositions = false) {
5464     SmallVector<Attribute, 2> Attrs;
5465     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5466     for (const Attribute &Attr : Attrs) {
5467       switch (Attr.getKindAsEnum()) {
5468       case Attribute::ReadNone:
5469         State.addKnownBits(NO_ACCESSES);
5470         break;
5471       case Attribute::ReadOnly:
5472         State.addKnownBits(NO_WRITES);
5473         break;
5474       case Attribute::WriteOnly:
5475         State.addKnownBits(NO_READS);
5476         break;
5477       default:
5478         llvm_unreachable("Unexpcted attribute!");
5479       }
5480     }
5481 
5482     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5483       if (!I->mayReadFromMemory())
5484         State.addKnownBits(NO_READS);
5485       if (!I->mayWriteToMemory())
5486         State.addKnownBits(NO_WRITES);
5487     }
5488   }
5489 
5490   /// See AbstractAttribute::getDeducedAttributes(...).
5491   void getDeducedAttributes(LLVMContext &Ctx,
5492                             SmallVectorImpl<Attribute> &Attrs) const override {
5493     assert(Attrs.size() == 0);
5494     if (isAssumedReadNone())
5495       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5496     else if (isAssumedReadOnly())
5497       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5498     else if (isAssumedWriteOnly())
5499       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5500     assert(Attrs.size() <= 1);
5501   }
5502 
5503   /// See AbstractAttribute::manifest(...).
5504   ChangeStatus manifest(Attributor &A) override {
5505     const IRPosition &IRP = getIRPosition();
5506 
5507     // Check if we would improve the existing attributes first.
5508     SmallVector<Attribute, 4> DeducedAttrs;
5509     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5510     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5511           return IRP.hasAttr(Attr.getKindAsEnum(),
5512                              /* IgnoreSubsumingPositions */ true);
5513         }))
5514       return ChangeStatus::UNCHANGED;
5515 
5516     // Clear existing attributes.
5517     IRP.removeAttrs(AttrKinds);
5518 
5519     // Use the generic manifest method.
5520     return IRAttribute::manifest(A);
5521   }
5522 
5523   /// See AbstractState::getAsStr().
5524   const std::string getAsStr() const override {
5525     if (isAssumedReadNone())
5526       return "readnone";
5527     if (isAssumedReadOnly())
5528       return "readonly";
5529     if (isAssumedWriteOnly())
5530       return "writeonly";
5531     return "may-read/write";
5532   }
5533 
5534   /// The set of IR attributes AAMemoryBehavior deals with.
5535   static const Attribute::AttrKind AttrKinds[3];
5536 };
5537 
// Out-of-class definition of the static member declared in
// AAMemoryBehaviorImpl above.
const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5540 
/// Memory behavior attribute for a floating value.
struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
  AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    // Initialize the use vector with all direct uses of the associated value.
    // updateImpl grows this worklist with transitive uses as needed.
    for (const Use &U : getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...).
  // Defined out-of-line below; walks the Uses worklist.
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FLOATING_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FLOATING_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FLOATING_ATTR(writeonly)
  }

private:
  /// Return true if users of \p UserI might access the underlying
  /// variable/location described by \p U and should therefore be analyzed.
  bool followUsersOfUseIn(Attributor &A, const Use *U,
                          const Instruction *UserI);

  /// Update the state according to the effect of use \p U in \p UserI.
  void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);

protected:
  /// Container for (transitive) uses of the associated argument.
  SetVector<const Use *> Uses;
};
5579 
/// Memory behavior attribute for function argument.
struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
  AAMemoryBehaviorArgument(const IRPosition &IRP)
      : AAMemoryBehaviorFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    const IRPosition &IRP = getIRPosition();
    // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
    // can query it when we use has/getAttr. That would allow us to reuse the
    // initialize of the base class here.
    bool HasByVal =
        IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
    getKnownStateFromValue(IRP, getState(),
                           /* IgnoreSubsumingPositions */ HasByVal);

    // Without an exact definition the argument's uses cannot all be seen, so
    // nothing better than the IR-encoded state can be assumed.
    Argument *Arg = getAssociatedArgument();
    if (!Arg || !Arg->getParent()->hasExactDefinition()) {
      indicatePessimisticFixpoint();
    } else {
      // Initialize the use vector with all direct uses of the associated value.
      for (const Use &U : Arg->uses())
        Uses.insert(&U);
    }
  }

  ChangeStatus manifest(Attributor &A) override {
    // TODO: From readattrs.ll: "inalloca parameters are always
    //                           considered written"
    if (hasAttr({Attribute::InAlloca})) {
      removeKnownBits(NO_WRITES);
      removeAssumedBits(NO_WRITES);
    }
    return AAMemoryBehaviorFloating::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_ARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_ARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_ARG_ATTR(writeonly)
  }
};
5628 
5629 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5630   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
5631       : AAMemoryBehaviorArgument(IRP) {}
5632 
5633   /// See AbstractAttribute::initialize(...).
5634   void initialize(Attributor &A) override {
5635     if (Argument *Arg = getAssociatedArgument()) {
5636       if (Arg->hasByValAttr()) {
5637         addKnownBits(NO_WRITES);
5638         removeKnownBits(NO_READS);
5639         removeAssumedBits(NO_READS);
5640       }
5641     } else {
5642     }
5643     AAMemoryBehaviorArgument::initialize(A);
5644   }
5645 
5646   /// See AbstractAttribute::updateImpl(...).
5647   ChangeStatus updateImpl(Attributor &A) override {
5648     // TODO: Once we have call site specific value information we can provide
5649     //       call site specific liveness liveness information and then it makes
5650     //       sense to specialize attributes for call sites arguments instead of
5651     //       redirecting requests to the callee argument.
5652     Argument *Arg = getAssociatedArgument();
5653     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5654     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5655     return clampStateAndIndicateChange(
5656         getState(),
5657         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5658   }
5659 
5660   /// See AbstractAttribute::trackStatistics()
5661   void trackStatistics() const override {
5662     if (isAssumedReadNone())
5663       STATS_DECLTRACK_CSARG_ATTR(readnone)
5664     else if (isAssumedReadOnly())
5665       STATS_DECLTRACK_CSARG_ATTR(readonly)
5666     else if (isAssumedWriteOnly())
5667       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5668   }
5669 };
5670 
/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
      : AAMemoryBehaviorFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  // Nothing to track; this position never manifests an attribute.
  void trackStatistics() const override {}
};
5685 
5686 /// An AA to represent the memory behavior function attributes.
5687 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5688   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5689 
5690   /// See AbstractAttribute::updateImpl(Attributor &A).
5691   virtual ChangeStatus updateImpl(Attributor &A) override;
5692 
5693   /// See AbstractAttribute::manifest(...).
5694   ChangeStatus manifest(Attributor &A) override {
5695     Function &F = cast<Function>(getAnchorValue());
5696     if (isAssumedReadNone()) {
5697       F.removeFnAttr(Attribute::ArgMemOnly);
5698       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5699       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5700     }
5701     return AAMemoryBehaviorImpl::manifest(A);
5702   }
5703 
5704   /// See AbstractAttribute::trackStatistics()
5705   void trackStatistics() const override {
5706     if (isAssumedReadNone())
5707       STATS_DECLTRACK_FN_ATTR(readnone)
5708     else if (isAssumedReadOnly())
5709       STATS_DECLTRACK_FN_ATTR(readonly)
5710     else if (isAssumedWriteOnly())
5711       STATS_DECLTRACK_FN_ATTR(writeonly)
5712   }
5713 };
5714 
/// AAMemoryBehavior attribute for call sites.
struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
  AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    // Without an exactly defined callee nothing can be deduced here.
    Function *F = getAssociatedFunction();
    if (!F || !F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CS_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CS_ATTR(writeonly)
  }
};
5751 } // namespace
5752 
// Update the function-level memory behavior by intersecting over all
// read/write instructions in the function body.
ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {

  // The current assumed state used to determine a change.
  auto AssumedState = getAssumed();

  auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has an own memory behavior state, use it to restrict
    // the local state. No further analysis is required as the other memory
    // state is as optimistic as it gets.
    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
      const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
          *this, IRPosition::callsite_function(ICS));
      intersectAssumedBits(MemBehaviorAA.getAssumed());
      return !isAtFixpoint();
    }

    // Remove access kind modifiers if necessary.
    if (I.mayReadFromMemory())
      removeAssumedBits(NO_READS);
    if (I.mayWriteToMemory())
      removeAssumedBits(NO_WRITES);
    // Returning false aborts the instruction traversal early once a
    // (pessimistic) fixpoint was reached.
    return !isAtFixpoint();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}
5783 
// Update the floating-value memory behavior by (1) clamping against the
// enclosing function's state where legal and (2) analyzing all (transitive)
// uses collected in the Uses worklist.
ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we avoid
  // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
        *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // Make sure the value is not captured (except through "return"), if
  // it is, any information derived would be irrelevant anyway as we cannot
  // check the potential aliases introduced by the capture. However, no need
  // to fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
      *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return ChangeStatus::CHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Liveness information to exclude dead users.
  // TODO: Take the FnPos once we have call site specific liveness information.
  const auto &LivenessAA = A.getAAFor<AAIsDead>(
      *this, IRPosition::function(*IRP.getAssociatedFunction()),
      /* TrackDependence */ false);

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  // Note: Uses may grow inside the loop via followUsersOfUseIn; indexing by
  // position (not iterators) keeps the traversal valid.
  for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
    const Use *U = Uses[i];
    Instruction *UserI = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
                      << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
                      << "]\n");
    if (LivenessAA.isAssumedDead(UserI)) {
      A.recordDependence(LivenessAA, *this, DepClassTy::OPTIONAL);
      continue;
    }

    // Check if the users of UserI should also be visited.
    if (followUsersOfUseIn(A, U, UserI))
      for (const Use &UserIUse : UserI->uses())
        Uses.insert(&UserIUse);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);
  }

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}
5850 
// Decide whether the users of \p UserI can observe the underlying location
// of use \p U and therefore need to be added to the worklist.
bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
                                                  const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, no need to
  // follow the users of the load.
  if (isa<LoadInst>(UserI))
    return false;

  // By default we follow all uses assuming UserI might leak information on U,
  // we have special handling for call sites operands though.
  ImmutableCallSite ICS(UserI);
  if (!ICS || !ICS.isArgOperand(U))
    return true;

  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
  if (U->get()->getType()->isPointerTy()) {
    unsigned ArgNo = ICS.getArgumentNo(U);
    const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
        *this, IRPosition::callsite_argument(ICS, ArgNo),
        /* TrackDependence */ true, DepClassTy::OPTIONAL);
    return !ArgNoCaptureAA.isAssumedNoCapture();
  }

  return true;
}
5880 
5881 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5882                                             const Instruction *UserI) {
5883   assert(UserI->mayReadOrWriteMemory());
5884 
5885   switch (UserI->getOpcode()) {
5886   default:
5887     // TODO: Handle all atomics and other side-effect operations we know of.
5888     break;
5889   case Instruction::Load:
5890     // Loads cause the NO_READS property to disappear.
5891     removeAssumedBits(NO_READS);
5892     return;
5893 
5894   case Instruction::Store:
5895     // Stores cause the NO_WRITES property to disappear if the use is the
5896     // pointer operand. Note that we do assume that capturing was taken care of
5897     // somewhere else.
5898     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5899       removeAssumedBits(NO_WRITES);
5900     return;
5901 
5902   case Instruction::Call:
5903   case Instruction::CallBr:
5904   case Instruction::Invoke: {
5905     // For call sites we look at the argument memory behavior attribute (this
5906     // could be recursive!) in order to restrict our own state.
5907     ImmutableCallSite ICS(UserI);
5908 
5909     // Give up on operand bundles.
5910     if (ICS.isBundleOperand(U)) {
5911       indicatePessimisticFixpoint();
5912       return;
5913     }
5914 
5915     // Calling a function does read the function pointer, maybe write it if the
5916     // function is self-modifying.
5917     if (ICS.isCallee(U)) {
5918       removeAssumedBits(NO_READS);
5919       break;
5920     }
5921 
5922     // Adjust the possible access behavior based on the information on the
5923     // argument.
5924     IRPosition Pos;
5925     if (U->get()->getType()->isPointerTy())
5926       Pos = IRPosition::callsite_argument(ICS, ICS.getArgumentNo(U));
5927     else
5928       Pos = IRPosition::callsite_function(ICS);
5929     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5930         *this, Pos,
5931         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5932     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5933     // and at least "known".
5934     intersectAssumedBits(MemBehaviorAA.getAssumed());
5935     return;
5936   }
5937   };
5938 
5939   // Generally, look at the "may-properties" and adjust the assumed state if we
5940   // did not trigger special handling before.
5941   if (UserI->mayReadFromMemory())
5942     removeAssumedBits(NO_READS);
5943   if (UserI->mayWriteToMemory())
5944     removeAssumedBits(NO_WRITES);
5945 }
5946 /// ------------------ Value Constant Range Attribute -------------------------
5947 
5948 struct AAValueConstantRangeImpl : AAValueConstantRange {
5949   using StateType = IntegerRangeState;
5950   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
5951 
5952   /// See AbstractAttribute::getAsStr().
5953   const std::string getAsStr() const override {
5954     std::string Str;
5955     llvm::raw_string_ostream OS(Str);
5956     OS << "range(" << getBitWidth() << ")<";
5957     getKnown().print(OS);
5958     OS << " / ";
5959     getAssumed().print(OS);
5960     OS << ">";
5961     return OS.str();
5962   }
5963 
  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I.
  ///
  /// Returns nullptr if there is no anchor scope or the required analyses
  /// (SCEV, LoopInfo) are unavailable.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    // Specialize the expression to the loop scope enclosing \p I.
    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }
5986 
  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I.
  ///
  /// Falls back to the worst (full) range when no anchor scope or SCEV
  /// expression is available.
  ConstantRange getConstantRangeFromSCEV(Attributor &A,
                                         const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }
6004 
  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I.
  ///
  /// Falls back to the worst (full) range when LVI is unavailable or no
  /// context instruction is given.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    // const_cast: LVI's query interface is not const-correct.
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<BasicBlock *>(CtxI->getParent()),
                                 const_cast<Instruction *>(CtxI));
  }
6023 
6024   /// See AAValueConstantRange::getKnownConstantRange(..).
6025   ConstantRange
6026   getKnownConstantRange(Attributor &A,
6027                         const Instruction *CtxI = nullptr) const override {
6028     if (!CtxI || CtxI == getCtxI())
6029       return getKnown();
6030 
6031     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6032     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6033     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6034   }
6035 
6036   /// See AAValueConstantRange::getAssumedConstantRange(..).
6037   ConstantRange
6038   getAssumedConstantRange(Attributor &A,
6039                           const Instruction *CtxI = nullptr) const override {
6040     // TODO: Make SCEV use Attributor assumption.
6041     //       We may be able to bound a variable range via assumptions in
6042     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6043     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6044 
6045     if (!CtxI || CtxI == getCtxI())
6046       return getAssumed();
6047 
6048     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6049     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6050     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6051   }
6052 
  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    // Seed the known range with what SCEV can prove at the context
    // instruction.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Further refine the known range with LVI's result at the same point.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }
6061 
6062   /// Helper function to create MDNode for range metadata.
6063   static MDNode *
6064   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6065                             const ConstantRange &AssumedConstantRange) {
6066     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6067                                   Ty, AssumedConstantRange.getLower())),
6068                               ConstantAsMetadata::get(ConstantInt::get(
6069                                   Ty, AssumedConstantRange.getUpper()))};
6070     return MDNode::get(Ctx, LowAndHigh);
6071   }
6072 
  /// Return true if \p Assumed is a strictly better (smaller) range than the
  /// one described by the !range metadata \p KnownRanges.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    // A full set carries no information, it is never an improvement.
    if (Assumed.isFullSet())
      return false;

    // Any non-full range beats the absence of metadata.
    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in IR, we give up on annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    // we can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    // "Better" means strictly contained in the already-annotated range.
    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }
6098 
6099   /// Helper function to set range metadata.
6100   static bool
6101   setRangeMetadataIfisBetterRange(Instruction *I,
6102                                   const ConstantRange &AssumedConstantRange) {
6103     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6104     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6105       if (!AssumedConstantRange.isEmptySet()) {
6106         I->setMetadata(LLVMContext::MD_range,
6107                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6108                                                  AssumedConstantRange));
6109         return true;
6110       }
6111     }
6112     return false;
6113   }
6114 
6115   /// See AbstractAttribute::manifest()
6116   ChangeStatus manifest(Attributor &A) override {
6117     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6118     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6119     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6120 
6121     auto &V = getAssociatedValue();
6122     if (!AssumedConstantRange.isEmptySet() &&
6123         !AssumedConstantRange.isSingleElement()) {
6124       if (Instruction *I = dyn_cast<Instruction>(&V))
6125         if (isa<CallInst>(I) || isa<LoadInst>(I))
6126           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6127             Changed = ChangeStatus::CHANGED;
6128     }
6129 
6130     return Changed;
6131   }
6132 };
6133 
6134 struct AAValueConstantRangeArgument final
6135     : AAArgumentFromCallSiteArguments<
6136           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6137   AAValueConstantRangeArgument(const IRPosition &IRP)
6138       : AAArgumentFromCallSiteArguments<
6139             AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>(
6140             IRP) {}
6141 
6142   /// See AbstractAttribute::trackStatistics()
6143   void trackStatistics() const override {
6144     STATS_DECLTRACK_ARG_ATTR(value_range)
6145   }
6146 };
6147 
/// Returned position: the range is derived from the function's returned
/// values via the generic AAReturnedFromReturnedValues machinery.
struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl> {
  using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
                                            AAValueConstantRangeImpl>;
  AAValueConstantRangeReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::initialize(...).
  // Intentionally empty: this skips AAValueConstantRangeImpl::initialize so
  // the state is not seeded from SCEV/LVI but computed from returned values.
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};
6163 
/// Floating value position. The range is computed bottom-up from the assumed
/// ranges of the operands (binary operators, compares and casts) via the
/// generic value traversal performed in updateImpl.
struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP)
      : AAValueConstantRangeImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueConstantRangeImpl::initialize(A);
    Value &V = getAssociatedValue();

    // A constant integer has a singleton range; nothing left to do.
    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    // Undef imposes no constraint; keep the state derived so far.
    if (isa<UndefValue>(&V)) {
      indicateOptimisticFixpoint();
      return;
    }

    // Binary operators and compares over integer operands are handled in
    // updateImpl.
    if (auto *I = dyn_cast<Instruction>(&V))
      if (isa<BinaryOperator>(I) || isa<CmpInst>(I)) {
        Value *LHS = I->getOperand(0);
        Value *RHS = I->getOperand(1);

        if (LHS->getType()->isIntegerTy() && RHS->getType()->isIntegerTy())
          return;
      }

    // If it is a load instruction with range metadata, use it.
    if (LoadInst *LI = dyn_cast<LoadInst>(&V))
      if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));
        return;
      }

    // We handle casts in the updateImpl.
    // TODO: Allow non integers as well.
    if (CastInst *CI = dyn_cast<CastInst>(&V))
      if (CI->getOperand(0)->getType()->isIntegerTy())
        return;

    // We can work with PHI and select instruction as we traverse their operands
    // during update.
    if (isa<SelectInst>(V) || isa<PHINode>(V))
      return;

    // Otherwise we give up.
    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
                      << getAssociatedValue() << "\n");
  }

  /// Union into \p T the range of \p BinOp, derived by applying its opcode to
  /// the operands' assumed ranges at \p CtxI. Queried operand AAs are appended
  /// to \p QuerriedAAs so the caller can detect circular reasoning. Returns
  /// true while \p T is still a valid state.
  bool calculateBinaryOperator(
      Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
      Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QuerriedAAs.push_back(&LHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);

    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QuerriedAAs.push_back(&RHSAA);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // Apply the opcode conservatively to the operand ranges.
    auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);

    T.unionAssumed(AssumedRange);

    // TODO: Track a known state too.

    return T.isValidState();
  }

  /// Union into \p T the range of \p CastI, i.e., the operand's assumed range
  /// converted by the cast opcode to this position's bit width. See
  /// calculateBinaryOperator for the remaining parameters.
  bool calculateCastInst(
      Attributor &A, CastInst *CastI, IntegerRangeState &T, Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
    // TODO: Allow non integers as well.
    Value &OpV = *CastI->getOperand(0);
    assert(OpV.getType()->isIntegerTy() && "Expected integer cast");

    auto &OpAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
    QuerriedAAs.push_back(&OpAA);
    T.unionAssumed(
        OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
    return T.isValidState();
  }

  /// Union into \p T the 1-bit range of \p CmpI: a singleton true/false range
  /// when the operand ranges decide the predicate, the full 1-bit range
  /// otherwise. See calculateBinaryOperator for the remaining parameters.
  bool
  calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
                   Instruction *CtxI,
                   SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    Value *LHS = CmpI->getOperand(0);
    Value *RHS = CmpI->getOperand(1);

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QuerriedAAs.push_back(&LHSAA);
    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QuerriedAAs.push_back(&RHSAA);

    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // If one of them is empty set, we can't decide.
    if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
      return true;

    bool MustTrue = false, MustFalse = false;

    auto AllowedRegion =
        ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);

    auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
        CmpI->getPredicate(), RHSAARange);

    // No possible LHS value can satisfy the predicate -> always false.
    if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
      MustFalse = true;

    // Every possible LHS value satisfies the predicate -> always true.
    if (SatisfyingRegion.contains(LHSAARange))
      MustTrue = true;

    assert((!MustTrue || !MustFalse) &&
           "Either MustTrue or MustFalse should be false!");

    if (MustTrue)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
    else if (MustFalse)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
    else
      T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
                      << " " << RHSAA << "\n");

    // TODO: Track a known state too.
    return T.isValidState();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Instruction *CtxI = getCtxI();
    auto VisitValueCB = [&](Value &V, IntegerRangeState &T,
                            bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I) {

        // If the value is not an instruction, we query AA to Attributor.
        const auto &AA =
            A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));

        // Clamp operator is not used to utilize a program point CtxI.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions

        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see also
      //       AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    IntegerRangeState T(getBitWidth());

    if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }
};
6377 
/// Function position: a value range is not meaningful for a function itself,
/// hence updateImpl must never be reached.
struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
  AAValueConstantRangeFunction(const IRPosition &IRP)
      : AAValueConstantRangeImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
                     "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
};
6391 
/// Call site position: treated exactly like the function position above,
/// differing only in the statistics it tracks.
struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
  AAValueConstantRangeCallSite(const IRPosition &IRP)
      : AAValueConstantRangeFunction(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
};
6399 
/// Call site returned position: the range is clamped from the callee's
/// returned-value range, seeded with any !range metadata on the call.
struct AAValueConstantRangeCallSiteReturned
    : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                     AAValueConstantRangeImpl> {
  AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                       AAValueConstantRangeImpl>(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If it is a call instruction with !range metadata, use the metadata.
    if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
      if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));

    AAValueConstantRangeImpl::initialize(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_range)
  }
};
/// Call site argument position: handled exactly like a floating value,
/// differing only in the statistics it tracks.
struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
  AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
      : AAValueConstantRangeFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_range)
  }
};
6431 /// ----------------------------------------------------------------------------
6432 ///                               Attributor
6433 /// ----------------------------------------------------------------------------
6434 
bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *LivenessAA) {
  const Instruction *CtxI = AA.getIRPosition().getCtxI();
  // Without a context instruction inside a function we process, there is no
  // liveness information to consult.
  if (!CtxI || !Functions.count(const_cast<Function *>(CtxI->getFunction())))
    return false;

  // TODO: Find a good way to utilize fine and coarse grained liveness
  // information.
  if (!LivenessAA)
    LivenessAA =
        &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
                            /* TrackDependence */ false);

  // Don't check liveness for AAIsDead.
  if (&AA == LivenessAA)
    return false;

  if (!LivenessAA->isAssumedDead(CtxI))
    return false;

  // We actually used liveness information so we have to record a dependence.
  recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);

  return true;
}
6460 
bool Attributor::checkForAllUses(
    const function_ref<bool(const Use &, bool &)> &Pred,
    const AbstractAttribute &QueryingAA, const Value &V) {
  const IRPosition &IRP = QueryingAA.getIRPosition();
  SmallVector<const Use *, 16> Worklist;
  SmallPtrSet<const Use *, 16> Visited;

  for (const Use &U : V.uses())
    Worklist.push_back(&U);

  LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
                    << " initial uses to check\n");

  if (Worklist.empty())
    return true;

  // Liveness information is only available when the query is anchored in a
  // function we can ask AAIsDead about.
  bool AnyDead = false;
  const Function *ScopeFn = IRP.getAnchorScope();
  const auto *LivenessAA =
      ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
                                    /* TrackDependence */ false)
              : nullptr;

  while (!Worklist.empty()) {
    const Use *U = Worklist.pop_back_val();
    if (!Visited.insert(U).second)
      continue;
    LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
    // Skip uses whose user is assumed dead; remember we relied on liveness so
    // a dependence can be recorded below.
    if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
      if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
        LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
                          << *LivenessAA << "\n");
        AnyDead = true;
        continue;
      }

    // The predicate may set Follow to also enqueue the uses of the user.
    bool Follow = false;
    if (!Pred(*U, Follow))
      return false;
    if (!Follow)
      continue;
    for (const Use &UU : U->getUser()->uses())
      Worklist.push_back(&UU);
  }

  // We actually used liveness information so we have to record a dependence.
  if (AnyDead)
    recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  return true;
}
6511 
6512 bool Attributor::checkForAllCallSites(
6513     const function_ref<bool(AbstractCallSite)> &Pred,
6514     const AbstractAttribute &QueryingAA, bool RequireAllCallSites,
6515     bool &AllCallSitesKnown) {
6516   // We can try to determine information from
6517   // the call sites. However, this is only possible all call sites are known,
6518   // hence the function has internal linkage.
6519   const IRPosition &IRP = QueryingAA.getIRPosition();
6520   const Function *AssociatedFunction = IRP.getAssociatedFunction();
6521   if (!AssociatedFunction) {
6522     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
6523                       << "\n");
6524     AllCallSitesKnown = false;
6525     return false;
6526   }
6527 
6528   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
6529                               &QueryingAA, AllCallSitesKnown);
6530 }
6531 
bool Attributor::checkForAllCallSites(
    const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
    bool RequireAllCallSites, const AbstractAttribute *QueryingAA,
    bool &AllCallSitesKnown) {
  // Without local linkage there can be call sites we never see, so we can only
  // proceed if the caller does not require all of them.
  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << Fn.getName()
        << " has no internal linkage, hence not all call sites are known\n");
    AllCallSitesKnown = false;
    return false;
  }

  // If we do not require all call sites we might not see all.
  AllCallSitesKnown = RequireAllCallSites;

  for (const Use &U : Fn.uses()) {
    AbstractCallSite ACS(&U);
    if (!ACS) {
      LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
                        << " has non call site use " << *U.get() << " in "
                        << *U.getUser() << "\n");
      // BlockAddress users are allowed.
      if (isa<BlockAddress>(U.getUser()))
        continue;
      return false;
    }

    Instruction *I = ACS.getInstruction();
    Function *Caller = I->getFunction();

    // Look up (do not create) liveness for the caller; the dependence, if the
    // information is used, is recorded explicitly below.
    const auto *LivenessAA =
        lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
                              /* TrackDependence */ false);

    // Skip dead calls.
    if (LivenessAA && LivenessAA->isAssumedDead(I)) {
      // We actually used liveness information so we have to record a
      // dependence.
      if (QueryingAA)
        recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
      AllCallSitesKnown = false;
      continue;
    }

    // For callback calls the relevant use is the callee use of the callback,
    // not the use we iterated to.
    const Use *EffectiveUse =
        ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
    if (!ACS.isCallee(EffectiveUse)) {
      if (!RequireAllCallSites)
        continue;
      LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
                        << " is an invalid use of " << Fn.getName() << "\n");
      return false;
    }

    if (Pred(ACS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *ACS.getInstruction() << "\n");
    return false;
  }

  return true;
}
6597 
6598 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
6599     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
6600         &Pred,
6601     const AbstractAttribute &QueryingAA) {
6602 
6603   const IRPosition &IRP = QueryingAA.getIRPosition();
6604   // Since we need to provide return instructions we have to have an exact
6605   // definition.
6606   const Function *AssociatedFunction = IRP.getAssociatedFunction();
6607   if (!AssociatedFunction)
6608     return false;
6609 
6610   // If this is a call site query we use the call site specific return values
6611   // and liveness information.
6612   // TODO: use the function scope once we have call site AAReturnedValues.
6613   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
6614   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
6615   if (!AARetVal.getState().isValidState())
6616     return false;
6617 
6618   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
6619 }
6620 
6621 bool Attributor::checkForAllReturnedValues(
6622     const function_ref<bool(Value &)> &Pred,
6623     const AbstractAttribute &QueryingAA) {
6624 
6625   const IRPosition &IRP = QueryingAA.getIRPosition();
6626   const Function *AssociatedFunction = IRP.getAssociatedFunction();
6627   if (!AssociatedFunction)
6628     return false;
6629 
6630   // TODO: use the function scope once we have call site AAReturnedValues.
6631   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
6632   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
6633   if (!AARetVal.getState().isValidState())
6634     return false;
6635 
6636   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
6637       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
6638         return Pred(RV);
6639       });
6640 }
6641 
6642 static bool
6643 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
6644                             const function_ref<bool(Instruction &)> &Pred,
6645                             const AAIsDead *LivenessAA, bool &AnyDead,
6646                             const ArrayRef<unsigned> &Opcodes) {
6647   for (unsigned Opcode : Opcodes) {
6648     for (Instruction *I : OpcodeInstMap[Opcode]) {
6649       // Skip dead instructions.
6650       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
6651         AnyDead = true;
6652         continue;
6653       }
6654 
6655       if (!Pred(*I))
6656         return false;
6657     }
6658   }
6659   return true;
6660 }
6661 
bool Attributor::checkForAllInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  // Since we need to provide instructions we have to have an exact definition.
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
  bool AnyDead = false;

  auto &OpcodeInstMap =
      InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
  if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
                                   Opcodes))
    return false;

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  return true;
}
6690 
bool Attributor::checkForAllReadWriteInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    AbstractAttribute &QueryingAA) {

  // Since we need to provide instructions we have to have an exact definition.
  const Function *AssociatedFunction =
      QueryingAA.getIRPosition().getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
  bool AnyDead = false;

  for (Instruction *I :
       InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
    // Skip dead instructions.
    if (LivenessAA.isAssumedDead(I)) {
      AnyDead = true;
      continue;
    }

    if (!Pred(*I))
      return false;
  }

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  return true;
}
6724 
6725 ChangeStatus Attributor::run() {
6726   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
6727                     << AllAbstractAttributes.size()
6728                     << " abstract attributes.\n");
6729 
6730   // Now that all abstract attributes are collected and initialized we start
6731   // the abstract analysis.
6732 
6733   unsigned IterationCounter = 1;
6734 
6735   SmallVector<AbstractAttribute *, 64> ChangedAAs;
6736   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
6737   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
6738 
6739   bool RecomputeDependences = false;
6740 
6741   do {
6742     // Remember the size to determine new attributes.
6743     size_t NumAAs = AllAbstractAttributes.size();
6744     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
6745                       << ", Worklist size: " << Worklist.size() << "\n");
6746 
6747     // For invalid AAs we can fix dependent AAs that have a required dependence,
6748     // thereby folding long dependence chains in a single step without the need
6749     // to run updates.
6750     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
6751       AbstractAttribute *InvalidAA = InvalidAAs[u];
6752       auto &QuerriedAAs = QueryMap[InvalidAA];
6753       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
6754                         << QuerriedAAs.RequiredAAs.size() << "/"
6755                         << QuerriedAAs.OptionalAAs.size()
6756                         << " required/optional dependences\n");
6757       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
6758         AbstractState &DOIAAState = DepOnInvalidAA->getState();
6759         DOIAAState.indicatePessimisticFixpoint();
6760         ++NumAttributesFixedDueToRequiredDependences;
6761         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
6762         if (!DOIAAState.isValidState())
6763           InvalidAAs.insert(DepOnInvalidAA);
6764         else
6765           ChangedAAs.push_back(DepOnInvalidAA);
6766       }
6767       if (!RecomputeDependences)
6768         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
6769                         QuerriedAAs.OptionalAAs.end());
6770     }
6771 
6772     // If dependences (=QueryMap) are recomputed we have to look at all abstract
6773     // attributes again, regardless of what changed in the last iteration.
6774     if (RecomputeDependences) {
6775       LLVM_DEBUG(
6776           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
6777       QueryMap.clear();
6778       ChangedAAs.clear();
6779       Worklist.insert(AllAbstractAttributes.begin(),
6780                       AllAbstractAttributes.end());
6781     }
6782 
6783     // Add all abstract attributes that are potentially dependent on one that
6784     // changed to the work list.
6785     for (AbstractAttribute *ChangedAA : ChangedAAs) {
6786       auto &QuerriedAAs = QueryMap[ChangedAA];
6787       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
6788                       QuerriedAAs.OptionalAAs.end());
6789       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
6790                       QuerriedAAs.RequiredAAs.end());
6791     }
6792 
6793     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
6794                       << ", Worklist+Dependent size: " << Worklist.size()
6795                       << "\n");
6796 
6797     // Reset the changed and invalid set.
6798     ChangedAAs.clear();
6799     InvalidAAs.clear();
6800 
6801     // Update all abstract attribute in the work list and record the ones that
6802     // changed.
6803     for (AbstractAttribute *AA : Worklist)
6804       if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
6805         QueriedNonFixAA = false;
6806         if (AA->update(*this) == ChangeStatus::CHANGED) {
6807           ChangedAAs.push_back(AA);
6808           if (!AA->getState().isValidState())
6809             InvalidAAs.insert(AA);
6810         } else if (!QueriedNonFixAA) {
6811           // If the attribute did not query any non-fix information, the state
6812           // will not change and we can indicate that right away.
6813           AA->getState().indicateOptimisticFixpoint();
6814         }
6815       }
6816 
6817     // Check if we recompute the dependences in the next iteration.
6818     RecomputeDependences = (DepRecomputeInterval > 0 &&
6819                             IterationCounter % DepRecomputeInterval == 0);
6820 
6821     // Add attributes to the changed set if they have been created in the last
6822     // iteration.
6823     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
6824                       AllAbstractAttributes.end());
6825 
6826     // Reset the work list and repopulate with the changed abstract attributes.
6827     // Note that dependent ones are added above.
6828     Worklist.clear();
6829     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
6830 
6831   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
6832                                  VerifyMaxFixpointIterations));
6833 
6834   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
6835                     << IterationCounter << "/" << MaxFixpointIterations
6836                     << " iterations\n");
6837 
6838   size_t NumFinalAAs = AllAbstractAttributes.size();
6839 
6840   // Reset abstract arguments not settled in a sound fixpoint by now. This
6841   // happens when we stopped the fixpoint iteration early. Note that only the
6842   // ones marked as "changed" *and* the ones transitively depending on them
6843   // need to be reverted to a pessimistic state. Others might not be in a
6844   // fixpoint state but we can use the optimistic results for them anyway.
6845   SmallPtrSet<AbstractAttribute *, 32> Visited;
6846   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
6847     AbstractAttribute *ChangedAA = ChangedAAs[u];
6848     if (!Visited.insert(ChangedAA).second)
6849       continue;
6850 
6851     AbstractState &State = ChangedAA->getState();
6852     if (!State.isAtFixpoint()) {
6853       State.indicatePessimisticFixpoint();
6854 
6855       NumAttributesTimedOut++;
6856     }
6857 
6858     auto &QuerriedAAs = QueryMap[ChangedAA];
6859     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
6860                       QuerriedAAs.OptionalAAs.end());
6861     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
6862                       QuerriedAAs.RequiredAAs.end());
6863   }
6864 
6865   LLVM_DEBUG({
6866     if (!Visited.empty())
6867       dbgs() << "\n[Attributor] Finalized " << Visited.size()
6868              << " abstract attributes.\n";
6869   });
6870 
6871   unsigned NumManifested = 0;
6872   unsigned NumAtFixpoint = 0;
6873   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
6874   for (AbstractAttribute *AA : AllAbstractAttributes) {
6875     AbstractState &State = AA->getState();
6876 
6877     // If there is not already a fixpoint reached, we can now take the
6878     // optimistic state. This is correct because we enforced a pessimistic one
6879     // on abstract attributes that were transitively dependent on a changed one
6880     // already above.
6881     if (!State.isAtFixpoint())
6882       State.indicateOptimisticFixpoint();
6883 
6884     // If the state is invalid, we do not try to manifest it.
6885     if (!State.isValidState())
6886       continue;
6887 
6888     // Skip dead code.
6889     if (isAssumedDead(*AA, nullptr))
6890       continue;
6891     // Manifest the state and record if we changed the IR.
6892     ChangeStatus LocalChange = AA->manifest(*this);
6893     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
6894       AA->trackStatistics();
6895 
6896     ManifestChange = ManifestChange | LocalChange;
6897 
6898     NumAtFixpoint++;
6899     NumManifested += (LocalChange == ChangeStatus::CHANGED);
6900   }
6901 
6902   (void)NumManifested;
6903   (void)NumAtFixpoint;
6904   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
6905                     << " arguments while " << NumAtFixpoint
6906                     << " were in a valid fixpoint state\n");
6907 
6908   NumAttributesManifested += NumManifested;
6909   NumAttributesValidFixpoint += NumAtFixpoint;
6910 
6911   (void)NumFinalAAs;
6912   assert(NumFinalAAs == AllAbstractAttributes.size() &&
6913          "Expected the final number of abstract attributes to remain "
6914          "unchanged!");
6915 
6916   // Delete stuff at the end to avoid invalid references and a nice order.
6917   {
6918     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
6919                       << ToBeDeletedFunctions.size() << " functions and "
6920                       << ToBeDeletedBlocks.size() << " blocks and "
6921                       << ToBeDeletedInsts.size() << " instructions and "
6922                       << ToBeChangedUses.size() << " uses\n");
6923 
6924     SmallVector<WeakTrackingVH, 32> DeadInsts;
6925     SmallVector<Instruction *, 32> TerminatorsToFold;
6926 
6927     for (auto &It : ToBeChangedUses) {
6928       Use *U = It.first;
6929       Value *NewV = It.second;
6930       Value *OldV = U->get();
6931       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
6932                         << " instead of " << *OldV << "\n");
6933       U->set(NewV);
6934       if (Instruction *I = dyn_cast<Instruction>(OldV)) {
6935         CGModifiedFunctions.insert(I->getFunction());
6936         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
6937             isInstructionTriviallyDead(I))
6938           DeadInsts.push_back(I);
6939       }
6940       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
6941         Instruction *UserI = cast<Instruction>(U->getUser());
6942         if (isa<UndefValue>(NewV)) {
6943           ToBeChangedToUnreachableInsts.insert(UserI);
6944         } else {
6945           TerminatorsToFold.push_back(UserI);
6946         }
6947       }
6948     }
6949     for (auto &V : InvokeWithDeadSuccessor)
6950       if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
6951         bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
6952         bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
6953         bool Invoke2CallAllowed =
6954             !AAIsDeadFunction::mayCatchAsynchronousExceptions(
6955                 *II->getFunction());
6956         assert((UnwindBBIsDead || NormalBBIsDead) &&
6957                "Invoke does not have dead successors!");
6958         BasicBlock *BB = II->getParent();
6959         BasicBlock *NormalDestBB = II->getNormalDest();
6960         if (UnwindBBIsDead) {
6961           Instruction *NormalNextIP = &NormalDestBB->front();
6962           if (Invoke2CallAllowed) {
6963             changeToCall(II);
6964             NormalNextIP = BB->getTerminator();
6965           }
6966           if (NormalBBIsDead)
6967             ToBeChangedToUnreachableInsts.insert(NormalNextIP);
6968         } else {
6969           assert(NormalBBIsDead && "Broken invariant!");
6970           if (!NormalDestBB->getUniquePredecessor())
6971             NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
6972           ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
6973         }
6974       }
6975     for (auto &V : ToBeChangedToUnreachableInsts)
6976       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
6977         CGModifiedFunctions.insert(I->getFunction());
6978         changeToUnreachable(I, /* UseLLVMTrap */ false);
6979       }
6980     for (Instruction *I : TerminatorsToFold) {
6981       CGModifiedFunctions.insert(I->getFunction());
6982       ConstantFoldTerminator(I->getParent());
6983     }
6984 
6985     for (auto &V : ToBeDeletedInsts) {
6986       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
6987         CGModifiedFunctions.insert(I->getFunction());
6988         I->replaceAllUsesWith(UndefValue::get(I->getType()));
6989         if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
6990           DeadInsts.push_back(I);
6991         else
6992           I->eraseFromParent();
6993       }
6994     }
6995 
6996     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
6997 
6998     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
6999       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
7000       ToBeDeletedBBs.reserve(NumDeadBlocks);
7001       for (BasicBlock *BB : ToBeDeletedBlocks) {
7002         CGModifiedFunctions.insert(BB->getParent());
7003         ToBeDeletedBBs.push_back(BB);
7004       }
7005       // Actually we do not delete the blocks but squash them into a single
7006       // unreachable but untangling branches that jump here is something we need
7007       // to do in a more generic way.
7008       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
7009       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
7010       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
7011     }
7012 
7013     // Identify dead internal functions and delete them. This happens outside
7014     // the other fixpoint analysis as we might treat potentially dead functions
7015     // as live to lower the number of iterations. If they happen to be dead, the
7016     // below fixpoint loop will identify and eliminate them.
7017     SmallVector<Function *, 8> InternalFns;
7018     for (Function *F : Functions)
7019       if (F->hasLocalLinkage())
7020         InternalFns.push_back(F);
7021 
7022     bool FoundDeadFn = true;
7023     while (FoundDeadFn) {
7024       FoundDeadFn = false;
7025       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
7026         Function *F = InternalFns[u];
7027         if (!F)
7028           continue;
7029 
7030         bool AllCallSitesKnown;
7031         if (!checkForAllCallSites(
7032                 [this](AbstractCallSite ACS) {
7033                   return ToBeDeletedFunctions.count(
7034                       ACS.getInstruction()->getFunction());
7035                 },
7036                 *F, true, nullptr, AllCallSitesKnown))
7037           continue;
7038 
7039         ToBeDeletedFunctions.insert(F);
7040         InternalFns[u] = nullptr;
7041         FoundDeadFn = true;
7042       }
7043     }
7044   }
7045 
7046   // Rewrite the functions as requested during manifest.
7047   ManifestChange =
7048       ManifestChange | rewriteFunctionSignatures(CGModifiedFunctions);
7049 
7050   for (Function *Fn : CGModifiedFunctions)
7051     CGUpdater.reanalyzeFunction(*Fn);
7052 
7053   STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
7054   BUILD_STAT_NAME(AAIsDead, Function) += ToBeDeletedFunctions.size();
7055 
7056   for (Function *Fn : ToBeDeletedFunctions)
7057     CGUpdater.removeFunction(*Fn);
7058 
7059   if (VerifyMaxFixpointIterations &&
7060       IterationCounter != MaxFixpointIterations) {
7061     errs() << "\n[Attributor] Fixpoint iteration done after: "
7062            << IterationCounter << "/" << MaxFixpointIterations
7063            << " iterations\n";
7064     llvm_unreachable("The fixpoint was not reached with exactly the number of "
7065                      "specified iterations!");
7066   }
7067 
7068   return ManifestChange;
7069 }
7070 
7071 bool Attributor::isValidFunctionSignatureRewrite(
7072     Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
7073 
7074   auto CallSiteCanBeChanged = [](AbstractCallSite ACS) {
7075     // Forbid must-tail calls for now.
7076     return !ACS.isCallbackCall() && !ACS.getCallSite().isMustTailCall();
7077   };
7078 
7079   Function *Fn = Arg.getParent();
7080   // Avoid var-arg functions for now.
7081   if (Fn->isVarArg()) {
7082     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
7083     return false;
7084   }
7085 
7086   // Avoid functions with complicated argument passing semantics.
7087   AttributeList FnAttributeList = Fn->getAttributes();
7088   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
7089       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
7090       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca)) {
7091     LLVM_DEBUG(
7092         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
7093     return false;
7094   }
7095 
7096   // Avoid callbacks for now.
7097   bool AllCallSitesKnown;
7098   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
7099                             AllCallSitesKnown)) {
7100     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
7101     return false;
7102   }
7103 
7104   auto InstPred = [](Instruction &I) {
7105     if (auto *CI = dyn_cast<CallInst>(&I))
7106       return !CI->isMustTailCall();
7107     return true;
7108   };
7109 
7110   // Forbid must-tail calls for now.
7111   // TODO:
7112   bool AnyDead;
7113   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
7114   if (!checkForAllInstructionsImpl(OpcodeInstMap, InstPred, nullptr, AnyDead,
7115                                    {Instruction::Call})) {
7116     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
7117     return false;
7118   }
7119 
7120   return true;
7121 }
7122 
7123 bool Attributor::registerFunctionSignatureRewrite(
7124     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
7125     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
7126     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
7127   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
7128                     << Arg.getParent()->getName() << " with "
7129                     << ReplacementTypes.size() << " replacements\n");
7130   assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
7131          "Cannot register an invalid rewrite");
7132 
7133   Function *Fn = Arg.getParent();
7134   SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = ArgumentReplacementMap[Fn];
7135   if (ARIs.empty())
7136     ARIs.resize(Fn->arg_size());
7137 
7138   // If we have a replacement already with less than or equal new arguments,
7139   // ignore this request.
7140   ArgumentReplacementInfo *&ARI = ARIs[Arg.getArgNo()];
7141   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
7142     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
7143     return false;
7144   }
7145 
7146   // If we have a replacement already but we like the new one better, delete
7147   // the old.
7148   if (ARI)
7149     delete ARI;
7150 
7151   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
7152                     << Arg.getParent()->getName() << " with "
7153                     << ReplacementTypes.size() << " replacements\n");
7154 
7155   // Remember the replacement.
7156   ARI = new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
7157                                     std::move(CalleeRepairCB),
7158                                     std::move(ACSRepairCB));
7159 
7160   return true;
7161 }
7162 
// Perform all registered function signature rewrites: for each function with
// ArgumentReplacementInfos, create a new function with the rewritten
// argument list, splice the old body over, replace every call site, and
// record functions whose bodies changed in \p ModifiedFns so the call graph
// can be reanalyzed. Returns CHANGED iff at least one rewrite happened.
ChangeStatus Attributor::rewriteFunctionSignatures(
    SmallPtrSetImpl<Function *> &ModifiedFns) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  for (auto &It : ArgumentReplacementMap) {
    Function *OldFn = It.getFirst();

    // Deleted functions do not require rewrites.
    if (ToBeDeletedFunctions.count(OldFn))
      continue;

    // One (possibly null) ArgumentReplacementInfo per old argument; null
    // entries mean the argument is kept as-is.
    const SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = It.getSecond();
    assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");

    SmallVector<Type *, 16> NewArgumentTypes;
    SmallVector<AttributeSet, 16> NewArgumentAttributes;

    // Collect replacement argument types and copy over existing attributes.
    // Replacement arguments start with empty attribute sets; unchanged
    // arguments keep their original parameter attributes.
    AttributeList OldFnAttributeList = OldFn->getAttributes();
    for (Argument &Arg : OldFn->args()) {
      if (ArgumentReplacementInfo *ARI = ARIs[Arg.getArgNo()]) {
        NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
                                ARI->ReplacementTypes.end());
        NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
                                     AttributeSet());
      } else {
        NewArgumentTypes.push_back(Arg.getType());
        NewArgumentAttributes.push_back(
            OldFnAttributeList.getParamAttributes(Arg.getArgNo()));
      }
    }

    FunctionType *OldFnTy = OldFn->getFunctionType();
    Type *RetTy = OldFnTy->getReturnType();

    // Construct the new function type using the new arguments types.
    FunctionType *NewFnTy =
        FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());

    LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
                      << "' from " << *OldFn->getFunctionType() << " to "
                      << *NewFnTy << "\n");

    // Create the new function body and insert it into the module. It is
    // placed right before the old function so module order is preserved, and
    // it steals the old function's name.
    Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
                                       OldFn->getAddressSpace(), "");
    OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
    NewFn->takeName(OldFn);
    NewFn->copyAttributesFrom(OldFn);

    // Patch the pointer to LLVM function in debug info descriptor.
    NewFn->setSubprogram(OldFn->getSubprogram());
    OldFn->setSubprogram(nullptr);

    // Recompute the parameter attributes list based on the new arguments for
    // the function.
    LLVMContext &Ctx = OldFn->getContext();
    NewFn->setAttributes(AttributeList::get(
        Ctx, OldFnAttributeList.getFnAttributes(),
        OldFnAttributeList.getRetAttributes(), NewArgumentAttributes));

    // Since we have now created the new function, splice the body of the old
    // function right into the new function, leaving the old rotting hulk of the
    // function empty.
    NewFn->getBasicBlockList().splice(NewFn->begin(),
                                      OldFn->getBasicBlockList());

    // Set of all "call-like" instructions that invoke the old function mapped
    // to their new replacements.
    SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;

    // Callback to create a new "call-like" instruction for a given one.
    auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
      CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
      const AttributeList &OldCallAttributeList = OldCB->getAttributes();

      // Collect the new argument operands for the replacement call site.
      // Replaced arguments are produced by the ACS repair callback; unchanged
      // arguments are forwarded together with their original attributes.
      SmallVector<Value *, 16> NewArgOperands;
      SmallVector<AttributeSet, 16> NewArgOperandAttributes;
      for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
        unsigned NewFirstArgNum = NewArgOperands.size();
        (void)NewFirstArgNum; // only used inside assert.
        if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
          if (ARI->ACSRepairCB)
            ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
          assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
                     NewArgOperands.size() &&
                 "ACS repair callback did not provide as many operand as new "
                 "types were registered!");
          // TODO: Exose the attribute set to the ACS repair callback
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttributes(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB =
            InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
                               NewArgOperands, OperandBundleDefs, "", OldCB);
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
                                       "", OldCB);
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      uint64_t W;
      if (OldCB->extractProfTotalWeight(W))
        NewCB->setProfWeight(W);
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->setDebugLoc(OldCB->getDebugLoc());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttributes(),
          OldCallAttributeList.getRetAttributes(), NewArgOperandAttributes));

      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    // This must succeed: registration already verified all call sites are
    // known and rewritable (see isValidFunctionSignatureRewrite).
    bool AllCallSitesKnown;
    bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
                                        true, nullptr, AllCallSitesKnown);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments. For replaced arguments the callee repair callback
    // is responsible for fixing up uses; unchanged arguments are RAUW'ed onto
    // the corresponding new argument one-to-one.
    auto OldFnArgIt = OldFn->arg_begin();
    auto NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      // We do not modify the call graph here but simply reanalyze the old
      // function. This should be revisited once the old PM is gone.
      ModifiedFns.insert(OldCB.getFunction());
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // Replace the function in the call graph (if any).
    CGUpdater.replaceFunctionWith(*OldFn, *NewFn);

    // If the old function was modified and needed to be reanalyzed, the new one
    // does now.
    if (ModifiedFns.erase(OldFn))
      ModifiedFns.insert(NewFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}
7345 
7346 void Attributor::initializeInformationCache(Function &F) {
7347 
7348   // Walk all instructions to find interesting instructions that might be
7349   // queried by abstract attributes during their initialization or update.
7350   // This has to happen before we create attributes.
7351   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
7352   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
7353 
7354   for (Instruction &I : instructions(&F)) {
7355     bool IsInterestingOpcode = false;
7356 
7357     // To allow easy access to all instructions in a function with a given
7358     // opcode we store them in the InfoCache. As not all opcodes are interesting
7359     // to concrete attributes we only cache the ones that are as identified in
7360     // the following switch.
7361     // Note: There are no concrete attributes now so this is initially empty.
7362     switch (I.getOpcode()) {
7363     default:
7364       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
7365              "New call site/base instruction type needs to be known int the "
7366              "Attributor.");
7367       break;
7368     case Instruction::Load:
7369       // The alignment of a pointer is interesting for loads.
7370     case Instruction::Store:
7371       // The alignment of a pointer is interesting for stores.
7372     case Instruction::Call:
7373     case Instruction::CallBr:
7374     case Instruction::Invoke:
7375     case Instruction::CleanupRet:
7376     case Instruction::CatchSwitch:
7377     case Instruction::AtomicRMW:
7378     case Instruction::AtomicCmpXchg:
7379     case Instruction::Br:
7380     case Instruction::Resume:
7381     case Instruction::Ret:
7382       IsInterestingOpcode = true;
7383     }
7384     if (IsInterestingOpcode)
7385       InstOpcodeMap[I.getOpcode()].push_back(&I);
7386     if (I.mayReadOrWriteMemory())
7387       ReadOrWriteInsts.push_back(&I);
7388   }
7389 }
7390 
7391 void Attributor::recordDependence(const AbstractAttribute &FromAA,
7392                                   const AbstractAttribute &ToAA,
7393                                   DepClassTy DepClass) {
7394   if (FromAA.getState().isAtFixpoint())
7395     return;
7396 
7397   if (DepClass == DepClassTy::REQUIRED)
7398     QueryMap[&FromAA].RequiredAAs.insert(
7399         const_cast<AbstractAttribute *>(&ToAA));
7400   else
7401     QueryMap[&FromAA].OptionalAAs.insert(
7402         const_cast<AbstractAttribute *>(&ToAA));
7403   QueriedNonFixAA = true;
7404 }
7405 
// Seed the default set of abstract attributes for \p F: function-level AAs,
// return-value AAs, argument AAs, and AAs for call site returns/arguments as
// well as load/store pointer operands. Skips declarations and functions that
// were already visited. Note: the creation order below determines AA
// registration order and is therefore deliberate.
void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;
  if (F.isDeclaration())
    return;

  IRPosition FPos = IRPosition::function(F);

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might be "will-return".
  getOrCreateAAFor<AAWillReturn>(FPos);

  // Every function might contain instructions that cause "undefined behavior".
  getOrCreateAAFor<AAUndefinedBehavior>(FPos);

  // Every function can be nounwind.
  getOrCreateAAFor<AANoUnwind>(FPos);

  // Every function might be marked "nosync"
  getOrCreateAAFor<AANoSync>(FPos);

  // Every function might be "no-free".
  getOrCreateAAFor<AANoFree>(FPos);

  // Every function might be "no-return".
  getOrCreateAAFor<AANoReturn>(FPos);

  // Every function might be "no-recurse".
  getOrCreateAAFor<AANoRecurse>(FPos);

  // Every function might be "readnone/readonly/writeonly/...".
  getOrCreateAAFor<AAMemoryBehavior>(FPos);

  // Every function might be applicable for Heap-To-Stack conversion.
  if (EnableHeapToStack)
    getOrCreateAAFor<AAHeapToStack>(FPos);

  // Return attributes are only appropriate if the return type is non void.
  Type *ReturnType = F.getReturnType();
  if (!ReturnType->isVoidTy()) {
    // Argument attribute "returned" --- Create only one per function even
    // though it is an argument attribute.
    getOrCreateAAFor<AAReturnedValues>(FPos);

    IRPosition RetPos = IRPosition::returned(F);

    // Every returned value might be dead.
    getOrCreateAAFor<AAIsDead>(RetPos);

    // Every function might be simplified.
    getOrCreateAAFor<AAValueSimplify>(RetPos);

    if (ReturnType->isPointerTy()) {

      // Every function with pointer return type might be marked align.
      getOrCreateAAFor<AAAlign>(RetPos);

      // Every function with pointer return type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(RetPos);

      // Every function with pointer return type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(RetPos);

      // Every function with pointer return type might be marked
      // dereferenceable.
      getOrCreateAAFor<AADereferenceable>(RetPos);
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);

    // Every argument might be simplified.
    getOrCreateAAFor<AAValueSimplify>(ArgPos);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(ArgPos);

      // Every argument with pointer type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(ArgPos);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      getOrCreateAAFor<AANoCapture>(ArgPos);

      // Every argument with pointer type might be marked
      // "readnone/readonly/writeonly/..."
      getOrCreateAAFor<AAMemoryBehavior>(ArgPos);

      // Every argument with pointer type might be marked nofree.
      getOrCreateAAFor<AANoFree>(ArgPos);

      // Every argument with pointer type might be privatizable (or promotable)
      getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
    }
  }

  // Seed AAs for every call site with a known callee.
  auto CallSitePred = [&](Instruction &I) -> bool {
    CallSite CS(&I);
    if (Function *Callee = CS.getCalledFunction()) {
      // Skip declarations except if annotations on their call sites were
      // explicitly requested.
      if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
          !Callee->hasMetadata(LLVMContext::MD_callback))
        return true;

      if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {

        IRPosition CSRetPos = IRPosition::callsite_returned(CS);

        // Call site return values might be dead.
        getOrCreateAAFor<AAIsDead>(CSRetPos);

        // Call site return integer values might be limited by a constant range.
        if (Callee->getReturnType()->isIntegerTy()) {
          getOrCreateAAFor<AAValueConstantRange>(CSRetPos);
        }
      }

      for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {

        IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);

        // Every call site argument might be dead.
        getOrCreateAAFor<AAIsDead>(CSArgPos);

        // Call site argument might be simplified.
        getOrCreateAAFor<AAValueSimplify>(CSArgPos);

        // The remaining attributes only make sense for pointer arguments.
        if (!CS.getArgument(i)->getType()->isPointerTy())
          continue;

        // Call site argument attribute "non-null".
        getOrCreateAAFor<AANonNull>(CSArgPos);

        // Call site argument attribute "no-alias".
        getOrCreateAAFor<AANoAlias>(CSArgPos);

        // Call site argument attribute "dereferenceable".
        getOrCreateAAFor<AADereferenceable>(CSArgPos);

        // Call site argument attribute "align".
        getOrCreateAAFor<AAAlign>(CSArgPos);

        // Call site argument attribute
        // "readnone/readonly/writeonly/..."
        getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);

        // Call site argument attribute "nofree".
        getOrCreateAAFor<AANoFree>(CSArgPos);
      }
    }
    return true;
  };

  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  bool Success, AnyDead = false;
  Success = checkForAllInstructionsImpl(
      OpcodeInstMap, CallSitePred, nullptr, AnyDead,
      {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
       (unsigned)Instruction::Call});
  (void)Success;
  assert(Success && !AnyDead && "Expected the check call to be successful!");

  // Seed alignment AAs for the pointer operands of loads and stores.
  auto LoadStorePred = [&](Instruction &I) -> bool {
    if (isa<LoadInst>(I))
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
    else
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
    return true;
  };
  Success = checkForAllInstructionsImpl(
      OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
      {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
  (void)Success;
  assert(Success && !AnyDead && "Expected the check call to be successful!");
}
7595 
7596 /// Helpers to ease debugging through output streams and print calls.
7597 ///
7598 ///{
7599 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
7600   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
7601 }
7602 
7603 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
7604   switch (AP) {
7605   case IRPosition::IRP_INVALID:
7606     return OS << "inv";
7607   case IRPosition::IRP_FLOAT:
7608     return OS << "flt";
7609   case IRPosition::IRP_RETURNED:
7610     return OS << "fn_ret";
7611   case IRPosition::IRP_CALL_SITE_RETURNED:
7612     return OS << "cs_ret";
7613   case IRPosition::IRP_FUNCTION:
7614     return OS << "fn";
7615   case IRPosition::IRP_CALL_SITE:
7616     return OS << "cs";
7617   case IRPosition::IRP_ARGUMENT:
7618     return OS << "arg";
7619   case IRPosition::IRP_CALL_SITE_ARGUMENT:
7620     return OS << "cs_arg";
7621   }
7622   llvm_unreachable("Unknown attribute position!");
7623 }
7624 
7625 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
7626   const Value &AV = Pos.getAssociatedValue();
7627   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
7628             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
7629 }
7630 
7631 template <typename base_ty, base_ty BestState, base_ty WorstState>
7632 raw_ostream &
7633 llvm::operator<<(raw_ostream &OS,
7634                  const IntegerStateBase<base_ty, BestState, WorstState> &S) {
7635   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
7636             << static_cast<const AbstractState &>(S);
7637 }
7638 
7639 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
7640   OS << "range-state(" << S.getBitWidth() << ")<";
7641   S.getKnown().print(OS);
7642   OS << " / ";
7643   S.getAssumed().print(OS);
7644   OS << ">";
7645 
7646   return OS << static_cast<const AbstractState &>(S);
7647 }
7648 
7649 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
7650   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
7651 }
7652 
// Stream operator that delegates to the attribute's virtual print method.
raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}
7657 
7658 void AbstractAttribute::print(raw_ostream &OS) const {
7659   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
7660      << "]";
7661 }
7662 ///}
7663 
7664 /// ----------------------------------------------------------------------------
7665 ///                       Pass (Manager) Boilerplate
7666 /// ----------------------------------------------------------------------------
7667 
7668 static bool runAttributorOnFunctions(InformationCache &InfoCache,
7669                                      SetVector<Function *> &Functions,
7670                                      AnalysisGetter &AG,
7671                                      CallGraphUpdater &CGUpdater) {
7672   if (DisableAttributor || Functions.empty())
7673     return false;
7674 
7675   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << Functions.size()
7676                     << " functions.\n");
7677 
7678   // Create an Attributor and initially empty information cache that is filled
7679   // while we identify default attribute opportunities.
7680   Attributor A(Functions, InfoCache, CGUpdater, DepRecInterval);
7681 
7682   for (Function *F : Functions)
7683     A.initializeInformationCache(*F);
7684 
7685   for (Function *F : Functions) {
7686     if (F->hasExactDefinition())
7687       NumFnWithExactDefinition++;
7688     else
7689       NumFnWithoutExactDefinition++;
7690 
7691     // We look at internal functions only on-demand but if any use is not a
7692     // direct call or outside the current set of analyzed functions, we have to
7693     // do it eagerly.
7694     if (F->hasLocalLinkage()) {
7695       if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
7696             ImmutableCallSite ICS(U.getUser());
7697             return ICS && ICS.isCallee(&U) &&
7698                    Functions.count(const_cast<Function *>(ICS.getCaller()));
7699           }))
7700         continue;
7701     }
7702 
7703     // Populate the Attributor with abstract attribute opportunities in the
7704     // function and the information cache with IR information.
7705     A.identifyDefaultAbstractAttributes(*F);
7706   }
7707 
7708   bool Changed = A.run() == ChangeStatus::CHANGED;
7709   assert(!verifyModule(*Functions.front()->getParent(), &errs()) &&
7710          "Module verification failed!");
7711   return Changed;
7712 }
7713 
7714 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
7715   FunctionAnalysisManager &FAM =
7716       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
7717   AnalysisGetter AG(FAM);
7718 
7719   SetVector<Function *> Functions;
7720   for (Function &F : M)
7721     Functions.insert(&F);
7722 
7723   CallGraphUpdater CGUpdater;
7724   InformationCache InfoCache(M, AG, /* CGSCC */ nullptr);
7725   if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater)) {
7726     // FIXME: Think about passes we will preserve and add them here.
7727     return PreservedAnalyses::none();
7728   }
7729   return PreservedAnalyses::all();
7730 }
7731 
7732 PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
7733                                            CGSCCAnalysisManager &AM,
7734                                            LazyCallGraph &CG,
7735                                            CGSCCUpdateResult &UR) {
7736   FunctionAnalysisManager &FAM =
7737       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
7738   AnalysisGetter AG(FAM);
7739 
7740   SetVector<Function *> Functions;
7741   for (LazyCallGraph::Node &N : C)
7742     Functions.insert(&N.getFunction());
7743 
7744   if (Functions.empty())
7745     return PreservedAnalyses::all();
7746 
7747   Module &M = *Functions.back()->getParent();
7748   CallGraphUpdater CGUpdater;
7749   CGUpdater.initialize(CG, C, AM, UR);
7750   InformationCache InfoCache(M, AG, /* CGSCC */ &Functions);
7751   if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater)) {
7752     // FIXME: Think about passes we will preserve and add them here.
7753     return PreservedAnalyses::none();
7754   }
7755   return PreservedAnalyses::all();
7756 }
7757 
7758 namespace {
7759 
7760 struct AttributorLegacyPass : public ModulePass {
7761   static char ID;
7762 
7763   AttributorLegacyPass() : ModulePass(ID) {
7764     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
7765   }
7766 
7767   bool runOnModule(Module &M) override {
7768     if (skipModule(M))
7769       return false;
7770 
7771     AnalysisGetter AG;
7772     SetVector<Function *> Functions;
7773     for (Function &F : M)
7774       Functions.insert(&F);
7775 
7776     CallGraphUpdater CGUpdater;
7777     InformationCache InfoCache(M, AG, /* CGSCC */ nullptr);
7778     return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater);
7779   }
7780 
7781   void getAnalysisUsage(AnalysisUsage &AU) const override {
7782     // FIXME: Think about passes we will preserve and add them here.
7783     AU.addRequired<TargetLibraryInfoWrapperPass>();
7784   }
7785 };
7786 
7787 struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
7788   CallGraphUpdater CGUpdater;
7789   static char ID;
7790 
7791   AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
7792     initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
7793   }
7794 
7795   bool runOnSCC(CallGraphSCC &SCC) override {
7796     if (skipSCC(SCC))
7797       return false;
7798 
7799     SetVector<Function *> Functions;
7800     for (CallGraphNode *CGN : SCC)
7801       if (Function *Fn = CGN->getFunction())
7802         if (!Fn->isDeclaration())
7803           Functions.insert(Fn);
7804 
7805     if (Functions.empty())
7806       return false;
7807 
7808     AnalysisGetter AG;
7809     CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
7810     CGUpdater.initialize(CG, SCC);
7811     Module &M = *Functions.back()->getParent();
7812     InformationCache InfoCache(M, AG, /* CGSCC */ &Functions);
7813     return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater);
7814   }
7815 
7816   bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
7817 
7818   void getAnalysisUsage(AnalysisUsage &AU) const override {
7819     // FIXME: Think about passes we will preserve and add them here.
7820     AU.addRequired<TargetLibraryInfoWrapperPass>();
7821     CallGraphSCCPass::getAnalysisUsage(AU);
7822   }
7823 };
7824 
7825 } // end anonymous namespace
7826 
// Public factory functions used to create the legacy passes from C++ code.
Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
Pass *llvm::createAttributorCGSCCLegacyPass() {
  return new AttributorCGSCCLegacyPass();
}
7831 
// Out-of-line definitions of the legacy pass identifiers; their addresses
// serve as the unique pass IDs.
char AttributorLegacyPass::ID = 0;
char AttributorCGSCCLegacyPass::ID = 0;

// Out-of-line definitions of the abstract attribute class identifiers; their
// addresses uniquely identify each attribute kind.
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAValueConstantRange::ID = 0;
7855 
7856 // Macro magic to create the static generator function for attributes that
7857 // follow the naming scheme.
7858 
// Switch case for an IR position kind the attribute cannot be created for.
// NOTE: comments must stay outside the macro bodies; a '//' would comment out
// the line-continuation backslash.
#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

// Switch case that instantiates the position-specific subclass CLASS##SUFFIX.
#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new CLASS##SUFFIX(IRP);                                               \
    break;

// createForPosition for attributes valid on function and call site positions.
#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid on all value positions (floating,
// argument, returned, call site returned/argument) but not whole functions.
#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid on every position kind.
#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid on the function position only.
#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid everywhere except the returned
// position.
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
7947 
// Attributes restricted to function and call site positions.
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)

// Attributes valid on value positions only.
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)

// Attributes valid on every position kind.
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

// Attributes valid on the function position only.
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

// Attributes valid everywhere except the returned position.
CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

// The helper macros are only needed for the instantiations above.
#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV
7980 
// Register the module-level legacy pass under the name "attributor".
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)
// Register the SCC-level legacy pass under the name "attributor-cgscc".
INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
                      "Deduce and propagate attributes (CGSCC pass)", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
                    "Deduce and propagate attributes (CGSCC pass)", false,
                    false)
7994