//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
STATISTIC(NumAttributesFixedDueToRequiredDependences,
          "Number of abstract attributes fixed due to required dependences");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking, overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site, one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

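// For illustration: STATS_DECLTRACK_ARG_ATTR(returned) expands (roughly) to
//   STATISTIC(NumIRArguments_returned, "Number of arguments marked 'returned'");
//   ++NumIRArguments_returned;
// wrapped in a local scope.
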
// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)

#undef PIPE_OPERATOR
} // namespace llvm

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::CHANGED ? l : r;
}
ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::UNCHANGED ? l : r;
}
///}
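
// For illustration: CHANGED | UNCHANGED yields CHANGED ("did anything
// change?") while CHANGED & UNCHANGED yields UNCHANGED ("did everything
// change?").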

Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  Optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CBUses;
  ImmutableCallSite ICS(&getAnchorValue());
  AbstractCallSite::getCallbackUses(ICS, CBUses);
  for (const Use *U : CBUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg.hasValue()) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
    return CBCandidateArg.getValue();

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  const Function *Callee = ICS.getCalledFunction();
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}

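/// Simplify \p V using AAValueSimplify. Returns None if no verdict is known
/// (yet) or the value simplifies to undef, nullptr if it does not simplify to
/// a ConstantInt, and the constant otherwise. \p UsedAssumedInformation is set
/// if the simplification is only assumed, not known, to hold.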
static Optional<ConstantInt *>
getAssumedConstant(Attributor &A, const Value &V, const AbstractAttribute &AA,
                   bool &UsedAssumedInformation) {
  const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
      AA, IRPosition::value(V), /* TrackDependence */ false);
  Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
  bool IsKnown = ValueSimplifyAA.isKnown();
  UsedAssumedInformation |= !IsKnown;
  if (!SimplifiedV.hasValue()) {
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  ConstantInt *CI = dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
  if (CI)
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
  return CI;
}

/// Get the pointer operand of a memory accessing instruction. If \p I is not a
/// memory accessing instruction, return nullptr. If \p AllowVolatile is set to
/// false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
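///
/// For illustration: for \p Ptr of type `{ i32, i32 }*` and \p Offset 4, this
/// builds a GEP with indices [0, 1] (i.e., the second struct element) rather
/// than a byte-wise i8* GEP.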
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(dbgs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
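///
/// For illustration: given `%v = select i1 %c, i32 %a, i32 %b`, the traversal
/// visits %a and %b (not %v itself) and invokes \p VisitValueCB on each with
/// the "stripped" flag set.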
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8, const function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion, keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

/// Return true if \p New is equal to or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
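
// For illustration: a new `align 8` is equal to or worse than an existing
// `align 16` (8 <= 16), so addIfNotExistent below would not replace the
// existing attribute with it.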

/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum, string, or int attribute!");
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following, generic code manifests the attributes in DeducedAttrs
  // if they improve the current IR. Due to the different annotation positions
  // we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}

const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
                         bool IgnoreSubsumingPositions) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
        return true;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  return false;
}

void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs) {
      const Attribute &Attr = EquivIRP.getAttr(AK);
      if (Attr.getKindAsEnum() == AK)
        Attrs.push_back(Attr);
    }
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
}

void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (isa<Argument>(AnchorVal)) {
      assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
             "Associated value mismatch!");
    } else {
      assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
                 &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class to compose two generic deductions.
template <typename AAType, typename Base, typename StateType,
          template <typename...> class F, template <typename...> class G>
struct AAComposeTwoGenericDeduction
    : public F<AAType, G<AAType, Base, StateType>, StateType> {
  AAComposeTwoGenericDeduction(const IRPosition &IRP)
      : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus ChangedF =
        F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
    ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
    return ChangedF | ChangedG;
  }
};

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename Base::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and we
  // want to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base,
          typename StateType = typename Base::StateType>
struct AACallSiteReturnedFromReturned : public Base {
  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const StateType &>(AA.getState()));
  }
};

/// Helper class for generic deduction using must-be-executed-context.
/// The Base class is required to have a `followUse` method:
///
///   bool followUse(Attributor &A, const Use *U, const Instruction *I)
///
/// U - The underlying use.
/// I - The user of \p U.
/// `followUse` returns true if the value should be tracked transitively.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}

  void initialize(Attributor &A) override {
    Base::initialize(A);
    const IRPosition &IRP = this->getIRPosition();
    Instruction *CtxI = IRP.getCtxI();

    if (!CtxI)
      return;

    for (const Use &U : IRP.getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto BeforeState = this->getState();
    auto &S = this->getState();
    Instruction *CtxI = this->getIRPosition().getCtxI();
    if (!CtxI)
      return ChangeStatus::UNCHANGED;

    MustBeExecutedContextExplorer &Explorer =
        A.getInfoCache().getMustBeExecutedContextExplorer();

    auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
    for (unsigned u = 0; u < Uses.size(); ++u) {
      const Use *U = Uses[u];
      if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
        bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
        if (Found && Base::followUse(A, U, UserI))
          for (const Use &Us : UserI->uses())
            Uses.insert(&Us);
      }
    }

    return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

private:
  /// Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
};

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AAArgumentFromCallSiteArguments>;

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AACallSiteReturnedFromReturned>;

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
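///
/// For illustration: in `define i32 @f(i32 %x) { ret i32 %x }` the unique
/// returned value is the argument %x, so manifest would mark %x "returned".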
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return values");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.getNumUses() == 0 || CB.isMustTailCall())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    // TODO: This should be handled differently!
    this->AnchorVal = UniqueRVArg;
    this->KindOrArgNo = UniqueRVArg->getArgNo();
    Changed = IRAttribute::manifest(A);
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred) const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

1350   // Once returned values "directly" present in the code are handled we try to
1351   // resolve returned calls.
1352   decltype(ReturnedValues) NewRVsMap;
1353   for (auto &It : ReturnedValues) {
1354     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1355                       << " by #" << It.second.size() << " RIs\n");
1356     CallBase *CB = dyn_cast<CallBase>(It.first);
1357     if (!CB || UnresolvedCalls.count(CB))
1358       continue;
1359 
1360     if (!CB->getCalledFunction()) {
1361       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1362                         << "\n");
1363       UnresolvedCalls.insert(CB);
1364       continue;
1365     }
1366 
1367     // TODO: use the function scope once we have call site AAReturnedValues.
1368     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1369         *this, IRPosition::function(*CB->getCalledFunction()));
1370     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1371                       << RetValAA << "\n");
1372 
    // Skip dead ends: if we do not know anything about the returned call, we
    // mark it as unresolved and it will stay that way.
1375     if (!RetValAA.getState().isValidState()) {
1376       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1377                         << "\n");
1378       UnresolvedCalls.insert(CB);
1379       continue;
1380     }
1381 
1382     // Do not try to learn partial information. If the callee has unresolved
1383     // return values we will treat the call as unresolved/opaque.
1384     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1385     if (!RetValAAUnresolvedCalls.empty()) {
1386       UnresolvedCalls.insert(CB);
1387       continue;
1388     }
1389 
    // Now check if we can track transitively returned values. If possible,
    // that is, if all returned values can be represented in the current scope,
    // do so.
1392     bool Unresolved = false;
1393     for (auto &RetValAAIt : RetValAA.returned_values()) {
1394       Value *RetVal = RetValAAIt.first;
1395       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1396           isa<Constant>(RetVal))
1397         continue;
1398       // Anything that did not fit in the above categories cannot be resolved,
1399       // mark the call as unresolved.
1400       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1401                            "cannot be translated: "
1402                         << *RetVal << "\n");
1403       UnresolvedCalls.insert(CB);
1404       Unresolved = true;
1405       break;
1406     }
1407 
1408     if (Unresolved)
1409       continue;
1410 
1411     // Now track transitively returned values.
1412     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1413     if (NumRetAA == RetValAA.getNumReturnValues()) {
1414       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1415                            "changed since it was seen last\n");
1416       continue;
1417     }
1418     NumRetAA = RetValAA.getNumReturnValues();
1419 
1420     for (auto &RetValAAIt : RetValAA.returned_values()) {
1421       Value *RetVal = RetValAAIt.first;
1422       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1423         // Arguments are mapped to call site operands and we begin the traversal
1424         // again.
1425         bool Unused = false;
1426         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1427         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1428         continue;
1429       } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time; there is
        // nothing for us to do here.
1432         continue;
1433       } else if (isa<Constant>(RetVal)) {
1434         // Constants are valid everywhere, we can simply take them.
1435         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1436         continue;
1437       }
1438     }
1439   }
1440 
  // To avoid modifying the ReturnedValues map while we iterate over it, we
  // recorded potential new entries in a copy map, NewRVsMap.
1443   for (auto &It : NewRVsMap) {
1444     assert(!It.second.empty() && "Entry does not add anything.");
1445     auto &ReturnInsts = ReturnedValues[It.first];
1446     for (ReturnInst *RI : It.second)
1447       if (ReturnInsts.insert(RI)) {
1448         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1449                           << *It.first << " => " << *RI << "\n");
1450         Changed = true;
1451       }
1452   }
1453 
1454   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1455   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1456 }
1457 
1458 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1459   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1460 
1461   /// See AbstractAttribute::trackStatistics()
1462   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1463 };
1464 
/// Returned values information for a call site.
1466 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1467   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1468 
1469   /// See AbstractAttribute::initialize(...).
1470   void initialize(Attributor &A) override {
1471     // TODO: Once we have call site specific value information we can provide
1472     //       call site specific liveness information and then it makes
1473     //       sense to specialize attributes for call sites instead of
1474     //       redirecting requests to the callee.
1475     llvm_unreachable("Abstract attributes for returned values are not "
1476                      "supported for call sites yet!");
1477   }
1478 
1479   /// See AbstractAttribute::updateImpl(...).
1480   ChangeStatus updateImpl(Attributor &A) override {
1481     return indicatePessimisticFixpoint();
1482   }
1483 
1484   /// See AbstractAttribute::trackStatistics()
1485   void trackStatistics() const override {}
1486 };
1487 
1488 /// ------------------------ NoSync Function Attribute -------------------------
1489 
1490 struct AANoSyncImpl : AANoSync {
1491   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1492 
1493   const std::string getAsStr() const override {
1494     return getAssumed() ? "nosync" : "may-sync";
1495   }
1496 
1497   /// See AbstractAttribute::updateImpl(...).
1498   ChangeStatus updateImpl(Attributor &A) override;
1499 
  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
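  /// For example (illustrative IR):
  ///   %a = load atomic i32, i32* %p monotonic, align 4 ; relaxed
  ///   %b = load atomic i32, i32* %p acquire, align 4   ; non-relaxed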
1503   static bool isNonRelaxedAtomic(Instruction *I);
1504 
1505   /// Helper function used to determine whether an instruction is volatile.
1506   static bool isVolatile(Instruction *I);
1507 
  /// Helper function used to determine whether an intrinsic is nosync
  /// (currently only the memcpy, memmove, and memset family is handled).
1510   static bool isNoSyncIntrinsic(Instruction *I);
1511 };
1512 
1513 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1514   if (!I->isAtomic())
1515     return false;
1516 
1517   AtomicOrdering Ordering;
1518   switch (I->getOpcode()) {
1519   case Instruction::AtomicRMW:
1520     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1521     break;
1522   case Instruction::Store:
1523     Ordering = cast<StoreInst>(I)->getOrdering();
1524     break;
1525   case Instruction::Load:
1526     Ordering = cast<LoadInst>(I)->getOrdering();
1527     break;
1528   case Instruction::Fence: {
1529     auto *FI = cast<FenceInst>(I);
1530     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1531       return false;
1532     Ordering = FI->getOrdering();
1533     break;
1534   }
1535   case Instruction::AtomicCmpXchg: {
1536     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1537     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed. Otherwise it is non-relaxed.
1540     if (Success != AtomicOrdering::Unordered &&
1541         Success != AtomicOrdering::Monotonic)
1542       return true;
1543     if (Failure != AtomicOrdering::Unordered &&
1544         Failure != AtomicOrdering::Monotonic)
1545       return true;
1546     return false;
1547   }
1548   default:
1549     llvm_unreachable(
1550         "New atomic operations need to be known in the attributor.");
1551   }
1552 
1553   // Relaxed.
1554   if (Ordering == AtomicOrdering::Unordered ||
1555       Ordering == AtomicOrdering::Monotonic)
1556     return false;
1557   return true;
1558 }
1559 
/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
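/// For example (illustrative), a call to `llvm.memcpy.p0i8.p0i8.i64` with the
/// `isvolatile` argument set to `false` is nosync, while the same call with
/// `isvolatile` set to `true` is not.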
1562 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1563   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1564     switch (II->getIntrinsicID()) {
    // Element-wise atomic memory intrinsics can only be unordered, therefore
    // nosync.
1567     case Intrinsic::memset_element_unordered_atomic:
1568     case Intrinsic::memmove_element_unordered_atomic:
1569     case Intrinsic::memcpy_element_unordered_atomic:
1570       return true;
1571     case Intrinsic::memset:
1572     case Intrinsic::memmove:
1573     case Intrinsic::memcpy:
1574       if (!cast<MemIntrinsic>(II)->isVolatile())
1575         return true;
1576       return false;
1577     default:
1578       return false;
1579     }
1580   }
1581   return false;
1582 }
1583 
1584 bool AANoSyncImpl::isVolatile(Instruction *I) {
1585   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1586          "Calls should not be checked here");
1587 
1588   switch (I->getOpcode()) {
1589   case Instruction::AtomicRMW:
1590     return cast<AtomicRMWInst>(I)->isVolatile();
1591   case Instruction::Store:
1592     return cast<StoreInst>(I)->isVolatile();
1593   case Instruction::Load:
1594     return cast<LoadInst>(I)->isVolatile();
1595   case Instruction::AtomicCmpXchg:
1596     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1597   default:
1598     return false;
1599   }
1600 }
1601 
1602 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1603 
1604   auto CheckRWInstForNoSync = [&](Instruction &I) {
    // We are looking for volatile instructions or non-relaxed atomics.
    // FIXME: We should improve the handling of intrinsics.
1607 
1608     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1609       return true;
1610 
1611     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1612       if (ICS.hasFnAttr(Attribute::NoSync))
1613         return true;
1614 
1615       const auto &NoSyncAA =
1616           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1617       if (NoSyncAA.isAssumedNoSync())
1618         return true;
1619       return false;
1620     }
1621 
1622     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1623       return true;
1624 
1625     return false;
1626   };
1627 
1628   auto CheckForNoSync = [&](Instruction &I) {
1629     // At this point we handled all read/write effects and they are all
1630     // nosync, so they can be skipped.
1631     if (I.mayReadOrWriteMemory())
1632       return true;
1633 
1634     // non-convergent and readnone imply nosync.
1635     return !ImmutableCallSite(&I).isConvergent();
1636   };
1637 
1638   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1639       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1640     return indicatePessimisticFixpoint();
1641 
1642   return ChangeStatus::UNCHANGED;
1643 }
1644 
1645 struct AANoSyncFunction final : public AANoSyncImpl {
1646   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1647 
1648   /// See AbstractAttribute::trackStatistics()
1649   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1650 };
1651 
/// NoSync attribute deduction for a call site.
1653 struct AANoSyncCallSite final : AANoSyncImpl {
1654   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1655 
1656   /// See AbstractAttribute::initialize(...).
1657   void initialize(Attributor &A) override {
1658     AANoSyncImpl::initialize(A);
1659     Function *F = getAssociatedFunction();
1660     if (!F)
1661       indicatePessimisticFixpoint();
1662   }
1663 
1664   /// See AbstractAttribute::updateImpl(...).
1665   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1670     Function *F = getAssociatedFunction();
1671     const IRPosition &FnPos = IRPosition::function(*F);
1672     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1673     return clampStateAndIndicateChange(
1674         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1675   }
1676 
1677   /// See AbstractAttribute::trackStatistics()
1678   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1679 };
1680 
1681 /// ------------------------ No-Free Attributes ----------------------------
1682 
1683 struct AANoFreeImpl : public AANoFree {
1684   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1685 
1686   /// See AbstractAttribute::updateImpl(...).
1687   ChangeStatus updateImpl(Attributor &A) override {
1688     auto CheckForNoFree = [&](Instruction &I) {
1689       ImmutableCallSite ICS(&I);
1690       if (ICS.hasFnAttr(Attribute::NoFree))
1691         return true;
1692 
1693       const auto &NoFreeAA =
1694           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1695       return NoFreeAA.isAssumedNoFree();
1696     };
1697 
1698     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1699       return indicatePessimisticFixpoint();
1700     return ChangeStatus::UNCHANGED;
1701   }
1702 
1703   /// See AbstractAttribute::getAsStr().
1704   const std::string getAsStr() const override {
1705     return getAssumed() ? "nofree" : "may-free";
1706   }
1707 };
1708 
1709 struct AANoFreeFunction final : public AANoFreeImpl {
1710   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1711 
1712   /// See AbstractAttribute::trackStatistics()
1713   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1714 };
1715 
/// NoFree attribute deduction for a call site.
1717 struct AANoFreeCallSite final : AANoFreeImpl {
1718   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1719 
1720   /// See AbstractAttribute::initialize(...).
1721   void initialize(Attributor &A) override {
1722     AANoFreeImpl::initialize(A);
1723     Function *F = getAssociatedFunction();
1724     if (!F)
1725       indicatePessimisticFixpoint();
1726   }
1727 
1728   /// See AbstractAttribute::updateImpl(...).
1729   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1734     Function *F = getAssociatedFunction();
1735     const IRPosition &FnPos = IRPosition::function(*F);
1736     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1737     return clampStateAndIndicateChange(
1738         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1739   }
1740 
1741   /// See AbstractAttribute::trackStatistics()
1742   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1743 };
1744 
1745 /// NoFree attribute for floating values.
1746 struct AANoFreeFloating : AANoFreeImpl {
1747   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1748 
1749   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
1751 
  /// See AbstractAttribute::updateImpl(...).
1753   ChangeStatus updateImpl(Attributor &A) override {
1754     const IRPosition &IRP = getIRPosition();
1755 
1756     const auto &NoFreeAA =
1757         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1758     if (NoFreeAA.isAssumedNoFree())
1759       return ChangeStatus::UNCHANGED;
1760 
1761     Value &AssociatedValue = getIRPosition().getAssociatedValue();
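    // Walk the (transitive) uses of the associated value. For example
    // (illustrative IR), given
    //   %q = getelementptr inbounds i8, i8* %p, i64 4
    //   call void @g(i8* %q)
    // we follow %p to %q and then consult the AANoFree of @g's call site
    // argument to decide whether the callee may free the underlying object.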
1762     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1763       Instruction *UserI = cast<Instruction>(U.getUser());
1764       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1765         if (CB->isBundleOperand(&U))
1766           return false;
1767         if (!CB->isArgOperand(&U))
1768           return true;
1769         unsigned ArgNo = CB->getArgOperandNo(&U);
1770 
1771         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1772             *this, IRPosition::callsite_argument(*CB, ArgNo));
1773         return NoFreeArg.isAssumedNoFree();
1774       }
1775 
1776       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1777           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1778         Follow = true;
1779         return true;
1780       }
1781       if (isa<ReturnInst>(UserI))
1782         return true;
1783 
1784       // Unknown user.
1785       return false;
1786     };
1787     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1788       return indicatePessimisticFixpoint();
1789 
1790     return ChangeStatus::UNCHANGED;
1791   }
1792 };
1793 
/// NoFree attribute for a function argument.
1795 struct AANoFreeArgument final : AANoFreeFloating {
1796   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1797 
1798   /// See AbstractAttribute::trackStatistics()
1799   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1800 };
1801 
1802 /// NoFree attribute for call site arguments.
1803 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1804   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1805 
1806   /// See AbstractAttribute::updateImpl(...).
1807   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
1812     Argument *Arg = getAssociatedArgument();
1813     if (!Arg)
1814       return indicatePessimisticFixpoint();
1815     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1816     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1817     return clampStateAndIndicateChange(
1818         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1819   }
1820 
1821   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
1823 };
1824 
1825 /// NoFree attribute for function return value.
1826 struct AANoFreeReturned final : AANoFreeFloating {
1827   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1828     llvm_unreachable("NoFree is not applicable to function returns!");
1829   }
1830 
1831   /// See AbstractAttribute::initialize(...).
1832   void initialize(Attributor &A) override {
1833     llvm_unreachable("NoFree is not applicable to function returns!");
1834   }
1835 
1836   /// See AbstractAttribute::updateImpl(...).
1837   ChangeStatus updateImpl(Attributor &A) override {
1838     llvm_unreachable("NoFree is not applicable to function returns!");
1839   }
1840 
1841   /// See AbstractAttribute::trackStatistics()
1842   void trackStatistics() const override {}
1843 };
1844 
1845 /// NoFree attribute deduction for a call site return value.
1846 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1847   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1848 
1849   ChangeStatus manifest(Attributor &A) override {
1850     return ChangeStatus::UNCHANGED;
1851   }
1852   /// See AbstractAttribute::trackStatistics()
1853   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1854 };
1855 
1856 /// ------------------------ NonNull Argument Attribute ------------------------
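/// Determine what is known about non-nullness and dereferenceability of
/// \p AssociatedValue based on its use \p U in instruction \p I. For example
/// (illustrative IR): a use of %p in `%v = load i32, i32* %p` implies 4
/// dereferenceable bytes for %p and, if null is not a defined pointer in the
/// function's address space, non-nullness as well.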
1857 static int64_t getKnownNonNullAndDerefBytesForUse(
1858     Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
1859     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1860   TrackUse = false;
1861 
1862   const Value *UseV = U->get();
1863   if (!UseV->getType()->isPointerTy())
1864     return 0;
1865 
1866   Type *PtrTy = UseV->getType();
1867   const Function *F = I->getFunction();
1868   bool NullPointerIsDefined =
1869       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1870   const DataLayout &DL = A.getInfoCache().getDL();
1871   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1872     if (ICS.isBundleOperand(U))
1873       return 0;
1874 
1875     if (ICS.isCallee(U)) {
1876       IsNonNull |= !NullPointerIsDefined;
1877       return 0;
1878     }
1879 
1880     unsigned ArgNo = ICS.getArgumentNo(U);
1881     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1882     // As long as we only use known information there is no need to track
1883     // dependences here.
1884     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1885                                                   /* TrackDependence */ false);
1886     IsNonNull |= DerefAA.isKnownNonNull();
1887     return DerefAA.getKnownDereferenceableBytes();
1888   }
1889 
1890   // We need to follow common pointer manipulation uses to the accesses they
1891   // feed into. We can try to be smart to avoid looking through things we do not
1892   // like for now, e.g., non-inbounds GEPs.
1893   if (isa<CastInst>(I)) {
1894     TrackUse = true;
1895     return 0;
1896   }
1897   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1898     if (GEP->hasAllConstantIndices()) {
1899       TrackUse = true;
1900       return 0;
1901     }
1902 
1903   int64_t Offset;
1904   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1905     if (Base == &AssociatedValue &&
1906         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1907       int64_t DerefBytes =
1908           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1909 
1910       IsNonNull |= !NullPointerIsDefined;
1911       return std::max(int64_t(0), DerefBytes);
1912     }
1913   }
1914 
  // Corner case when the offset is 0.
1916   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1917           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1918     if (Offset == 0 && Base == &AssociatedValue &&
1919         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1920       int64_t DerefBytes =
1921           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1922       IsNonNull |= !NullPointerIsDefined;
1923       return std::max(int64_t(0), DerefBytes);
1924     }
1925   }
1926 
1927   return 0;
1928 }
1929 
1930 struct AANonNullImpl : AANonNull {
1931   AANonNullImpl(const IRPosition &IRP)
1932       : AANonNull(IRP),
1933         NullIsDefined(NullPointerIsDefined(
1934             getAnchorScope(),
1935             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1936 
1937   /// See AbstractAttribute::initialize(...).
1938   void initialize(Attributor &A) override {
1939     if (!NullIsDefined &&
1940         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1941       indicateOptimisticFixpoint();
1942     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1943       indicatePessimisticFixpoint();
1944     else
1945       AANonNull::initialize(A);
1946   }
1947 
1948   /// See AAFromMustBeExecutedContext
1949   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1950     bool IsNonNull = false;
1951     bool TrackUse = false;
1952     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1953                                        IsNonNull, TrackUse);
1954     setKnown(IsNonNull);
1955     return TrackUse;
1956   }
1957 
1958   /// See AbstractAttribute::getAsStr().
1959   const std::string getAsStr() const override {
1960     return getAssumed() ? "nonnull" : "may-null";
1961   }
1962 
1963   /// Flag to determine if the underlying value can be null and still allow
1964   /// valid accesses.
1965   const bool NullIsDefined;
1966 };
1967 
1968 /// NonNull attribute for a floating value.
1969 struct AANonNullFloating
1970     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1971   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1972   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1973 
1974   /// See AbstractAttribute::updateImpl(...).
1975   ChangeStatus updateImpl(Attributor &A) override {
1976     ChangeStatus Change = Base::updateImpl(A);
1977     if (isKnownNonNull())
1978       return Change;
1979 
1980     if (!NullIsDefined) {
1981       const auto &DerefAA =
1982           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1983       if (DerefAA.getAssumedDereferenceableBytes())
1984         return Change;
1985     }
1986 
1987     const DataLayout &DL = A.getDataLayout();
1988 
1989     DominatorTree *DT = nullptr;
1990     InformationCache &InfoCache = A.getInfoCache();
1991     if (const Function *Fn = getAnchorScope())
1992       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1993 
1994     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1995                             bool Stripped) -> bool {
1996       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1997       if (!Stripped && this == &AA) {
1998         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
1999           T.indicatePessimisticFixpoint();
2000       } else {
2001         // Use abstract attribute information.
2002         const AANonNull::StateType &NS =
2003             static_cast<const AANonNull::StateType &>(AA.getState());
2004         T ^= NS;
2005       }
2006       return T.isValidState();
2007     };
2008 
2009     StateType T;
2010     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
2011                                                      T, VisitValueCB))
2012       return indicatePessimisticFixpoint();
2013 
2014     return clampStateAndIndicateChange(getState(), T);
2015   }
2016 
2017   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
2019 };
2020 
2021 /// NonNull attribute for function return value.
2022 struct AANonNullReturned final
2023     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
2024   AANonNullReturned(const IRPosition &IRP)
2025       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
2026 
2027   /// See AbstractAttribute::trackStatistics()
2028   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2029 };
2030 
2031 /// NonNull attribute for function argument.
2032 struct AANonNullArgument final
2033     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
2034                                                               AANonNullImpl> {
2035   AANonNullArgument(const IRPosition &IRP)
2036       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
2037                                                                 AANonNullImpl>(
2038             IRP) {}
2039 
2040   /// See AbstractAttribute::trackStatistics()
2041   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2042 };
2043 
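/// NonNull attribute for a call site argument.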
2044 struct AANonNullCallSiteArgument final : AANonNullFloating {
2045   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
2046 
2047   /// See AbstractAttribute::trackStatistics()
2048   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2049 };
2050 
2051 /// NonNull attribute for a call site return position.
2052 struct AANonNullCallSiteReturned final
2053     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
2054                                                              AANonNullImpl> {
2055   AANonNullCallSiteReturned(const IRPosition &IRP)
2056       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
2057                                                                AANonNullImpl>(
2058             IRP) {}
2059 
2060   /// See AbstractAttribute::trackStatistics()
2061   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2062 };
2063 
2064 /// ------------------------ No-Recurse Attributes ----------------------------
2065 
2066 struct AANoRecurseImpl : public AANoRecurse {
2067   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
2068 
2069   /// See AbstractAttribute::getAsStr()
2070   const std::string getAsStr() const override {
2071     return getAssumed() ? "norecurse" : "may-recurse";
2072   }
2073 };
2074 
2075 struct AANoRecurseFunction final : AANoRecurseImpl {
2076   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
2077 
2078   /// See AbstractAttribute::initialize(...).
2079   void initialize(Attributor &A) override {
2080     AANoRecurseImpl::initialize(A);
2081     if (const Function *F = getAnchorScope())
2082       if (A.getInfoCache().getSccSize(*F) != 1)
2083         indicatePessimisticFixpoint();
2084   }
2085 
2086   /// See AbstractAttribute::updateImpl(...).
2087   ChangeStatus updateImpl(Attributor &A) override {
2088 
2089     // If all live call sites are known to be no-recurse, we are as well.
2090     auto CallSitePred = [&](AbstractCallSite ACS) {
2091       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2092           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2093           /* TrackDependence */ false, DepClassTy::OPTIONAL);
2094       return NoRecurseAA.isKnownNoRecurse();
2095     };
2096     bool AllCallSitesKnown;
2097     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2098       // If we know all call sites and all are known no-recurse, we are done.
2099       // If all known call sites, which might not be all that exist, are known
2100       // to be no-recurse, we are not done but we can continue to assume
2101       // no-recurse. If one of the call sites we have not visited will become
2102       // live, another update is triggered.
2103       if (AllCallSitesKnown)
2104         indicateOptimisticFixpoint();
2105       return ChangeStatus::UNCHANGED;
2106     }
2107 
2108     // If the above check does not hold anymore we look at the calls.
2109     auto CheckForNoRecurse = [&](Instruction &I) {
2110       ImmutableCallSite ICS(&I);
2111       if (ICS.hasFnAttr(Attribute::NoRecurse))
2112         return true;
2113 
2114       const auto &NoRecurseAA =
2115           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
2116       if (!NoRecurseAA.isAssumedNoRecurse())
2117         return false;
2118 
2119       // Recursion to the same function
2120       if (ICS.getCalledFunction() == getAnchorScope())
2121         return false;
2122 
2123       return true;
2124     };
2125 
2126     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
2127       return indicatePessimisticFixpoint();
2128     return ChangeStatus::UNCHANGED;
2129   }
2130 
2131   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2132 };
2133 
/// NoRecurse attribute deduction for a call site.
2135 struct AANoRecurseCallSite final : AANoRecurseImpl {
2136   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
2137 
2138   /// See AbstractAttribute::initialize(...).
2139   void initialize(Attributor &A) override {
2140     AANoRecurseImpl::initialize(A);
2141     Function *F = getAssociatedFunction();
2142     if (!F)
2143       indicatePessimisticFixpoint();
2144   }
2145 
2146   /// See AbstractAttribute::updateImpl(...).
2147   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2152     Function *F = getAssociatedFunction();
2153     const IRPosition &FnPos = IRPosition::function(*F);
2154     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
2155     return clampStateAndIndicateChange(
2156         getState(),
2157         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
2158   }
2159 
2160   /// See AbstractAttribute::trackStatistics()
2161   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2162 };
2163 
2164 /// -------------------- Undefined-Behavior Attributes ------------------------
2165 
2166 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2167   AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
2168 
2169   /// See AbstractAttribute::updateImpl(...).
2171   ChangeStatus updateImpl(Attributor &A) override {
2172     const size_t UBPrevSize = KnownUBInsts.size();
2173     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2174 
2175     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2176       // Skip instructions that are already saved.
2177       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2178         return true;
2179 
      // If we reach here, we know we have an instruction that accesses memory
      // through a pointer operand, which getPointerOperand() should return.
2183       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
2184       assert(PtrOp &&
2185              "Expected pointer operand of memory accessing instruction");
2186 
      // A memory access through a pointer is considered UB
      // only if the pointer is the constant null pointer.
2189       // TODO: Expand it to not only check constant values.
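      // E.g., `store i32 0, i32* null` is UB in an address space in which null
      // is not a valid (defined) pointer.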
2190       if (!isa<ConstantPointerNull>(PtrOp)) {
2191         AssumedNoUBInsts.insert(&I);
2192         return true;
2193       }
2194       const Type *PtrTy = PtrOp->getType();
2195 
2196       // Because we only consider instructions inside functions,
2197       // assume that a parent function exists.
2198       const Function *F = I.getFunction();
2199 
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
2202       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2203         AssumedNoUBInsts.insert(&I);
2204       else
2205         KnownUBInsts.insert(&I);
2206       return true;
2207     };
2208 
2209     auto InspectBrInstForUB = [&](Instruction &I) {
2210       // A conditional branch instruction is considered UB if it has `undef`
2211       // condition.
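      // E.g., `br i1 undef, label %t, label %f` has undefined behavior.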
2212 
2213       // Skip instructions that are already saved.
2214       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2215         return true;
2216 
2217       // We know we have a branch instruction.
2218       auto BrInst = cast<BranchInst>(&I);
2219 
2220       // Unconditional branches are never considered UB.
2221       if (BrInst->isUnconditional())
2222         return true;
2223 
2224       // Either we stopped and the appropriate action was taken,
2225       // or we got back a simplified value to continue.
2226       Optional<Value *> SimplifiedCond =
2227           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2228       if (!SimplifiedCond.hasValue())
2229         return true;
2230       AssumedNoUBInsts.insert(&I);
2231       return true;
2232     };
2233 
2234     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2235                               {Instruction::Load, Instruction::Store,
2236                                Instruction::AtomicCmpXchg,
2237                                Instruction::AtomicRMW},
2238                               /* CheckBBLivenessOnly */ true);
2239     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2240                               /* CheckBBLivenessOnly */ true);
2241     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2242         UBPrevSize != KnownUBInsts.size())
2243       return ChangeStatus::CHANGED;
2244     return ChangeStatus::UNCHANGED;
2245   }
2246 
2247   bool isKnownToCauseUB(Instruction *I) const override {
2248     return KnownUBInsts.count(I);
2249   }
2250 
2251   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that includes
    // those in the KnownUBInsts set). The rest of the boilerplate is to ensure
    // that it is one of the instructions we test for UB.
2257 
2258     switch (I->getOpcode()) {
2259     case Instruction::Load:
2260     case Instruction::Store:
2261     case Instruction::AtomicCmpXchg:
2262     case Instruction::AtomicRMW:
2263       return !AssumedNoUBInsts.count(I);
2264     case Instruction::Br: {
2265       auto BrInst = cast<BranchInst>(I);
2266       if (BrInst->isUnconditional())
2267         return false;
2268       return !AssumedNoUBInsts.count(I);
    }
2270     default:
2271       return false;
2272     }
2273     return false;
2274   }
2275 
2276   ChangeStatus manifest(Attributor &A) override {
2277     if (KnownUBInsts.empty())
2278       return ChangeStatus::UNCHANGED;
2279     for (Instruction *I : KnownUBInsts)
2280       A.changeToUnreachableAfterManifest(I);
2281     return ChangeStatus::CHANGED;
2282   }
2283 
2284   /// See AbstractAttribute::getAsStr()
2285   const std::string getAsStr() const override {
2286     return getAssumed() ? "undefined-behavior" : "no-ub";
2287   }
2288 
2289   /// Note: The correctness of this analysis depends on the fact that the
2290   /// following 2 sets will stop changing after some point.
2291   /// "Change" here means that their size changes.
2292   /// The size of each set is monotonically increasing
2293   /// (we only add items to them) and it is upper bounded by the number of
2294   /// instructions in the processed function (we can never save more
2295   /// elements in either set than this number). Hence, at some point,
2296   /// they will stop increasing.
2297   /// Consequently, at some point, both sets will have stopped
2298   /// changing, effectively making the analysis reach a fixpoint.
2299 
2300   /// Note: These 2 sets are disjoint and an instruction can be considered
2301   /// one of 3 things:
2302   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2303   ///    the KnownUBInsts set.
2304   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2305   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2307   ///    could not find a reason to assume or prove that it can cause UB,
2308   ///    hence it assumes it doesn't. We have a set for these instructions
2309   ///    so that we don't reprocess them in every update.
2310   ///    Note however that instructions in this set may cause UB.
2311 
2312 protected:
2313   /// A set of all live instructions _known_ to cause UB.
2314   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2315 
2316 private:
2317   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2318   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2319 
  // Should be called during updates in which, if we're processing an
  // instruction \p I that depends on a value \p V, one of the following has to
  // happen:
2322   // - If the value is assumed, then stop.
2323   // - If the value is known but undef, then consider it UB.
2324   // - Otherwise, do specific processing with the simplified value.
2325   // We return None in the first 2 cases to signify that an appropriate
2326   // action was taken and the caller should stop.
2327   // Otherwise, we return the simplified value that the caller should
2328   // use for specific processing.
2329   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2330                                          Instruction *I) {
2331     const auto &ValueSimplifyAA =
2332         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2333     Optional<Value *> SimplifiedV =
2334         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2335     if (!ValueSimplifyAA.isKnown()) {
2336       // Don't depend on assumed values.
2337       return llvm::None;
2338     }
2339     if (!SimplifiedV.hasValue()) {
2340       // If it is known (which we tested above) but it doesn't have a value,
2341       // then we can assume `undef` and hence the instruction is UB.
2342       KnownUBInsts.insert(I);
2343       return llvm::None;
2344     }
2345     Value *Val = SimplifiedV.getValue();
2346     if (isa<UndefValue>(Val)) {
2347       KnownUBInsts.insert(I);
2348       return llvm::None;
2349     }
2350     return Val;
2351   }
2352 };
2353 
2354 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2355   AAUndefinedBehaviorFunction(const IRPosition &IRP)
2356       : AAUndefinedBehaviorImpl(IRP) {}
2357 
2358   /// See AbstractAttribute::trackStatistics()
2359   void trackStatistics() const override {
2360     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2361                "Number of instructions known to have UB");
2362     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2363         KnownUBInsts.size();
2364   }
2365 };
2366 
2367 /// ------------------------ Will-Return Attributes ----------------------------
2368 
2369 // Helper function that checks whether a function has any cycle.
// TODO: Replace with more efficient code
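// Note that this check over-approximates: even in an acyclic CFG a block with
// two predecessors (e.g., the join block of an if-then-else diamond) has
// already been visited when its second predecessor is processed, so the
// function reports a cycle.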
2371 static bool containsCycle(Function &F) {
2372   SmallPtrSet<BasicBlock *, 32> Visited;
2373 
  // Traverse the basic blocks with a DFS and check whether a successor has
  // already been visited.
2375   for (BasicBlock *BB : depth_first(&F)) {
2376     Visited.insert(BB);
2377     for (auto *SuccBB : successors(BB)) {
2378       if (Visited.count(SuccBB))
2379         return true;
2380     }
2381   }
2382   return false;
2383 }
2384 
// Helper function that checks whether the function has a loop which might
// become an endless loop.
2387 // FIXME: Any cycle is regarded as endless loop for now.
2388 //        We have to allow some patterns.
2389 static bool containsPossiblyEndlessLoop(Function *F) {
2390   return !F || !F->hasExactDefinition() || containsCycle(*F);
2391 }
2392 
2393 struct AAWillReturnImpl : public AAWillReturn {
2394   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2395 
2396   /// See AbstractAttribute::initialize(...).
2397   void initialize(Attributor &A) override {
2398     AAWillReturn::initialize(A);
2399 
2400     Function *F = getAssociatedFunction();
2401     if (containsPossiblyEndlessLoop(F))
2402       indicatePessimisticFixpoint();
2403   }
2404 
2405   /// See AbstractAttribute::updateImpl(...).
2406   ChangeStatus updateImpl(Attributor &A) override {
2407     auto CheckForWillReturn = [&](Instruction &I) {
2408       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2409       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2410       if (WillReturnAA.isKnownWillReturn())
2411         return true;
2412       if (!WillReturnAA.isAssumedWillReturn())
2413         return false;
2414       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2415       return NoRecurseAA.isAssumedNoRecurse();
2416     };
2417 
2418     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2419       return indicatePessimisticFixpoint();
2420 
2421     return ChangeStatus::UNCHANGED;
2422   }
2423 
2424   /// See AbstractAttribute::getAsStr()
2425   const std::string getAsStr() const override {
2426     return getAssumed() ? "willreturn" : "may-noreturn";
2427   }
2428 };
2429 
2430 struct AAWillReturnFunction final : AAWillReturnImpl {
2431   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2432 
2433   /// See AbstractAttribute::trackStatistics()
2434   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2435 };
2436 
/// WillReturn attribute deduction for a call site.
2438 struct AAWillReturnCallSite final : AAWillReturnImpl {
2439   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2440 
2441   /// See AbstractAttribute::initialize(...).
2442   void initialize(Attributor &A) override {
2443     AAWillReturnImpl::initialize(A);
2444     Function *F = getAssociatedFunction();
2445     if (!F)
2446       indicatePessimisticFixpoint();
2447   }
2448 
2449   /// See AbstractAttribute::updateImpl(...).
2450   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2455     Function *F = getAssociatedFunction();
2456     const IRPosition &FnPos = IRPosition::function(*F);
2457     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2458     return clampStateAndIndicateChange(
2459         getState(),
2460         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2461   }
2462 
2463   /// See AbstractAttribute::trackStatistics()
2464   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2465 };
2466 
2467 /// -------------------AAReachability Attribute--------------------------
2468 
2469 struct AAReachabilityImpl : AAReachability {
2470   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2471 
2472   const std::string getAsStr() const override {
2473     // TODO: Return the number of reachable queries.
2474     return "reachable";
2475   }
2476 
2477   /// See AbstractAttribute::initialize(...).
2478   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2479 
2480   /// See AbstractAttribute::updateImpl(...).
2481   ChangeStatus updateImpl(Attributor &A) override {
2482     return indicatePessimisticFixpoint();
2483   }
2484 };
2485 
2486 struct AAReachabilityFunction final : public AAReachabilityImpl {
2487   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2488 
2489   /// See AbstractAttribute::trackStatistics()
2490   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2491 };
2492 
2493 /// ------------------------ NoAlias Argument Attribute ------------------------
2494 
2495 struct AANoAliasImpl : AANoAlias {
2496   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {
2497     assert(getAssociatedType()->isPointerTy() &&
2498            "Noalias is a pointer attribute");
2499   }
2500 
2501   const std::string getAsStr() const override {
2502     return getAssumed() ? "noalias" : "may-alias";
2503   }
2504 };
2505 
2506 /// NoAlias attribute for a floating value.
2507 struct AANoAliasFloating final : AANoAliasImpl {
2508   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2509 
2510   /// See AbstractAttribute::initialize(...).
2511   void initialize(Attributor &A) override {
2512     AANoAliasImpl::initialize(A);
2513     Value *Val = &getAssociatedValue();
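    // Look through chains of casts, but only while each cast operand is used
    // solely by that cast; the noalias question for the cast result is then
    // the same as for the underlying operand.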
2514     do {
2515       CastInst *CI = dyn_cast<CastInst>(Val);
2516       if (!CI)
2517         break;
2518       Value *Base = CI->getOperand(0);
2519       if (Base->getNumUses() != 1)
2520         break;
2521       Val = Base;
2522     } while (true);
2523 
2524     if (!Val->getType()->isPointerTy()) {
2525       indicatePessimisticFixpoint();
2526       return;
2527     }
2528 
2529     if (isa<AllocaInst>(Val))
2530       indicateOptimisticFixpoint();
2531     else if (isa<ConstantPointerNull>(Val) &&
2532              !NullPointerIsDefined(getAnchorScope(),
2533                                    Val->getType()->getPointerAddressSpace()))
2534       indicateOptimisticFixpoint();
2535     else if (Val != &getAssociatedValue()) {
2536       const auto &ValNoAliasAA =
2537           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2538       if (ValNoAliasAA.isKnownNoAlias())
2539         indicateOptimisticFixpoint();
2540     }
2541   }
2542 
2543   /// See AbstractAttribute::updateImpl(...).
2544   ChangeStatus updateImpl(Attributor &A) override {
2545     // TODO: Implement this.
2546     return indicatePessimisticFixpoint();
2547   }
2548 
2549   /// See AbstractAttribute::trackStatistics()
2550   void trackStatistics() const override {
2551     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2552   }
2553 };
2554 
2555 /// NoAlias attribute for an argument.
2556 struct AANoAliasArgument final
2557     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2558   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2559   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2560 
2561   /// See AbstractAttribute::initialize(...).
2562   void initialize(Attributor &A) override {
2563     Base::initialize(A);
2564     // See callsite argument attribute and callee argument attribute.
2565     if (hasAttr({Attribute::ByVal}))
2566       indicateOptimisticFixpoint();
2567   }
2568 
2569   /// See AbstractAttribute::update(...).
2570   ChangeStatus updateImpl(Attributor &A) override {
2571     // We have to make sure no-alias on the argument does not break
2572     // synchronization when this is a callback argument, see also [1] below.
2573     // If synchronization cannot be affected, we delegate to the base updateImpl
2574     // function, otherwise we give up for now.
2575 
2576     // If the function is no-sync, no-alias cannot break synchronization.
2577     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2578         *this, IRPosition::function_scope(getIRPosition()));
2579     if (NoSyncAA.isAssumedNoSync())
2580       return Base::updateImpl(A);
2581 
2582     // If the argument is read-only, no-alias cannot break synchronization.
2583     const auto &MemBehaviorAA =
2584         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2585     if (MemBehaviorAA.isAssumedReadOnly())
2586       return Base::updateImpl(A);
2587 
2588     // If the argument is never passed through callbacks, no-alias cannot break
2589     // synchronization.
2590     bool AllCallSitesKnown;
2591     if (A.checkForAllCallSites(
2592             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2593             true, AllCallSitesKnown))
2594       return Base::updateImpl(A);
2595 
2596     // TODO: add no-alias but make sure it doesn't break synchronization by
2597     // introducing fake uses. See:
2598     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2599     //     International Workshop on OpenMP 2018,
2600     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2601 
2602     return indicatePessimisticFixpoint();
2603   }
2604 
2605   /// See AbstractAttribute::trackStatistics()
2606   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2607 };
2608 
2609 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2610   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2611 
2612   /// See AbstractAttribute::initialize(...).
2613   void initialize(Attributor &A) override {
2614     // See callsite argument attribute and callee argument attribute.
2615     ImmutableCallSite ICS(&getAnchorValue());
2616     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2617       indicateOptimisticFixpoint();
2618     Value &Val = getAssociatedValue();
2619     if (isa<ConstantPointerNull>(Val) &&
2620         !NullPointerIsDefined(getAnchorScope(),
2621                               Val.getType()->getPointerAddressSpace()))
2622       indicateOptimisticFixpoint();
2623   }
2624 
2625   /// Determine if the underlying value may alias with the call site argument
2626   /// \p OtherArgNo of \p ICS (= the underlying call site).
2627   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2628                             const AAMemoryBehavior &MemBehaviorAA,
2629                             ImmutableCallSite ICS, unsigned OtherArgNo) {
2630     // We do not need to worry about aliasing with the underlying IRP.
2631     if (this->getArgNo() == (int)OtherArgNo)
2632       return false;
2633 
2634     // If it is not a pointer or pointer vector we do not alias.
2635     const Value *ArgOp = ICS.getArgOperand(OtherArgNo);
2636     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2637       return false;
2638 
2639     auto &ICSArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2640         *this, IRPosition::callsite_argument(ICS, OtherArgNo),
2641         /* TrackDependence */ false);
2642 
2643     // If the argument is readnone, there is no read-write aliasing.
2644     if (ICSArgMemBehaviorAA.isAssumedReadNone()) {
2645       A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2646       return false;
2647     }
2648 
2649     // If the argument is readonly and the underlying value is readonly, there
2650     // is no read-write aliasing.
2651     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2652     if (ICSArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2653       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2654       A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2655       return false;
2656     }
2657 
2658     // We have to utilize actual alias analysis queries so we need the object.
2659     if (!AAR)
2660       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2661 
2662     // Try to rule it out at the call site.
2663     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2664     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2665                          "callsite arguments: "
2666                       << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias\n");
2668 
2669     return IsAliasing;
2670   }
2671 
2672   bool
2673   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2674                                          const AAMemoryBehavior &MemBehaviorAA,
2675                                          const AANoAlias &NoAliasAA) {
2676     // We can deduce "noalias" if the following conditions hold.
2677     // (i)   Associated value is assumed to be noalias in the definition.
2678     // (ii)  Associated value is assumed to be no-capture in all the uses
2679     //       possibly executed before this callsite.
2680     // (iii) There is no other pointer argument which could alias with the
2681     //       value.
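    //
    // E.g. (illustrative IR): `%p = call noalias i8* @malloc(i64 8)` passed as
    // the only pointer argument of a callee, without being captured before the
    // call, satisfies (i)-(iii) and the call site argument can be marked
    // noalias.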
2682 
2683     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2684     if (!AssociatedValueIsNoAliasAtDef) {
2685       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2686                         << " is not no-alias at the definition\n");
2687       return false;
2688     }
2689 
2690     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2691     auto &NoCaptureAA =
2692         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2693     // Check whether the value is captured in the scope using AANoCapture.
2694     // FIXME: This is conservative though, it is better to look at CFG and
2695     //        check only uses possibly executed before this callsite.
2696     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2697       LLVM_DEBUG(
2698           dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2699                  << " cannot be noalias as it is potentially captured\n");
2700       return false;
2701     }
2702     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2703 
2704     // Check there is no other pointer argument which could alias with the
2705     // value passed at this call site.
2706     // TODO: AbstractCallSite
2707     ImmutableCallSite ICS(&getAnchorValue());
2708     for (unsigned OtherArgNo = 0; OtherArgNo < ICS.getNumArgOperands();
2709          OtherArgNo++)
2710       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, ICS, OtherArgNo))
2711         return false;
2712 
2713     return true;
2714   }
2715 
2716   /// See AbstractAttribute::updateImpl(...).
2717   ChangeStatus updateImpl(Attributor &A) override {
2718     // If the argument is readnone we are done as there are no accesses via the
2719     // argument.
2720     auto &MemBehaviorAA =
2721         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2722                                      /* TrackDependence */ false);
2723     if (MemBehaviorAA.isAssumedReadNone()) {
2724       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2725       return ChangeStatus::UNCHANGED;
2726     }
2727 
2728     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2729     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2730                                                   /* TrackDependence */ false);
2731 
2732     AAResults *AAR = nullptr;
2733     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2734                                                NoAliasAA)) {
2735       LLVM_DEBUG(
2736           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2737       return ChangeStatus::UNCHANGED;
2738     }
2739 
2740     return indicatePessimisticFixpoint();
2741   }
2742 
2743   /// See AbstractAttribute::trackStatistics()
2744   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2745 };
2746 
2747 /// NoAlias attribute for function return value.
2748 struct AANoAliasReturned final : AANoAliasImpl {
2749   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2750 
2751   /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
2753 
2754     auto CheckReturnValue = [&](Value &RV) -> bool {
2755       if (Constant *C = dyn_cast<Constant>(&RV))
2756         if (C->isNullValue() || isa<UndefValue>(C))
2757           return true;
2758 
      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
2761       ImmutableCallSite ICS(&RV);
2762       if (!ICS)
2763         return false;
2764 
2765       const IRPosition &RVPos = IRPosition::value(RV);
2766       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2767       if (!NoAliasAA.isAssumedNoAlias())
2768         return false;
2769 
2770       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2771       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2772     };
2773 
2774     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2775       return indicatePessimisticFixpoint();
2776 
2777     return ChangeStatus::UNCHANGED;
2778   }
2779 
2780   /// See AbstractAttribute::trackStatistics()
2781   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2782 };
2783 
2784 /// NoAlias attribute deduction for a call site return value.
2785 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2786   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2787 
2788   /// See AbstractAttribute::initialize(...).
2789   void initialize(Attributor &A) override {
2790     AANoAliasImpl::initialize(A);
2791     Function *F = getAssociatedFunction();
2792     if (!F)
2793       indicatePessimisticFixpoint();
2794   }
2795 
2796   /// See AbstractAttribute::updateImpl(...).
2797   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site return values instead
    //       of redirecting requests to the callee.
2802     Function *F = getAssociatedFunction();
2803     const IRPosition &FnPos = IRPosition::returned(*F);
2804     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
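    // Clamp our state against the callee's return state; the helper reports
    // whether our state changed as a result.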
2805     return clampStateAndIndicateChange(
2806         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2807   }
2808 
2809   /// See AbstractAttribute::trackStatistics()
2810   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2811 };
2812 
/// ------------------- AAIsDead Function Attribute -------------------------
2814 
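/// Common base for liveness of a single value (instruction, argument, or call
/// site return), in contrast to the block-granular liveness tracked by
/// AAIsDeadFunction below.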
2815 struct AAIsDeadValueImpl : public AAIsDead {
2816   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2817 
2818   /// See AAIsDead::isAssumedDead().
2819   bool isAssumedDead() const override { return getAssumed(); }
2820 
2821   /// See AAIsDead::isKnownDead().
2822   bool isKnownDead() const override { return getKnown(); }
2823 
2824   /// See AAIsDead::isAssumedDead(BasicBlock *).
2825   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2826 
2827   /// See AAIsDead::isKnownDead(BasicBlock *).
2828   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2829 
2830   /// See AAIsDead::isAssumedDead(Instruction *I).
2831   bool isAssumedDead(const Instruction *I) const override {
2832     return I == getCtxI() && isAssumedDead();
2833   }
2834 
2835   /// See AAIsDead::isKnownDead(Instruction *I).
2836   bool isKnownDead(const Instruction *I) const override {
2837     return isAssumedDead(I) && getKnown();
2838   }
2839 
2840   /// See AbstractAttribute::getAsStr().
2841   const std::string getAsStr() const override {
2842     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2843   }
2844 
2845   /// Check if all uses are assumed dead.
2846   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
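    // The predicate rejects every use it is asked about; dead uses are
    // filtered out by checkForAllUses via the liveness information, so all
    // uses are dead exactly if there is no (live) use left to ask about.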
2847     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2848     // Explicitly set the dependence class to required because we want a long
2849     // chain of N dependent instructions to be considered live as soon as one is
2850     // without going through N update cycles. This is not required for
2851     // correctness.
2852     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2853   }
2854 
2855   /// Determine if \p I is assumed to be side-effect free.
2856   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2857     if (!I || wouldInstructionBeTriviallyDead(I))
2858       return true;
2859 
2860     auto *CB = dyn_cast<CallBase>(I);
2861     if (!CB || isa<IntrinsicInst>(CB))
2862       return false;
2863 
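    // For a call to be side-effect free we require the callee to not unwind
    // (no exceptional effect) and to at most read memory (no write effect).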
2864     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2865     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2866     if (!NoUnwindAA.isAssumedNoUnwind())
2867       return false;
2868 
2869     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, CallIRP);
2870     if (!MemBehaviorAA.isAssumedReadOnly())
2871       return false;
2872 
2873     return true;
2874   }
2875 };
2876 
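/// Liveness information for an arbitrary ("floating") value.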
2877 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2878   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2879 
2880   /// See AbstractAttribute::initialize(...).
2881   void initialize(Attributor &A) override {
2882     if (isa<UndefValue>(getAssociatedValue())) {
2883       indicatePessimisticFixpoint();
2884       return;
2885     }
2886 
2887     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2888     if (!isAssumedSideEffectFree(A, I))
2889       indicatePessimisticFixpoint();
2890   }
2891 
2892   /// See AbstractAttribute::updateImpl(...).
2893   ChangeStatus updateImpl(Attributor &A) override {
2894     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2895     if (!isAssumedSideEffectFree(A, I))
2896       return indicatePessimisticFixpoint();
2897 
2898     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2899       return indicatePessimisticFixpoint();
2900     return ChangeStatus::UNCHANGED;
2901   }
2902 
2903   /// See AbstractAttribute::manifest(...).
2904   ChangeStatus manifest(Attributor &A) override {
2905     Value &V = getAssociatedValue();
2906     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check if
      // isAssumedSideEffectFree returns true again here because it might no
      // longer hold; then only the users are dead but the instruction (= call)
      // is still needed.
2911       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2912         A.deleteAfterManifest(*I);
2913         return ChangeStatus::CHANGED;
2914       }
2915     }
2916     if (V.use_empty())
2917       return ChangeStatus::UNCHANGED;
2918 
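    // If the value is assumed to simplify to a proper constant we do not
    // replace it with undef here; the simplified value takes precedence.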
2919     bool UsedAssumedInformation = false;
2920     Optional<ConstantInt *> CI =
2921         getAssumedConstant(A, V, *this, UsedAssumedInformation);
2922     if (CI.hasValue() && CI.getValue())
2923       return ChangeStatus::UNCHANGED;
2924 
2925     UndefValue &UV = *UndefValue::get(V.getType());
2926     bool AnyChange = A.changeValueAfterManifest(V, UV);
2927     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2928   }
2929 
2930   /// See AbstractAttribute::trackStatistics()
2931   void trackStatistics() const override {
2932     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2933   }
2934 };
2935 
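/// Liveness information for an argument.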
2936 struct AAIsDeadArgument : public AAIsDeadFloating {
2937   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2938 
2939   /// See AbstractAttribute::initialize(...).
2940   void initialize(Attributor &A) override {
2941     if (!getAssociatedFunction()->hasExactDefinition())
2942       indicatePessimisticFixpoint();
2943   }
2944 
2945   /// See AbstractAttribute::manifest(...).
2946   ChangeStatus manifest(Attributor &A) override {
2947     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2948     Argument &Arg = *getAssociatedArgument();
2949     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2950       if (A.registerFunctionSignatureRewrite(
2951               Arg, /* ReplacementTypes */ {},
2952               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2953               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
2954         return ChangeStatus::CHANGED;
2955     return Changed;
2956   }
2957 
2958   /// See AbstractAttribute::trackStatistics()
2959   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2960 };
2961 
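/// Liveness information for a call site argument.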
2962 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2963   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2964 
2965   /// See AbstractAttribute::initialize(...).
2966   void initialize(Attributor &A) override {
2967     if (isa<UndefValue>(getAssociatedValue()))
2968       indicatePessimisticFixpoint();
2969   }
2970 
2971   /// See AbstractAttribute::updateImpl(...).
2972   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
2977     Argument *Arg = getAssociatedArgument();
2978     if (!Arg)
2979       return indicatePessimisticFixpoint();
2980     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2981     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2982     return clampStateAndIndicateChange(
2983         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2984   }
2985 
2986   /// See AbstractAttribute::manifest(...).
2987   ChangeStatus manifest(Attributor &A) override {
2988     CallBase &CB = cast<CallBase>(getAnchorValue());
2989     Use &U = CB.getArgOperandUse(getArgNo());
2990     assert(!isa<UndefValue>(U.get()) &&
2991            "Expected undef values to be filtered out!");
2992     UndefValue &UV = *UndefValue::get(U->getType());
2993     if (A.changeUseAfterManifest(U, UV))
2994       return ChangeStatus::CHANGED;
2995     return ChangeStatus::UNCHANGED;
2996   }
2997 
2998   /// See AbstractAttribute::trackStatistics()
2999   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3000 };
3001 
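/// Liveness information for a call site return value.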
3002 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3003   AAIsDeadCallSiteReturned(const IRPosition &IRP)
3004       : AAIsDeadFloating(IRP), IsAssumedSideEffectFree(true) {}
3005 
3006   /// See AAIsDead::isAssumedDead().
3007   bool isAssumedDead() const override {
3008     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3009   }
3010 
3011   /// See AbstractAttribute::initialize(...).
3012   void initialize(Attributor &A) override {
3013     if (isa<UndefValue>(getAssociatedValue())) {
3014       indicatePessimisticFixpoint();
3015       return;
3016     }
3017 
3018     // We track this separately as a secondary state.
3019     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3020   }
3021 
3022   /// See AbstractAttribute::updateImpl(...).
3023   ChangeStatus updateImpl(Attributor &A) override {
3024     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3025     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3026       IsAssumedSideEffectFree = false;
3027       Changed = ChangeStatus::CHANGED;
3028     }
3029 
3030     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3031       return indicatePessimisticFixpoint();
3032     return Changed;
3033   }
3034 
3035   /// See AbstractAttribute::manifest(...).
3036   ChangeStatus manifest(Attributor &A) override {
3037     if (auto *CI = dyn_cast<CallInst>(&getAssociatedValue()))
3038       if (CI->isMustTailCall())
3039         return ChangeStatus::UNCHANGED;
3040     return AAIsDeadFloating::manifest(A);
3041   }
3042 
3043   /// See AbstractAttribute::trackStatistics()
3044   void trackStatistics() const override {
3045     if (IsAssumedSideEffectFree)
3046       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3047     else
3048       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3049   }
3050 
3051   /// See AbstractAttribute::getAsStr().
3052   const std::string getAsStr() const override {
3053     return isAssumedDead()
3054                ? "assumed-dead"
3055                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3056   }
3057 
3058 private:
3059   bool IsAssumedSideEffectFree;
3060 };
3061 
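/// Liveness information for a function return value.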
3062 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3063   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
3064 
3065   /// See AbstractAttribute::updateImpl(...).
3066   ChangeStatus updateImpl(Attributor &A) override {
3067 
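    // The predicate trivially holds for every visited return; the query still
    // registers a dependence on the liveness information for the returns so
    // this attribute is updated when that information changes.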
3068     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3069                               {Instruction::Ret});
3070 
3071     auto PredForCallSite = [&](AbstractCallSite ACS) {
3072       if (ACS.isCallbackCall() || !ACS.getInstruction())
3073         return false;
3074       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3075     };
3076 
3077     bool AllCallSitesKnown;
3078     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3079                                 AllCallSitesKnown))
3080       return indicatePessimisticFixpoint();
3081 
3082     return ChangeStatus::UNCHANGED;
3083   }
3084 
3085   /// See AbstractAttribute::manifest(...).
3086   ChangeStatus manifest(Attributor &A) override {
3087     // TODO: Rewrite the signature to return void?
3088     bool AnyChange = false;
3089     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3090     auto RetInstPred = [&](Instruction &I) {
3091       ReturnInst &RI = cast<ReturnInst>(I);
3092       if (auto *CI = dyn_cast<CallInst>(RI.getReturnValue()))
3093         if (CI->isMustTailCall())
3094           return true;
3095       if (!isa<UndefValue>(RI.getReturnValue()))
3096         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3097       return true;
3098     };
3099     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3100     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3101   }
3102 
3103   /// See AbstractAttribute::trackStatistics()
3104   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3105 };
3106 
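/// Liveness information for a function as a whole, tracked as the set of
/// assumed-live basic blocks.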
3107 struct AAIsDeadFunction : public AAIsDead {
3108   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
3109 
3110   /// See AbstractAttribute::initialize(...).
3111   void initialize(Attributor &A) override {
3112     const Function *F = getAssociatedFunction();
3113     if (F && !F->isDeclaration()) {
3114       ToBeExploredFrom.insert(&F->getEntryBlock().front());
3115       assumeLive(A, F->getEntryBlock());
3116     }
3117   }
3118 
3119   /// See AbstractAttribute::getAsStr().
3120   const std::string getAsStr() const override {
3121     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3122            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
3123            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3124            std::to_string(KnownDeadEnds.size()) + "]";
3125   }
3126 
3127   /// See AbstractAttribute::manifest(...).
3128   ChangeStatus manifest(Attributor &A) override {
3129     assert(getState().isValidState() &&
3130            "Attempted to manifest an invalid state!");
3131 
3132     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3133     Function &F = *getAssociatedFunction();
3134 
3135     if (AssumedLiveBlocks.empty()) {
3136       A.deleteAfterManifest(F);
3137       return ChangeStatus::CHANGED;
3138     }
3139 
3140     // Flag to determine if we can change an invoke to a call assuming the
3141     // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
3143     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3144 
3145     KnownDeadEnds.set_union(ToBeExploredFrom);
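    // For every call that is a known dead end we either register the invoke
    // (so dead successors can be removed later) or cut off the code following
    // it with an `unreachable`.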
3146     for (const Instruction *DeadEndI : KnownDeadEnds) {
3147       auto *CB = dyn_cast<CallBase>(DeadEndI);
3148       if (!CB)
3149         continue;
3150       const auto &NoReturnAA =
3151           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
3152       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3153       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3154         continue;
3155 
3156       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3157         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3158       else
3159         A.changeToUnreachableAfterManifest(
3160             const_cast<Instruction *>(DeadEndI->getNextNode()));
3161       HasChanged = ChangeStatus::CHANGED;
3162     }
3163 
3164     for (BasicBlock &BB : F)
3165       if (!AssumedLiveBlocks.count(&BB))
3166         A.deleteAfterManifest(BB);
3167 
3168     return HasChanged;
3169   }
3170 
3171   /// See AbstractAttribute::updateImpl(...).
3172   ChangeStatus updateImpl(Attributor &A) override;
3173 
3174   /// See AbstractAttribute::trackStatistics()
3175   void trackStatistics() const override {}
3176 
  /// See AAIsDead::isAssumedDead(). The function itself is never assumed
  /// dead; we only track liveness of blocks and instructions.
  bool isAssumedDead() const override { return false; }
3179 
3180   /// See AAIsDead::isKnownDead().
3181   bool isKnownDead() const override { return false; }
3182 
3183   /// See AAIsDead::isAssumedDead(BasicBlock *).
3184   bool isAssumedDead(const BasicBlock *BB) const override {
3185     assert(BB->getParent() == getAssociatedFunction() &&
3186            "BB must be in the same anchor scope function.");
3187 
3188     if (!getAssumed())
3189       return false;
3190     return !AssumedLiveBlocks.count(BB);
3191   }
3192 
3193   /// See AAIsDead::isKnownDead(BasicBlock *).
3194   bool isKnownDead(const BasicBlock *BB) const override {
3195     return getKnown() && isAssumedDead(BB);
3196   }
3197 
  /// See AAIsDead::isAssumedDead(Instruction *I).
3199   bool isAssumedDead(const Instruction *I) const override {
3200     assert(I->getParent()->getParent() == getAssociatedFunction() &&
3201            "Instruction must be in the same anchor scope function.");
3202 
3203     if (!getAssumed())
3204       return false;
3205 
    // If it is not in AssumedLiveBlocks it is for sure dead.
    // Otherwise, it can still be dead if it comes after a noreturn call in a
    // live block.
3208     if (!AssumedLiveBlocks.count(I->getParent()))
3209       return true;
3210 
3211     // If it is not after a liveness barrier it is live.
3212     const Instruction *PrevI = I->getPrevNode();
3213     while (PrevI) {
3214       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3215         return true;
3216       PrevI = PrevI->getPrevNode();
3217     }
3218     return false;
3219   }
3220 
3221   /// See AAIsDead::isKnownDead(Instruction *I).
3222   bool isKnownDead(const Instruction *I) const override {
3223     return getKnown() && isAssumedDead(I);
3224   }
3225 
3226   /// Determine if \p F might catch asynchronous exceptions.
3227   static bool mayCatchAsynchronousExceptions(const Function &F) {
3228     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
3229   }
3230 
3231   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
3233   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3234     if (!AssumedLiveBlocks.insert(&BB).second)
3235       return false;
3236 
    // We assume that all of BB is (probably) live now, and if there are calls
    // to
3238     // internal functions we will assume that those are now live as well. This
3239     // is a performance optimization for blocks with calls to a lot of internal
3240     // functions. It can however cause dead functions to be treated as live.
3241     for (const Instruction &I : BB)
3242       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
3243         if (const Function *F = ICS.getCalledFunction())
3244           if (F->hasLocalLinkage())
3245             A.markLiveInternalFunction(*F);
3246     return true;
3247   }
3248 
3249   /// Collection of instructions that need to be explored again, e.g., we
3250   /// did assume they do not transfer control to (one of their) successors.
3251   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3252 
3253   /// Collection of instructions that are known to not transfer control.
3254   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3255 
3256   /// Collection of all assumed live BasicBlocks.
3257   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3258 };
3259 
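/// Helpers for AAIsDeadFunction::updateImpl below: collect the instructions
/// at which execution may continue after \p CB into \p AliveSuccessors and
/// return true if assumed (not yet known) information was used to rule any
/// successor out.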
3260 static bool
3261 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3262                         AbstractAttribute &AA,
3263                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3264   const IRPosition &IPos = IRPosition::callsite_function(CB);
3265 
3266   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3267   if (NoReturnAA.isAssumedNoReturn())
3268     return !NoReturnAA.isKnownNoReturn();
3269   if (CB.isTerminator())
3270     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3271   else
3272     AliveSuccessors.push_back(CB.getNextNode());
3273   return false;
3274 }
3275 
3276 static bool
3277 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3278                         AbstractAttribute &AA,
3279                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3280   bool UsedAssumedInformation =
3281       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3282 
3283   // First, determine if we can change an invoke to a call assuming the
3284   // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
3286   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3287     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3288   } else {
3289     const IRPosition &IPos = IRPosition::callsite_function(II);
3290     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3291     if (AANoUnw.isAssumedNoUnwind()) {
3292       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3293     } else {
3294       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3295     }
3296   }
3297   return UsedAssumedInformation;
3298 }
3299 
3300 static bool
3301 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3302                         AbstractAttribute &AA,
3303                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3304   bool UsedAssumedInformation = false;
3305   if (BI.getNumSuccessors() == 1) {
3306     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3307   } else {
3308     Optional<ConstantInt *> CI =
3309         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
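    // The Optional encodes three cases: no value yet (stay optimistic and
    // treat both edges as dead), a ConstantInt (only that edge is alive), or
    // a null pointer (the condition is not constant, both edges are alive).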
3310     if (!CI.hasValue()) {
3311       // No value yet, assume both edges are dead.
3312     } else if (CI.getValue()) {
3313       const BasicBlock *SuccBB =
3314           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3315       AliveSuccessors.push_back(&SuccBB->front());
3316     } else {
3317       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3318       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3319       UsedAssumedInformation = false;
3320     }
3321   }
3322   return UsedAssumedInformation;
3323 }
3324 
3325 static bool
3326 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3327                         AbstractAttribute &AA,
3328                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3329   bool UsedAssumedInformation = false;
3330   Optional<ConstantInt *> CI =
3331       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
3332   if (!CI.hasValue()) {
3333     // No value yet, assume all edges are dead.
3334   } else if (CI.getValue()) {
3335     for (auto &CaseIt : SI.cases()) {
3336       if (CaseIt.getCaseValue() == CI.getValue()) {
3337         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3338         return UsedAssumedInformation;
3339       }
3340     }
3341     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3342     return UsedAssumedInformation;
3343   } else {
3344     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3345       AliveSuccessors.push_back(&SuccBB->front());
3346   }
3347   return UsedAssumedInformation;
3348 }
3349 
3350 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3351   ChangeStatus Change = ChangeStatus::UNCHANGED;
3352 
3353   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3354                     << getAssociatedFunction()->size() << "] BBs and "
3355                     << ToBeExploredFrom.size() << " exploration points and "
3356                     << KnownDeadEnds.size() << " known dead ends\n");
3357 
3358   // Copy and clear the list of instructions we need to explore from. It is
3359   // refilled with instructions the next update has to look at.
3360   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3361                                                ToBeExploredFrom.end());
3362   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3363 
3364   SmallVector<const Instruction *, 8> AliveSuccessors;
3365   while (!Worklist.empty()) {
3366     const Instruction *I = Worklist.pop_back_val();
3367     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3368 
3369     AliveSuccessors.clear();
3370 
3371     bool UsedAssumedInformation = false;
3372     switch (I->getOpcode()) {
3373     // TODO: look for (assumed) UB to backwards propagate "deadness".
3374     default:
3375       if (I->isTerminator()) {
3376         for (const BasicBlock *SuccBB : successors(I->getParent()))
3377           AliveSuccessors.push_back(&SuccBB->front());
3378       } else {
3379         AliveSuccessors.push_back(I->getNextNode());
3380       }
3381       break;
3382     case Instruction::Call:
3383       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3384                                                        *this, AliveSuccessors);
3385       break;
3386     case Instruction::Invoke:
3387       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3388                                                        *this, AliveSuccessors);
3389       break;
3390     case Instruction::Br:
3391       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3392                                                        *this, AliveSuccessors);
3393       break;
3394     case Instruction::Switch:
3395       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3396                                                        *this, AliveSuccessors);
3397       break;
3398     }
3399 
3400     if (UsedAssumedInformation) {
3401       NewToBeExploredFrom.insert(I);
3402     } else {
3403       Change = ChangeStatus::CHANGED;
3404       if (AliveSuccessors.empty() ||
3405           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3406         KnownDeadEnds.insert(I);
3407     }
3408 
3409     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3410                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3411                       << UsedAssumedInformation << "\n");
3412 
3413     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3414       if (!I->isTerminator()) {
3415         assert(AliveSuccessors.size() == 1 &&
3416                "Non-terminator expected to have a single successor!");
3417         Worklist.push_back(AliveSuccessor);
3418       } else {
3419         if (assumeLive(A, *AliveSuccessor->getParent()))
3420           Worklist.push_back(AliveSuccessor);
3421       }
3422     }
3423   }
3424 
3425   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3426 
3427   // If we know everything is live there is no need to query for liveness.
3428   // Instead, indicating a pessimistic fixpoint will cause the state to be
3429   // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) finish the exploration, (2) not rule
  // any unreachable code dead, and (3) not discover any non-trivial dead end.
3433   if (ToBeExploredFrom.empty() &&
3434       getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
3435       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3436         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3437       }))
3438     return indicatePessimisticFixpoint();
3439   return Change;
3440 }
3441 
/// Liveness information for a call site.
3443 struct AAIsDeadCallSite final : AAIsDeadFunction {
3444   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3445 
3446   /// See AbstractAttribute::initialize(...).
3447   void initialize(Attributor &A) override {
3448     // TODO: Once we have call site specific value information we can provide
3449     //       call site specific liveness information and then it makes
3450     //       sense to specialize attributes for call sites instead of
3451     //       redirecting requests to the callee.
3452     llvm_unreachable("Abstract attributes for liveness are not "
3453                      "supported for call sites yet!");
3454   }
3455 
3456   /// See AbstractAttribute::updateImpl(...).
3457   ChangeStatus updateImpl(Attributor &A) override {
3458     return indicatePessimisticFixpoint();
3459   }
3460 
3461   /// See AbstractAttribute::trackStatistics()
3462   void trackStatistics() const override {}
3463 };
3464 
3465 /// -------------------- Dereferenceable Argument Attribute --------------------
3466 
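/// Clamp both parts of a DerefState, the dereferenceable bytes and the
/// global state, and report a change if either part changed.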
3467 template <>
3468 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3469                                                      const DerefState &R) {
3470   ChangeStatus CS0 =
3471       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3472   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3473   return CS0 | CS1;
3474 }
3475 
3476 struct AADereferenceableImpl : AADereferenceable {
3477   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
3478   using StateType = DerefState;
3479 
3480   void initialize(Attributor &A) override {
3481     SmallVector<Attribute, 4> Attrs;
3482     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3483              Attrs);
3484     for (const Attribute &Attr : Attrs)
3485       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3486 
3487     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3488                                        /* TrackDependence */ false);
3489 
3490     const IRPosition &IRP = this->getIRPosition();
3491     bool IsFnInterface = IRP.isFnInterfaceKind();
3492     const Function *FnScope = IRP.getAnchorScope();
3493     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
3494       indicatePessimisticFixpoint();
3495   }
3496 
3497   /// See AbstractAttribute::getState()
3498   /// {
3499   StateType &getState() override { return *this; }
3500   const StateType &getState() const override { return *this; }
3501   /// }
3502 
3503   /// Helper function for collecting accessed bytes in must-be-executed-context
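  ///
  /// For example (a hypothetical IR sketch), a use of the associated value
  /// `%base` as the pointer operand of `store i64 0, i64* %gep`, with
  /// `%gep = getelementptr inbounds i64, i64* %base, i64 1`, records the
  /// accessed bytes [8, 16).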
3504   void addAccessedBytesForUse(Attributor &A, const Use *U,
3505                               const Instruction *I) {
3506     const Value *UseV = U->get();
3507     if (!UseV->getType()->isPointerTy())
3508       return;
3509 
3510     Type *PtrTy = UseV->getType();
3511     const DataLayout &DL = A.getDataLayout();
3512     int64_t Offset;
3513     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3514             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3515       if (Base == &getAssociatedValue() &&
3516           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3517         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3518         addAccessedBytes(Offset, Size);
3519       }
3520     }
3522   }
3523 
3524   /// See AAFromMustBeExecutedContext
3525   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3526     bool IsNonNull = false;
3527     bool TrackUse = false;
3528     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3529         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3530 
3531     addAccessedBytesForUse(A, U, I);
3532     takeKnownDerefBytesMaximum(DerefBytes);
3533     return TrackUse;
3534   }
3535 
3536   /// See AbstractAttribute::manifest(...).
3537   ChangeStatus manifest(Attributor &A) override {
3538     ChangeStatus Change = AADereferenceable::manifest(A);
3539     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3540       removeAttrs({Attribute::DereferenceableOrNull});
3541       return ChangeStatus::CHANGED;
3542     }
3543     return Change;
3544   }
3545 
3546   void getDeducedAttributes(LLVMContext &Ctx,
3547                             SmallVectorImpl<Attribute> &Attrs) const override {
3548     // TODO: Add *_globally support
3549     if (isAssumedNonNull())
3550       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3551           Ctx, getAssumedDereferenceableBytes()));
3552     else
3553       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3554           Ctx, getAssumedDereferenceableBytes()));
3555   }
3556 
3557   /// See AbstractAttribute::getAsStr().
3558   const std::string getAsStr() const override {
3559     if (!getAssumedDereferenceableBytes())
3560       return "unknown-dereferenceable";
3561     return std::string("dereferenceable") +
3562            (isAssumedNonNull() ? "" : "_or_null") +
3563            (isAssumedGlobal() ? "_globally" : "") + "<" +
3564            std::to_string(getKnownDereferenceableBytes()) + "-" +
3565            std::to_string(getAssumedDereferenceableBytes()) + ">";
3566   }
3567 };
3568 
3569 /// Dereferenceable attribute for a floating value.
3570 struct AADereferenceableFloating
3571     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3572   using Base =
3573       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3574   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
3575 
3576   /// See AbstractAttribute::updateImpl(...).
3577   ChangeStatus updateImpl(Attributor &A) override {
3578     ChangeStatus Change = Base::updateImpl(A);
3579 
3580     const DataLayout &DL = A.getDataLayout();
3581 
3582     auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
3583       unsigned IdxWidth =
3584           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3585       APInt Offset(IdxWidth, 0);
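      // Accumulate the constant in-bounds offset from V to its underlying
      // base object so dereferenceability known for the base can be
      // translated to V.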
3586       const Value *Base =
3587           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3588 
3589       const auto &AA =
3590           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3591       int64_t DerefBytes = 0;
3592       if (!Stripped && this == &AA) {
3593         // Use IR information if we did not strip anything.
3594         // TODO: track globally.
3595         bool CanBeNull;
3596         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3597         T.GlobalState.indicatePessimisticFixpoint();
3598       } else {
3599         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3600         DerefBytes = DS.DerefBytesState.getAssumed();
3601         T.GlobalState &= DS.GlobalState;
3602       }
3603 
3604       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3605 
3606       // For now we do not try to "increase" dereferenceability due to negative
3607       // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
3609       int64_t OffsetSExt = Offset.getSExtValue();
3610       if (OffsetSExt < 0)
3611         OffsetSExt = 0;
3612 
3613       T.takeAssumedDerefBytesMinimum(
3614           std::max(int64_t(0), DerefBytes - OffsetSExt));
3615 
3616       if (this == &AA) {
3617         if (!Stripped) {
3618           // If nothing was stripped IR information is all we got.
3619           T.takeKnownDerefBytesMaximum(
3620               std::max(int64_t(0), DerefBytes - OffsetSExt));
3621           T.indicatePessimisticFixpoint();
3622         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we would decrease the
          // dereferenceable bytes in each circular iteration, which would
          // drive them down to the known value only very slowly; indicate a
          // pessimistic fixpoint to accelerate this.
3628           T.indicatePessimisticFixpoint();
3629         }
3630       }
3631 
3632       return T.isValidState();
3633     };
3634 
3635     DerefState T;
3636     if (!genericValueTraversal<AADereferenceable, DerefState>(
3637             A, getIRPosition(), *this, T, VisitValueCB))
3638       return indicatePessimisticFixpoint();
3639 
3640     return Change | clampStateAndIndicateChange(getState(), T);
3641   }
3642 
3643   /// See AbstractAttribute::trackStatistics()
3644   void trackStatistics() const override {
3645     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3646   }
3647 };
3648 
3649 /// Dereferenceable attribute for a return value.
3650 struct AADereferenceableReturned final
3651     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3652   AADereferenceableReturned(const IRPosition &IRP)
3653       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3654             IRP) {}
3655 
3656   /// See AbstractAttribute::trackStatistics()
3657   void trackStatistics() const override {
3658     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3659   }
3660 };
3661 
3662 /// Dereferenceable attribute for an argument
3663 struct AADereferenceableArgument final
3664     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3665           AADereferenceable, AADereferenceableImpl> {
3666   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3667       AADereferenceable, AADereferenceableImpl>;
3668   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3669 
3670   /// See AbstractAttribute::trackStatistics()
3671   void trackStatistics() const override {
3672     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3673   }
3674 };
3675 
3676 /// Dereferenceable attribute for a call site argument.
3677 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3678   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3679       : AADereferenceableFloating(IRP) {}
3680 
3681   /// See AbstractAttribute::trackStatistics()
3682   void trackStatistics() const override {
3683     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3684   }
3685 };
3686 
3687 /// Dereferenceable attribute deduction for a call site return value.
3688 struct AADereferenceableCallSiteReturned final
3689     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3690           AADereferenceable, AADereferenceableImpl> {
3691   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3692       AADereferenceable, AADereferenceableImpl>;
3693   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3694 
3695   /// See AbstractAttribute::trackStatistics()
3696   void trackStatistics() const override {
3697     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3698   }
3699 };
3700 
/// ------------------------ Align Argument Attribute ------------------------
3702 
3703 static unsigned int getKnownAlignForUse(Attributor &A,
3704                                         AbstractAttribute &QueryingAA,
3705                                         Value &AssociatedValue, const Use *U,
3706                                         const Instruction *I, bool &TrackUse) {
3707   // We need to follow common pointer manipulation uses to the accesses they
3708   // feed into.
3709   if (isa<CastInst>(I)) {
3710     // Follow all but ptr2int casts.
3711     TrackUse = !isa<PtrToIntInst>(I);
3712     return 0;
3713   }
3714   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3715     if (GEP->hasAllConstantIndices()) {
3716       TrackUse = true;
3717       return 0;
3718     }
3719   }
3720 
3721   unsigned Alignment = 0;
3722   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3723     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3724       return 0;
3725 
3726     unsigned ArgNo = ICS.getArgumentNo(U);
3727     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3728     // As long as we only use known information there is no need to track
3729     // dependences here.
3730     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3731                                         /* TrackDependence */ false);
3732     Alignment = AlignAA.getKnownAlign();
3733   }
3734 
3735   const Value *UseV = U->get();
3736   if (auto *SI = dyn_cast<StoreInst>(I)) {
3737     if (SI->getPointerOperand() == UseV)
3738       Alignment = SI->getAlignment();
3739   } else if (auto *LI = dyn_cast<LoadInst>(I))
3740     Alignment = LI->getAlignment();
3741 
3742   if (Alignment <= 1)
3743     return 0;
3744 
3745   auto &DL = A.getDataLayout();
3746   int64_t Offset;
3747 
3748   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3749     if (Base == &AssociatedValue) {
3750       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3751       // So we can say that the maximum power of two which is a divisor of
3752       // gcd(Offset, Alignment) is an alignment.
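      //
      // For example, if the access at BasePointerAddr + 4 is 16-byte aligned,
      // gcd(4, 16) = 4 shows that BasePointerAddr itself is at least 4-byte
      // aligned.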
3753 
3754       uint32_t gcd =
3755           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3756       Alignment = llvm::PowerOf2Floor(gcd);
3757     }
3758   }
3759 
3760   return Alignment;
3761 }
3762 struct AAAlignImpl : AAAlign {
3763   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3764 
3765   /// See AbstractAttribute::initialize(...).
3766   void initialize(Attributor &A) override {
3767     SmallVector<Attribute, 4> Attrs;
3768     getAttrs({Attribute::Alignment}, Attrs);
3769     for (const Attribute &Attr : Attrs)
3770       takeKnownMaximum(Attr.getValueAsInt());
3771 
3772     if (getIRPosition().isFnInterfaceKind() &&
3773         (!getAssociatedFunction() ||
3774          !getAssociatedFunction()->hasExactDefinition()))
3775       indicatePessimisticFixpoint();
3776   }
3777 
3778   /// See AbstractAttribute::manifest(...).
3779   ChangeStatus manifest(Attributor &A) override {
3780     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3781 
3782     // Check for users that allow alignment annotations.
3783     Value &AnchorVal = getIRPosition().getAnchorValue();
3784     for (const Use &U : AnchorVal.uses()) {
3785       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3786         if (SI->getPointerOperand() == &AnchorVal)
3787           if (SI->getAlignment() < getAssumedAlign()) {
3788             STATS_DECLTRACK(AAAlign, Store,
3789                             "Number of times alignment added to a store");
3790             SI->setAlignment(Align(getAssumedAlign()));
3791             LoadStoreChanged = ChangeStatus::CHANGED;
3792           }
3793       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3794         if (LI->getPointerOperand() == &AnchorVal)
3795           if (LI->getAlignment() < getAssumedAlign()) {
3796             LI->setAlignment(Align(getAssumedAlign()));
3797             STATS_DECLTRACK(AAAlign, Load,
3798                             "Number of times alignment added to a load");
3799             LoadStoreChanged = ChangeStatus::CHANGED;
3800           }
3801       }
3802     }
3803 
3804     ChangeStatus Changed = AAAlign::manifest(A);
3805 
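    // If the IR already implies at least the assumed alignment for the
    // associated value, adding an explicit attribute would not improve
    // anything; report only the load/store changes.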
3806     MaybeAlign InheritAlign =
3807         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3808     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3809       return LoadStoreChanged;
3810     return Changed | LoadStoreChanged;
3811   }
3812 
  // TODO: Provide a helper to determine the implied ABI alignment and check
  //       the deduced value against it in the existing manifest method and a
  //       new one for AAAlignImpl, to avoid making the alignment explicit if
  //       it did not improve on the implied alignment.
3816 
3817   /// See AbstractAttribute::getDeducedAttributes
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
3821     if (getAssumedAlign() > 1)
3822       Attrs.emplace_back(
3823           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3824   }

  /// See AAFromMustBeExecutedContext
3826   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
3827     bool TrackUse = false;
3828 
3829     unsigned int KnownAlign =
3830         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3831     takeKnownMaximum(KnownAlign);
3832 
3833     return TrackUse;
3834   }
3835 
3836   /// See AbstractAttribute::getAsStr().
3837   const std::string getAsStr() const override {
3838     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3839                                 "-" + std::to_string(getAssumedAlign()) + ">")
3840                              : "unknown-align";
3841   }
3842 };
3843 
3844 /// Align attribute for a floating value.
3845 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3846   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3847   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3848 
3849   /// See AbstractAttribute::updateImpl(...).
3850   ChangeStatus updateImpl(Attributor &A) override {
3851     Base::updateImpl(A);
3852 
3853     const DataLayout &DL = A.getDataLayout();
3854 
3855     auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3856                             bool Stripped) -> bool {
3857       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3858       if (!Stripped && this == &AA) {
3859         // Use only IR information if we did not strip anything.
3860         const MaybeAlign PA = V.getPointerAlignment(DL);
3861         T.takeKnownMaximum(PA ? PA->value() : 0);
3862         T.indicatePessimisticFixpoint();
3863       } else {
3864         // Use abstract attribute information.
3865         const AAAlign::StateType &DS =
3866             static_cast<const AAAlign::StateType &>(AA.getState());
3867         T ^= DS;
3868       }
3869       return T.isValidState();
3870     };
3871 
3872     StateType T;
3873     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3874                                                    VisitValueCB))
3875       return indicatePessimisticFixpoint();
3876 
    // TODO: If we know we visited all incoming values, and thus none are
    //       assumed dead, we can take the known information from the state T.
3879     return clampStateAndIndicateChange(getState(), T);
3880   }
3881 
3882   /// See AbstractAttribute::trackStatistics()
3883   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3884 };
3885 
3886 /// Align attribute for function return value.
3887 struct AAAlignReturned final
3888     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3889   AAAlignReturned(const IRPosition &IRP)
3890       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3891 
3892   /// See AbstractAttribute::trackStatistics()
3893   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3894 };
3895 
3896 /// Align attribute for function argument.
3897 struct AAAlignArgument final
3898     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3899                                                               AAAlignImpl> {
3900   AAAlignArgument(const IRPosition &IRP)
3901       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3902                                                                 AAAlignImpl>(
3903             IRP) {}
3904 
3905   /// See AbstractAttribute::trackStatistics()
3906   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3907 };
3908 
3909 struct AAAlignCallSiteArgument final : AAAlignFloating {
3910   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3911 
3912   /// See AbstractAttribute::manifest(...).
3913   ChangeStatus manifest(Attributor &A) override {
3914     ChangeStatus Changed = AAAlignImpl::manifest(A);
3915     MaybeAlign InheritAlign =
3916         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3917     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3918       Changed = ChangeStatus::UNCHANGED;
3919     return Changed;
3920   }
3921 
3922   /// See AbstractAttribute::updateImpl(Attributor &A).
3923   ChangeStatus updateImpl(Attributor &A) override {
3924     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3925     if (Argument *Arg = getAssociatedArgument()) {
3926       // We only take known information from the argument
3927       // so we do not need to track a dependence.
3928       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3929           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3930       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3931     }
3932     return Changed;
3933   }
3934 
3935   /// See AbstractAttribute::trackStatistics()
3936   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3937 };
3938 
3939 /// Align attribute deduction for a call site return value.
3940 struct AAAlignCallSiteReturned final
3941     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3942                                                              AAAlignImpl> {
3943   using Base =
3944       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3945                                                              AAAlignImpl>;
3946   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3947 
3948   /// See AbstractAttribute::initialize(...).
3949   void initialize(Attributor &A) override {
3950     Base::initialize(A);
3951     Function *F = getAssociatedFunction();
3952     if (!F)
3953       indicatePessimisticFixpoint();
3954   }
3955 
3956   /// See AbstractAttribute::trackStatistics()
3957   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3958 };
3959 
3960 /// ------------------ Function No-Return Attribute ----------------------------
3961 struct AANoReturnImpl : public AANoReturn {
3962   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3963 
3964   /// See AbstractAttribute::initialize(...).
3965   void initialize(Attributor &A) override {
3966     AANoReturn::initialize(A);
3967     Function *F = getAssociatedFunction();
3968     if (!F)
3969       indicatePessimisticFixpoint();
3970   }
3971 
3972   /// See AbstractAttribute::getAsStr().
3973   const std::string getAsStr() const override {
3974     return getAssumed() ? "noreturn" : "may-return";
3975   }
3976 
3977   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
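    // A function is assumed "noreturn" if no return instruction is assumed
    // live; checkForAllInstructions only visits instructions that are not
    // assumed dead, so rejecting every visited return does the job.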
3979     auto CheckForNoReturn = [](Instruction &) { return false; };
3980     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3981                                    {(unsigned)Instruction::Ret}))
3982       return indicatePessimisticFixpoint();
3983     return ChangeStatus::UNCHANGED;
3984   }
3985 };
3986 
3987 struct AANoReturnFunction final : AANoReturnImpl {
3988   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3989 
3990   /// See AbstractAttribute::trackStatistics()
3991   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3992 };
3993 
/// NoReturn attribute deduction for a call site.
3995 struct AANoReturnCallSite final : AANoReturnImpl {
3996   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3997 
3998   /// See AbstractAttribute::updateImpl(...).
3999   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
4004     Function *F = getAssociatedFunction();
4005     const IRPosition &FnPos = IRPosition::function(*F);
4006     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4007     return clampStateAndIndicateChange(
4008         getState(),
4009         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
4010   }
4011 
4012   /// See AbstractAttribute::trackStatistics()
4013   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4014 };
4015 
4016 /// ----------------------- Variable Capturing ---------------------------------
4017 
/// A class to hold the state for no-capture attributes.
4019 struct AANoCaptureImpl : public AANoCapture {
4020   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
4021 
4022   /// See AbstractAttribute::initialize(...).
4023   void initialize(Attributor &A) override {
4024     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4025       indicateOptimisticFixpoint();
4026       return;
4027     }
4028     Function *AnchorScope = getAnchorScope();
4029     if (isFnInterfaceKind() &&
4030         (!AnchorScope || !AnchorScope->hasExactDefinition())) {
4031       indicatePessimisticFixpoint();
4032       return;
4033     }
4034 
4035     // You cannot "capture" null in the default address space.
4036     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4037         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4038       indicateOptimisticFixpoint();
4039       return;
4040     }
4041 
4042     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
4043 
4044     // Check what state the associated function can actually capture.
4045     if (F)
4046       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4047     else
4048       indicatePessimisticFixpoint();
4049   }
4050 
4051   /// See AbstractAttribute::updateImpl(...).
4052   ChangeStatus updateImpl(Attributor &A) override;
4053 
4054   /// See AbstractAttribute::getDeducedAttributes(...).
4055   void getDeducedAttributes(LLVMContext &Ctx,
4056                             SmallVectorImpl<Attribute> &Attrs) const override {
4058     if (!isAssumedNoCaptureMaybeReturned())
4059       return;
4060 
4061     if (getArgNo() >= 0) {
4062       if (isAssumedNoCapture())
4063         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4064       else if (ManifestInternal)
4065         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4066     }
4067   }
4068 
4069   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
4070   /// depending on the ability of the function associated with \p IRP to capture
4071   /// state in memory and through "returning/throwing", respectively.
4072   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4073                                                    const Function &F,
4074                                                    BitIntegerState &State) {
4075     // TODO: Once we have memory behavior attributes we should use them here.
4076 
4077     // If we know we cannot communicate or write to memory, we do not care about
4078     // ptr2int anymore.
4079     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4080         F.getReturnType()->isVoidTy()) {
4081       State.addKnownBits(NO_CAPTURE);
4082       return;
4083     }
4084 
4085     // A function cannot capture state in memory if it only reads memory; it
4086     // can, however, return/throw state and that state might be influenced by
4087     // the pointer value, e.g., loading from a returned pointer might reveal a bit.
4088     if (F.onlyReadsMemory())
4089       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4090 
4091     // A function cannot communicate state back if it does not throw exceptions
4092     // and does not return values.
4093     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4094       State.addKnownBits(NOT_CAPTURED_IN_RET);
4095 
4096     // Check existing "returned" attributes.
4097     int ArgNo = IRP.getArgNo();
4098     if (F.doesNotThrow() && ArgNo >= 0) {
4099       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4100         if (F.hasParamAttribute(u, Attribute::Returned)) {
4101           if (u == unsigned(ArgNo))
4102             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4103           else if (F.onlyReadsMemory())
4104             State.addKnownBits(NO_CAPTURE);
4105           else
4106             State.addKnownBits(NOT_CAPTURED_IN_RET);
4107           break;
4108         }
4109     }
4110   }
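
  // For illustration (assumed IR): given
  //
  //   define void @f(i8* %p) readonly nounwind {
  //     %v = load i8, i8* %p
  //     ret void
  //   }
  //
  // the function can neither capture state in memory (readonly) nor
  // communicate it back through returning/throwing (void return + nounwind),
  // so NO_CAPTURE becomes known for the argument %p.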
4111 
4112   /// See AbstractAttribute::getAsStr().
4113   const std::string getAsStr() const override {
4114     if (isKnownNoCapture())
4115       return "known not-captured";
4116     if (isAssumedNoCapture())
4117       return "assumed not-captured";
4118     if (isKnownNoCaptureMaybeReturned())
4119       return "known not-captured-maybe-returned";
4120     if (isAssumedNoCaptureMaybeReturned())
4121       return "assumed not-captured-maybe-returned";
4122     return "assumed-captured";
4123   }
4124 };
4125 
4126 /// Attributor-aware capture tracker.
4127 struct AACaptureUseTracker final : public CaptureTracker {
4128 
4129   /// Create a capture tracker that can look up in-flight abstract attributes
4130   /// through the Attributor \p A.
4131   ///
4132   /// If a use leads to a potential capture in memory, the NOT_CAPTURED_IN_MEM
4133   /// bit is removed from \p State. If a use leads to a return instruction, the
4134   /// NOT_CAPTURED_IN_RET bit is removed instead. If a use leads to a ptr2int
4135   /// which may capture the value, the state is conservatively invalidated. If
4136   /// a use is found that is currently assumed "no-capture-maybe-returned", the
4137   /// user is added to the \p PotentialCopies set. All values in
4138   /// \p PotentialCopies are later tracked as well. For every explored use we
4139   /// decrement \p RemainingUsesToExplore. Once it reaches 0, the search is
4140   /// stopped and all remaining capture bits in \p State are conservatively
4141   /// removed.
4142   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4143                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4144                       SmallVectorImpl<const Value *> &PotentialCopies,
4145                       unsigned &RemainingUsesToExplore)
4146       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4147         PotentialCopies(PotentialCopies),
4148         RemainingUsesToExplore(RemainingUsesToExplore) {}
4149 
4150   /// Determine if \p V may be captured. *Also updates the state!*
4151   bool valueMayBeCaptured(const Value *V) {
4152     if (V->getType()->isPointerTy()) {
4153       PointerMayBeCaptured(V, this);
4154     } else {
4155       State.indicatePessimisticFixpoint();
4156     }
4157     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4158   }
4159 
4160   /// See CaptureTracker::tooManyUses().
4161   void tooManyUses() override {
4162     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4163   }
4164 
4165   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4166     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4167       return true;
4168     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4169         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4170         DepClassTy::OPTIONAL);
4171     return DerefAA.getAssumedDereferenceableBytes();
4172   }
4173 
4174   /// See CaptureTracker::captured(...).
4175   bool captured(const Use *U) override {
4176     Instruction *UInst = cast<Instruction>(U->getUser());
4177     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4178                       << "\n");
4179 
4180     // Because we may reuse the tracker multiple times we keep track of the
4181     // number of explored uses ourselves as well.
4182     if (RemainingUsesToExplore-- == 0) {
4183       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4184       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4185                           /* Return */ true);
4186     }
4187 
4188     // Deal with ptr2int by following uses.
4189     if (isa<PtrToIntInst>(UInst)) {
4190       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4191       return valueMayBeCaptured(UInst);
4192     }
4193 
4194     // Explicitly catch return instructions.
4195     if (isa<ReturnInst>(UInst))
4196       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4197                           /* Return */ true);
4198 
4199     // For now we only use special logic for call sites. However, the tracker
4200     // itself knows about a lot of other non-capturing cases already.
4201     CallSite CS(UInst);
4202     if (!CS || !CS.isArgOperand(U))
4203       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4204                           /* Return */ true);
4205 
4206     unsigned ArgNo = CS.getArgumentNo(U);
4207     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
4208     // If we have an abstract no-capture attribute for the argument we can use
4209     // it to justify a no-capture attribute here. This allows recursion!
4210     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4211     if (ArgNoCaptureAA.isAssumedNoCapture())
4212       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4213                           /* Return */ false);
4214     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4215       addPotentialCopy(CS);
4216       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4217                           /* Return */ false);
4218     }
4219 
4220     // Lastly, we could not find a reason no-capture can be assumed, so we don't.
4221     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4222                         /* Return */ true);
4223   }
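
  // Illustrative example (assumed IR): for
  //
  //   define i8* @id(i8* returned %p) { ret i8* %p }
  //   ...
  //   %c = call i8* @id(i8* %x)
  //
  // the callee argument is assumed "no-capture-maybe-returned", so the call
  // result %c is recorded as a potential copy of %x and its uses are explored
  // later as well.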
4224 
4225   /// Register \p CS as potential copy of the value we are checking.
4226   void addPotentialCopy(CallSite CS) {
4227     PotentialCopies.push_back(CS.getInstruction());
4228   }
4229 
4230   /// See CaptureTracker::shouldExplore(...).
4231   bool shouldExplore(const Use *U) override {
4232     // Check liveness.
4233     return !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4234   }
4235 
4236   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4237   /// \p CapturedInRet, then return the appropriate value for use in the
4238   /// CaptureTracker::captured() interface.
4239   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4240                     bool CapturedInRet) {
4241     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4242                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4243     if (CapturedInMem)
4244       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4245     if (CapturedInInt)
4246       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4247     if (CapturedInRet)
4248       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4249     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4250   }
4251 
4252 private:
4253   /// The attributor providing in-flight abstract attributes.
4254   Attributor &A;
4255 
4256   /// The abstract attribute currently updated.
4257   AANoCapture &NoCaptureAA;
4258 
4259   /// The abstract liveness state.
4260   const AAIsDead &IsDeadAA;
4261 
4262   /// The state currently updated.
4263   AANoCapture::StateType &State;
4264 
4265   /// Set of potential copies of the tracked value.
4266   SmallVectorImpl<const Value *> &PotentialCopies;
4267 
4268   /// Global counter to limit the number of explored uses.
4269   unsigned &RemainingUsesToExplore;
4270 };
4271 
4272 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4273   const IRPosition &IRP = getIRPosition();
4274   const Value *V =
4275       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4276   if (!V)
4277     return indicatePessimisticFixpoint();
4278 
4279   const Function *F =
4280       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4281   assert(F && "Expected a function!");
4282   const IRPosition &FnPos = IRPosition::function(*F);
4283   const auto &IsDeadAA =
4284       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4285 
4286   AANoCapture::StateType T;
4287 
4288   // Readonly means we cannot capture through memory.
4289   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
4290       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4291   if (FnMemAA.isAssumedReadOnly()) {
4292     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4293     if (FnMemAA.isKnownReadOnly())
4294       addKnownBits(NOT_CAPTURED_IN_MEM);
4295   }
4296 
4297   // Make sure all returned values are different from the underlying value.
4298   // TODO: we could do this in a more sophisticated way inside
4299   //       AAReturnedValues, e.g., track all values that escape through returns
4300   //       directly somehow.
4301   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4302     bool SeenConstant = false;
4303     for (auto &It : RVAA.returned_values()) {
4304       if (isa<Constant>(It.first)) {
4305         if (SeenConstant)
4306           return false;
4307         SeenConstant = true;
4308       } else if (!isa<Argument>(It.first) ||
4309                  It.first == getAssociatedArgument())
4310         return false;
4311     }
4312     return true;
4313   };
4314 
4315   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4316       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4317   if (NoUnwindAA.isAssumedNoUnwind()) {
4318     bool IsVoidTy = F->getReturnType()->isVoidTy();
4319     const AAReturnedValues *RVAA =
4320         IsVoidTy ? nullptr
4321                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4322                                                  /* TrackDependence */ true,
4323                                                  DepClassTy::OPTIONAL);
4324     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4325       T.addKnownBits(NOT_CAPTURED_IN_RET);
4326       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4327         return ChangeStatus::UNCHANGED;
4328       if (NoUnwindAA.isKnownNoUnwind() &&
4329           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4330         addKnownBits(NOT_CAPTURED_IN_RET);
4331         if (isKnown(NOT_CAPTURED_IN_MEM))
4332           return indicateOptimisticFixpoint();
4333       }
4334     }
4335   }
4336 
4337   // Use the CaptureTracker interface and logic with the specialized tracker,
4338   // defined in AACaptureUseTracker, that can look at in-flight abstract
4339   // attributes and directly update the assumed state.
4340   SmallVector<const Value *, 4> PotentialCopies;
4341   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
4342   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4343                               RemainingUsesToExplore);
4344 
4345   // Check all potential copies of the associated value until we can assume
4346   // none will be captured or we have to assume at least one might be.
4347   unsigned Idx = 0;
4348   PotentialCopies.push_back(V);
4349   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4350     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4351 
4352   AANoCapture::StateType &S = getState();
4353   auto Assumed = S.getAssumed();
4354   S.intersectAssumedBits(T.getAssumed());
4355   if (!isAssumedNoCaptureMaybeReturned())
4356     return indicatePessimisticFixpoint();
4357   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4358                                    : ChangeStatus::CHANGED;
4359 }
4360 
4361 /// NoCapture attribute for function arguments.
4362 struct AANoCaptureArgument final : AANoCaptureImpl {
4363   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4364 
4365   /// See AbstractAttribute::trackStatistics()
4366   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4367 };
4368 
4369 /// NoCapture attribute for call site arguments.
4370 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4371   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4372 
4373   /// See AbstractAttribute::initialize(...).
4374   void initialize(Attributor &A) override {
4375     if (Argument *Arg = getAssociatedArgument())
4376       if (Arg->hasByValAttr())
4377         indicateOptimisticFixpoint();
4378     AANoCaptureImpl::initialize(A);
4379   }
4380 
4381   /// See AbstractAttribute::updateImpl(...).
4382   ChangeStatus updateImpl(Attributor &A) override {
4383     // TODO: Once we have call site specific value information we can provide
4384     //       call site specific liveness information and then it makes
4385     //       sense to specialize attributes for call sites arguments instead of
4386     //       redirecting requests to the callee argument.
4387     Argument *Arg = getAssociatedArgument();
4388     if (!Arg)
4389       return indicatePessimisticFixpoint();
4390     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4391     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4392     return clampStateAndIndicateChange(
4393         getState(),
4394         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4395   }
4396 
4397   /// See AbstractAttribute::trackStatistics()
4398   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
4399 };
4400 
4401 /// NoCapture attribute for floating values.
4402 struct AANoCaptureFloating final : AANoCaptureImpl {
4403   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4404 
4405   /// See AbstractAttribute::trackStatistics()
4406   void trackStatistics() const override {
4407     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4408   }
4409 };
4410 
4411 /// NoCapture attribute for function return value.
4412 struct AANoCaptureReturned final : AANoCaptureImpl {
4413   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4414     llvm_unreachable("NoCapture is not applicable to function returns!");
4415   }
4416 
4417   /// See AbstractAttribute::initialize(...).
4418   void initialize(Attributor &A) override {
4419     llvm_unreachable("NoCapture is not applicable to function returns!");
4420   }
4421 
4422   /// See AbstractAttribute::updateImpl(...).
4423   ChangeStatus updateImpl(Attributor &A) override {
4424     llvm_unreachable("NoCapture is not applicable to function returns!");
4425   }
4426 
4427   /// See AbstractAttribute::trackStatistics()
4428   void trackStatistics() const override {}
4429 };
4430 
4431 /// NoCapture attribute deduction for a call site return value.
4432 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4433   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4434 
4435   /// See AbstractAttribute::trackStatistics()
4436   void trackStatistics() const override {
4437     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4438   }
4439 };
4440 
4441 /// ------------------ Value Simplify Attribute ----------------------------
4442 struct AAValueSimplifyImpl : AAValueSimplify {
4443   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
4444 
4445   /// See AbstractAttribute::initialize(...).
4446   void initialize(Attributor &A) override {
4447     if (getAssociatedValue().getType()->isVoidTy())
4448       indicatePessimisticFixpoint();
4449   }
4450 
4451   /// See AbstractAttribute::getAsStr().
4452   const std::string getAsStr() const override {
4453     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4454                         : "not-simple";
4455   }
4456 
4457   /// See AbstractAttribute::trackStatistics()
4458   void trackStatistics() const override {}
4459 
4460   /// See AAValueSimplify::getAssumedSimplifiedValue()
4461   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4462     if (!getAssumed())
4463       return const_cast<Value *>(&getAssociatedValue());
4464     return SimplifiedAssociatedValue;
4465   }
4466 
4467   /// Helper function for querying AAValueSimplify and updating the candidate.
4468   /// \param QueryingValue Value trying to unify with SimplifiedValue
4469   /// \param AccumulatedSimplifiedValue Current simplification result.
4470   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4471                              Value &QueryingValue,
4472                              Optional<Value *> &AccumulatedSimplifiedValue) {
4473     // FIXME: Add typecast support.
4474 
4475     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4476         QueryingAA, IRPosition::value(QueryingValue));
4477 
4478     Optional<Value *> QueryingValueSimplified =
4479         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4480 
4481     if (!QueryingValueSimplified.hasValue())
4482       return true;
4483 
4484     if (!QueryingValueSimplified.getValue())
4485       return false;
4486 
4487     Value &QueryingValueSimplifiedUnwrapped =
4488         *QueryingValueSimplified.getValue();
4489 
4490     if (AccumulatedSimplifiedValue.hasValue() &&
4491         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4492         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4493       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4494     if (AccumulatedSimplifiedValue.hasValue() &&
4495         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4496       return true;
4497 
4498     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4499                       << " is assumed to be "
4500                       << QueryingValueSimplifiedUnwrapped << "\n");
4501 
4502     AccumulatedSimplifiedValue = QueryingValueSimplified;
4503     return true;
4504   }
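
  // Sketch of the unification above (hypothetical values): if all queried
  // values simplify to the same constant, say `i32 42`, the accumulated value
  // stays `i32 42`; `undef` is compatible with any accumulated value; two
  // distinct non-undef values, e.g., `i32 42` and `i32 7`, do not unify and
  // checkAndUpdate returns false.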
4505 
4506   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4507     if (!getAssociatedValue().getType()->isIntegerTy())
4508       return false;
4509 
4510     const auto &ValueConstantRangeAA =
4511         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4512 
4513     Optional<ConstantInt *> COpt =
4514         ValueConstantRangeAA.getAssumedConstantInt(A);
4515     if (COpt.hasValue()) {
4516       if (auto *C = COpt.getValue())
4517         SimplifiedAssociatedValue = C;
4518       else
4519         return false;
4520     } else {
4521       SimplifiedAssociatedValue = llvm::None;
4522     }
4523     return true;
4524   }
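
  // For illustration (assumed deduction): if AAValueConstantRange inferred the
  // singleton range [42, 43) for an i32 value, getAssumedConstantInt yields
  // the constant `i32 42` and the value is simplified to it.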
4525 
4526   /// See AbstractAttribute::manifest(...).
4527   ChangeStatus manifest(Attributor &A) override {
4528     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4529 
4530     if (SimplifiedAssociatedValue.hasValue() &&
4531         !SimplifiedAssociatedValue.getValue())
4532       return Changed;
4533 
4534     Value &V = getAssociatedValue();
4535     auto *C = SimplifiedAssociatedValue.hasValue()
4536                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4537                   : UndefValue::get(V.getType());
4538     if (C) {
4539       // We can replace the AssociatedValue with the constant.
4540       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4541         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4542                           << " :: " << *this << "\n");
4543         if (A.changeValueAfterManifest(V, *C))
4544           Changed = ChangeStatus::CHANGED;
4545       }
4546     }
4547 
4548     return Changed | AAValueSimplify::manifest(A);
4549   }
4550 
4551   /// See AbstractState::indicatePessimisticFixpoint(...).
4552   ChangeStatus indicatePessimisticFixpoint() override {
4553     // NOTE: The associated value will be returned in a pessimistic fixpoint
4554     // and is regarded as known. That's why `indicateOptimisticFixpoint` is called.
4555     SimplifiedAssociatedValue = &getAssociatedValue();
4556     indicateOptimisticFixpoint();
4557     return ChangeStatus::CHANGED;
4558   }
4559 
4560 protected:
4561   // An assumed simplified value. Initially, it is set to Optional::None, which
4562   // means that the value is not clear under the current assumption. In the
4563   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4564   // the original associated value.
4565   Optional<Value *> SimplifiedAssociatedValue;
4566 };
4567 
4568 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4569   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4570 
4571   void initialize(Attributor &A) override {
4572     AAValueSimplifyImpl::initialize(A);
4573     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
4574       indicatePessimisticFixpoint();
4575     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4576                 /* IgnoreSubsumingPositions */ true))
4577       indicatePessimisticFixpoint();
4578   }
4579 
4580   /// See AbstractAttribute::updateImpl(...).
4581   ChangeStatus updateImpl(Attributor &A) override {
4582     // Byval is only replaceable if it is readonly, otherwise we would write
4583     // into the replaced value and not the copy that byval creates implicitly.
4584     Argument *Arg = getAssociatedArgument();
4585     if (Arg->hasByValAttr()) {
4586       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4587       if (!MemAA.isAssumedReadOnly())
4588         return indicatePessimisticFixpoint();
4589     }
4590 
4591     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4592 
4593     auto PredForCallSite = [&](AbstractCallSite ACS) {
4594       // Check if we have an associated argument or not (which can happen for
4595       // callback calls).
4596       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
4597       if (!ArgOp)
4598         return false;
4599       // We can only propagate thread independent values through callbacks.
4600       // This is different from direct/indirect call sites because for them we
4601       // know the thread executing the caller and callee is the same. For
4602       // callbacks this is not guaranteed, thus a thread dependent value could
4603       // be different for the caller and callee, making it invalid to propagate.
4604       if (ACS.isCallbackCall())
4605         if (auto *C = dyn_cast<Constant>(ArgOp))
4606           if (C->isThreadDependent())
4607             return false;
4608       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
4609     };
4610 
4611     bool AllCallSitesKnown;
4612     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4613                                 AllCallSitesKnown))
4614       if (!askSimplifiedValueForAAValueConstantRange(A))
4615         return indicatePessimisticFixpoint();
4616 
4617     // If a candidate was found in this update, return CHANGED.
4618     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4619                ? ChangeStatus::UNCHANGED
4620                : ChangeStatus::CHANGED;
4621   }
4622 
4623   /// See AbstractAttribute::trackStatistics()
4624   void trackStatistics() const override {
4625     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4626   }
4627 };
4628 
4629 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4630   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4631 
4632   /// See AbstractAttribute::updateImpl(...).
4633   ChangeStatus updateImpl(Attributor &A) override {
4634     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4635 
4636     auto PredForReturned = [&](Value &V) {
4637       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4638     };
4639 
4640     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4641       if (!askSimplifiedValueForAAValueConstantRange(A))
4642         return indicatePessimisticFixpoint();
4643 
4644     // If a candidate was found in this update, return CHANGED.
4645     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4646                ? ChangeStatus::UNCHANGED
4647                : ChangeStatus::CHANGED;
4648   }
4649 
4650   ChangeStatus manifest(Attributor &A) override {
4651     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4652 
4653     if (SimplifiedAssociatedValue.hasValue() &&
4654         !SimplifiedAssociatedValue.getValue())
4655       return Changed;
4656 
4657     Value &V = getAssociatedValue();
4658     auto *C = SimplifiedAssociatedValue.hasValue()
4659                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4660                   : UndefValue::get(V.getType());
4661     if (C) {
4662       auto PredForReturned =
4663           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4664             // We can replace the AssociatedValue with the constant.
4665             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4666               return true;
4667             if (auto *CI = dyn_cast<CallInst>(&V))
4668               if (CI->isMustTailCall())
4669                 return true;
4670 
4671             for (ReturnInst *RI : RetInsts) {
4672               if (RI->getFunction() != getAnchorScope())
4673                 continue;
4674               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4675                                 << " in " << *RI << " :: " << *this << "\n");
4676               if (A.changeUseAfterManifest(RI->getOperandUse(0), *C))
4677                 Changed = ChangeStatus::CHANGED;
4678             }
4679             return true;
4680           };
4681       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4682     }
4683 
4684     return Changed | AAValueSimplify::manifest(A);
4685   }
4686 
4687   /// See AbstractAttribute::trackStatistics()
4688   void trackStatistics() const override {
4689     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4690   }
4691 };
4692 
4693 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4694   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4695 
4696   /// See AbstractAttribute::initialize(...).
4697   void initialize(Attributor &A) override {
4698     Value &V = getAnchorValue();
4699 
4700     // TODO: Handle other cases as well.
4701     if (isa<Constant>(V))
4702       indicatePessimisticFixpoint();
4703   }
4704 
4705   /// See AbstractAttribute::updateImpl(...).
4706   ChangeStatus updateImpl(Attributor &A) override {
4707     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4708 
4709     auto VisitValueCB = [&](Value &V, bool &, bool Stripped) -> bool {
4710       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4711       if (!Stripped && this == &AA) {
4712         // TODO: Look at the instruction and check recursively.
4713 
4714         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4715                           << "\n");
4716         return false;
4717       }
4718       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4719     };
4720 
4721     bool Dummy = false;
4722     if (!genericValueTraversal<AAValueSimplify, bool>(A, getIRPosition(), *this,
4723                                                       Dummy, VisitValueCB))
4724       if (!askSimplifiedValueForAAValueConstantRange(A))
4725         return indicatePessimisticFixpoint();
4726 
4727     // If a candidate was found in this update, return CHANGED.
4728 
4729     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4730                ? ChangeStatus::UNCHANGED
4731                : ChangeStatus::CHANGED;
4732   }
4733 
4734   /// See AbstractAttribute::trackStatistics()
4735   void trackStatistics() const override {
4736     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4737   }
4738 };
4739 
4740 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4741   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4742 
4743   /// See AbstractAttribute::initialize(...).
4744   void initialize(Attributor &A) override {
4745     SimplifiedAssociatedValue = &getAnchorValue();
4746     indicateOptimisticFixpoint();
4747   }
4748   /// See AbstractAttribute::updateImpl(...).
4749   ChangeStatus updateImpl(Attributor &A) override {
4750     llvm_unreachable(
4751         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4752   }
4753   /// See AbstractAttribute::trackStatistics()
4754   void trackStatistics() const override {
4755     STATS_DECLTRACK_FN_ATTR(value_simplify)
4756   }
4757 };
4758 
4759 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4760   AAValueSimplifyCallSite(const IRPosition &IRP)
4761       : AAValueSimplifyFunction(IRP) {}
4762   /// See AbstractAttribute::trackStatistics()
4763   void trackStatistics() const override {
4764     STATS_DECLTRACK_CS_ATTR(value_simplify)
4765   }
4766 };
4767 
4768 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4769   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4770       : AAValueSimplifyReturned(IRP) {}
4771 
4772   /// See AbstractAttribute::manifest(...).
4773   ChangeStatus manifest(Attributor &A) override {
4774     if (auto *CI = dyn_cast<CallInst>(&getAssociatedValue()))
4775       if (CI->isMustTailCall())
4776         return ChangeStatus::UNCHANGED;
4777     return AAValueSimplifyImpl::manifest(A);
4778   }
4779 
4780   void trackStatistics() const override {
4781     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4782   }
4783 };
4784 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4785   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4786       : AAValueSimplifyFloating(IRP) {}
4787 
4788   void trackStatistics() const override {
4789     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4790   }
4791 };
4792 
4793 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4794 struct AAHeapToStackImpl : public AAHeapToStack {
4795   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4796 
4797   const std::string getAsStr() const override {
4798     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4799   }
4800 
4801   ChangeStatus manifest(Attributor &A) override {
4802     assert(getState().isValidState() &&
4803            "Attempted to manifest an invalid state!");
4804 
4805     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4806     Function *F = getAssociatedFunction();
4807     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4808 
4809     for (Instruction *MallocCall : MallocCalls) {
4810       // This malloc cannot be replaced.
4811       if (BadMallocCalls.count(MallocCall))
4812         continue;
4813 
4814       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4815         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4816         A.deleteAfterManifest(*FreeCall);
4817         HasChanged = ChangeStatus::CHANGED;
4818       }
4819 
4820       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4821                         << "\n");
4822 
4823       Constant *Size;
4824       if (isCallocLikeFn(MallocCall, TLI)) {
4825         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4826         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4827         APInt TotalSize = SizeT->getValue() * Num->getValue();
4828         Size =
4829             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4830       } else {
4831         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4832       }
4833 
4834       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4835       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4836                                        Size, "", MallocCall->getNextNode());
4837 
4838       if (AI->getType() != MallocCall->getType())
4839         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4840                              AI->getNextNode());
4841 
4842       A.changeValueAfterManifest(*MallocCall, *AI);
4843 
4844       // For an invoke, branch to the normal destination before erasing the
4845       // (now dead) call; in either case the malloc call itself is removed.
4846       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4847         auto *NBB = II->getNormalDest();
4848         BranchInst::Create(NBB, MallocCall->getParent());
4849       }
4850       A.deleteAfterManifest(*MallocCall);
4851 
4852       if (isCallocLikeFn(MallocCall, TLI)) {
4853         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4854                                    AI->getNextNode());
4855         Value *Ops[] = {
4856             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4857             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4858 
4859         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4860         Module *M = F->getParent();
4861         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4862         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4863       }
4864       HasChanged = ChangeStatus::CHANGED;
4865     }
4866 
4867     return HasChanged;
4868   }
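
  // Illustrative effect of the manifest step (assumed IR, simplified):
  //
  //   %m = call i8* @malloc(i64 16)   -->   %m = alloca i8, i64 16
  //   ...                                   ...
  //   call void @free(i8* %m)         -->   <erased>
  //
  // For calloc-like calls the alloca is additionally zero-initialized via an
  // llvm.memset intrinsic, as emitted above.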
4869 
4870   /// Collection of all malloc calls in a function.
4871   SmallSetVector<Instruction *, 4> MallocCalls;
4872 
4873   /// Collection of malloc calls that cannot be converted.
4874   DenseSet<const Instruction *> BadMallocCalls;
4875 
4876   /// A map for each malloc call to the set of associated free calls.
4877   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4878 
4879   ChangeStatus updateImpl(Attributor &A) override;
4880 };
4881 
4882 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4883   const Function *F = getAssociatedFunction();
4884   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4885 
4886   MustBeExecutedContextExplorer &Explorer =
4887       A.getInfoCache().getMustBeExecutedContextExplorer();
4888 
4889   auto FreeCheck = [&](Instruction &I) {
4890     const auto &Frees = FreesForMalloc.lookup(&I);
4891     if (Frees.size() != 1)
4892       return false;
4893     Instruction *UniqueFree = *Frees.begin();
4894     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4895   };
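
  // FreeCheck above succeeds, e.g., for (assumed IR):
  //
  //   %m = call i8* @malloc(i64 8)
  //   call void @free(i8* %m)   ; the unique free, in the must-be-executed
  //                             ; context of the malloc
  //
  // i.e., exactly one free exists and it is found in the must-be-executed
  // context following the allocation.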
4896 
4897   auto UsesCheck = [&](Instruction &I) {
4898     bool ValidUsesOnly = true;
4899     bool MustUse = true;
4900     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4901       Instruction *UserI = cast<Instruction>(U.getUser());
4902       if (isa<LoadInst>(UserI))
4903         return true;
4904       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4905         if (SI->getValueOperand() == U.get()) {
4906           LLVM_DEBUG(dbgs()
4907                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4908           ValidUsesOnly = false;
4909         } else {
4910           // A store into the malloc'ed memory is fine.
4911         }
4912         return true;
4913       }
4914       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4915         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4916           return true;
4917         // Record the free calls for this malloc.
4918         if (isFreeCall(UserI, TLI)) {
4919           if (MustUse) {
4920             FreesForMalloc[&I].insert(UserI);
4921           } else {
4922             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4923                               << *UserI << "\n");
4924             ValidUsesOnly = false;
4925           }
4926           return true;
4927         }
4928 
4929         unsigned ArgNo = CB->getArgOperandNo(&U);
4930 
4931         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4932             *this, IRPosition::callsite_argument(*CB, ArgNo));
4933 
4934         // If a callsite argument use is nofree, we are fine.
4935         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4936             *this, IRPosition::callsite_argument(*CB, ArgNo));
4937 
4938         if (!NoCaptureAA.isAssumedNoCapture() ||
4939             !ArgNoFreeAA.isAssumedNoFree()) {
4940           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4941           ValidUsesOnly = false;
4942         }
4943         return true;
4944       }
4945 
4946       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4947           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4948         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4949         Follow = true;
4950         return true;
4951       }
4952       // Unknown user for which we cannot track uses further (in a way that
4953       // makes sense).
4954       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4955       ValidUsesOnly = false;
4956       return true;
4957     };
4958     A.checkForAllUses(Pred, *this, I);
4959     return ValidUsesOnly;
4960   };
4961 
4962   auto MallocCallocCheck = [&](Instruction &I) {
4963     if (BadMallocCalls.count(&I))
4964       return true;
4965 
4966     bool IsMalloc = isMallocLikeFn(&I, TLI);
4967     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4968     if (!IsMalloc && !IsCalloc) {
4969       BadMallocCalls.insert(&I);
4970       return true;
4971     }
4972 
4973     if (IsMalloc) {
4974       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4975         if (Size->getValue().ule(MaxHeapToStackSize))
4976           if (UsesCheck(I) || FreeCheck(I)) {
4977             MallocCalls.insert(&I);
4978             return true;
4979           }
4980     } else if (IsCalloc) {
4981       bool Overflow = false;
4982       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4983         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4984           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4985                   .ule(MaxHeapToStackSize))
4986             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4987               MallocCalls.insert(&I);
4988               return true;
4989             }
4990     }
4991 
4992     BadMallocCalls.insert(&I);
4993     return true;
4994   };
4995 
4996   size_t NumBadMallocs = BadMallocCalls.size();
4997 
4998   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4999 
5000   if (NumBadMallocs != BadMallocCalls.size())
5001     return ChangeStatus::CHANGED;
5002 
5003   return ChangeStatus::UNCHANGED;
5004 }
5005 
5006 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5007   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
5008 
5009   /// See AbstractAttribute::trackStatistics()
5010   void trackStatistics() const override {
5011     STATS_DECL(MallocCalls, Function,
5012                "Number of malloc calls converted to allocas");
5013     for (auto *C : MallocCalls)
5014       if (!BadMallocCalls.count(C))
5015         ++BUILD_STAT_NAME(MallocCalls, Function);
5016   }
5017 };
5018 
5019 /// ----------------------- Privatizable Pointers ------------------------------
5020 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5021   AAPrivatizablePtrImpl(const IRPosition &IRP)
5022       : AAPrivatizablePtr(IRP), PrivatizableType(llvm::None) {}
5023 
5024   ChangeStatus indicatePessimisticFixpoint() override {
5025     AAPrivatizablePtr::indicatePessimisticFixpoint();
5026     PrivatizableType = nullptr;
5027     return ChangeStatus::CHANGED;
5028   }
5029 
5030   /// Identify the type we can choose for a private copy of the underlying
5031   /// argument. None means it is not clear yet, nullptr means there is none.
5032   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5033 
5034   /// Return a privatizable type that encloses both T0 and T1.
5035   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5036   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5037     if (!T0.hasValue())
5038       return T1;
5039     if (!T1.hasValue())
5040       return T0;
5041     if (T0 == T1)
5042       return T0;
5043     return nullptr;
5044   }
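
  // For example: combineTypes(None, i32) yields i32, combineTypes(i32, i32)
  // yields i32, and combineTypes(i32, i64) yields nullptr as there is no
  // common privatizable type (yet).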
5045 
5046   Optional<Type *> getPrivatizableType() const override {
5047     return PrivatizableType;
5048   }
5049 
5050   const std::string getAsStr() const override {
5051     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5052   }
5053 
5054 protected:
5055   Optional<Type *> PrivatizableType;
5056 };
5057 
5058 // TODO: Do this for call site arguments (probably also other values) as well.
5059 
5060 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5061   AAPrivatizablePtrArgument(const IRPosition &IRP)
5062       : AAPrivatizablePtrImpl(IRP) {}
5063 
5064   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5065   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5066     // If this is a byval argument and we know all the call sites (so we can
5067     // rewrite them), there is no need to check them explicitly.
5068     bool AllCallSitesKnown;
5069     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5070         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5071                                true, AllCallSitesKnown))
5072       return getAssociatedValue().getType()->getPointerElementType();
5073 
5074     Optional<Type *> Ty;
5075     unsigned ArgNo = getIRPosition().getArgNo();
5076 
5077     // Make sure the associated call site argument has the same type at all
5078     // call sites and that it is an allocation we know is safe to privatize;
5079     // for now that means we only allow alloca instructions.
5080     // TODO: We can additionally analyze the accesses in the callee to create
5081     //       the type from that information instead. That is a little more
5082     //       involved and will be done in a follow-up patch.
5083     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5084       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5085       // Check if a corresponding argument was found or if it is not associated
5086       // (which can happen for callback calls).
5087       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5088         return false;
5089 
5090       // Check that all call sites agree on a type.
5091       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
5092       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5093 
5094       LLVM_DEBUG({
5095         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5096         if (CSTy.hasValue() && CSTy.getValue())
5097           CSTy.getValue()->print(dbgs());
5098         else if (CSTy.hasValue())
5099           dbgs() << "<nullptr>";
5100         else
5101           dbgs() << "<none>";
5102       });
5103 
5104       Ty = combineTypes(Ty, CSTy);
5105 
5106       LLVM_DEBUG({
5107         dbgs() << " : New Type: ";
5108         if (Ty.hasValue() && Ty.getValue())
5109           Ty.getValue()->print(dbgs());
5110         else if (Ty.hasValue())
5111           dbgs() << "<nullptr>";
5112         else
5113           dbgs() << "<none>";
5114         dbgs() << "\n";
5115       });
5116 
5117       return !Ty.hasValue() || Ty.getValue();
5118     };
5119 
5120     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5121       return nullptr;
5122     return Ty;
5123   }
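
  // Illustrative example (assumed IR): for
  //
  //   define void @f(%struct.S* byval %arg)
  //
  // with all call sites known, the privatizable type is simply the pointee
  // type %struct.S; otherwise all call site argument positions have to agree
  // on one type as computed above.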
5124 
5125   /// See AbstractAttribute::updateImpl(...).
5126   ChangeStatus updateImpl(Attributor &A) override {
5127     PrivatizableType = identifyPrivatizableType(A);
5128     if (!PrivatizableType.hasValue())
5129       return ChangeStatus::UNCHANGED;
5130     if (!PrivatizableType.getValue())
5131       return indicatePessimisticFixpoint();
5132 
5133     // Avoid arguments with padding for now.
5134     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5135         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5136                                                 A.getInfoCache().getDL())) {
5137       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5138       return indicatePessimisticFixpoint();
5139     }
5140 
5141     // Verify callee and caller agree on how the promoted argument would be
5142     // passed.
5143     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5144     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5145     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5146     Function &Fn = *getIRPosition().getAnchorScope();
5147     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5148     ArgsToPromote.insert(getAssociatedArgument());
5149     const auto *TTI =
5150         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5151     if (!TTI ||
5152         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5153             Fn, *TTI, ArgsToPromote, Dummy) ||
5154         ArgsToPromote.empty()) {
5155       LLVM_DEBUG(
5156           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5157                  << Fn.getName() << "\n");
5158       return indicatePessimisticFixpoint();
5159     }
5160 
5161     // Collect the types that will replace the privatizable type in the function
5162     // signature.
5163     SmallVector<Type *, 16> ReplacementTypes;
5164     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5165 
5166     // Register a rewrite of the argument.
5167     Argument *Arg = getAssociatedArgument();
5168     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5169       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5170       return indicatePessimisticFixpoint();
5171     }
5172 
5173     unsigned ArgNo = Arg->getArgNo();
5174 
5175     // Helper to check if, for the given call site, the associated argument is
5176     // passed to a callback where the privatization would be different.
5177     auto IsCompatiblePrivArgOfCallback = [&](CallSite CS) {
5178       SmallVector<const Use *, 4> CBUses;
5179       AbstractCallSite::getCallbackUses(CS, CBUses);
5180       for (const Use *U : CBUses) {
5181         AbstractCallSite CBACS(U);
5182         assert(CBACS && CBACS.isCallbackCall());
5183         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5184           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5185 
5186           LLVM_DEBUG({
5187             dbgs()
5188                 << "[AAPrivatizablePtr] Argument " << *Arg
5189                 << " check if it can be privatized in the context of its parent ("
5190                 << Arg->getParent()->getName()
5191                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5192                    "callback ("
5193                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5194                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5195                 << CBACS.getCallArgOperand(CBArg) << " vs "
5196                 << CS.getArgOperand(ArgNo) << "\n"
5197                 << "[AAPrivatizablePtr] " << CBArg << " : "
5198                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5199           });
5200 
5201           if (CBArgNo != int(ArgNo))
5202             continue;
5203           const auto &CBArgPrivAA =
5204               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5205           if (CBArgPrivAA.isValidState()) {
5206             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5207             if (!CBArgPrivTy.hasValue())
5208               continue;
5209             if (CBArgPrivTy.getValue() == PrivatizableType)
5210               continue;
5211           }
5212 
5213           LLVM_DEBUG({
5214             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5215                    << " cannot be privatized in the context of its parent ("
5216                    << Arg->getParent()->getName()
5217                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5218                       "callback ("
5219                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5220                    << ").\n[AAPrivatizablePtr] for which the argument "
5221                       "privatization is not compatible.\n";
5222           });
5223           return false;
5224         }
5225       }
5226       return true;
5227     };
5228 
5229     // Helper to check if, for the given call site, the associated argument is
5230     // passed to a direct call where the privatization would be different.
5231     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5232       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5233       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5234       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5235              "Expected a direct call operand for callback call operand");
5236 
5237       LLVM_DEBUG({
5238         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5239                << " check if it can be privatized in the context of its parent ("
5240                << Arg->getParent()->getName()
5241                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5242                   "direct call of ("
5243                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5244                << ").\n";
5245       });
5246 
5247       Function *DCCallee = DC->getCalledFunction();
5248       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5249         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5250             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5251         if (DCArgPrivAA.isValidState()) {
5252           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5253           if (!DCArgPrivTy.hasValue())
5254             return true;
5255           if (DCArgPrivTy.getValue() == PrivatizableType)
5256             return true;
5257         }
5258       }
5259 
5260       LLVM_DEBUG({
5261         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5262                << " cannot be privatized in the context of its parent ("
5263                << Arg->getParent()->getName()
5264                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5265                   "direct call of ("
5266                << ACS.getCallSite().getCalledFunction()->getName()
5267                << ").\n[AAPrivatizablePtr] for which the argument "
5268                   "privatization is not compatible.\n";
5269       });
5270       return false;
5271     };
5272 
5273     // Helper to check if the associated argument is used at the given abstract
5274     // call site in a way that is incompatible with the privatization assumed
5275     // here.
5276     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5277       if (ACS.isDirectCall())
5278         return IsCompatiblePrivArgOfCallback(ACS.getCallSite());
5279       if (ACS.isCallbackCall())
5280         return IsCompatiblePrivArgOfDirectCS(ACS);
5281       return false;
5282     };
5283 
5284     bool AllCallSitesKnown;
5285     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5286                                 AllCallSitesKnown))
5287       return indicatePessimisticFixpoint();
5288 
5289     return ChangeStatus::UNCHANGED;
5290   }
5291 
5292   /// Given a type to privatize \p PrivType, collect the constituents (which
5293   /// are used) in \p ReplacementTypes.
5294   static void
5295   identifyReplacementTypes(Type *PrivType,
5296                            SmallVectorImpl<Type *> &ReplacementTypes) {
5297     // TODO: For now we expand the privatization type to the fullest which can
5298     //       lead to dead arguments that need to be removed later.
5299     assert(PrivType && "Expected privatizable type!");
5300 
5301     // Traverse the type, extract constituent types on the outermost level.
5302     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5303       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5304         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5305     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5306       ReplacementTypes.append(PrivArrayType->getNumElements(),
5307                               PrivArrayType->getElementType());
5308     } else {
5309       ReplacementTypes.push_back(PrivType);
5310     }
5311   }
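
  // For illustration: a privatizable type `{ i32, i64 }` is expanded to the
  // replacement types i32 and i64, an array `[4 x float]` to four float
  // entries, and any other type is kept as the single replacement type.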
5312 
5313   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5314   /// The values needed are taken from the arguments of \p F starting at
5315   /// position \p ArgNo.
5316   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5317                                    unsigned ArgNo, Instruction &IP) {
5318     assert(PrivType && "Expected privatizable type!");
5319 
5320     IRBuilder<NoFolder> IRB(&IP);
5321     const DataLayout &DL = F.getParent()->getDataLayout();
5322 
5323     // Traverse the type, build GEPs and stores.
5324     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5325       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5326       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5327         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5328         Value *Ptr = constructPointer(
5329             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5330         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5331       }
5332     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5333       Type *PointeeTy = PrivArrayType->getElementType();
5334       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5335       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5336         Value *Ptr = constructPointer(PointeeTy->getPointerTo(), &Base,
5337                                       u * PointeeTySize, IRB, DL);
5338         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5339       }
5340     } else {
5341       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5342     }
5343   }
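
  // Sketch of the emitted initialization (assumed types): for a privatizable
  // type `{ i32, i64 }`, an alloca %priv, and arguments %arg0/%arg1, roughly
  //
  //   %f0 = ... pointer into %priv at offset 0 ...
  //   store i32 %arg0, i32* %f0
  //   %f1 = ... pointer into %priv at the offset of the second element ...
  //   store i64 %arg1, i64* %f1
  //
  // is created at the given insertion point.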
5344 
5345   /// Extract values from \p Base according to the type \p PrivType at the
5346   /// call position \p ACS. The values are appended to \p ReplacementValues.
5347   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5348                                Value *Base,
5349                                SmallVectorImpl<Value *> &ReplacementValues) {
5350     assert(Base && "Expected base value!");
5351     assert(PrivType && "Expected privatizable type!");
5352     Instruction *IP = ACS.getInstruction();
5353 
5354     IRBuilder<NoFolder> IRB(IP);
5355     const DataLayout &DL = IP->getModule()->getDataLayout();
5356 
5357     if (Base->getType()->getPointerElementType() != PrivType)
5358       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5359                                                  "", ACS.getInstruction());
5360 
5361     // TODO: Improve the alignment of the loads.
5362     // Traverse the type, build GEPs and loads.
5363     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5364       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5365       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5366         Type *PointeeTy = PrivStructType->getElementType(u);
5367         Value *Ptr =
5368             constructPointer(PointeeTy->getPointerTo(), Base,
5369                              PrivStructLayout->getElementOffset(u), IRB, DL);
5370         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5371         L->setAlignment(MaybeAlign(1));
5372         ReplacementValues.push_back(L);
5373       }
5374     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5375       Type *PointeeTy = PrivArrayType->getElementType();
5376       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5377       Type *PointeePtrTy = PointeeTy->getPointerTo();
5378       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5379         Value *Ptr =
5380             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5382         L->setAlignment(MaybeAlign(1));
5383         ReplacementValues.push_back(L);
5384       }
5385     } else {
5386       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5387       L->setAlignment(MaybeAlign(1));
5388       ReplacementValues.push_back(L);
5389     }
5390   }
5391 
5392   /// See AbstractAttribute::manifest(...)
5393   ChangeStatus manifest(Attributor &A) override {
5394     if (!PrivatizableType.hasValue())
5395       return ChangeStatus::UNCHANGED;
5396     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5397 
5398     // Collect all tail calls in the function as we cannot allow new allocas to
5399     // escape into tail recursion.
5400     // TODO: Be smarter about new allocas escaping into tail calls.
5401     SmallVector<CallInst *, 16> TailCalls;
5402     if (!A.checkForAllInstructions(
5403             [&](Instruction &I) {
5404               CallInst &CI = cast<CallInst>(I);
5405               if (CI.isTailCall())
5406                 TailCalls.push_back(&CI);
5407               return true;
5408             },
5409             *this, {Instruction::Call}))
5410       return ChangeStatus::UNCHANGED;
5411 
5412     Argument *Arg = getAssociatedArgument();
5413 
5414     // Callback to repair the associated function. A new alloca is placed at the
5415     // beginning and initialized with the values passed through arguments. The
5416     // new alloca replaces the use of the old pointer argument.
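    // Illustrative sketch (argument and value names are hypothetical):
    //   before: define void @fn(i32* %arg) { ... uses of %arg ... }
    //   after:  define void @fn(i32 %arg.0) {
    //             %arg.priv = alloca i32
    //             store i32 %arg.0, i32* %arg.priv
    //             ... uses of %arg.priv ...
    //           }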
5417     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5418         [=](const Attributor::ArgumentReplacementInfo &ARI,
5419             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5420           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5421           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5422           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5423                                     Arg->getName() + ".priv", IP);
5424           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5425                                ArgIt->getArgNo(), *IP);
5426           Arg->replaceAllUsesWith(AI);
5427 
5428           for (CallInst *CI : TailCalls)
5429             CI->setTailCall(false);
5430         };
5431 
5432     // Callback to repair a call site of the associated function. The elements
5433     // of the privatizable type are loaded prior to the call and passed to the
5434     // new function version.
5435     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5436         [=](const Attributor::ArgumentReplacementInfo &ARI,
5437             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
          createReplacementValues(
              PrivatizableType.getValue(), ACS,
              ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
              NewArgOperands);
5442         };
5443 
5444     // Collect the types that will replace the privatizable type in the function
5445     // signature.
5446     SmallVector<Type *, 16> ReplacementTypes;
5447     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5448 
5449     // Register a rewrite of the argument.
5450     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5451                                            std::move(FnRepairCB),
5452                                            std::move(ACSRepairCB)))
5453       return ChangeStatus::CHANGED;
5454     return ChangeStatus::UNCHANGED;
5455   }
5456 
5457   /// See AbstractAttribute::trackStatistics()
5458   void trackStatistics() const override {
5459     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5460   }
5461 };
5462 
5463 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5464   AAPrivatizablePtrFloating(const IRPosition &IRP)
5465       : AAPrivatizablePtrImpl(IRP) {}
5466 
5467   /// See AbstractAttribute::initialize(...).
5468   virtual void initialize(Attributor &A) override {
5469     // TODO: We can privatize more than arguments.
5470     indicatePessimisticFixpoint();
5471   }
5472 
5473   ChangeStatus updateImpl(Attributor &A) override {
5474     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5475                      "updateImpl will not be called");
5476   }
5477 
5478   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5479   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5480     Value *Obj =
5481         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5482     if (!Obj) {
5483       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5484       return nullptr;
5485     }
5486 
5487     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5488       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5489         if (CI->isOne())
5490           return Obj->getType()->getPointerElementType();
5491     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5492       auto &PrivArgAA =
5493           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5494       if (PrivArgAA.isAssumedPrivatizablePtr())
5495         return Obj->getType()->getPointerElementType();
5496     }
5497 
5498     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5499                          "alloca nor privatizable argument: "
5500                       << *Obj << "!\n");
5501     return nullptr;
5502   }
5503 
5504   /// See AbstractAttribute::trackStatistics()
5505   void trackStatistics() const override {
5506     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5507   }
5508 };
5509 
5510 struct AAPrivatizablePtrCallSiteArgument final
5511     : public AAPrivatizablePtrFloating {
5512   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP)
5513       : AAPrivatizablePtrFloating(IRP) {}
5514 
5515   /// See AbstractAttribute::initialize(...).
5516   void initialize(Attributor &A) override {
5517     if (getIRPosition().hasAttr(Attribute::ByVal))
5518       indicateOptimisticFixpoint();
5519   }
5520 
5521   /// See AbstractAttribute::updateImpl(...).
5522   ChangeStatus updateImpl(Attributor &A) override {
5523     PrivatizableType = identifyPrivatizableType(A);
5524     if (!PrivatizableType.hasValue())
5525       return ChangeStatus::UNCHANGED;
5526     if (!PrivatizableType.getValue())
5527       return indicatePessimisticFixpoint();
5528 
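    // Privatization hands the callee a fresh local copy of the pointee. This
    // is only sound if the pointer does not escape (nocapture), is not
    // aliased at the call site (noalias), and the pointee is never written
    // through it (readonly); the checks below verify exactly that.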
5529     const IRPosition &IRP = getIRPosition();
5530     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5531     if (!NoCaptureAA.isAssumedNoCapture()) {
5532       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5533       return indicatePessimisticFixpoint();
5534     }
5535 
5536     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5537     if (!NoAliasAA.isAssumedNoAlias()) {
5538       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5539       return indicatePessimisticFixpoint();
5540     }
5541 
5542     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5543     if (!MemBehaviorAA.isAssumedReadOnly()) {
5544       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5545       return indicatePessimisticFixpoint();
5546     }
5547 
5548     return ChangeStatus::UNCHANGED;
5549   }
5550 
5551   /// See AbstractAttribute::trackStatistics()
5552   void trackStatistics() const override {
5553     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5554   }
5555 };
5556 
5557 struct AAPrivatizablePtrCallSiteReturned final
5558     : public AAPrivatizablePtrFloating {
5559   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP)
5560       : AAPrivatizablePtrFloating(IRP) {}
5561 
5562   /// See AbstractAttribute::initialize(...).
5563   void initialize(Attributor &A) override {
5564     // TODO: We can privatize more than arguments.
5565     indicatePessimisticFixpoint();
5566   }
5567 
5568   /// See AbstractAttribute::trackStatistics()
5569   void trackStatistics() const override {
5570     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5571   }
5572 };
5573 
5574 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5575   AAPrivatizablePtrReturned(const IRPosition &IRP)
5576       : AAPrivatizablePtrFloating(IRP) {}
5577 
5578   /// See AbstractAttribute::initialize(...).
5579   void initialize(Attributor &A) override {
5580     // TODO: We can privatize more than arguments.
5581     indicatePessimisticFixpoint();
5582   }
5583 
5584   /// See AbstractAttribute::trackStatistics()
5585   void trackStatistics() const override {
5586     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5587   }
5588 };
5589 
5590 /// -------------------- Memory Behavior Attributes ----------------------------
5591 /// Includes read-none, read-only, and write-only.
5592 /// ----------------------------------------------------------------------------
5593 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5594   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
5595 
5596   /// See AbstractAttribute::initialize(...).
5597   void initialize(Attributor &A) override {
5598     intersectAssumedBits(BEST_STATE);
5599     getKnownStateFromValue(getIRPosition(), getState());
5600     IRAttribute::initialize(A);
5601   }
5602 
5603   /// Return the memory behavior information encoded in the IR for \p IRP.
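  /// The encoding used below: `readnone` implies NO_ACCESSES (both NO_READS
  /// and NO_WRITES), `readonly` implies NO_WRITES, and `writeonly` implies
  /// NO_READS.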
5604   static void getKnownStateFromValue(const IRPosition &IRP,
5605                                      BitIntegerState &State,
5606                                      bool IgnoreSubsumingPositions = false) {
5607     SmallVector<Attribute, 2> Attrs;
5608     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5609     for (const Attribute &Attr : Attrs) {
5610       switch (Attr.getKindAsEnum()) {
5611       case Attribute::ReadNone:
5612         State.addKnownBits(NO_ACCESSES);
5613         break;
5614       case Attribute::ReadOnly:
5615         State.addKnownBits(NO_WRITES);
5616         break;
5617       case Attribute::WriteOnly:
5618         State.addKnownBits(NO_READS);
5619         break;
5620       default:
5621         llvm_unreachable("Unexpected attribute!");
5622       }
5623     }
5624 
5625     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5626       if (!I->mayReadFromMemory())
5627         State.addKnownBits(NO_READS);
5628       if (!I->mayWriteToMemory())
5629         State.addKnownBits(NO_WRITES);
5630     }
5631   }
5632 
5633   /// See AbstractAttribute::getDeducedAttributes(...).
5634   void getDeducedAttributes(LLVMContext &Ctx,
5635                             SmallVectorImpl<Attribute> &Attrs) const override {
5636     assert(Attrs.size() == 0);
5637     if (isAssumedReadNone())
5638       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5639     else if (isAssumedReadOnly())
5640       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5641     else if (isAssumedWriteOnly())
5642       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5643     assert(Attrs.size() <= 1);
5644   }
5645 
5646   /// See AbstractAttribute::manifest(...).
5647   ChangeStatus manifest(Attributor &A) override {
5648     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5649       return ChangeStatus::UNCHANGED;
5650 
5651     const IRPosition &IRP = getIRPosition();
5652 
5653     // Check if we would improve the existing attributes first.
5654     SmallVector<Attribute, 4> DeducedAttrs;
5655     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5656     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5657           return IRP.hasAttr(Attr.getKindAsEnum(),
5658                              /* IgnoreSubsumingPositions */ true);
5659         }))
5660       return ChangeStatus::UNCHANGED;
5661 
5662     // Clear existing attributes.
5663     IRP.removeAttrs(AttrKinds);
5664 
5665     // Use the generic manifest method.
5666     return IRAttribute::manifest(A);
5667   }
5668 
5669   /// See AbstractState::getAsStr().
5670   const std::string getAsStr() const override {
5671     if (isAssumedReadNone())
5672       return "readnone";
5673     if (isAssumedReadOnly())
5674       return "readonly";
5675     if (isAssumedWriteOnly())
5676       return "writeonly";
5677     return "may-read/write";
5678   }
5679 
5680   /// The set of IR attributes AAMemoryBehavior deals with.
5681   static const Attribute::AttrKind AttrKinds[3];
5682 };
5683 
5684 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5685     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5686 
5687 /// Memory behavior attribute for a floating value.
5688 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5689   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5690 
5691   /// See AbstractAttribute::initialize(...).
5692   void initialize(Attributor &A) override {
5693     AAMemoryBehaviorImpl::initialize(A);
5694     // Initialize the use vector with all direct uses of the associated value.
5695     for (const Use &U : getAssociatedValue().uses())
5696       Uses.insert(&U);
5697   }
5698 
5699   /// See AbstractAttribute::updateImpl(...).
5700   ChangeStatus updateImpl(Attributor &A) override;
5701 
5702   /// See AbstractAttribute::trackStatistics()
5703   void trackStatistics() const override {
5704     if (isAssumedReadNone())
5705       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5706     else if (isAssumedReadOnly())
5707       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5708     else if (isAssumedWriteOnly())
5709       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5710   }
5711 
5712 private:
5713   /// Return true if users of \p UserI might access the underlying
5714   /// variable/location described by \p U and should therefore be analyzed.
5715   bool followUsersOfUseIn(Attributor &A, const Use *U,
5716                           const Instruction *UserI);
5717 
5718   /// Update the state according to the effect of use \p U in \p UserI.
5719   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5720 
5721 protected:
5722   /// Container for (transitive) uses of the associated argument.
5723   SetVector<const Use *> Uses;
5724 };
5725 
5726 /// Memory behavior attribute for function argument.
5727 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5728   AAMemoryBehaviorArgument(const IRPosition &IRP)
5729       : AAMemoryBehaviorFloating(IRP) {}
5730 
5731   /// See AbstractAttribute::initialize(...).
5732   void initialize(Attributor &A) override {
5733     intersectAssumedBits(BEST_STATE);
5734     const IRPosition &IRP = getIRPosition();
5735     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5736     // can query it when we use has/getAttr. That would allow us to reuse the
5737     // initialize of the base class here.
5738     bool HasByVal =
5739         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5740     getKnownStateFromValue(IRP, getState(),
5741                            /* IgnoreSubsumingPositions */ HasByVal);
5742 
    Argument *Arg = getAssociatedArgument();
    if (!Arg || !Arg->getParent()->hasExactDefinition()) {
      indicatePessimisticFixpoint();
    } else {
      // Initialize the use vector with all direct uses of the associated
      // value.
5749       for (const Use &U : Arg->uses())
5750         Uses.insert(&U);
5751     }
5752   }
5753 
5754   ChangeStatus manifest(Attributor &A) override {
5755     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5756     if (!getAssociatedValue().getType()->isPointerTy())
5757       return ChangeStatus::UNCHANGED;
5758 
5759     // TODO: From readattrs.ll: "inalloca parameters are always
5760     //                           considered written"
5761     if (hasAttr({Attribute::InAlloca})) {
5762       removeKnownBits(NO_WRITES);
5763       removeAssumedBits(NO_WRITES);
5764     }
5765     return AAMemoryBehaviorFloating::manifest(A);
5766   }
5767 
5768   /// See AbstractAttribute::trackStatistics()
5769   void trackStatistics() const override {
5770     if (isAssumedReadNone())
5771       STATS_DECLTRACK_ARG_ATTR(readnone)
5772     else if (isAssumedReadOnly())
5773       STATS_DECLTRACK_ARG_ATTR(readonly)
5774     else if (isAssumedWriteOnly())
5775       STATS_DECLTRACK_ARG_ATTR(writeonly)
5776   }
5777 };
5778 
5779 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5780   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
5781       : AAMemoryBehaviorArgument(IRP) {}
5782 
5783   /// See AbstractAttribute::initialize(...).
5784   void initialize(Attributor &A) override {
5785     if (Argument *Arg = getAssociatedArgument()) {
5786       if (Arg->hasByValAttr()) {
5787         addKnownBits(NO_WRITES);
5788         removeKnownBits(NO_READS);
5789         removeAssumedBits(NO_READS);
5790       }
    }
5793     AAMemoryBehaviorArgument::initialize(A);
5794   }
5795 
5796   /// See AbstractAttribute::updateImpl(...).
5797   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
5802     Argument *Arg = getAssociatedArgument();
5803     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5804     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5805     return clampStateAndIndicateChange(
5806         getState(),
5807         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5808   }
5809 
5810   /// See AbstractAttribute::trackStatistics()
5811   void trackStatistics() const override {
5812     if (isAssumedReadNone())
5813       STATS_DECLTRACK_CSARG_ATTR(readnone)
5814     else if (isAssumedReadOnly())
5815       STATS_DECLTRACK_CSARG_ATTR(readonly)
5816     else if (isAssumedWriteOnly())
5817       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5818   }
5819 };
5820 
5821 /// Memory behavior attribute for a call site return position.
5822 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5823   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
5824       : AAMemoryBehaviorFloating(IRP) {}
5825 
5826   /// See AbstractAttribute::manifest(...).
5827   ChangeStatus manifest(Attributor &A) override {
5828     // We do not annotate returned values.
5829     return ChangeStatus::UNCHANGED;
5830   }
5831 
5832   /// See AbstractAttribute::trackStatistics()
5833   void trackStatistics() const override {}
5834 };
5835 
5836 /// An AA to represent the memory behavior function attributes.
5837 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5838   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5839 
5840   /// See AbstractAttribute::updateImpl(Attributor &A).
5841   virtual ChangeStatus updateImpl(Attributor &A) override;
5842 
5843   /// See AbstractAttribute::manifest(...).
5844   ChangeStatus manifest(Attributor &A) override {
5845     Function &F = cast<Function>(getAnchorValue());
5846     if (isAssumedReadNone()) {
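      // `readnone` subsumes the more specific memory location attributes, so
      // drop them to avoid contradicting the manifested attribute.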
5847       F.removeFnAttr(Attribute::ArgMemOnly);
5848       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5849       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5850     }
5851     return AAMemoryBehaviorImpl::manifest(A);
5852   }
5853 
5854   /// See AbstractAttribute::trackStatistics()
5855   void trackStatistics() const override {
5856     if (isAssumedReadNone())
5857       STATS_DECLTRACK_FN_ATTR(readnone)
5858     else if (isAssumedReadOnly())
5859       STATS_DECLTRACK_FN_ATTR(readonly)
5860     else if (isAssumedWriteOnly())
5861       STATS_DECLTRACK_FN_ATTR(writeonly)
5862   }
5863 };
5864 
5865 /// AAMemoryBehavior attribute for call sites.
5866 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5867   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5868 
5869   /// See AbstractAttribute::initialize(...).
5870   void initialize(Attributor &A) override {
5871     AAMemoryBehaviorImpl::initialize(A);
5872     Function *F = getAssociatedFunction();
5873     if (!F || !F->hasExactDefinition())
5874       indicatePessimisticFixpoint();
5875   }
5876 
5877   /// See AbstractAttribute::updateImpl(...).
5878   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
5883     Function *F = getAssociatedFunction();
5884     const IRPosition &FnPos = IRPosition::function(*F);
5885     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5886     return clampStateAndIndicateChange(
5887         getState(),
5888         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5889   }
5890 
5891   /// See AbstractAttribute::trackStatistics()
5892   void trackStatistics() const override {
5893     if (isAssumedReadNone())
5894       STATS_DECLTRACK_CS_ATTR(readnone)
5895     else if (isAssumedReadOnly())
5896       STATS_DECLTRACK_CS_ATTR(readonly)
5897     else if (isAssumedWriteOnly())
5898       STATS_DECLTRACK_CS_ATTR(writeonly)
5899   }
5900 };
5901 
5902 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5903 
5904   // The current assumed state used to determine a change.
5905   auto AssumedState = getAssumed();
5906 
5907   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
5909     // the local state. No further analysis is required as the other memory
5910     // state is as optimistic as it gets.
5911     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
5912       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5913           *this, IRPosition::callsite_function(ICS));
5914       intersectAssumedBits(MemBehaviorAA.getAssumed());
5915       return !isAtFixpoint();
5916     }
5917 
5918     // Remove access kind modifiers if necessary.
5919     if (I.mayReadFromMemory())
5920       removeAssumedBits(NO_READS);
5921     if (I.mayWriteToMemory())
5922       removeAssumedBits(NO_WRITES);
5923     return !isAtFixpoint();
5924   };
5925 
5926   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5927     return indicatePessimisticFixpoint();
5928 
5929   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5930                                         : ChangeStatus::UNCHANGED;
5931 }
5932 
5933 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5934 
5935   const IRPosition &IRP = getIRPosition();
5936   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5937   AAMemoryBehavior::StateType &S = getState();
5938 
5939   // First, check the function scope. We take the known information and we avoid
5940   // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
5942   Argument *Arg = IRP.getAssociatedArgument();
5943   AAMemoryBehavior::base_t FnMemAssumedState =
5944       AAMemoryBehavior::StateType::getWorstState();
5945   if (!Arg || !Arg->hasByValAttr()) {
5946     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5947         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5948     FnMemAssumedState = FnMemAA.getAssumed();
5949     S.addKnownBits(FnMemAA.getKnown());
5950     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5951       return ChangeStatus::UNCHANGED;
5952   }
5953 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
5958   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5959       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5960   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5961     S.intersectAssumedBits(FnMemAssumedState);
5962     return ChangeStatus::CHANGED;
5963   }
5964 
5965   // The current assumed state used to determine a change.
5966   auto AssumedState = S.getAssumed();
5967 
5968   // Liveness information to exclude dead users.
5969   // TODO: Take the FnPos once we have call site specific liveness information.
5970   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5971       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5972       /* TrackDependence */ false);
5973 
5974   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5975   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5976     const Use *U = Uses[i];
5977     Instruction *UserI = cast<Instruction>(U->getUser());
5978     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5979                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5980                       << "]\n");
5981     if (A.isAssumedDead(*U, this, &LivenessAA))
5982       continue;
5983 
5984     // Check if the users of UserI should also be visited.
5985     if (followUsersOfUseIn(A, U, UserI))
5986       for (const Use &UserIUse : UserI->uses())
5987         Uses.insert(&UserIUse);
5988 
5989     // If UserI might touch memory we analyze the use in detail.
5990     if (UserI->mayReadOrWriteMemory())
5991       analyzeUseIn(A, U, UserI);
5992   }
5993 
5994   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5995                                         : ChangeStatus::UNCHANGED;
5996 }
5997 
5998 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5999                                                   const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, so there is no need
  // to follow the users of the load.
6002   if (isa<LoadInst>(UserI))
6003     return false;
6004 
6005   // By default we follow all uses assuming UserI might leak information on U,
6006   // we have special handling for call sites operands though.
6007   ImmutableCallSite ICS(UserI);
6008   if (!ICS || !ICS.isArgOperand(U))
6009     return true;
6010 
6011   // If the use is a call argument known not to be captured, the users of
6012   // the call do not need to be visited because they have to be unrelated to
6013   // the input. Note that this check is not trivial even though we disallow
6014   // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
6017   if (U->get()->getType()->isPointerTy()) {
6018     unsigned ArgNo = ICS.getArgumentNo(U);
6019     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6020         *this, IRPosition::callsite_argument(ICS, ArgNo),
6021         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6022     return !ArgNoCaptureAA.isAssumedNoCapture();
6023   }
6024 
6025   return true;
6026 }
6027 
6028 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6029                                             const Instruction *UserI) {
6030   assert(UserI->mayReadOrWriteMemory());
6031 
6032   switch (UserI->getOpcode()) {
6033   default:
6034     // TODO: Handle all atomics and other side-effect operations we know of.
6035     break;
6036   case Instruction::Load:
6037     // Loads cause the NO_READS property to disappear.
6038     removeAssumedBits(NO_READS);
6039     return;
6040 
6041   case Instruction::Store:
6042     // Stores cause the NO_WRITES property to disappear if the use is the
6043     // pointer operand. Note that we do assume that capturing was taken care of
6044     // somewhere else.
6045     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6046       removeAssumedBits(NO_WRITES);
6047     return;
6048 
6049   case Instruction::Call:
6050   case Instruction::CallBr:
6051   case Instruction::Invoke: {
6052     // For call sites we look at the argument memory behavior attribute (this
6053     // could be recursive!) in order to restrict our own state.
6054     ImmutableCallSite ICS(UserI);
6055 
6056     // Give up on operand bundles.
6057     if (ICS.isBundleOperand(U)) {
6058       indicatePessimisticFixpoint();
6059       return;
6060     }
6061 
    // Calling a function does read the function pointer, and may write it if
    // the function is self-modifying.
6064     if (ICS.isCallee(U)) {
6065       removeAssumedBits(NO_READS);
6066       break;
6067     }
6068 
6069     // Adjust the possible access behavior based on the information on the
6070     // argument.
6071     IRPosition Pos;
6072     if (U->get()->getType()->isPointerTy())
6073       Pos = IRPosition::callsite_argument(ICS, ICS.getArgumentNo(U));
6074     else
6075       Pos = IRPosition::callsite_function(ICS);
6076     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6077         *this, Pos,
6078         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6079     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6080     // and at least "known".
6081     intersectAssumedBits(MemBehaviorAA.getAssumed());
6082     return;
6083   }
  }
6085 
6086   // Generally, look at the "may-properties" and adjust the assumed state if we
6087   // did not trigger special handling before.
6088   if (UserI->mayReadFromMemory())
6089     removeAssumedBits(NO_READS);
6090   if (UserI->mayWriteToMemory())
6091     removeAssumedBits(NO_WRITES);
6092 }
6093 
6094 /// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly, and
/// inaccessiblemem_or_argmemonly.
6097 /// ----------------------------------------------------------------------------
6098 
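// Note on the format: a state in which only argument memory may be accessed
// (i.e., every other NO_* bit is set) prints as "memory:argument", while a
// state with no NO_* bits set prints as "all memory".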
6099 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6100     AAMemoryLocation::MemoryLocationsKind MLK) {
6101   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6102     return "all memory";
6103   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6104     return "no memory";
6105   std::string S = "memory:";
6106   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6107     S += "stack,";
6108   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6109     S += "constant,";
6110   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6111     S += "internal global,";
6112   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6113     S += "external global,";
6114   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6115     S += "argument,";
6116   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6117     S += "inaccessible,";
6118   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6119     S += "malloced,";
6120   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6121     S += "unknown,";
6122   S.pop_back();
6123   return S;
6124 }
6125 
6126 struct AAMemoryLocationImpl : public AAMemoryLocation {
6127 
6128   AAMemoryLocationImpl(const IRPosition &IRP) : AAMemoryLocation(IRP) {}
6129 
6130   /// See AbstractAttribute::initialize(...).
6131   void initialize(Attributor &A) override {
6132     intersectAssumedBits(BEST_STATE);
6133     getKnownStateFromValue(getIRPosition(), getState());
6134     IRAttribute::initialize(A);
6135   }
6136 
  /// Return the memory location information encoded in the IR for \p IRP.
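  /// E.g., `argmemonly` is encoded as knowing that every location other than
  /// argument memory is not accessed, i.e., the bits produced by
  /// inverseLocation(NO_ARGUMENT_MEM, ...) become known.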
6138   static void getKnownStateFromValue(const IRPosition &IRP,
6139                                      BitIntegerState &State,
6140                                      bool IgnoreSubsumingPositions = false) {
6141     SmallVector<Attribute, 2> Attrs;
6142     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6143     for (const Attribute &Attr : Attrs) {
6144       switch (Attr.getKindAsEnum()) {
6145       case Attribute::ReadNone:
6146         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6147         break;
6148       case Attribute::InaccessibleMemOnly:
6149         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6150         break;
6151       case Attribute::ArgMemOnly:
6152         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6153         break;
6154       case Attribute::InaccessibleMemOrArgMemOnly:
6155         State.addKnownBits(
6156             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6157         break;
6158       default:
6159         llvm_unreachable("Unexpected attribute!");
6160       }
6161     }
6162   }
6163 
6164   /// See AbstractAttribute::getDeducedAttributes(...).
6165   void getDeducedAttributes(LLVMContext &Ctx,
6166                             SmallVectorImpl<Attribute> &Attrs) const override {
6167     assert(Attrs.size() == 0);
6168     if (isAssumedReadNone()) {
6169       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6170     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6171       if (isAssumedInaccessibleMemOnly())
6172         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6173       else if (isAssumedArgMemOnly())
6174         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6175       else if (isAssumedInaccessibleOrArgMemOnly())
6176         Attrs.push_back(
6177             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6178     }
6179     assert(Attrs.size() <= 1);
6180   }
6181 
6182   /// See AbstractAttribute::manifest(...).
6183   ChangeStatus manifest(Attributor &A) override {
6184     const IRPosition &IRP = getIRPosition();
6185 
6186     // Check if we would improve the existing attributes first.
6187     SmallVector<Attribute, 4> DeducedAttrs;
6188     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6189     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6190           return IRP.hasAttr(Attr.getKindAsEnum(),
6191                              /* IgnoreSubsumingPositions */ true);
6192         }))
6193       return ChangeStatus::UNCHANGED;
6194 
6195     // Clear existing attributes.
6196     IRP.removeAttrs(AttrKinds);
6197     if (isAssumedReadNone())
6198       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6199 
6200     // Use the generic manifest method.
6201     return IRAttribute::manifest(A);
6202   }
6203 
6204   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6205   bool checkForAllAccessesToMemoryKind(
6206       const function_ref<bool(const Instruction *, const Value *, AccessKind,
6207                               MemoryLocationsKind)> &Pred,
6208       MemoryLocationsKind RequestedMLK) const override {
6209     if (!isValidState())
6210       return false;
6211 
6212     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6213     if (AssumedMLK == NO_LOCATIONS)
6214       return true;
6215 
6216     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6217       if (CurMLK & RequestedMLK)
6218         continue;
6219 
6220       const auto &Accesses = AccessKindAccessesMap.lookup(CurMLK);
6221       for (const AccessInfo &AI : Accesses) {
6222         if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6223           return false;
6224       }
6225     }
6226 
6227     return true;
6228   }
6229 
6230   ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint, this instruction
    // becomes an access for all potential memory location kinds:
6233     // TODO: Add pointers for argmemonly and globals to improve the results of
6234     //       checkForAllAccessesToMemoryKind.
6235     bool Changed = false;
6236     MemoryLocationsKind KnownMLK = getKnown();
6237     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6238     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6239       if (!(CurMLK & KnownMLK))
6240         updateStateAndAccessesMap(getState(), AccessKindAccessesMap, CurMLK, I,
6241                                   nullptr, Changed);
6242     return AAMemoryLocation::indicatePessimisticFixpoint();
6243   }
6244 
6245 protected:
6246   /// Helper struct to tie together an instruction that has a read or write
6247   /// effect with the pointer it accesses (if any).
6248   struct AccessInfo {
6249 
6250     /// The instruction that caused the access.
6251     const Instruction *I;
6252 
6253     /// The base pointer that is accessed, or null if unknown.
6254     const Value *Ptr;
6255 
6256     /// The kind of access (read/write/read+write).
6257     AccessKind Kind;
6258 
6259     bool operator==(const AccessInfo &RHS) const {
6260       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6261     }
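    /// Strict weak ordering over AccessInfo so that AccessInfo itself can
    /// serve as the comparator of the SmallSet in AccessKindAccessesMapTy.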
6262     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6263       if (LHS.I != RHS.I)
6264         return LHS.I < RHS.I;
6265       if (LHS.Ptr != RHS.Ptr)
6266         return LHS.Ptr < RHS.Ptr;
6267       if (LHS.Kind != RHS.Kind)
6268         return LHS.Kind < RHS.Kind;
6269       return false;
6270     }
6271   };
6272 
  /// Mapping from *single* memory location kinds, e.g., local memory, encoded
  /// by the bit NO_LOCAL_MEM, to the accesses encountered for that kind.
6275   using AccessKindAccessesMapTy =
6276       DenseMap<unsigned, SmallSet<AccessInfo, 8, AccessInfo>>;
6277   AccessKindAccessesMapTy AccessKindAccessesMap;
6278 
  /// Return the kind(s) of location that may be accessed by \p I.
6280   AAMemoryLocation::MemoryLocationsKind
6281   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6282 
6283   /// Update the state \p State and the AccessKindAccessesMap given that \p I is
6284   /// an access to a \p MLK memory location with the access pointer \p Ptr.
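  /// E.g., recording a store through a pointer into an internal global
  /// inserts an AccessInfo under NO_GLOBAL_INTERNAL_MEM and clears that bit
  /// from the assumed "not accessed" set.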
6285   static void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6286                                         AccessKindAccessesMapTy &AccessMap,
6287                                         MemoryLocationsKind MLK,
6288                                         const Instruction *I, const Value *Ptr,
6289                                         bool &Changed) {
6290     // TODO: The kind should be determined at the call sites based on the
6291     // information we have there.
6292     AccessKind Kind = READ_WRITE;
6293     if (I) {
6294       Kind = I->mayReadFromMemory() ? READ : NONE;
6295       Kind = AccessKind(Kind | (I->mayWriteToMemory() ? WRITE : NONE));
6296     }
6297 
6298     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6299     Changed |= AccessMap[MLK].insert(AccessInfo{I, Ptr, Kind}).second;
6300     State.removeAssumedBits(MLK);
6301   }
6302 
  /// Determine the underlying location kinds for \p Ptr, e.g., globals or
6304   /// arguments, and update the state and access map accordingly.
6305   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6306                           AAMemoryLocation::StateType &State, bool &Changed);
6307 
6308   /// The set of IR attributes AAMemoryLocation deals with.
6309   static const Attribute::AttrKind AttrKinds[4];
6310 };
6311 
6312 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6313     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6314     Attribute::InaccessibleMemOrArgMemOnly};
6315 
6316 void AAMemoryLocationImpl::categorizePtrValue(
6317     Attributor &A, const Instruction &I, const Value &Ptr,
6318     AAMemoryLocation::StateType &State, bool &Changed) {
6319   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6320                     << Ptr << " ["
6321                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6322 
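  // Strip (nested) GEP chains down to the base pointer; the offsets are
  // irrelevant for deciding which kind of memory is accessed.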
6323   auto StripGEPCB = [](Value *V) -> Value * {
6324     auto *GEP = dyn_cast<GEPOperator>(V);
6325     while (GEP) {
6326       V = GEP->getPointerOperand();
6327       GEP = dyn_cast<GEPOperator>(V);
6328     }
6329     return V;
6330   };
6331 
6332   auto VisitValueCB = [&](Value &V, AAMemoryLocation::StateType &T,
6333                           bool Stripped) -> bool {
6334     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6335     if (isa<UndefValue>(V))
6336       return true;
6337     if (auto *Arg = dyn_cast<Argument>(&V)) {
6338       if (Arg->hasByValAttr())
6339         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I,
6340                                   &V, Changed);
6341       else
6342         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_ARGUMENT_MEM, &I,
6343                                   &V, Changed);
6344       return true;
6345     }
6346     if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6347       if (GV->hasLocalLinkage())
6348         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6349                                   NO_GLOBAL_INTERNAL_MEM, &I, &V, Changed);
6350       else
6351         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6352                                   NO_GLOBAL_EXTERNAL_MEM, &I, &V, Changed);
6353       return true;
6354     }
6355     if (isa<AllocaInst>(V)) {
6356       updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I, &V,
6357                                 Changed);
6358       return true;
6359     }
6360     if (ImmutableCallSite ICS = ImmutableCallSite(&V)) {
6361       const auto &NoAliasAA =
6362           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
6363       if (NoAliasAA.isAssumedNoAlias()) {
6364         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_MALLOCED_MEM, &I,
6365                                   &V, Changed);
6366         return true;
6367       }
6368     }
6369 
6370     updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_UNKOWN_MEM, &I, &V,
6371                               Changed);
6372     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6373                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6374                       << "\n");
6375     return true;
6376   };
6377 
6378   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6379           A, IRPosition::value(Ptr), *this, State, VisitValueCB,
6380           /* MaxValues */ 32, StripGEPCB)) {
6381     LLVM_DEBUG(
6382         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6383     updateStateAndAccessesMap(State, AccessKindAccessesMap, NO_UNKOWN_MEM, &I,
6384                               nullptr, Changed);
6385   } else {
6386     LLVM_DEBUG(
6387         dbgs()
6388         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6389         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6390   }
6391 }
6392 
6393 AAMemoryLocation::MemoryLocationsKind
6394 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6395                                                   bool &Changed) {
6396   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6397                     << I << "\n");
6398 
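  // Start from the optimistic assumption that no location is accessed and
  // remove "not accessed" bits as accesses are discovered.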
6399   AAMemoryLocation::StateType AccessedLocs;
6400   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6401 
6402   if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
6403 
    // First check if we assume any memory access is visible.
6405     const auto &ICSMemLocationAA =
6406         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(ICS));
6407     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6408                       << " [" << ICSMemLocationAA << "]\n");
6409 
6410     if (ICSMemLocationAA.isAssumedReadNone())
6411       return NO_LOCATIONS;
6412 
6413     if (ICSMemLocationAA.isAssumedInaccessibleMemOnly()) {
6414       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap,
6415                                 NO_INACCESSIBLE_MEM, &I, nullptr, Changed);
6416       return AccessedLocs.getAssumed();
6417     }
6418 
6419     uint32_t ICSAssumedNotAccessedLocs =
6420         ICSMemLocationAA.getAssumedNotAccessedLocation();
6421 
    // Set the argmemonly and global bits as we handle them separately below.
6423     uint32_t ICSAssumedNotAccessedLocsNoArgMem =
6424         ICSAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6425 
6426     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6427       if (ICSAssumedNotAccessedLocsNoArgMem & CurMLK)
6428         continue;
6429       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, CurMLK, &I,
6430                                 nullptr, Changed);
6431     }
6432 
6433     // Now handle global memory if it might be accessed.
6434     bool HasGlobalAccesses = !(ICSAssumedNotAccessedLocs & NO_GLOBAL_MEM);
6435     if (HasGlobalAccesses) {
6436       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6437                             AccessKind Kind, MemoryLocationsKind MLK) {
6438         updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, MLK, &I,
6439                                   Ptr, Changed);
6440         return true;
6441       };
6442       if (!ICSMemLocationAA.checkForAllAccessesToMemoryKind(
6443               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6444         return AccessedLocs.getWorstState();
6445     }
6446 
6447     LLVM_DEBUG(
6448         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6449                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6450 
6451     // Now handle argument memory if it might be accessed.
6452     bool HasArgAccesses = !(ICSAssumedNotAccessedLocs & NO_ARGUMENT_MEM);
6453     if (HasArgAccesses) {
6454       for (unsigned ArgNo = 0, e = ICS.getNumArgOperands(); ArgNo < e;
6455            ++ArgNo) {
6456 
6457         // Skip non-pointer arguments.
6458         const Value *ArgOp = ICS.getArgOperand(ArgNo);
6459         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6460           continue;
6461 
6462         // Skip readnone arguments.
6463         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(ICS, ArgNo);
6464         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6465             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6466 
6467         if (ArgOpMemLocationAA.isAssumedReadNone())
6468           continue;
6469 
6470         // Categorize potentially accessed pointer arguments as if there was an
6471         // access instruction with them as pointer.
6472         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6473       }
6474     }
6475 
6476     LLVM_DEBUG(
6477         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6478                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6479 
6480     return AccessedLocs.getAssumed();
6481   }
6482 
6483   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6484     LLVM_DEBUG(
6485         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6486                << I << " [" << *Ptr << "]\n");
6487     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6488     return AccessedLocs.getAssumed();
6489   }
6490 
6491   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6492                     << I << "\n");
6493   updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, NO_UNKOWN_MEM,
6494                             &I, nullptr, Changed);
6495   return AccessedLocs.getAssumed();
6496 }
6497 
/// An AA to represent the memory location function attributes.
6499 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6500   AAMemoryLocationFunction(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6501 
6502   /// See AbstractAttribute::updateImpl(Attributor &A).
6503   virtual ChangeStatus updateImpl(Attributor &A) override {
6504 
6505     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6506         *this, getIRPosition(), /* TrackDependence */ false);
6507     if (MemBehaviorAA.isAssumedReadNone()) {
6508       if (MemBehaviorAA.isKnownReadNone())
6509         return indicateOptimisticFixpoint();
6510       assert(isAssumedReadNone() &&
6511              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6512       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6513       return ChangeStatus::UNCHANGED;
6514     }
6515 
6516     // The current assumed state used to determine a change.
6517     auto AssumedState = getAssumed();
6518     bool Changed = false;
6519 
6520     auto CheckRWInst = [&](Instruction &I) {
6521       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6522       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6523                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6524       removeAssumedBits(inverseLocation(MLK, false, false));
6525       return true;
6526     };
6527 
6528     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6529       return indicatePessimisticFixpoint();
6530 
6531     Changed |= AssumedState != getAssumed();
6532     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6533   }
6534 
6535   /// See AbstractAttribute::trackStatistics()
6536   void trackStatistics() const override {
6537     if (isAssumedReadNone())
6538       STATS_DECLTRACK_FN_ATTR(readnone)
6539     else if (isAssumedArgMemOnly())
6540       STATS_DECLTRACK_FN_ATTR(argmemonly)
6541     else if (isAssumedInaccessibleMemOnly())
6542       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6543     else if (isAssumedInaccessibleOrArgMemOnly())
6544       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6545   }
6546 };
6547 
6548 /// AAMemoryLocation attribute for call sites.
6549 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6550   AAMemoryLocationCallSite(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6551 
6552   /// See AbstractAttribute::initialize(...).
6553   void initialize(Attributor &A) override {
6554     AAMemoryLocationImpl::initialize(A);
6555     Function *F = getAssociatedFunction();
6556     if (!F || !F->hasExactDefinition())
6557       indicatePessimisticFixpoint();
6558   }
6559 
6560   /// See AbstractAttribute::updateImpl(...).
6561   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6566     Function *F = getAssociatedFunction();
6567     const IRPosition &FnPos = IRPosition::function(*F);
6568     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6569     bool Changed = false;
6570     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6571                           AccessKind Kind, MemoryLocationsKind MLK) {
6572       updateStateAndAccessesMap(getState(), AccessKindAccessesMap, MLK, I, Ptr,
6573                                 Changed);
6574       return true;
6575     };
6576     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6577       return indicatePessimisticFixpoint();
6578     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6579   }
6580 
6581   /// See AbstractAttribute::trackStatistics()
6582   void trackStatistics() const override {
6583     if (isAssumedReadNone())
6584       STATS_DECLTRACK_CS_ATTR(readnone)
6585   }
6586 };
6587 
6588 /// ------------------ Value Constant Range Attribute -------------------------
6589 
6590 struct AAValueConstantRangeImpl : AAValueConstantRange {
6591   using StateType = IntegerRangeState;
6592   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
6593 
6594   /// See AbstractAttribute::getAsStr().
6595   const std::string getAsStr() const override {
6596     std::string Str;
6597     llvm::raw_string_ostream OS(Str);
6598     OS << "range(" << getBitWidth() << ")<";
6599     getKnown().print(OS);
6600     OS << " / ";
6601     getAssumed().print(OS);
6602     OS << ">";
6603     return OS.str();
6604   }
6605 
6606   /// Helper function to get a SCEV expr for the associated value at program
6607   /// point \p I.
6608   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6609     if (!getAnchorScope())
6610       return nullptr;
6611 
6612     ScalarEvolution *SE =
6613         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6614             *getAnchorScope());
6615 
6616     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6617         *getAnchorScope());
6618 
6619     if (!SE || !LI)
6620       return nullptr;
6621 
6622     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6623     if (!I)
6624       return S;
6625 
6626     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6627   }
6628 
6629   /// Helper function to get a range from SCEV for the associated value at
6630   /// program point \p I.
6631   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6632                                          const Instruction *I = nullptr) const {
6633     if (!getAnchorScope())
6634       return getWorstState(getBitWidth());
6635 
6636     ScalarEvolution *SE =
6637         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6638             *getAnchorScope());
6639 
6640     const SCEV *S = getSCEV(A, I);
6641     if (!SE || !S)
6642       return getWorstState(getBitWidth());
6643 
6644     return SE->getUnsignedRange(S);
6645   }
6646 
6647   /// Helper function to get a range from LVI for the associated value at
6648   /// program point \p I.
6649   ConstantRange
6650   getConstantRangeFromLVI(Attributor &A,
6651                           const Instruction *CtxI = nullptr) const {
6652     if (!getAnchorScope())
6653       return getWorstState(getBitWidth());
6654 
6655     LazyValueInfo *LVI =
6656         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6657             *getAnchorScope());
6658 
6659     if (!LVI || !CtxI)
6660       return getWorstState(getBitWidth());
6661     return LVI->getConstantRange(&getAssociatedValue(),
6662                                  const_cast<BasicBlock *>(CtxI->getParent()),
6663                                  const_cast<Instruction *>(CtxI));
6664   }
6665 
6666   /// See AAValueConstantRange::getKnownConstantRange(..).
6667   ConstantRange
6668   getKnownConstantRange(Attributor &A,
6669                         const Instruction *CtxI = nullptr) const override {
6670     if (!CtxI || CtxI == getCtxI())
6671       return getKnown();
6672 
6673     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6674     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6675     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6676   }
6677 
6678   /// See AAValueConstantRange::getAssumedConstantRange(..).
6679   ConstantRange
6680   getAssumedConstantRange(Attributor &A,
6681                           const Instruction *CtxI = nullptr) const override {
6682     // TODO: Make SCEV use Attributor assumptions.
6683     //       We may be able to bound a variable range via assumptions in
6684     //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known
6685     //       to evolve to x^2 + x, then we can say that y is in [2, 12].
6686 
6687     if (!CtxI || CtxI == getCtxI())
6688       return getAssumed();
6689 
6690     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6691     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6692     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6693   }
6694 
6695   /// See AbstractAttribute::initialize(..).
6696   void initialize(Attributor &A) override {
6697     // Intersect a range given by SCEV.
6698     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6699 
6700     // Intersect a range given by LVI.
6701     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6702   }
6703 
6704   /// Helper function to create MDNode for range metadata.
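  /// For example, the range [0,10) on an i32 value becomes the two-operand
  /// node !{i32 0, i32 10}; the bounds are encoded as a [Lower, Upper) pair.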
6705   static MDNode *
6706   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6707                             const ConstantRange &AssumedConstantRange) {
6708     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6709                                   Ty, AssumedConstantRange.getLower())),
6710                               ConstantAsMetadata::get(ConstantInt::get(
6711                                   Ty, AssumedConstantRange.getUpper()))};
6712     return MDNode::get(Ctx, LowAndHigh);
6713   }
6714 
6715   /// Return true if \p Assumed is a strictly better range than the one
  /// encoded in \p KnownRanges, or if there is no known range yet.
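  /// Example: with known metadata encoding [0,10), an assumed range of [2,5)
  /// is strictly contained and therefore better; [0,10) itself is not.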
6716   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6717 
6718     if (Assumed.isFullSet())
6719       return false;
6720 
6721     if (!KnownRanges)
6722       return true;
6723 
6724     // If multiple ranges are annotated in the IR, we give up annotating the
6725     // assumed range for now.
6726 
6727     // TODO: If there exists a known range that contains the assumed range,
6728     // we can say the assumed range is better.
6729     if (KnownRanges->getNumOperands() > 2)
6730       return false;
6731 
6732     ConstantInt *Lower =
6733         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6734     ConstantInt *Upper =
6735         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6736 
6737     ConstantRange Known(Lower->getValue(), Upper->getValue());
6738     return Known.contains(Assumed) && Known != Assumed;
6739   }
6740 
6741   /// Helper function to set range metadata.
6742   static bool
6743   setRangeMetadataIfisBetterRange(Instruction *I,
6744                                   const ConstantRange &AssumedConstantRange) {
6745     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6746     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6747       if (!AssumedConstantRange.isEmptySet()) {
6748         I->setMetadata(LLVMContext::MD_range,
6749                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6750                                                  AssumedConstantRange));
6751         return true;
6752       }
6753     }
6754     return false;
6755   }
6756 
6757   /// See AbstractAttribute::manifest()
6758   ChangeStatus manifest(Attributor &A) override {
6759     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6760     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6761     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6762 
6763     auto &V = getAssociatedValue();
6764     if (!AssumedConstantRange.isEmptySet() &&
6765         !AssumedConstantRange.isSingleElement()) {
6766       if (Instruction *I = dyn_cast<Instruction>(&V))
6767         if (isa<CallInst>(I) || isa<LoadInst>(I))
6768           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6769             Changed = ChangeStatus::CHANGED;
6770     }
6771 
6772     return Changed;
6773   }
6774 };
6775 
6776 struct AAValueConstantRangeArgument final
6777     : AAArgumentFromCallSiteArguments<
6778           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6779   AAValueConstantRangeArgument(const IRPosition &IRP)
6780       : AAArgumentFromCallSiteArguments<
6781             AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>(
6782             IRP) {}
6783 
6784   /// See AbstractAttribute::trackStatistics()
6785   void trackStatistics() const override {
6786     STATS_DECLTRACK_ARG_ATTR(value_range)
6787   }
6788 };
6789 
6790 struct AAValueConstantRangeReturned
6791     : AAReturnedFromReturnedValues<AAValueConstantRange,
6792                                    AAValueConstantRangeImpl> {
6793   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6794                                             AAValueConstantRangeImpl>;
6795   AAValueConstantRangeReturned(const IRPosition &IRP) : Base(IRP) {}
6796 
6797   /// See AbstractAttribute::initialize(...).
6798   void initialize(Attributor &A) override {}
6799 
6800   /// See AbstractAttribute::trackStatistics()
6801   void trackStatistics() const override {
6802     STATS_DECLTRACK_FNRET_ATTR(value_range)
6803   }
6804 };
6805 
6806 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6807   AAValueConstantRangeFloating(const IRPosition &IRP)
6808       : AAValueConstantRangeImpl(IRP) {}
6809 
6810   /// See AbstractAttribute::initialize(...).
6811   void initialize(Attributor &A) override {
6812     AAValueConstantRangeImpl::initialize(A);
6813     Value &V = getAssociatedValue();
6814 
6815     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6816       unionAssumed(ConstantRange(C->getValue()));
6817       indicateOptimisticFixpoint();
6818       return;
6819     }
6820 
6821     if (isa<UndefValue>(&V)) {
6822       // Collapse the undef state to 0.
6823       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6824       indicateOptimisticFixpoint();
6825       return;
6826     }
6827 
6828     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6829       return;
6830     // If it is a load instruction with range metadata, use it.
6831     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6832       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6833         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6834         return;
6835       }
6836 
6837     // We can work with PHI and select instructions as we traverse their
6838     // operands during the update.
6839     if (isa<SelectInst>(V) || isa<PHINode>(V))
6840       return;
6841 
6842     // Otherwise we give up.
6843     indicatePessimisticFixpoint();
6844 
6845     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6846                       << getAssociatedValue() << "\n");
6847   }
6848 
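  // Illustrative example (operand ranges assumed): for `%a = add i32 %x, %y`
  // with %x assumed in [1,3) and %y assumed in [10,12), the binaryOp(Add)
  // call below yields the assumed range [11,14) for %a.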
6849   bool calculateBinaryOperator(
6850       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6851       Instruction *CtxI,
6852       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6853     Value *LHS = BinOp->getOperand(0);
6854     Value *RHS = BinOp->getOperand(1);
6855     // TODO: Allow non integers as well.
6856     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6857       return false;
6858 
6859     auto &LHSAA =
6860         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6861     QuerriedAAs.push_back(&LHSAA);
6862     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6863 
6864     auto &RHSAA =
6865         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6866     QuerriedAAs.push_back(&RHSAA);
6867     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6868 
6869     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6870 
6871     T.unionAssumed(AssumedRange);
6872 
6873     // TODO: Track a known state too.
6874 
6875     return T.isValidState();
6876   }
6877 
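  // Illustrative example (operand range assumed): for
  // `%z = zext i8 %x to i32` with %x assumed in [10,20), the castOp(ZExt, 32)
  // call below yields the 32-bit range [10,20) for %z.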
6878   bool calculateCastInst(
6879       Attributor &A, CastInst *CastI, IntegerRangeState &T, Instruction *CtxI,
6880       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6881     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6882     // TODO: Allow non integers as well.
6883     Value &OpV = *CastI->getOperand(0);
6884     if (!OpV.getType()->isIntegerTy())
6885       return false;
6886 
6887     auto &OpAA =
6888         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6889     QuerriedAAs.push_back(&OpAA);
6890     T.unionAssumed(
6891         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6892     return T.isValidState();
6893   }
6894 
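  // Illustrative example (operand ranges assumed): for
  // `%c = icmp ult i32 %x, %y` with %x assumed in [0,4) and %y assumed in
  // [10,20), the satisfying region for `ult` w.r.t. [10,20) is [0,10); it
  // contains [0,4), so %c must be true and the single value 1 is assumed.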
6895   bool
6896   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6897                    Instruction *CtxI,
6898                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6899     Value *LHS = CmpI->getOperand(0);
6900     Value *RHS = CmpI->getOperand(1);
6901     // TODO: Allow non integers as well.
6902     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6903       return false;
6904 
6905     auto &LHSAA =
6906         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6907     QuerriedAAs.push_back(&LHSAA);
6908     auto &RHSAA =
6909         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6910     QuerriedAAs.push_back(&RHSAA);
6911 
6912     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6913     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6914 
6915     // If one of them is the empty set, we cannot decide.
6916     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6917       return true;
6918 
6919     bool MustTrue = false, MustFalse = false;
6920 
6921     auto AllowedRegion =
6922         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6923 
6924     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6925         CmpI->getPredicate(), RHSAARange);
6926 
6927     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6928       MustFalse = true;
6929 
6930     if (SatisfyingRegion.contains(LHSAARange))
6931       MustTrue = true;
6932 
6933     assert((!MustTrue || !MustFalse) &&
6934            "Either MustTrue or MustFalse should be false!");
6935 
6936     if (MustTrue)
6937       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6938     else if (MustFalse)
6939       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6940     else
6941       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6942 
6943     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6944                       << " " << RHSAA << "\n");
6945 
6946     // TODO: Track a known state too.
6947     return T.isValidState();
6948   }
6949 
6950   /// See AbstractAttribute::updateImpl(...).
6951   ChangeStatus updateImpl(Attributor &A) override {
6952     Instruction *CtxI = getCtxI();
6953     auto VisitValueCB = [&](Value &V, IntegerRangeState &T,
6954                             bool Stripped) -> bool {
6955       Instruction *I = dyn_cast<Instruction>(&V);
6956       if (!I) {
6957         // If the value is not an instruction, we query the Attributor for the
6958         // abstract attribute of the value itself.
6959         const auto &AA =
6960             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6961 
6962         // We do not clamp so we can make use of the program point CtxI.
6963         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6964 
6965         return T.isValidState();
6966       }
6967 
6968       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6969       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6970         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6971           return false;
6972       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6973         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6974           return false;
6975       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6976         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6977           return false;
6978       } else {
6979         // Give up on all other instructions.
6980         // TODO: Add other instructions
6981 
6982         T.indicatePessimisticFixpoint();
6983         return false;
6984       }
6985 
6986       // Catch circular reasoning in a pessimistic way for now.
6987       // TODO: Check how the range evolves and if we stripped anything, see also
6988       //       AADereferenceable or AAAlign for similar situations.
6989       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6990         if (QueriedAA != this)
6991           continue;
6992         // If we are in a steady state we do not need to worry.
6993         if (T.getAssumed() == getState().getAssumed())
6994           continue;
6995         T.indicatePessimisticFixpoint();
6996       }
6997 
6998       return T.isValidState();
6999     };
7000 
7001     IntegerRangeState T(getBitWidth());
7002 
7003     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7004             A, getIRPosition(), *this, T, VisitValueCB))
7005       return indicatePessimisticFixpoint();
7006 
7007     return clampStateAndIndicateChange(getState(), T);
7008   }
7009 
7010   /// See AbstractAttribute::trackStatistics()
7011   void trackStatistics() const override {
7012     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7013   }
7014 };
7015 
7016 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7017   AAValueConstantRangeFunction(const IRPosition &IRP)
7018       : AAValueConstantRangeImpl(IRP) {}
7019 
7020   /// See AbstractAttribute::updateImpl(...).
7021   ChangeStatus updateImpl(Attributor &A) override {
7022     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7023                      "not be called");
7024   }
7025 
7026   /// See AbstractAttribute::trackStatistics()
7027   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7028 };
7029 
7030 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7031   AAValueConstantRangeCallSite(const IRPosition &IRP)
7032       : AAValueConstantRangeFunction(IRP) {}
7033 
7034   /// See AbstractAttribute::trackStatistics()
7035   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7036 };
7037 
7038 struct AAValueConstantRangeCallSiteReturned
7039     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7040                                      AAValueConstantRangeImpl> {
7041   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
7042       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7043                                        AAValueConstantRangeImpl>(IRP) {}
7044 
7045   /// See AbstractAttribute::initialize(...).
7046   void initialize(Attributor &A) override {
7047     // If it is a call instruction with range metadata, use the metadata.
7048     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7049       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7050         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7051 
7052     AAValueConstantRangeImpl::initialize(A);
7053   }
7054 
7055   /// See AbstractAttribute::trackStatistics()
7056   void trackStatistics() const override {
7057     STATS_DECLTRACK_CSRET_ATTR(value_range)
7058   }
7059 };

7060 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7061   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
7062       : AAValueConstantRangeFloating(IRP) {}
7063 
7064   /// See AbstractAttribute::trackStatistics()
7065   void trackStatistics() const override {
7066     STATS_DECLTRACK_CSARG_ATTR(value_range)
7067   }
7068 };

7069 /// ----------------------------------------------------------------------------
7070 ///                               Attributor
7071 /// ----------------------------------------------------------------------------
7072 
7073 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
7074                                const AAIsDead *FnLivenessAA,
7075                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7076   const IRPosition &IRP = AA.getIRPosition();
7077   if (!Functions.count(IRP.getAnchorScope()))
7078     return false;
7079   return isAssumedDead(IRP, &AA, FnLivenessAA, CheckBBLivenessOnly, DepClass);
7080 }
7081 
7082 bool Attributor::isAssumedDead(const Use &U,
7083                                const AbstractAttribute *QueryingAA,
7084                                const AAIsDead *FnLivenessAA,
7085                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7086   Instruction *UserI = dyn_cast<Instruction>(U.getUser());
7087   if (!UserI)
7088     return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
7089                          CheckBBLivenessOnly, DepClass);
7090 
7091   if (CallSite CS = CallSite(UserI)) {
7092     // For call site argument uses we can check if the argument is
7093     // unused/dead.
7094     if (CS.isArgOperand(&U)) {
7095       const IRPosition &CSArgPos =
7096           IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
7097       return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
7098                            CheckBBLivenessOnly, DepClass);
7099     }
7100   } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
7101     const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
7102     return isAssumedDead(RetPos, QueryingAA, FnLivenessAA, CheckBBLivenessOnly,
7103                          DepClass);
7104   } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
7105     BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
7106     return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
7107                          CheckBBLivenessOnly, DepClass);
7108   }
7109 
7110   return isAssumedDead(IRPosition::value(*UserI), QueryingAA, FnLivenessAA,
7111                        CheckBBLivenessOnly, DepClass);
7112 }
7113 
7114 bool Attributor::isAssumedDead(const Instruction &I,
7115                                const AbstractAttribute *QueryingAA,
7116                                const AAIsDead *FnLivenessAA,
7117                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7118   if (!FnLivenessAA)
7119     FnLivenessAA = lookupAAFor<AAIsDead>(IRPosition::function(*I.getFunction()),
7120                                          QueryingAA,
7121                                          /* TrackDependence */ false);
7122 
7123   // If we have a context instruction and a liveness AA we use it.
7124   if (FnLivenessAA &&
7125       FnLivenessAA->getIRPosition().getAnchorScope() == I.getFunction() &&
7126       FnLivenessAA->isAssumedDead(&I)) {
7127     if (QueryingAA)
7128       recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
7129     return true;
7130   }
7131 
7132   if (CheckBBLivenessOnly)
7133     return false;
7134 
7135   const AAIsDead &IsDeadAA = getOrCreateAAFor<AAIsDead>(
7136       IRPosition::value(I), QueryingAA, /* TrackDependence */ false);
7137   // Don't check liveness for AAIsDead.
7138   if (QueryingAA == &IsDeadAA)
7139     return false;
7140 
7141   if (IsDeadAA.isAssumedDead()) {
7142     if (QueryingAA)
7143       recordDependence(IsDeadAA, *QueryingAA, DepClass);
7144     return true;
7145   }
7146 
7147   return false;
7148 }
7149 
7150 bool Attributor::isAssumedDead(const IRPosition &IRP,
7151                                const AbstractAttribute *QueryingAA,
7152                                const AAIsDead *FnLivenessAA,
7153                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7154   Instruction *CtxI = IRP.getCtxI();
7155   if (CtxI &&
7156       isAssumedDead(*CtxI, QueryingAA, FnLivenessAA,
7157                     /* CheckBBLivenessOnly */ true,
7158                     CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
7159     return true;
7160 
7161   if (CheckBBLivenessOnly)
7162     return false;
7163 
7164   // If we haven't succeeded we query the specific liveness info for the IRP.
7165   const AAIsDead *IsDeadAA;
7166   if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
7167     IsDeadAA = &getOrCreateAAFor<AAIsDead>(
7168         IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
7169         QueryingAA, /* TrackDependence */ false);
7170   else
7171     IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA,
7172                                            /* TrackDependence */ false);
7173   // Don't check liveness for AAIsDead.
7174   if (QueryingAA == IsDeadAA)
7175     return false;
7176 
7177   if (IsDeadAA->isAssumedDead()) {
7178     if (QueryingAA)
7179       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
7180     return true;
7181   }
7182 
7183   return false;
7184 }
7185 
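// Usage sketch (illustrative; `isSafeUse` is a hypothetical helper): an
// abstract attribute can visit all assumed-live uses of its associated value
// and transitively follow selected users:
//
//   auto UsePred = [&](const Use &U, bool &Follow) {
//     Follow = isa<BitCastInst>(U.getUser()); // Also visit the cast's uses.
//     return isSafeUse(U);                    // Abort on the first bad use.
//   };
//   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
//     return indicatePessimisticFixpoint();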
7186 bool Attributor::checkForAllUses(
7187     const function_ref<bool(const Use &, bool &)> &Pred,
7188     const AbstractAttribute &QueryingAA, const Value &V,
7189     DepClassTy LivenessDepClass) {
7190 
7191   // Check the trivial case first as it catches void values.
7192   if (V.use_empty())
7193     return true;
7194 
7195   // If the value is replaced by another one, for now a constant, we do not
7196   // have uses to check. Note that this requires users of `checkForAllUses`
7197   // not to recurse but to instead use the `Follow` callback argument to look
7198   // at transitive users; that should be clear from the argument's presence.
7199   bool UsedAssumedInformation = false;
7200   Optional<ConstantInt *> CI =
7201       getAssumedConstant(*this, V, QueryingAA, UsedAssumedInformation);
7202   if (CI.hasValue() && CI.getValue()) {
7203     LLVM_DEBUG(dbgs() << "[Attributor] Value is simplified, uses skipped: " << V
7204                       << " -> " << *CI.getValue() << "\n");
7205     return true;
7206   }
7207 
7208   const IRPosition &IRP = QueryingAA.getIRPosition();
7209   SmallVector<const Use *, 16> Worklist;
7210   SmallPtrSet<const Use *, 16> Visited;
7211 
7212   for (const Use &U : V.uses())
7213     Worklist.push_back(&U);
7214 
7215   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
7216                     << " initial uses to check\n");
7217 
7218   const Function *ScopeFn = IRP.getAnchorScope();
7219   const auto *LivenessAA =
7220       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
7221                                     /* TrackDependence */ false)
7222               : nullptr;
7223 
7224   while (!Worklist.empty()) {
7225     const Use *U = Worklist.pop_back_val();
7226     if (!Visited.insert(U).second)
7227       continue;
7228     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << " in "
7229                       << *U->getUser() << "\n");
7230     if (isAssumedDead(*U, &QueryingAA, LivenessAA,
7231                       /* CheckBBLivenessOnly */ false, LivenessDepClass)) {
7232       LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
7233       continue;
7234     }
7235 
7236     bool Follow = false;
7237     if (!Pred(*U, Follow))
7238       return false;
7239     if (!Follow)
7240       continue;
7241     for (const Use &UU : U->getUser()->uses())
7242       Worklist.push_back(&UU);
7243   }
7244 
7245   return true;
7246 }
7247 
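// Usage sketch (illustrative): an abstract attribute can require a property
// at every call site of its associated function; if not all call sites are
// known, e.g., for externally visible functions, this fails conservatively:
//
//   auto CallSitePred = [&](AbstractCallSite ACS) {
//     return ACS.isDirectCall(); // Example property to check.
//   };
//   bool AllCallSitesKnown;
//   if (!A.checkForAllCallSites(CallSitePred, *this,
//                               /* RequireAllCallSites */ true,
//                               AllCallSitesKnown))
//     return indicatePessimisticFixpoint();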
7248 bool Attributor::checkForAllCallSites(
7249     const function_ref<bool(AbstractCallSite)> &Pred,
7250     const AbstractAttribute &QueryingAA, bool RequireAllCallSites,
7251     bool &AllCallSitesKnown) {
7252   // We can try to determine information from the call sites. However, this
7253   // is only possible if all call sites are known, hence the function must
7254   // have internal linkage.
7255   const IRPosition &IRP = QueryingAA.getIRPosition();
7256   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7257   if (!AssociatedFunction) {
7258     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
7259                       << "\n");
7260     AllCallSitesKnown = false;
7261     return false;
7262   }
7263 
7264   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
7265                               &QueryingAA, AllCallSitesKnown);
7266 }
7267 
7268 bool Attributor::checkForAllCallSites(
7269     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
7270     bool RequireAllCallSites, const AbstractAttribute *QueryingAA,
7271     bool &AllCallSitesKnown) {
7272   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
7273     LLVM_DEBUG(
7274         dbgs()
7275         << "[Attributor] Function " << Fn.getName()
7276         << " has no internal linkage, hence not all call sites are known\n");
7277     AllCallSitesKnown = false;
7278     return false;
7279   }
7280 
7281   // If we do not require all call sites we might not see all of them.
7282   AllCallSitesKnown = RequireAllCallSites;
7283 
7284   for (const Use &U : Fn.uses()) {
7285     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << *U << " in "
7286                       << *U.getUser() << "\n");
7287     if (isAssumedDead(U, QueryingAA, nullptr, /* CheckBBLivenessOnly */ true)) {
7288       LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
7289       continue;
7290     }
7291 
7292     AbstractCallSite ACS(&U);
7293     if (!ACS) {
7294       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
7295                         << " has non call site use " << *U.get() << " in "
7296                         << *U.getUser() << "\n");
7297       // BlockAddress users are allowed.
7298       if (isa<BlockAddress>(U.getUser()))
7299         continue;
7300       return false;
7301     }
7302 
7303     const Use *EffectiveUse =
7304         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
7305     if (!ACS.isCallee(EffectiveUse)) {
7306       if (!RequireAllCallSites)
7307         continue;
7308       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
7309                         << " is an invalid use of " << Fn.getName() << "\n");
7310       return false;
7311     }
7312 
7313     if (Pred(ACS))
7314       continue;
7315 
7316     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
7317                       << *ACS.getInstruction() << "\n");
7318     return false;
7319   }
7320 
7321   return true;
7322 }
7323 
7324 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
7325     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
7326         &Pred,
7327     const AbstractAttribute &QueryingAA) {
7328 
7329   const IRPosition &IRP = QueryingAA.getIRPosition();
7330   // Since we need to provide return instructions we have to have an exact
7331   // definition.
7332   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7333   if (!AssociatedFunction)
7334     return false;
7335 
7336   // If this is a call site query we use the call site specific return values
7337   // and liveness information.
7338   // TODO: use the function scope once we have call site AAReturnedValues.
7339   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7340   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
7341   if (!AARetVal.getState().isValidState())
7342     return false;
7343 
7344   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
7345 }
7346 
7347 bool Attributor::checkForAllReturnedValues(
7348     const function_ref<bool(Value &)> &Pred,
7349     const AbstractAttribute &QueryingAA) {
7350 
7351   const IRPosition &IRP = QueryingAA.getIRPosition();
7352   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7353   if (!AssociatedFunction)
7354     return false;
7355 
7356   // TODO: use the function scope once we have call site AAReturnedValues.
7357   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7358   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
7359   if (!AARetVal.getState().isValidState())
7360     return false;
7361 
7362   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
7363       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
7364         return Pred(RV);
7365       });
7366 }
7367 
7368 static bool checkForAllInstructionsImpl(
7369     Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
7370     const function_ref<bool(Instruction &)> &Pred,
7371     const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA,
7372     const ArrayRef<unsigned> &Opcodes, bool CheckBBLivenessOnly = false) {
7373   for (unsigned Opcode : Opcodes) {
7374     for (Instruction *I : OpcodeInstMap[Opcode]) {
7375       // Skip dead instructions.
7376       if (A && A->isAssumedDead(IRPosition::value(*I), QueryingAA, LivenessAA,
7377                                 CheckBBLivenessOnly))
7378         continue;
7379 
7380       if (!Pred(*I))
7381         return false;
7382     }
7383   }
7384   return true;
7385 }
7386 
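// Usage sketch (illustrative): visit all assumed-live calls and invokes in
// the associated function, e.g., to show that none of them may throw:
//
//   auto InstPred = [](Instruction &I) { return !I.mayThrow(); };
//   if (!A.checkForAllInstructions(InstPred, *this,
//                                  {Instruction::Call, Instruction::Invoke}))
//     return indicatePessimisticFixpoint();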
7387 bool Attributor::checkForAllInstructions(
7388     const llvm::function_ref<bool(Instruction &)> &Pred,
7389     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes,
7390     bool CheckBBLivenessOnly) {
7391 
7392   const IRPosition &IRP = QueryingAA.getIRPosition();
7393   // Since we need to provide instructions we have to have an exact definition.
7394   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7395   if (!AssociatedFunction)
7396     return false;
7397 
7398   // TODO: use the function scope once we have call site AAReturnedValues.
7399   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7400   const auto &LivenessAA =
7401       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
7402 
7403   auto &OpcodeInstMap =
7404       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
7405   if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
7406                                    &LivenessAA, Opcodes, CheckBBLivenessOnly))
7407     return false;
7408 
7409   return true;
7410 }
7411 
7412 bool Attributor::checkForAllReadWriteInstructions(
7413     const llvm::function_ref<bool(Instruction &)> &Pred,
7414     AbstractAttribute &QueryingAA) {
7415 
7416   const Function *AssociatedFunction =
7417       QueryingAA.getIRPosition().getAssociatedFunction();
7418   if (!AssociatedFunction)
7419     return false;
7420 
7421   // TODO: use the function scope once we have call site AAReturnedValues.
7422   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7423   const auto &LivenessAA =
7424       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
7425 
7426   for (Instruction *I :
7427        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
7428     // Skip dead instructions.
7429     if (isAssumedDead(IRPosition::value(*I), &QueryingAA, &LivenessAA))
7430       continue;
7431 
7432     if (!Pred(*I))
7433       return false;
7434   }
7435 
7436   return true;
7437 }
7438 
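// High-level sketch of the fixpoint iteration performed below (informal
// pseudocode, not part of the interface):
//
//   Worklist = all abstract attributes;
//   do {
//     fold required-dependence chains of invalid AAs eagerly;
//     re-add AAs that depend on changed AAs to the Worklist;
//     update every non-fixed, assumed-live AA on the Worklist;
//   } while (some AA changed && iteration limit not reached);
//   force a pessimistic fixpoint for every AA still changing;
//   manifest all valid states into the IR.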
7439 ChangeStatus Attributor::run() {
7440   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
7441                     << AllAbstractAttributes.size()
7442                     << " abstract attributes.\n");
7443 
7444   // Now that all abstract attributes are collected and initialized we start
7445   // the abstract analysis.
7446 
7447   unsigned IterationCounter = 1;
7448 
7449   SmallVector<AbstractAttribute *, 64> ChangedAAs;
7450   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
7451   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
7452 
7453   bool RecomputeDependences = false;
7454 
7455   do {
7456     // Remember the size to determine new attributes.
7457     size_t NumAAs = AllAbstractAttributes.size();
7458     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
7459                       << ", Worklist size: " << Worklist.size() << "\n");
7460 
7461     // For invalid AAs we can fix dependent AAs that have a required dependence,
7462     // thereby folding long dependence chains in a single step without the need
7463     // to run updates.
7464     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
7465       AbstractAttribute *InvalidAA = InvalidAAs[u];
7466       auto &QuerriedAAs = QueryMap[InvalidAA];
7467       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
7468                         << QuerriedAAs.RequiredAAs.size() << "/"
7469                         << QuerriedAAs.OptionalAAs.size()
7470                         << " required/optional dependences\n");
7471       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
7472         AbstractState &DOIAAState = DepOnInvalidAA->getState();
7473         DOIAAState.indicatePessimisticFixpoint();
7474         ++NumAttributesFixedDueToRequiredDependences;
7475         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
7476         if (!DOIAAState.isValidState())
7477           InvalidAAs.insert(DepOnInvalidAA);
7478         else
7479           ChangedAAs.push_back(DepOnInvalidAA);
7480       }
7481       if (!RecomputeDependences)
7482         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
7483                         QuerriedAAs.OptionalAAs.end());
7484     }
7485 
7486     // If dependences (=QueryMap) are recomputed we have to look at all abstract
7487     // attributes again, regardless of what changed in the last iteration.
7488     if (RecomputeDependences) {
7489       LLVM_DEBUG(
7490           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
7491       QueryMap.clear();
7492       ChangedAAs.clear();
7493       Worklist.insert(AllAbstractAttributes.begin(),
7494                       AllAbstractAttributes.end());
7495     }
7496 
7497     // Add all abstract attributes that are potentially dependent on one that
7498     // changed to the work list.
7499     for (AbstractAttribute *ChangedAA : ChangedAAs) {
7500       auto &QuerriedAAs = QueryMap[ChangedAA];
7501       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
7502                       QuerriedAAs.OptionalAAs.end());
7503       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
7504                       QuerriedAAs.RequiredAAs.end());
7505     }
7506 
7507     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
7508                       << ", Worklist+Dependent size: " << Worklist.size()
7509                       << "\n");
7510 
7511     // Reset the changed and invalid set.
7512     ChangedAAs.clear();
7513     InvalidAAs.clear();
7514 
7515     // Update all abstract attributes in the work list and record the ones that
7516     // changed.
7517     for (AbstractAttribute *AA : Worklist)
7518       if (!AA->getState().isAtFixpoint() &&
7519           !isAssumedDead(*AA, nullptr, /* CheckBBLivenessOnly */ true)) {
7520         QueriedNonFixAA = false;
7521         if (AA->update(*this) == ChangeStatus::CHANGED) {
7522           ChangedAAs.push_back(AA);
7523           if (!AA->getState().isValidState())
7524             InvalidAAs.insert(AA);
7525         } else if (!QueriedNonFixAA) {
7526           // If the attribute did not query any non-fix information, the state
7527           // will not change and we can indicate that right away.
7528           AA->getState().indicateOptimisticFixpoint();
7529         }
7530       }
7531 
7532     // Check whether we should recompute the dependences in the next iteration.
7533     RecomputeDependences = (DepRecomputeInterval > 0 &&
7534                             IterationCounter % DepRecomputeInterval == 0);
7535 
7536     // Add attributes to the changed set if they have been created in the last
7537     // iteration.
7538     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
7539                       AllAbstractAttributes.end());
7540 
7541     // Reset the work list and repopulate with the changed abstract attributes.
7542     // Note that dependent ones are added above.
7543     Worklist.clear();
7544     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
7545 
7546   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
7547                                  VerifyMaxFixpointIterations));
7548 
7549   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
7550                     << IterationCounter << "/" << MaxFixpointIterations
7551                     << " iterations\n");
7552 
7553   size_t NumFinalAAs = AllAbstractAttributes.size();
7554 
7555   // Reset abstract attributes not settled in a sound fixpoint by now. This
7556   // happens when we stopped the fixpoint iteration early. Note that only the
7557   // ones marked as "changed" *and* the ones transitively depending on them
7558   // need to be reverted to a pessimistic state. Others might not be in a
7559   // fixpoint state but we can use the optimistic results for them anyway.
7560   SmallPtrSet<AbstractAttribute *, 32> Visited;
7561   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
7562     AbstractAttribute *ChangedAA = ChangedAAs[u];
7563     if (!Visited.insert(ChangedAA).second)
7564       continue;
7565 
7566     AbstractState &State = ChangedAA->getState();
7567     if (!State.isAtFixpoint()) {
7568       State.indicatePessimisticFixpoint();
7569 
7570       NumAttributesTimedOut++;
7571     }
7572 
7573     auto &QuerriedAAs = QueryMap[ChangedAA];
7574     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
7575                       QuerriedAAs.OptionalAAs.end());
7576     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
7577                       QuerriedAAs.RequiredAAs.end());
7578   }
7579 
7580   LLVM_DEBUG({
7581     if (!Visited.empty())
7582       dbgs() << "\n[Attributor] Finalized " << Visited.size()
7583              << " abstract attributes.\n";
7584   });
7585 
7586   unsigned NumManifested = 0;
7587   unsigned NumAtFixpoint = 0;
7588   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
7589   for (AbstractAttribute *AA : AllAbstractAttributes) {
7590     AbstractState &State = AA->getState();
7591 
7592     // If a fixpoint was not reached yet, we can now take the optimistic state.
7593     // This is correct because we already enforced a pessimistic one on all
7594     // abstract attributes that were transitively dependent on a changed one
7595     // above.
7596     if (!State.isAtFixpoint())
7597       State.indicateOptimisticFixpoint();
7598 
7599     // If the state is invalid, we do not try to manifest it.
7600     if (!State.isValidState())
7601       continue;
7602 
7603     // Skip dead code.
7604     if (isAssumedDead(*AA, nullptr, /* CheckBBLivenessOnly */ true))
7605       continue;
7606     // Manifest the state and record if we changed the IR.
7607     ChangeStatus LocalChange = AA->manifest(*this);
7608     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
7609       AA->trackStatistics();
7610 
7611     ManifestChange = ManifestChange | LocalChange;
7612 
7613     NumAtFixpoint++;
7614     NumManifested += (LocalChange == ChangeStatus::CHANGED);
7615   }
7616 
7617   (void)NumManifested;
7618   (void)NumAtFixpoint;
7619   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
7620                     << " arguments while " << NumAtFixpoint
7621                     << " were in a valid fixpoint state\n");
7622 
7623   NumAttributesManifested += NumManifested;
7624   NumAttributesValidFixpoint += NumAtFixpoint;
7625 
7626   (void)NumFinalAAs;
7627   if (NumFinalAAs != AllAbstractAttributes.size()) {
7628     for (unsigned u = NumFinalAAs; u < AllAbstractAttributes.size(); ++u)
7629       errs() << "Unexpected abstract attribute: " << *AllAbstractAttributes[u]
7630              << " :: "
7631              << AllAbstractAttributes[u]->getIRPosition().getAssociatedValue()
7632              << "\n";
7633     llvm_unreachable("Expected the final number of abstract attributes to "
7634                      "remain unchanged!");
7635   }
7636 
7637   // Delete things last to avoid invalid references and to get a nice order.
7638   {
7639     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
7640                       << ToBeDeletedFunctions.size() << " functions and "
7641                       << ToBeDeletedBlocks.size() << " blocks and "
7642                       << ToBeDeletedInsts.size() << " instructions and "
7643                       << ToBeChangedUses.size() << " uses\n");
7644 
7645     SmallVector<WeakTrackingVH, 32> DeadInsts;
7646     SmallVector<Instruction *, 32> TerminatorsToFold;
7647 
7648     for (auto &It : ToBeChangedUses) {
7649       Use *U = It.first;
7650       Value *NewV = It.second;
7651       Value *OldV = U->get();
7652       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
7653                         << " instead of " << *OldV << "\n");
7654       U->set(NewV);
7655       // Do not modify call instructions outside the SCC.
7656       if (auto *CB = dyn_cast<CallBase>(OldV))
7657         if (!Functions.count(CB->getCaller()))
7658           continue;
7659       if (Instruction *I = dyn_cast<Instruction>(OldV)) {
7660         CGModifiedFunctions.insert(I->getFunction());
7661         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
7662             isInstructionTriviallyDead(I))
7663           DeadInsts.push_back(I);
7664       }
7665       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
7666         Instruction *UserI = cast<Instruction>(U->getUser());
7667         if (isa<UndefValue>(NewV)) {
7668           ToBeChangedToUnreachableInsts.insert(UserI);
7669         } else {
7670           TerminatorsToFold.push_back(UserI);
7671         }
7672       }
7673     }
7674     for (auto &V : InvokeWithDeadSuccessor)
7675       if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
7676         bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
7677         bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
7678         bool Invoke2CallAllowed =
7679             !AAIsDeadFunction::mayCatchAsynchronousExceptions(
7680                 *II->getFunction());
7681         assert((UnwindBBIsDead || NormalBBIsDead) &&
7682                "Invoke does not have dead successors!");
7683         BasicBlock *BB = II->getParent();
7684         BasicBlock *NormalDestBB = II->getNormalDest();
7685         if (UnwindBBIsDead) {
7686           Instruction *NormalNextIP = &NormalDestBB->front();
7687           if (Invoke2CallAllowed) {
7688             changeToCall(II);
7689             NormalNextIP = BB->getTerminator();
7690           }
7691           if (NormalBBIsDead)
7692             ToBeChangedToUnreachableInsts.insert(NormalNextIP);
7693         } else {
7694           assert(NormalBBIsDead && "Broken invariant!");
7695           if (!NormalDestBB->getUniquePredecessor())
7696             NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
7697           ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
7698         }
7699       }
7700     for (auto &V : ToBeChangedToUnreachableInsts)
7701       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
7702         CGModifiedFunctions.insert(I->getFunction());
7703         changeToUnreachable(I, /* UseLLVMTrap */ false);
7704       }
7705     for (Instruction *I : TerminatorsToFold) {
7706       CGModifiedFunctions.insert(I->getFunction());
7707       ConstantFoldTerminator(I->getParent());
7708     }
7709 
7710     for (auto &V : ToBeDeletedInsts) {
7711       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
7712         CGModifiedFunctions.insert(I->getFunction());
7713         if (!I->getType()->isVoidTy())
7714           I->replaceAllUsesWith(UndefValue::get(I->getType()));
7715         if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
7716           DeadInsts.push_back(I);
7717         else
7718           I->eraseFromParent();
7719       }
7720     }
7721 
7722     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
7723 
7724     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
7725       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
7726       ToBeDeletedBBs.reserve(NumDeadBlocks);
7727       for (BasicBlock *BB : ToBeDeletedBlocks) {
7728         CGModifiedFunctions.insert(BB->getParent());
7729         ToBeDeletedBBs.push_back(BB);
7730       }
7731       // We do not actually delete the blocks but squash them into a single
7732       // unreachable instruction; untangling branches that jump here is
7733       // something we need to do in a more generic way.
7734       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
7735       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
7736       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
7737     }
7738 
7739     // Identify dead internal functions and delete them. This happens outside
7740     // the other fixpoint analysis as we might treat potentially dead functions
7741     // as live to lower the number of iterations. If they happen to be dead, the
7742     // below fixpoint loop will identify and eliminate them.
7743     SmallVector<Function *, 8> InternalFns;
7744     for (Function *F : Functions)
7745       if (F->hasLocalLinkage())
7746         InternalFns.push_back(F);
7747 
7748     bool FoundDeadFn = true;
7749     while (FoundDeadFn) {
7750       FoundDeadFn = false;
7751       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
7752         Function *F = InternalFns[u];
7753         if (!F)
7754           continue;
7755 
7756         bool AllCallSitesKnown;
7757         if (!checkForAllCallSites(
7758                 [this](AbstractCallSite ACS) {
7759                   return ToBeDeletedFunctions.count(
7760                       ACS.getInstruction()->getFunction());
7761                 },
7762                 *F, true, nullptr, AllCallSitesKnown))
7763           continue;
7764 
7765         ToBeDeletedFunctions.insert(F);
7766         InternalFns[u] = nullptr;
7767         FoundDeadFn = true;
7768       }
7769     }
7770   }
7771 
7772   // Rewrite the functions as requested during manifest.
7773   ManifestChange =
7774       ManifestChange | rewriteFunctionSignatures(CGModifiedFunctions);
7775 
7776   for (Function *Fn : CGModifiedFunctions)
7777     CGUpdater.reanalyzeFunction(*Fn);
7778 
7779   STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
7780   BUILD_STAT_NAME(AAIsDead, Function) += ToBeDeletedFunctions.size();
7781 
7782   for (Function *Fn : ToBeDeletedFunctions)
7783     CGUpdater.removeFunction(*Fn);
7784 
7785   if (VerifyMaxFixpointIterations &&
7786       IterationCounter != MaxFixpointIterations) {
7787     errs() << "\n[Attributor] Fixpoint iteration done after: "
7788            << IterationCounter << "/" << MaxFixpointIterations
7789            << " iterations\n";
7790     llvm_unreachable("The fixpoint was not reached with exactly the number of "
7791                      "specified iterations!");
7792   }
7793 
7794   return ManifestChange;
7795 }
7796 
7797 bool Attributor::isValidFunctionSignatureRewrite(
7798     Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
7799 
7800   auto CallSiteCanBeChanged = [](AbstractCallSite ACS) {
7801     // Forbid must-tail calls for now.
7802     return !ACS.isCallbackCall() && !ACS.getCallSite().isMustTailCall();
7803   };
7804 
7805   Function *Fn = Arg.getParent();
7806   // Avoid var-arg functions for now.
7807   if (Fn->isVarArg()) {
7808     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
7809     return false;
7810   }
7811 
7812   // Avoid functions with complicated argument passing semantics.
7813   AttributeList FnAttributeList = Fn->getAttributes();
7814   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
7815       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
7816       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca)) {
7817     LLVM_DEBUG(
7818         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
7819     return false;
7820   }
7821 
7822   // Avoid callbacks for now.
7823   bool AllCallSitesKnown;
7824   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
7825                             AllCallSitesKnown)) {
7826     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
7827     return false;
7828   }
7829 
7830   auto InstPred = [](Instruction &I) {
7831     if (auto *CI = dyn_cast<CallInst>(&I))
7832       return !CI->isMustTailCall();
7833     return true;
7834   };
7835 
7836   // Forbid must-tail calls in the function body for now.
7837   // TODO: Support rewriting functions that contain must-tail calls.
7838   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
7839   if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
7840                                    nullptr, {Instruction::Call})) {
7841     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
7842     return false;
7843   }
7844 
7845   return true;
7846 }
7847 
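// Usage sketch (illustrative; the callback bodies are hypothetical): split a
// pointer argument into two i32 arguments. The callee repair callback rewires
// the arguments in the new function, the ACS repair callback produces the
// matching operands at each call site:
//
//   Type *I32Ty = Type::getInt32Ty(Arg.getContext());
//   Type *ReplacementTypes[] = {I32Ty, I32Ty};
//   A.registerFunctionSignatureRewrite(
//       Arg, ReplacementTypes,
//       [](const Attributor::ArgumentReplacementInfo &ARI, Function &NewFn,
//          Function::arg_iterator ArgIt) { /* rebuild uses of the old Arg */ },
//       [](const Attributor::ArgumentReplacementInfo &ARI,
//          AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOps) {
//         /* append the two replacement operands */
//       });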
7848 bool Attributor::registerFunctionSignatureRewrite(
7849     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
7850     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
7851     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
7852   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
7853                     << Arg.getParent()->getName() << " with "
7854                     << ReplacementTypes.size() << " replacements\n");
7855   assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
7856          "Cannot register an invalid rewrite");
7857 
7858   Function *Fn = Arg.getParent();
7859   SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = ArgumentReplacementMap[Fn];
7860   if (ARIs.empty())
7861     ARIs.resize(Fn->arg_size());
7862 
7863   // If we already have a replacement with at most as many new arguments,
7864   // ignore this request.
7865   ArgumentReplacementInfo *&ARI = ARIs[Arg.getArgNo()];
7866   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
7867     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
7868     return false;
7869   }
7870 
7871   // If we have a replacement already but we like the new one better, delete
7872   // the old.
7873   if (ARI)
7874     delete ARI;
7875 
7876   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
7877                     << Arg.getParent()->getName() << " with "
7878                     << ReplacementTypes.size() << " replacements\n");
7879 
7880   // Remember the replacement.
7881   ARI = new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
7882                                     std::move(CalleeRepairCB),
7883                                     std::move(ACSRepairCB));
7884 
7885   return true;
7886 }
7887 
7888 ChangeStatus Attributor::rewriteFunctionSignatures(
7889     SmallPtrSetImpl<Function *> &ModifiedFns) {
7890   ChangeStatus Changed = ChangeStatus::UNCHANGED;
7891 
7892   for (auto &It : ArgumentReplacementMap) {
7893     Function *OldFn = It.getFirst();
7894 
7895     // Deleted functions do not require rewrites.
7896     if (ToBeDeletedFunctions.count(OldFn))
7897       continue;
7898 
7899     const SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = It.getSecond();
7900     assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
7901 
7902     SmallVector<Type *, 16> NewArgumentTypes;
7903     SmallVector<AttributeSet, 16> NewArgumentAttributes;
7904 
7905     // Collect replacement argument types and copy over existing attributes.
7906     AttributeList OldFnAttributeList = OldFn->getAttributes();
7907     for (Argument &Arg : OldFn->args()) {
7908       if (ArgumentReplacementInfo *ARI = ARIs[Arg.getArgNo()]) {
7909         NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
7910                                 ARI->ReplacementTypes.end());
7911         NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
7912                                      AttributeSet());
7913       } else {
7914         NewArgumentTypes.push_back(Arg.getType());
7915         NewArgumentAttributes.push_back(
7916             OldFnAttributeList.getParamAttributes(Arg.getArgNo()));
7917       }
7918     }
7919 
7920     FunctionType *OldFnTy = OldFn->getFunctionType();
7921     Type *RetTy = OldFnTy->getReturnType();
7922 
7923     // Construct the new function type using the new arguments types.
7924     FunctionType *NewFnTy =
7925         FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
7926 
7927     LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
7928                       << "' from " << *OldFn->getFunctionType() << " to "
7929                       << *NewFnTy << "\n");
7930 
7931     // Create the new function body and insert it into the module.
7932     Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
7933                                        OldFn->getAddressSpace(), "");
7934     OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
7935     NewFn->takeName(OldFn);
7936     NewFn->copyAttributesFrom(OldFn);
7937 
7938     // Patch the pointer to LLVM function in debug info descriptor.
7939     NewFn->setSubprogram(OldFn->getSubprogram());
7940     OldFn->setSubprogram(nullptr);
7941 
7942     // Recompute the parameter attributes list based on the new arguments for
7943     // the function.
7944     LLVMContext &Ctx = OldFn->getContext();
7945     NewFn->setAttributes(AttributeList::get(
7946         Ctx, OldFnAttributeList.getFnAttributes(),
7947         OldFnAttributeList.getRetAttributes(), NewArgumentAttributes));
7948 
7949     // Since we have now created the new function, splice the body of the old
7950     // function right into the new function, leaving the old rotting hulk of the
7951     // function empty.
7952     NewFn->getBasicBlockList().splice(NewFn->begin(),
7953                                       OldFn->getBasicBlockList());
7954 
7955     // Set of all "call-like" instructions that invoke the old function mapped
7956     // to their new replacements.
7957     SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
7958 
7959     // Callback to create a new "call-like" instruction for a given one.
7960     auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
7961       CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
7962       const AttributeList &OldCallAttributeList = OldCB->getAttributes();
7963 
7964       // Collect the new argument operands for the replacement call site.
7965       SmallVector<Value *, 16> NewArgOperands;
7966       SmallVector<AttributeSet, 16> NewArgOperandAttributes;
7967       for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
7968         unsigned NewFirstArgNum = NewArgOperands.size();
7969         (void)NewFirstArgNum; // only used inside assert.
7970         if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
7971           if (ARI->ACSRepairCB)
7972             ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
7973           assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
7974                      NewArgOperands.size() &&
7975                  "ACS repair callback did not provide as many operands as new "
7976                  "types were registered!");
7977           // TODO: Expose the attribute set to the ACS repair callback.
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttributes(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB =
            InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
                               NewArgOperands, OperandBundleDefs, "", OldCB);
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
                                       "", OldCB);
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      uint64_t W;
      if (OldCB->extractProfTotalWeight(W))
        NewCB->setProfWeight(W);
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->setDebugLoc(OldCB->getDebugLoc());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttributes(),
          OldCallAttributeList.getRetAttributes(), NewArgOperandAttributes));

      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    bool AllCallSitesKnown;
    bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
                                        true, nullptr, AllCallSitesKnown);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments.
    auto OldFnArgIt = OldFn->arg_begin();
    auto NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      // We do not modify the call graph here but simply reanalyze the old
      // function. This should be revisited once the old PM is gone.
      ModifiedFns.insert(OldCB.getFunction());
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // Replace the function in the call graph (if any).
    CGUpdater.replaceFunctionWith(*OldFn, *NewFn);

    // If the old function was modified and needed to be reanalyzed, the new one
    // does now.
    if (ModifiedFns.erase(OldFn))
      ModifiedFns.insert(NewFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}

void Attributor::initializeInformationCache(Function &F) {

  // Walk all instructions to find interesting instructions that might be
  // queried by abstract attributes during their initialization or update.
  // This has to happen before we create attributes.
  auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
  auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are
    // interesting to concrete attributes we only cache the ones that are, as
    // identified in the following switch.
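    // As a sketch of how this cache is consumed later (mirroring the use of
    // getOpcodeInstMapForFunction in identifyDefaultAbstractAttributes), an
    // abstract attribute can visit all instructions of a given opcode without
    // rescanning the function:
    //   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
    //   for (Instruction *I : OpcodeInstMap[Instruction::Call])
    //     /* ... visit the cached call ... */;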
    switch (I.getOpcode()) {
    default:
      assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
             "New call site/base instruction type needs to be known in the "
             "Attributor.");
      break;
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
    case Instruction::Call:
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::AtomicRMW:
    case Instruction::AtomicCmpXchg:
    case Instruction::Br:
    case Instruction::Resume:
    case Instruction::Ret:
      IsInterestingOpcode = true;
    }
    if (IsInterestingOpcode)
      InstOpcodeMap[I.getOpcode()].push_back(&I);
    if (I.mayReadOrWriteMemory())
      ReadOrWriteInsts.push_back(&I);
  }
}

void Attributor::recordDependence(const AbstractAttribute &FromAA,
                                  const AbstractAttribute &ToAA,
                                  DepClassTy DepClass) {
  if (FromAA.getState().isAtFixpoint())
    return;

  if (DepClass == DepClassTy::REQUIRED)
    QueryMap[&FromAA].RequiredAAs.insert(
        const_cast<AbstractAttribute *>(&ToAA));
  else
    QueryMap[&FromAA].OptionalAAs.insert(
        const_cast<AbstractAttribute *>(&ToAA));
  QueriedNonFixAA = true;
}
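// Note (a sketch, not code in this file): dependences are usually recorded
// implicitly when one abstract attribute queries another through the
// Attributor, e.g.,
//   const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, FnPos);
// If NoRecurseAA is not at a fixpoint yet, the querying attribute (*this) is
// remembered and updated again whenever NoRecurseAA changes.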

void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;
  if (F.isDeclaration())
    return;

  IRPosition FPos = IRPosition::function(F);

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might be "will-return".
  getOrCreateAAFor<AAWillReturn>(FPos);

  // Every function might contain instructions that cause "undefined behavior".
  getOrCreateAAFor<AAUndefinedBehavior>(FPos);

  // Every function can be nounwind.
  getOrCreateAAFor<AANoUnwind>(FPos);

  // Every function might be marked "nosync".
  getOrCreateAAFor<AANoSync>(FPos);

  // Every function might be "no-free".
  getOrCreateAAFor<AANoFree>(FPos);

  // Every function might be "no-return".
  getOrCreateAAFor<AANoReturn>(FPos);

  // Every function might be "no-recurse".
  getOrCreateAAFor<AANoRecurse>(FPos);

  // Every function might be "readnone/readonly/writeonly/...".
  getOrCreateAAFor<AAMemoryBehavior>(FPos);

  // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
  getOrCreateAAFor<AAMemoryLocation>(FPos);

  // Every function might be applicable for Heap-To-Stack conversion.
  if (EnableHeapToStack)
    getOrCreateAAFor<AAHeapToStack>(FPos);

  // Return attributes are only appropriate if the return type is non void.
  Type *ReturnType = F.getReturnType();
  if (!ReturnType->isVoidTy()) {
    // Argument attribute "returned" --- Create only one per function even
    // though it is an argument attribute.
    getOrCreateAAFor<AAReturnedValues>(FPos);

    IRPosition RetPos = IRPosition::returned(F);

    // Every returned value might be dead.
    getOrCreateAAFor<AAIsDead>(RetPos);

    // Every function might be simplified.
    getOrCreateAAFor<AAValueSimplify>(RetPos);

    if (ReturnType->isPointerTy()) {

      // Every function with pointer return type might be marked align.
      getOrCreateAAFor<AAAlign>(RetPos);

      // Every function with pointer return type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(RetPos);

      // Every function with pointer return type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(RetPos);

      // Every function with pointer return type might be marked
      // dereferenceable.
      getOrCreateAAFor<AADereferenceable>(RetPos);
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);

    // Every argument might be simplified.
    getOrCreateAAFor<AAValueSimplify>(ArgPos);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(ArgPos);

      // Every argument with pointer type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(ArgPos);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      getOrCreateAAFor<AANoCapture>(ArgPos);

      // Every argument with pointer type might be marked
      // "readnone/readonly/writeonly/..."
      getOrCreateAAFor<AAMemoryBehavior>(ArgPos);

      // Every argument with pointer type might be marked nofree.
      getOrCreateAAFor<AANoFree>(ArgPos);

      // Every argument with pointer type might be privatizable (or promotable).
      getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
    }
  }

  auto CallSitePred = [&](Instruction &I) -> bool {
    CallSite CS(&I);
    IRPosition CSRetPos = IRPosition::callsite_returned(CS);

    // Call sites might be dead if they have no side effects and no live
    // users. The return value might be dead if there are no live users.
    getOrCreateAAFor<AAIsDead>(CSRetPos);

    if (Function *Callee = CS.getCalledFunction()) {
      // Skip declarations unless annotations on their call sites were
      // explicitly requested.
      if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
          !Callee->hasMetadata(LLVMContext::MD_callback))
        return true;

      if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
        // Call site return integer values might be limited by a constant range.
        if (Callee->getReturnType()->isIntegerTy())
          getOrCreateAAFor<AAValueConstantRange>(CSRetPos);
      }

      for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {

        IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);

        // Every call site argument might be dead.
        getOrCreateAAFor<AAIsDead>(CSArgPos);

        // Call site argument might be simplified.
        getOrCreateAAFor<AAValueSimplify>(CSArgPos);

        if (!CS.getArgument(i)->getType()->isPointerTy())
          continue;

        // Call site argument attribute "non-null".
        getOrCreateAAFor<AANonNull>(CSArgPos);

        // Call site argument attribute "no-alias".
        getOrCreateAAFor<AANoAlias>(CSArgPos);

        // Call site argument attribute "dereferenceable".
        getOrCreateAAFor<AADereferenceable>(CSArgPos);

        // Call site argument attribute "align".
        getOrCreateAAFor<AAAlign>(CSArgPos);

        // Call site argument attribute
        // "readnone/readonly/writeonly/..."
        getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);

        // Call site argument attribute "nofree".
        getOrCreateAAFor<AANoFree>(CSArgPos);
      }
    }
    return true;
  };

  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  bool Success;
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
      {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
       (unsigned)Instruction::Call});
  (void)Success;
  assert(Success && "Expected the check call to be successful!");

  auto LoadStorePred = [&](Instruction &I) -> bool {
    if (isa<LoadInst>(I))
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
    else
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
    return true;
  };
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
      {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
  (void)Success;
  assert(Success && "Expected the check call to be successful!");
}
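// As a concrete illustration (hypothetical IR), for
//   define i32* @f(i32* %p)
// identifyDefaultAbstractAttributes seeds, among others, AAIsDead, AANoUnwind,
// and AAMemoryBehavior on @f, AANonNull, AANoAlias, AAAlign, and
// AADereferenceable on both the returned value and %p, and AANoCapture as well
// as AAPrivatizablePtr on %p.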

/// Helpers to ease debugging through output streams and print calls.
///
///{
raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
  switch (AP) {
  case IRPosition::IRP_INVALID:
    return OS << "inv";
  case IRPosition::IRP_FLOAT:
    return OS << "flt";
  case IRPosition::IRP_RETURNED:
    return OS << "fn_ret";
  case IRPosition::IRP_CALL_SITE_RETURNED:
    return OS << "cs_ret";
  case IRPosition::IRP_FUNCTION:
    return OS << "fn";
  case IRPosition::IRP_CALL_SITE:
    return OS << "cs";
  case IRPosition::IRP_ARGUMENT:
    return OS << "arg";
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    return OS << "cs_arg";
  }
  llvm_unreachable("Unknown attribute position!");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
  const Value &AV = Pos.getAssociatedValue();
  return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
            << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
}
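// The printed form is "{<kind>:<value> [<anchor>@<argno>]}"; e.g., a position
// for the first pointer argument %p of a function might print as
// "{arg:p [p@0]}".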

template <typename base_ty, base_ty BestState, base_ty WorstState>
raw_ostream &
llvm::operator<<(raw_ostream &OS,
                 const IntegerStateBase<base_ty, BestState, WorstState> &S) {
  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
            << static_cast<const AbstractState &>(S);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
  OS << "range-state(" << S.getBitWidth() << ")<";
  S.getKnown().print(OS);
  OS << " / ";
  S.getAssumed().print(OS);
  OS << ">";

  return OS << static_cast<const AbstractState &>(S);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

void AbstractAttribute::print(raw_ostream &OS) const {
  OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
     << "]";
}
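// Putting the pieces together, a nonnull deduction on argument %p that reached
// a fixpoint might print as "[P: {arg:p [p@0]}][nonnull][S: fix]" (the middle
// part comes from getAsStr() and differs per abstract attribute).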
///}

/// ----------------------------------------------------------------------------
///                       Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater) {
  if (DisableAttributor || Functions.empty())
    return false;

  LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << Functions.size()
                    << " functions.\n");

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  Attributor A(Functions, InfoCache, CGUpdater, DepRecInterval);

  for (Function *F : Functions)
    A.initializeInformationCache(*F);

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or lies outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            ImmutableCallSite ICS(U.getUser());
            return ICS && ICS.isCallee(&U) &&
                   Functions.count(const_cast<Function *>(ICS.getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();
  assert(!verifyModule(*Functions.front()->getParent(), &errs()) &&
         "Module verification failed!");
  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  InformationCache InfoCache(M, AG, /* CGSCC */ nullptr);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
                                           CGSCCAnalysisManager &AM,
                                           LazyCallGraph &CG,
                                           CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  InformationCache InfoCache(M, AG, /* CGSCC */ &Functions);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}
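
// For manual testing, these new-PM passes are typically exercised through opt,
// e.g. (assuming the usual pass registration names):
//   opt -passes=attributor       -S in.ll
//   opt -passes=attributor-cgscc -S in.ll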

namespace {

struct AttributorLegacyPass : public ModulePass {
  static char ID;

  AttributorLegacyPass() : ModulePass(ID) {
    initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    AnalysisGetter AG;
    SetVector<Function *> Functions;
    for (Function &F : M)
      Functions.insert(&F);

    CallGraphUpdater CGUpdater;
    InformationCache InfoCache(M, AG, /* CGSCC */ nullptr);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
  CallGraphUpdater CGUpdater;
  static char ID;

  AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
    initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (skipSCC(SCC))
      return false;

    SetVector<Function *> Functions;
    for (CallGraphNode *CGN : SCC)
      if (Function *Fn = CGN->getFunction())
        if (!Fn->isDeclaration())
          Functions.insert(Fn);

    if (Functions.empty())
      return false;

    AnalysisGetter AG;
    CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
    CGUpdater.initialize(CG, SCC);
    Module &M = *Functions.back()->getParent();
    InformationCache InfoCache(M, AG, /* CGSCC */ &Functions);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater);
  }

  bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
Pass *llvm::createAttributorCGSCCLegacyPass() {
  return new AttributorCGSCCLegacyPass();
}

char AttributorLegacyPass::ID = 0;
char AttributorCGSCCLegacyPass::ID = 0;

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new CLASS##SUFFIX(IRP);                                               \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }
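
// For illustration (an expansion sketch written out by hand), instantiating
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) yields roughly:
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     /* value positions reach llvm_unreachable(...) */
//     case IRPosition::IRP_FUNCTION:  AA = new AANoUnwindFunction(IRP); break;
//     case IRPosition::IRP_CALL_SITE: AA = new AANoUnwindCallSite(IRP); break;
//     }
//     return *AA;
//   }
// where the concrete classes are named CLASS##SUFFIX for each SUFFIX above.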

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV

INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
                      "Deduce and propagate attributes (CGSCC pass)", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
                    "Deduce and propagate attributes (CGSCC pass)", false,
                    false)