1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an inter procedural pass that deduces and/or propagating
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CallGraph.h"
24 #include "llvm/Analysis/CallGraphSCCPass.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/EHPersonalities.h"
27 #include "llvm/Analysis/GlobalsModRef.h"
28 #include "llvm/Analysis/LazyValueInfo.h"
29 #include "llvm/Analysis/Loads.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/ScalarEvolution.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Argument.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/CFG.h"
36 #include "llvm/IR/IRBuilder.h"
37 #include "llvm/IR/InstIterator.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Verifier.h"
40 #include "llvm/InitializePasses.h"
41 #include "llvm/IR/NoFolder.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
46 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
47 #include "llvm/Transforms/Utils/Local.h"
48 
49 #include <cassert>
50 
51 using namespace llvm;
52 
53 #define DEBUG_TYPE "attributor"
54 
55 STATISTIC(NumFnWithExactDefinition,
56           "Number of function with exact definitions");
57 STATISTIC(NumFnWithoutExactDefinition,
58           "Number of function without exact definitions");
59 STATISTIC(NumAttributesTimedOut,
60           "Number of abstract attributes timed out before fixpoint");
61 STATISTIC(NumAttributesValidFixpoint,
62           "Number of abstract attributes in a valid fixpoint state");
63 STATISTIC(NumAttributesManifested,
64           "Number of abstract attributes manifested in IR");
65 STATISTIC(NumAttributesFixedDueToRequiredDependences,
66           "Number of abstract attributes fixed due to required dependences");
67 
68 // Some helper macros to deal with statistics tracking.
69 //
70 // Usage:
71 // For simple IR attribute tracking overload trackStatistics in the abstract
72 // attribute and choose the right STATS_DECLTRACK_********* macro,
73 // e.g.,:
74 //  void trackStatistics() const override {
75 //    STATS_DECLTRACK_ARG_ATTR(returned)
76 //  }
77 // If there is a single "increment" side one can use the macro
78 // STATS_DECLTRACK with a custom message. If there are multiple increment
79 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
80 //
81 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
82   ("Number of " #TYPE " marked '" #NAME "'")
83 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
84 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
85 #define STATS_DECL(NAME, TYPE, MSG)                                            \
86   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
87 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
88 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
89   {                                                                            \
90     STATS_DECL(NAME, TYPE, MSG)                                                \
91     STATS_TRACK(NAME, TYPE)                                                    \
92   }
93 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
94   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
95 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
96   STATS_DECLTRACK(NAME, CSArguments,                                           \
97                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
98 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
99   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
100 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
101   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
102 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
103   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
104                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
105 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
106   STATS_DECLTRACK(NAME, CSReturn,                                              \
107                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
108 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
109   STATS_DECLTRACK(NAME, Floating,                                              \
110                   ("Number of floating values known to be '" #NAME "'"))
111 
112 // Specialization of the operator<< for abstract attributes subclasses. This
113 // disambiguates situations where multiple operators are applicable.
114 namespace llvm {
115 #define PIPE_OPERATOR(CLASS)                                                   \
116   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
117     return OS << static_cast<const AbstractAttribute &>(AA);                   \
118   }
119 
120 PIPE_OPERATOR(AAIsDead)
121 PIPE_OPERATOR(AANoUnwind)
122 PIPE_OPERATOR(AANoSync)
123 PIPE_OPERATOR(AANoRecurse)
124 PIPE_OPERATOR(AAWillReturn)
125 PIPE_OPERATOR(AANoReturn)
126 PIPE_OPERATOR(AAReturnedValues)
127 PIPE_OPERATOR(AANonNull)
128 PIPE_OPERATOR(AANoAlias)
129 PIPE_OPERATOR(AADereferenceable)
130 PIPE_OPERATOR(AAAlign)
131 PIPE_OPERATOR(AANoCapture)
132 PIPE_OPERATOR(AAValueSimplify)
133 PIPE_OPERATOR(AANoFree)
134 PIPE_OPERATOR(AAHeapToStack)
135 PIPE_OPERATOR(AAReachability)
136 PIPE_OPERATOR(AAMemoryBehavior)
137 PIPE_OPERATOR(AAMemoryLocation)
138 PIPE_OPERATOR(AAValueConstantRange)
139 PIPE_OPERATOR(AAPrivatizablePtr)
140 
141 #undef PIPE_OPERATOR
142 } // namespace llvm
143 
144 // TODO: Determine a good default value.
145 //
146 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
147 // (when run with the first 5 abstract attributes). The results also indicate
148 // that we never reach 32 iterations but always find a fixpoint sooner.
149 //
150 // This will become more evolved once we perform two interleaved fixpoint
151 // iterations: bottom-up and top-down.
152 static cl::opt<unsigned>
153     MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
154                           cl::desc("Maximal number of fixpoint iterations."),
155                           cl::init(32));
156 static cl::opt<bool> VerifyMaxFixpointIterations(
157     "attributor-max-iterations-verify", cl::Hidden,
158     cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
159     cl::init(false));
160 
161 static cl::opt<bool> DisableAttributor(
162     "attributor-disable", cl::Hidden,
163     cl::desc("Disable the attributor inter-procedural deduction pass."),
164     cl::init(true));
165 
166 static cl::opt<bool> AnnotateDeclarationCallSites(
167     "attributor-annotate-decl-cs", cl::Hidden,
168     cl::desc("Annotate call sites of function declarations."), cl::init(false));
169 
170 static cl::opt<bool> ManifestInternal(
171     "attributor-manifest-internal", cl::Hidden,
172     cl::desc("Manifest Attributor internal string attributes."),
173     cl::init(false));
174 
175 static cl::opt<unsigned> DepRecInterval(
176     "attributor-dependence-recompute-interval", cl::Hidden,
177     cl::desc("Number of iterations until dependences are recomputed."),
178     cl::init(4));
179 
180 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
181                                        cl::init(true), cl::Hidden);
182 
183 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
184                                        cl::Hidden);
185 
186 /// Logic operators for the change status enum class.
187 ///
188 ///{
189 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
190   return l == ChangeStatus::CHANGED ? l : r;
191 }
192 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
193   return l == ChangeStatus::UNCHANGED ? l : r;
194 }
195 ///}
196 
197 Argument *IRPosition::getAssociatedArgument() const {
198   if (getPositionKind() == IRP_ARGUMENT)
199     return cast<Argument>(&getAnchorValue());
200 
201   // Not an Argument and no argument number means this is not a call site
202   // argument, thus we cannot find a callback argument to return.
203   int ArgNo = getArgNo();
204   if (ArgNo < 0)
205     return nullptr;
206 
207   // Use abstract call sites to make the connection between the call site
208   // values and the ones in callbacks. If a callback was found that makes use
209   // of the underlying call site operand, we want the corresponding callback
210   // callee argument and not the direct callee argument.
211   Optional<Argument *> CBCandidateArg;
212   SmallVector<const Use *, 4> CBUses;
213   ImmutableCallSite ICS(&getAnchorValue());
214   AbstractCallSite::getCallbackUses(ICS, CBUses);
215   for (const Use *U : CBUses) {
216     AbstractCallSite ACS(U);
217     assert(ACS && ACS.isCallbackCall());
218     if (!ACS.getCalledFunction())
219       continue;
220 
221     for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
222 
223       // Test if the underlying call site operand is argument number u of the
224       // callback callee.
225       if (ACS.getCallArgOperandNo(u) != ArgNo)
226         continue;
227 
228       assert(ACS.getCalledFunction()->arg_size() > u &&
229              "ACS mapped into var-args arguments!");
230       if (CBCandidateArg.hasValue()) {
231         CBCandidateArg = nullptr;
232         break;
233       }
234       CBCandidateArg = ACS.getCalledFunction()->getArg(u);
235     }
236   }
237 
238   // If we found a unique callback candidate argument, return it.
239   if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
240     return CBCandidateArg.getValue();
241 
242   // If no callbacks were found, or none used the underlying call site operand
243   // exclusively, use the direct callee argument if available.
244   const Function *Callee = ICS.getCalledFunction();
245   if (Callee && Callee->arg_size() > unsigned(ArgNo))
246     return Callee->getArg(ArgNo);
247 
248   return nullptr;
249 }
250 
251 static Optional<Constant *> getAssumedConstant(Attributor &A, const Value &V,
252                                                const AbstractAttribute &AA,
253                                                bool &UsedAssumedInformation) {
254   const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
255       AA, IRPosition::value(V), /* TrackDependence */ false);
256   Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
257   bool IsKnown = ValueSimplifyAA.isKnown();
258   UsedAssumedInformation |= !IsKnown;
259   if (!SimplifiedV.hasValue()) {
260     A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
261     return llvm::None;
262   }
263   if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
264     A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
265     return llvm::None;
266   }
267   Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.getValue());
268   if (CI && CI->getType() != V.getType()) {
269     // TODO: Check for a save conversion.
270     return nullptr;
271   }
272   if (CI)
273     A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
274   return CI;
275 }
276 
277 static Optional<ConstantInt *>
278 getAssumedConstantInt(Attributor &A, const Value &V,
279                       const AbstractAttribute &AA,
280                       bool &UsedAssumedInformation) {
281   Optional<Constant *> C = getAssumedConstant(A, V, AA, UsedAssumedInformation);
282   if (C.hasValue())
283     return dyn_cast_or_null<ConstantInt>(C.getValue());
284   return llvm::None;
285 }
286 
287 /// Get pointer operand of memory accessing instruction. If \p I is
288 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
289 /// is set to false and the instruction is volatile, return nullptr.
290 static const Value *getPointerOperand(const Instruction *I,
291                                       bool AllowVolatile) {
292   if (auto *LI = dyn_cast<LoadInst>(I)) {
293     if (!AllowVolatile && LI->isVolatile())
294       return nullptr;
295     return LI->getPointerOperand();
296   }
297 
298   if (auto *SI = dyn_cast<StoreInst>(I)) {
299     if (!AllowVolatile && SI->isVolatile())
300       return nullptr;
301     return SI->getPointerOperand();
302   }
303 
304   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
305     if (!AllowVolatile && CXI->isVolatile())
306       return nullptr;
307     return CXI->getPointerOperand();
308   }
309 
310   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
311     if (!AllowVolatile && RMWI->isVolatile())
312       return nullptr;
313     return RMWI->getPointerOperand();
314   }
315 
316   return nullptr;
317 }
318 
319 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
320 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
321 /// getelement pointer instructions that traverse the natural type of \p Ptr if
322 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
323 /// through a cast to i8*.
324 ///
325 /// TODO: This could probably live somewhere more prominantly if it doesn't
326 ///       already exist.
327 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
328                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
329   assert(Offset >= 0 && "Negative offset not supported yet!");
330   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
331                     << "-bytes as " << *ResTy << "\n");
332 
333   // The initial type we are trying to traverse to get nice GEPs.
334   Type *Ty = Ptr->getType();
335 
336   SmallVector<Value *, 4> Indices;
337   std::string GEPName = Ptr->getName().str();
338   while (Offset) {
339     uint64_t Idx, Rem;
340 
341     if (auto *STy = dyn_cast<StructType>(Ty)) {
342       const StructLayout *SL = DL.getStructLayout(STy);
343       if (int64_t(SL->getSizeInBytes()) < Offset)
344         break;
345       Idx = SL->getElementContainingOffset(Offset);
346       assert(Idx < STy->getNumElements() && "Offset calculation error!");
347       Rem = Offset - SL->getElementOffset(Idx);
348       Ty = STy->getElementType(Idx);
349     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
350       Ty = PTy->getElementType();
351       if (!Ty->isSized())
352         break;
353       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
354       assert(ElementSize && "Expected type with size!");
355       Idx = Offset / ElementSize;
356       Rem = Offset % ElementSize;
357     } else {
358       // Non-aggregate type, we cast and make byte-wise progress now.
359       break;
360     }
361 
362     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
363                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
364 
365     GEPName += "." + std::to_string(Idx);
366     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
367     Offset = Rem;
368   }
369 
370   // Create a GEP if we collected indices above.
371   if (Indices.size())
372     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
373 
374   // If an offset is left we use byte-wise adjustment.
375   if (Offset) {
376     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
377     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
378                         GEPName + ".b" + Twine(Offset));
379   }
380 
381   // Ensure the result has the requested type.
382   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
383 
384   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
385   return Ptr;
386 }
387 
388 /// Recursively visit all values that might become \p IRP at some point. This
389 /// will be done by looking through cast instructions, selects, phis, and calls
390 /// with the "returned" attribute. Once we cannot look through the value any
391 /// further, the callback \p VisitValueCB is invoked and passed the current
392 /// value, the \p State, and a flag to indicate if we stripped anything.
393 /// Stripped means that we unpacked the value associated with \p IRP at least
394 /// once. Note that the value used for the callback may still be the value
395 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
396 /// we will never visit more values than specified by \p MaxValues.
397 template <typename AAType, typename StateTy>
398 static bool genericValueTraversal(
399     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
400     const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
401     int MaxValues = 8, const function_ref<Value *(Value *)> StripCB = nullptr) {
402 
403   const AAIsDead *LivenessAA = nullptr;
404   if (IRP.getAnchorScope())
405     LivenessAA = &A.getAAFor<AAIsDead>(
406         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
407         /* TrackDependence */ false);
408   bool AnyDead = false;
409 
410   // TODO: Use Positions here to allow context sensitivity in VisitValueCB
411   SmallPtrSet<Value *, 16> Visited;
412   SmallVector<Value *, 16> Worklist;
413   Worklist.push_back(&IRP.getAssociatedValue());
414 
415   int Iteration = 0;
416   do {
417     Value *V = Worklist.pop_back_val();
418     if (StripCB)
419       V = StripCB(V);
420 
421     // Check if we should process the current value. To prevent endless
422     // recursion keep a record of the values we followed!
423     if (!Visited.insert(V).second)
424       continue;
425 
426     // Make sure we limit the compile time for complex expressions.
427     if (Iteration++ >= MaxValues)
428       return false;
429 
430     // Explicitly look through calls with a "returned" attribute if we do
431     // not have a pointer as stripPointerCasts only works on them.
432     Value *NewV = nullptr;
433     if (V->getType()->isPointerTy()) {
434       NewV = V->stripPointerCasts();
435     } else {
436       CallSite CS(V);
437       if (CS && CS.getCalledFunction()) {
438         for (Argument &Arg : CS.getCalledFunction()->args())
439           if (Arg.hasReturnedAttr()) {
440             NewV = CS.getArgOperand(Arg.getArgNo());
441             break;
442           }
443       }
444     }
445     if (NewV && NewV != V) {
446       Worklist.push_back(NewV);
447       continue;
448     }
449 
450     // Look through select instructions, visit both potential values.
451     if (auto *SI = dyn_cast<SelectInst>(V)) {
452       Worklist.push_back(SI->getTrueValue());
453       Worklist.push_back(SI->getFalseValue());
454       continue;
455     }
456 
457     // Look through phi nodes, visit all live operands.
458     if (auto *PHI = dyn_cast<PHINode>(V)) {
459       assert(LivenessAA &&
460              "Expected liveness in the presence of instructions!");
461       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
462         const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
463         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
464                             LivenessAA,
465                             /* CheckBBLivenessOnly */ true)) {
466           AnyDead = true;
467           continue;
468         }
469         Worklist.push_back(PHI->getIncomingValue(u));
470       }
471       continue;
472     }
473 
474     // Once a leaf is reached we inform the user through the callback.
475     if (!VisitValueCB(*V, State, Iteration > 1))
476       return false;
477   } while (!Worklist.empty());
478 
479   // If we actually used liveness information so we have to record a dependence.
480   if (AnyDead)
481     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
482 
483   // All values have been visited.
484   return true;
485 }
486 
487 /// Return true if \p New is equal or worse than \p Old.
488 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
489   if (!Old.isIntAttribute())
490     return true;
491 
492   return Old.getValueAsInt() >= New.getValueAsInt();
493 }
494 
495 /// Return true if the information provided by \p Attr was added to the
496 /// attribute list \p Attrs. This is only the case if it was not already present
497 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
498 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
499                              AttributeList &Attrs, int AttrIdx) {
500 
501   if (Attr.isEnumAttribute()) {
502     Attribute::AttrKind Kind = Attr.getKindAsEnum();
503     if (Attrs.hasAttribute(AttrIdx, Kind))
504       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
505         return false;
506     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
507     return true;
508   }
509   if (Attr.isStringAttribute()) {
510     StringRef Kind = Attr.getKindAsString();
511     if (Attrs.hasAttribute(AttrIdx, Kind))
512       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
513         return false;
514     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
515     return true;
516   }
517   if (Attr.isIntAttribute()) {
518     Attribute::AttrKind Kind = Attr.getKindAsEnum();
519     if (Attrs.hasAttribute(AttrIdx, Kind))
520       if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
521         return false;
522     Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
523     Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
524     return true;
525   }
526 
527   llvm_unreachable("Expected enum or string attribute!");
528 }
529 
530 static const Value *
531 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
532                                      const DataLayout &DL,
533                                      bool AllowNonInbounds = false) {
534   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
535   if (!Ptr)
536     return nullptr;
537 
538   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
539                                           AllowNonInbounds);
540 }
541 
542 ChangeStatus AbstractAttribute::update(Attributor &A) {
543   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
544   if (getState().isAtFixpoint())
545     return HasChanged;
546 
547   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
548 
549   HasChanged = updateImpl(A);
550 
551   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
552                     << "\n");
553 
554   return HasChanged;
555 }
556 
557 ChangeStatus
558 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
559                                    const ArrayRef<Attribute> &DeducedAttrs) {
560   Function *ScopeFn = IRP.getAssociatedFunction();
561   IRPosition::Kind PK = IRP.getPositionKind();
562 
563   // In the following some generic code that will manifest attributes in
564   // DeducedAttrs if they improve the current IR. Due to the different
565   // annotation positions we use the underlying AttributeList interface.
566 
567   AttributeList Attrs;
568   switch (PK) {
569   case IRPosition::IRP_INVALID:
570   case IRPosition::IRP_FLOAT:
571     return ChangeStatus::UNCHANGED;
572   case IRPosition::IRP_ARGUMENT:
573   case IRPosition::IRP_FUNCTION:
574   case IRPosition::IRP_RETURNED:
575     Attrs = ScopeFn->getAttributes();
576     break;
577   case IRPosition::IRP_CALL_SITE:
578   case IRPosition::IRP_CALL_SITE_RETURNED:
579   case IRPosition::IRP_CALL_SITE_ARGUMENT:
580     Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
581     break;
582   }
583 
584   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
585   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
586   for (const Attribute &Attr : DeducedAttrs) {
587     if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
588       continue;
589 
590     HasChanged = ChangeStatus::CHANGED;
591   }
592 
593   if (HasChanged == ChangeStatus::UNCHANGED)
594     return HasChanged;
595 
596   switch (PK) {
597   case IRPosition::IRP_ARGUMENT:
598   case IRPosition::IRP_FUNCTION:
599   case IRPosition::IRP_RETURNED:
600     ScopeFn->setAttributes(Attrs);
601     break;
602   case IRPosition::IRP_CALL_SITE:
603   case IRPosition::IRP_CALL_SITE_RETURNED:
604   case IRPosition::IRP_CALL_SITE_ARGUMENT:
605     CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
606     break;
607   case IRPosition::IRP_INVALID:
608   case IRPosition::IRP_FLOAT:
609     break;
610   }
611 
612   return HasChanged;
613 }
614 
615 const IRPosition IRPosition::EmptyKey(255);
616 const IRPosition IRPosition::TombstoneKey(256);
617 
618 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
619   IRPositions.emplace_back(IRP);
620 
621   ImmutableCallSite ICS(&IRP.getAnchorValue());
622   switch (IRP.getPositionKind()) {
623   case IRPosition::IRP_INVALID:
624   case IRPosition::IRP_FLOAT:
625   case IRPosition::IRP_FUNCTION:
626     return;
627   case IRPosition::IRP_ARGUMENT:
628   case IRPosition::IRP_RETURNED:
629     IRPositions.emplace_back(
630         IRPosition::function(*IRP.getAssociatedFunction()));
631     return;
632   case IRPosition::IRP_CALL_SITE:
633     assert(ICS && "Expected call site!");
634     // TODO: We need to look at the operand bundles similar to the redirection
635     //       in CallBase.
636     if (!ICS.hasOperandBundles())
637       if (const Function *Callee = ICS.getCalledFunction())
638         IRPositions.emplace_back(IRPosition::function(*Callee));
639     return;
640   case IRPosition::IRP_CALL_SITE_RETURNED:
641     assert(ICS && "Expected call site!");
642     // TODO: We need to look at the operand bundles similar to the redirection
643     //       in CallBase.
644     if (!ICS.hasOperandBundles()) {
645       if (const Function *Callee = ICS.getCalledFunction()) {
646         IRPositions.emplace_back(IRPosition::returned(*Callee));
647         IRPositions.emplace_back(IRPosition::function(*Callee));
648         for (const Argument &Arg : Callee->args())
649           if (Arg.hasReturnedAttr()) {
650             IRPositions.emplace_back(
651                 IRPosition::callsite_argument(ICS, Arg.getArgNo()));
652             IRPositions.emplace_back(
653                 IRPosition::value(*ICS.getArgOperand(Arg.getArgNo())));
654             IRPositions.emplace_back(IRPosition::argument(Arg));
655           }
656       }
657     }
658     IRPositions.emplace_back(
659         IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
660     return;
661   case IRPosition::IRP_CALL_SITE_ARGUMENT: {
662     int ArgNo = IRP.getArgNo();
663     assert(ICS && ArgNo >= 0 && "Expected call site!");
664     // TODO: We need to look at the operand bundles similar to the redirection
665     //       in CallBase.
666     if (!ICS.hasOperandBundles()) {
667       const Function *Callee = ICS.getCalledFunction();
668       if (Callee && Callee->arg_size() > unsigned(ArgNo))
669         IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
670       if (Callee)
671         IRPositions.emplace_back(IRPosition::function(*Callee));
672     }
673     IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
674     return;
675   }
676   }
677 }
678 
679 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
680                          bool IgnoreSubsumingPositions) const {
681   SmallVector<Attribute, 4> Attrs;
682   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
683     for (Attribute::AttrKind AK : AKs)
684       if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
685         return true;
686     // The first position returned by the SubsumingPositionIterator is
687     // always the position itself. If we ignore subsuming positions we
688     // are done after the first iteration.
689     if (IgnoreSubsumingPositions)
690       break;
691   }
692   return false;
693 }
694 
695 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
696                           SmallVectorImpl<Attribute> &Attrs,
697                           bool IgnoreSubsumingPositions) const {
698   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
699     for (Attribute::AttrKind AK : AKs)
700       EquivIRP.getAttrsFromIRAttr(AK, Attrs);
701     // The first position returned by the SubsumingPositionIterator is
702     // always the position itself. If we ignore subsuming positions we
703     // are done after the first iteration.
704     if (IgnoreSubsumingPositions)
705       break;
706   }
707 }
708 
709 bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
710                                     SmallVectorImpl<Attribute> &Attrs) const {
711   if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
712     return false;
713 
714   AttributeList AttrList;
715   if (ImmutableCallSite ICS = ImmutableCallSite(&getAnchorValue()))
716     AttrList = ICS.getAttributes();
717   else
718     AttrList = getAssociatedFunction()->getAttributes();
719 
720   bool HasAttr = AttrList.hasAttribute(getAttrIdx(), AK);
721   if (HasAttr)
722     Attrs.push_back(AttrList.getAttribute(getAttrIdx(), AK));
723   return HasAttr;
724 }
725 
726 
727 void IRPosition::verify() {
728   switch (KindOrArgNo) {
729   default:
730     assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
731     assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
732            "Expected call base or argument for positive attribute index!");
733     if (isa<Argument>(AnchorVal)) {
734       assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
735              "Argument number mismatch!");
736       assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
737              "Associated value mismatch!");
738     } else {
739       assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
740              "Call site argument number mismatch!");
741       assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
742                  &getAssociatedValue() &&
743              "Associated value mismatch!");
744     }
745     break;
746   case IRP_INVALID:
747     assert(!AnchorVal && "Expected no value for an invalid position!");
748     break;
749   case IRP_FLOAT:
750     assert((!isa<CallBase>(&getAssociatedValue()) &&
751             !isa<Argument>(&getAssociatedValue())) &&
752            "Expected specialized kind for call base and argument values!");
753     break;
754   case IRP_RETURNED:
755     assert(isa<Function>(AnchorVal) &&
756            "Expected function for a 'returned' position!");
757     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
758     break;
759   case IRP_CALL_SITE_RETURNED:
760     assert((isa<CallBase>(AnchorVal)) &&
761            "Expected call base for 'call site returned' position!");
762     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
763     break;
764   case IRP_CALL_SITE:
765     assert((isa<CallBase>(AnchorVal)) &&
766            "Expected call base for 'call site function' position!");
767     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
768     break;
769   case IRP_FUNCTION:
770     assert(isa<Function>(AnchorVal) &&
771            "Expected function for a 'function' position!");
772     assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
773     break;
774   }
775 }
776 
777 namespace {
778 
779 /// Helper function to clamp a state \p S of type \p StateType with the
780 /// information in \p R and indicate/return if \p S did change (as-in update is
781 /// required to be run again).
782 template <typename StateType>
783 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
784   auto Assumed = S.getAssumed();
785   S ^= R;
786   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
787                                    : ChangeStatus::CHANGED;
788 }
789 
790 /// Clamp the information known for all returned values of a function
791 /// (identified by \p QueryingAA) into \p S.
792 template <typename AAType, typename StateType = typename AAType::StateType>
793 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
794                                      StateType &S) {
795   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
796                     << QueryingAA << " into " << S << "\n");
797 
798   assert((QueryingAA.getIRPosition().getPositionKind() ==
799               IRPosition::IRP_RETURNED ||
800           QueryingAA.getIRPosition().getPositionKind() ==
801               IRPosition::IRP_CALL_SITE_RETURNED) &&
802          "Can only clamp returned value states for a function returned or call "
803          "site returned position!");
804 
805   // Use an optional state as there might not be any return values and we want
806   // to join (IntegerState::operator&) the state of all there are.
807   Optional<StateType> T;
808 
809   // Callback for each possibly returned value.
810   auto CheckReturnValue = [&](Value &RV) -> bool {
811     const IRPosition &RVPos = IRPosition::value(RV);
812     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
813     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
814                       << " @ " << RVPos << "\n");
815     const StateType &AAS = static_cast<const StateType &>(AA.getState());
816     if (T.hasValue())
817       *T &= AAS;
818     else
819       T = AAS;
820     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
821                       << "\n");
822     return T->isValidState();
823   };
824 
825   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
826     S.indicatePessimisticFixpoint();
827   else if (T.hasValue())
828     S ^= *T;
829 }
830 
831 /// Helper class to compose two generic deduction
832 template <typename AAType, typename Base, typename StateType,
833           template <typename...> class F, template <typename...> class G>
834 struct AAComposeTwoGenericDeduction
835     : public F<AAType, G<AAType, Base, StateType>, StateType> {
836   AAComposeTwoGenericDeduction(const IRPosition &IRP)
837       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
838 
839   void initialize(Attributor &A) override {
840     F<AAType, G<AAType, Base, StateType>, StateType>::initialize(A);
841     G<AAType, Base, StateType>::initialize(A);
842   }
843 
844   /// See AbstractAttribute::updateImpl(...).
845   ChangeStatus updateImpl(Attributor &A) override {
846     ChangeStatus ChangedF =
847         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
848     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
849     return ChangedF | ChangedG;
850   }
851 };
852 
853 /// Helper class for generic deduction: return value -> returned position.
854 template <typename AAType, typename Base,
855           typename StateType = typename Base::StateType>
856 struct AAReturnedFromReturnedValues : public Base {
857   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
858 
859   /// See AbstractAttribute::updateImpl(...).
860   ChangeStatus updateImpl(Attributor &A) override {
861     StateType S(StateType::getBestState(this->getState()));
862     clampReturnedValueStates<AAType, StateType>(A, *this, S);
863     // TODO: If we know we visited all returned values, thus no are assumed
864     // dead, we can take the known information from the state T.
865     return clampStateAndIndicateChange<StateType>(this->getState(), S);
866   }
867 };
868 
869 /// Clamp the information known at all call sites for a given argument
870 /// (identified by \p QueryingAA) into \p S.
871 template <typename AAType, typename StateType = typename AAType::StateType>
872 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
873                                         StateType &S) {
874   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
875                     << QueryingAA << " into " << S << "\n");
876 
877   assert(QueryingAA.getIRPosition().getPositionKind() ==
878              IRPosition::IRP_ARGUMENT &&
879          "Can only clamp call site argument states for an argument position!");
880 
881   // Use an optional state as there might not be any return values and we want
882   // to join (IntegerState::operator&) the state of all there are.
883   Optional<StateType> T;
884 
885   // The argument number which is also the call site argument number.
886   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
887 
888   auto CallSiteCheck = [&](AbstractCallSite ACS) {
889     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
890     // Check if a coresponding argument was found or if it is on not associated
891     // (which can happen for callback calls).
892     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
893       return false;
894 
895     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
896     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
897                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
898     const StateType &AAS = static_cast<const StateType &>(AA.getState());
899     if (T.hasValue())
900       *T &= AAS;
901     else
902       T = AAS;
903     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
904                       << "\n");
905     return T->isValidState();
906   };
907 
908   bool AllCallSitesKnown;
909   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
910                               AllCallSitesKnown))
911     S.indicatePessimisticFixpoint();
912   else if (T.hasValue())
913     S ^= *T;
914 }
915 
916 /// Helper class for generic deduction: call site argument -> argument position.
917 template <typename AAType, typename Base,
918           typename StateType = typename AAType::StateType>
919 struct AAArgumentFromCallSiteArguments : public Base {
920   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
921 
922   /// See AbstractAttribute::updateImpl(...).
923   ChangeStatus updateImpl(Attributor &A) override {
924     StateType S(StateType::getBestState(this->getState()));
925     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
926     // TODO: If we know we visited all incoming values, thus no are assumed
927     // dead, we can take the known information from the state T.
928     return clampStateAndIndicateChange<StateType>(this->getState(), S);
929   }
930 };
931 
932 /// Helper class for generic replication: function returned -> cs returned.
933 template <typename AAType, typename Base,
934           typename StateType = typename Base::StateType>
935 struct AACallSiteReturnedFromReturned : public Base {
936   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
937 
938   /// See AbstractAttribute::updateImpl(...).
939   ChangeStatus updateImpl(Attributor &A) override {
940     assert(this->getIRPosition().getPositionKind() ==
941                IRPosition::IRP_CALL_SITE_RETURNED &&
942            "Can only wrap function returned positions for call site returned "
943            "positions!");
944     auto &S = this->getState();
945 
946     const Function *AssociatedFunction =
947         this->getIRPosition().getAssociatedFunction();
948     if (!AssociatedFunction)
949       return S.indicatePessimisticFixpoint();
950 
951     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
952     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
953     return clampStateAndIndicateChange(
954         S, static_cast<const StateType &>(AA.getState()));
955   }
956 };
957 
958 /// Helper class for generic deduction using must-be-executed-context
959 /// Base class is required to have `followUse` method.
960 
961 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
962 /// U - Underlying use.
963 /// I - The user of the \p U.
964 /// `followUse` returns true if the value should be tracked transitively.
965 
966 template <typename AAType, typename Base,
967           typename StateType = typename AAType::StateType>
968 struct AAFromMustBeExecutedContext : public Base {
969   AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
970 
971   void initialize(Attributor &A) override {
972     Base::initialize(A);
973     const IRPosition &IRP = this->getIRPosition();
974     Instruction *CtxI = IRP.getCtxI();
975 
976     if (!CtxI)
977       return;
978 
979     for (const Use &U : IRP.getAssociatedValue().uses())
980       Uses.insert(&U);
981   }
982 
983   /// Helper function to accumulate uses.
984   void followUsesInContext(Attributor &A,
985                            MustBeExecutedContextExplorer &Explorer,
986                            const Instruction *CtxI,
987                            SetVector<const Use *> &Uses, StateType &State) {
988     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
989     for (unsigned u = 0; u < Uses.size(); ++u) {
990       const Use *U = Uses[u];
991       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
992         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
993         if (Found && Base::followUse(A, U, UserI, State))
994           for (const Use &Us : UserI->uses())
995             Uses.insert(&Us);
996       }
997     }
998   }
999 
1000   /// See AbstractAttribute::updateImpl(...).
1001   ChangeStatus updateImpl(Attributor &A) override {
1002     auto BeforeState = this->getState();
1003     auto &S = this->getState();
1004     Instruction *CtxI = this->getIRPosition().getCtxI();
1005     if (!CtxI)
1006       return ChangeStatus::UNCHANGED;
1007 
1008     MustBeExecutedContextExplorer &Explorer =
1009         A.getInfoCache().getMustBeExecutedContextExplorer();
1010 
1011     followUsesInContext(A, Explorer, CtxI, Uses, S);
1012 
1013     if (this->isAtFixpoint())
1014       return ChangeStatus::CHANGED;
1015 
1016     SmallVector<const BranchInst *, 4> BrInsts;
1017     auto Pred = [&](const Instruction *I) {
1018       if (const BranchInst *Br = dyn_cast<BranchInst>(I))
1019         if (Br->isConditional())
1020           BrInsts.push_back(Br);
1021       return true;
1022     };
1023 
1024     // Here, accumulate conditional branch instructions in the context. We
1025     // explore the child paths and collect the known states. The disjunction of
1026     // those states can be merged to its own state. Let ParentState_i be a state
1027     // to indicate the known information for an i-th branch instruction in the
1028     // context. ChildStates are created for its successors respectively.
1029     //
1030     // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
1031     // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
1032     //      ...
1033     // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
1034     //
1035     // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
1036     //
1037     // FIXME: Currently, recursive branches are not handled. For example, we
1038     // can't deduce that ptr must be dereferenced in below function.
1039     //
1040     // void f(int a, int c, int *ptr) {
1041     //    if(a)
1042     //      if (b) {
1043     //        *ptr = 0;
1044     //      } else {
1045     //        *ptr = 1;
1046     //      }
1047     //    else {
1048     //      if (b) {
1049     //        *ptr = 0;
1050     //      } else {
1051     //        *ptr = 1;
1052     //      }
1053     //    }
1054     // }
1055 
1056     Explorer.checkForAllContext(CtxI, Pred);
1057     for (const BranchInst *Br : BrInsts) {
1058       StateType ParentState;
1059 
1060       // The known state of the parent state is a conjunction of children's
1061       // known states so it is initialized with a best state.
1062       ParentState.indicateOptimisticFixpoint();
1063 
1064       for (const BasicBlock *BB : Br->successors()) {
1065         StateType ChildState;
1066 
1067         size_t BeforeSize = Uses.size();
1068         followUsesInContext(A, Explorer, &BB->front(), Uses, ChildState);
1069 
1070         // Erase uses which only appear in the child.
1071         for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
1072           It = Uses.erase(It);
1073 
1074         ParentState &= ChildState;
1075       }
1076 
1077       // Use only known state.
1078       S += ParentState;
1079     }
1080 
1081     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1082   }
1083 
1084 private:
1085   /// Container for (transitive) uses of the associated value.
1086   SetVector<const Use *> Uses;
1087 };
1088 
1089 template <typename AAType, typename Base,
1090           typename StateType = typename AAType::StateType>
1091 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
1092     AAComposeTwoGenericDeduction<AAType, Base, StateType,
1093                                  AAFromMustBeExecutedContext,
1094                                  AAArgumentFromCallSiteArguments>;
1095 
1096 template <typename AAType, typename Base,
1097           typename StateType = typename AAType::StateType>
1098 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
1099     AAComposeTwoGenericDeduction<AAType, Base, StateType,
1100                                  AAFromMustBeExecutedContext,
1101                                  AACallSiteReturnedFromReturned>;
1102 
1103 /// -----------------------NoUnwind Function Attribute--------------------------
1104 
1105 struct AANoUnwindImpl : AANoUnwind {
1106   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
1107 
1108   const std::string getAsStr() const override {
1109     return getAssumed() ? "nounwind" : "may-unwind";
1110   }
1111 
1112   /// See AbstractAttribute::updateImpl(...).
1113   ChangeStatus updateImpl(Attributor &A) override {
1114     auto Opcodes = {
1115         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1116         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1117         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1118 
1119     auto CheckForNoUnwind = [&](Instruction &I) {
1120       if (!I.mayThrow())
1121         return true;
1122 
1123       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1124         const auto &NoUnwindAA =
1125             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
1126         return NoUnwindAA.isAssumedNoUnwind();
1127       }
1128       return false;
1129     };
1130 
1131     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
1132       return indicatePessimisticFixpoint();
1133 
1134     return ChangeStatus::UNCHANGED;
1135   }
1136 };
1137 
1138 struct AANoUnwindFunction final : public AANoUnwindImpl {
1139   AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
1140 
1141   /// See AbstractAttribute::trackStatistics()
1142   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1143 };
1144 
1145 /// NoUnwind attribute deduction for a call sites.
1146 struct AANoUnwindCallSite final : AANoUnwindImpl {
1147   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
1148 
1149   /// See AbstractAttribute::initialize(...).
1150   void initialize(Attributor &A) override {
1151     AANoUnwindImpl::initialize(A);
1152     Function *F = getAssociatedFunction();
1153     if (!F)
1154       indicatePessimisticFixpoint();
1155   }
1156 
1157   /// See AbstractAttribute::updateImpl(...).
1158   ChangeStatus updateImpl(Attributor &A) override {
1159     // TODO: Once we have call site specific value information we can provide
1160     //       call site specific liveness information and then it makes
1161     //       sense to specialize attributes for call sites arguments instead of
1162     //       redirecting requests to the callee argument.
1163     Function *F = getAssociatedFunction();
1164     const IRPosition &FnPos = IRPosition::function(*F);
1165     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
1166     return clampStateAndIndicateChange(
1167         getState(),
1168         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
1169   }
1170 
1171   /// See AbstractAttribute::trackStatistics()
1172   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1173 };
1174 
1175 /// --------------------- Function Return Values -------------------------------
1176 
1177 /// "Attribute" that collects all potential returned values and the return
1178 /// instructions that they arise from.
1179 ///
1180 /// If there is a unique returned value R, the manifest method will:
1181 ///   - mark R with the "returned" attribute, if R is an argument.
1182 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1183 
1184   /// Mapping of values potentially returned by the associated function to the
1185   /// return instructions that might return them.
1186   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1187 
1188   /// Mapping to remember the number of returned values for a call site such
1189   /// that we can avoid updates if nothing changed.
1190   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
1191 
1192   /// Set of unresolved calls returned by the associated function.
1193   SmallSetVector<CallBase *, 4> UnresolvedCalls;
1194 
1195   /// State flags
1196   ///
1197   ///{
1198   bool IsFixed = false;
1199   bool IsValidState = true;
1200   ///}
1201 
1202 public:
1203   AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
1204 
1205   /// See AbstractAttribute::initialize(...).
1206   void initialize(Attributor &A) override {
1207     // Reset the state.
1208     IsFixed = false;
1209     IsValidState = true;
1210     ReturnedValues.clear();
1211 
1212     Function *F = getAssociatedFunction();
1213     if (!F) {
1214       indicatePessimisticFixpoint();
1215       return;
1216     }
1217     assert(!F->getReturnType()->isVoidTy() &&
1218            "Did not expect a void return type!");
1219 
1220     // The map from instruction opcodes to those instructions in the function.
1221     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1222 
1223     // Look through all arguments, if one is marked as returned we are done.
1224     for (Argument &Arg : F->args()) {
1225       if (Arg.hasReturnedAttr()) {
1226         auto &ReturnInstSet = ReturnedValues[&Arg];
1227         for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
1228           ReturnInstSet.insert(cast<ReturnInst>(RI));
1229 
1230         indicateOptimisticFixpoint();
1231         return;
1232       }
1233     }
1234 
1235     if (!F->hasExactDefinition())
1236       indicatePessimisticFixpoint();
1237   }
1238 
1239   /// See AbstractAttribute::manifest(...).
1240   ChangeStatus manifest(Attributor &A) override;
1241 
1242   /// See AbstractAttribute::getState(...).
1243   AbstractState &getState() override { return *this; }
1244 
1245   /// See AbstractAttribute::getState(...).
1246   const AbstractState &getState() const override { return *this; }
1247 
1248   /// See AbstractAttribute::updateImpl(Attributor &A).
1249   ChangeStatus updateImpl(Attributor &A) override;
1250 
1251   llvm::iterator_range<iterator> returned_values() override {
1252     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1253   }
1254 
1255   llvm::iterator_range<const_iterator> returned_values() const override {
1256     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1257   }
1258 
1259   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
1260     return UnresolvedCalls;
1261   }
1262 
1263   /// Return the number of potential return values, -1 if unknown.
1264   size_t getNumReturnValues() const override {
1265     return isValidState() ? ReturnedValues.size() : -1;
1266   }
1267 
1268   /// Return an assumed unique return value if a single candidate is found. If
1269   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1270   /// Optional::NoneType.
1271   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1272 
1273   /// See AbstractState::checkForAllReturnedValues(...).
1274   bool checkForAllReturnedValuesAndReturnInsts(
1275       const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1276           &Pred) const override;
1277 
1278   /// Pretty print the attribute similar to the IR representation.
1279   const std::string getAsStr() const override;
1280 
1281   /// See AbstractState::isAtFixpoint().
1282   bool isAtFixpoint() const override { return IsFixed; }
1283 
1284   /// See AbstractState::isValidState().
1285   bool isValidState() const override { return IsValidState; }
1286 
1287   /// See AbstractState::indicateOptimisticFixpoint(...).
1288   ChangeStatus indicateOptimisticFixpoint() override {
1289     IsFixed = true;
1290     return ChangeStatus::UNCHANGED;
1291   }
1292 
1293   ChangeStatus indicatePessimisticFixpoint() override {
1294     IsFixed = true;
1295     IsValidState = false;
1296     return ChangeStatus::CHANGED;
1297   }
1298 };
1299 
1300 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1301   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1302 
1303   // Bookkeeping.
1304   assert(isValidState());
1305   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1306                   "Number of function with known return values");
1307 
1308   // Check if we have an assumed unique return value that we could manifest.
1309   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1310 
1311   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1312     return Changed;
1313 
1314   // Bookkeeping.
1315   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1316                   "Number of function with unique return");
1317 
1318   // Callback to replace the uses of CB with the constant C.
1319   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
1320     if (CB.getNumUses() == 0 || CB.isMustTailCall())
1321       return ChangeStatus::UNCHANGED;
1322     if (A.changeValueAfterManifest(CB, C))
1323       return ChangeStatus::CHANGED;
1324     return ChangeStatus::UNCHANGED;
1325   };
1326 
1327   // If the assumed unique return value is an argument, annotate it.
1328   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1329     // TODO: This should be handled differently!
1330     this->AnchorVal = UniqueRVArg;
1331     this->KindOrArgNo = UniqueRVArg->getArgNo();
1332     Changed = IRAttribute::manifest(A);
1333   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
1334     // We can replace the returned value with the unique returned constant.
1335     Value &AnchorValue = getAnchorValue();
1336     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1337       for (const Use &U : F->uses())
1338         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1339           if (CB->isCallee(&U)) {
1340             Constant *RVCCast =
1341                 CB->getType() == RVC->getType()
1342                     ? RVC
1343                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1344             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1345           }
1346     } else {
1347       assert(isa<CallBase>(AnchorValue) &&
1348              "Expcected a function or call base anchor!");
1349       Constant *RVCCast =
1350           AnchorValue.getType() == RVC->getType()
1351               ? RVC
1352               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1353       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1354     }
1355     if (Changed == ChangeStatus::CHANGED)
1356       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1357                       "Number of function returns replaced by constant return");
1358   }
1359 
1360   return Changed;
1361 }
1362 
1363 const std::string AAReturnedValuesImpl::getAsStr() const {
1364   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1365          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1366          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1367 }
1368 
1369 Optional<Value *>
1370 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1371   // If checkForAllReturnedValues provides a unique value, ignoring potential
1372   // undef values that can also be present, it is assumed to be the actual
1373   // return value and forwarded to the caller of this method. If there are
1374   // multiple, a nullptr is returned indicating there cannot be a unique
1375   // returned value.
1376   Optional<Value *> UniqueRV;
1377 
1378   auto Pred = [&](Value &RV) -> bool {
1379     // If we found a second returned value and neither the current nor the saved
1380     // one is an undef, there is no unique returned value. Undefs are special
1381     // since we can pretend they have any value.
1382     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1383         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1384       UniqueRV = nullptr;
1385       return false;
1386     }
1387 
1388     // Do not overwrite a value with an undef.
1389     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1390       UniqueRV = &RV;
1391 
1392     return true;
1393   };
1394 
1395   if (!A.checkForAllReturnedValues(Pred, *this))
1396     UniqueRV = nullptr;
1397 
1398   return UniqueRV;
1399 }
1400 
1401 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1402     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1403         &Pred) const {
1404   if (!isValidState())
1405     return false;
1406 
1407   // Check all returned values but ignore call sites as long as we have not
1408   // encountered an overdefined one during an update.
1409   for (auto &It : ReturnedValues) {
1410     Value *RV = It.first;
1411 
1412     CallBase *CB = dyn_cast<CallBase>(RV);
1413     if (CB && !UnresolvedCalls.count(CB))
1414       continue;
1415 
1416     if (!Pred(*RV, It.second))
1417       return false;
1418   }
1419 
1420   return true;
1421 }
1422 
1423 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1424   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1425   bool Changed = false;
1426 
1427   // State used in the value traversals starting in returned values.
1428   struct RVState {
1429     // The map in which we collect return values -> return instrs.
1430     decltype(ReturnedValues) &RetValsMap;
1431     // The flag to indicate a change.
1432     bool &Changed;
1433     // The return instrs we come from.
1434     SmallSetVector<ReturnInst *, 4> RetInsts;
1435   };
1436 
1437   // Callback for a leaf value returned by the associated function.
1438   auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
1439     auto Size = RVS.RetValsMap[&Val].size();
1440     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1441     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1442     RVS.Changed |= Inserted;
1443     LLVM_DEBUG({
1444       if (Inserted)
1445         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1446                << " => " << RVS.RetInsts.size() << "\n";
1447     });
1448     return true;
1449   };
1450 
1451   // Helper method to invoke the generic value traversal.
1452   auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
1453     IRPosition RetValPos = IRPosition::value(RV);
1454     return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
1455                                                             RVS, VisitValueCB);
1456   };
1457 
1458   // Callback for all "return intructions" live in the associated function.
1459   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1460     ReturnInst &Ret = cast<ReturnInst>(I);
1461     RVState RVS({ReturnedValues, Changed, {}});
1462     RVS.RetInsts.insert(&Ret);
1463     return VisitReturnedValue(*Ret.getReturnValue(), RVS);
1464   };
1465 
1466   // Start by discovering returned values from all live returned instructions in
1467   // the associated function.
1468   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1469     return indicatePessimisticFixpoint();
1470 
1471   // Once returned values "directly" present in the code are handled we try to
1472   // resolve returned calls.
1473   decltype(ReturnedValues) NewRVsMap;
1474   for (auto &It : ReturnedValues) {
1475     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1476                       << " by #" << It.second.size() << " RIs\n");
1477     CallBase *CB = dyn_cast<CallBase>(It.first);
1478     if (!CB || UnresolvedCalls.count(CB))
1479       continue;
1480 
1481     if (!CB->getCalledFunction()) {
1482       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1483                         << "\n");
1484       UnresolvedCalls.insert(CB);
1485       continue;
1486     }
1487 
1488     // TODO: use the function scope once we have call site AAReturnedValues.
1489     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1490         *this, IRPosition::function(*CB->getCalledFunction()));
1491     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1492                       << RetValAA << "\n");
1493 
1494     // Skip dead ends, thus if we do not know anything about the returned
1495     // call we mark it as unresolved and it will stay that way.
1496     if (!RetValAA.getState().isValidState()) {
1497       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1498                         << "\n");
1499       UnresolvedCalls.insert(CB);
1500       continue;
1501     }
1502 
1503     // Do not try to learn partial information. If the callee has unresolved
1504     // return values we will treat the call as unresolved/opaque.
1505     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1506     if (!RetValAAUnresolvedCalls.empty()) {
1507       UnresolvedCalls.insert(CB);
1508       continue;
1509     }
1510 
1511     // Now check if we can track transitively returned values. If possible, thus
1512     // if all return value can be represented in the current scope, do so.
1513     bool Unresolved = false;
1514     for (auto &RetValAAIt : RetValAA.returned_values()) {
1515       Value *RetVal = RetValAAIt.first;
1516       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1517           isa<Constant>(RetVal))
1518         continue;
1519       // Anything that did not fit in the above categories cannot be resolved,
1520       // mark the call as unresolved.
1521       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1522                            "cannot be translated: "
1523                         << *RetVal << "\n");
1524       UnresolvedCalls.insert(CB);
1525       Unresolved = true;
1526       break;
1527     }
1528 
1529     if (Unresolved)
1530       continue;
1531 
1532     // Now track transitively returned values.
1533     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1534     if (NumRetAA == RetValAA.getNumReturnValues()) {
1535       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1536                            "changed since it was seen last\n");
1537       continue;
1538     }
1539     NumRetAA = RetValAA.getNumReturnValues();
1540 
1541     for (auto &RetValAAIt : RetValAA.returned_values()) {
1542       Value *RetVal = RetValAAIt.first;
1543       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1544         // Arguments are mapped to call site operands and we begin the traversal
1545         // again.
1546         bool Unused = false;
1547         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1548         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
1549         continue;
1550       } else if (isa<CallBase>(RetVal)) {
1551         // Call sites are resolved by the callee attribute over time, no need to
1552         // do anything for us.
1553         continue;
1554       } else if (isa<Constant>(RetVal)) {
1555         // Constants are valid everywhere, we can simply take them.
1556         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1557         continue;
1558       }
1559     }
1560   }
1561 
1562   // To avoid modifications to the ReturnedValues map while we iterate over it
1563   // we kept record of potential new entries in a copy map, NewRVsMap.
1564   for (auto &It : NewRVsMap) {
1565     assert(!It.second.empty() && "Entry does not add anything.");
1566     auto &ReturnInsts = ReturnedValues[It.first];
1567     for (ReturnInst *RI : It.second)
1568       if (ReturnInsts.insert(RI)) {
1569         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1570                           << *It.first << " => " << *RI << "\n");
1571         Changed = true;
1572       }
1573   }
1574 
1575   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1576   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1577 }
1578 
1579 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1580   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1581 
1582   /// See AbstractAttribute::trackStatistics()
1583   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1584 };
1585 
1586 /// Returned values information for a call sites.
1587 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1588   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1589 
1590   /// See AbstractAttribute::initialize(...).
1591   void initialize(Attributor &A) override {
1592     // TODO: Once we have call site specific value information we can provide
1593     //       call site specific liveness information and then it makes
1594     //       sense to specialize attributes for call sites instead of
1595     //       redirecting requests to the callee.
1596     llvm_unreachable("Abstract attributes for returned values are not "
1597                      "supported for call sites yet!");
1598   }
1599 
1600   /// See AbstractAttribute::updateImpl(...).
1601   ChangeStatus updateImpl(Attributor &A) override {
1602     return indicatePessimisticFixpoint();
1603   }
1604 
1605   /// See AbstractAttribute::trackStatistics()
1606   void trackStatistics() const override {}
1607 };
1608 
1609 /// ------------------------ NoSync Function Attribute -------------------------
1610 
1611 struct AANoSyncImpl : AANoSync {
1612   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1613 
1614   const std::string getAsStr() const override {
1615     return getAssumed() ? "nosync" : "may-sync";
1616   }
1617 
1618   /// See AbstractAttribute::updateImpl(...).
1619   ChangeStatus updateImpl(Attributor &A) override;
1620 
1621   /// Helper function used to determine whether an instruction is non-relaxed
1622   /// atomic. In other words, if an atomic instruction does not have unordered
1623   /// or monotonic ordering
1624   static bool isNonRelaxedAtomic(Instruction *I);
1625 
1626   /// Helper function used to determine whether an instruction is volatile.
1627   static bool isVolatile(Instruction *I);
1628 
1629   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1630   /// memset).
1631   static bool isNoSyncIntrinsic(Instruction *I);
1632 };
1633 
1634 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1635   if (!I->isAtomic())
1636     return false;
1637 
1638   AtomicOrdering Ordering;
1639   switch (I->getOpcode()) {
1640   case Instruction::AtomicRMW:
1641     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1642     break;
1643   case Instruction::Store:
1644     Ordering = cast<StoreInst>(I)->getOrdering();
1645     break;
1646   case Instruction::Load:
1647     Ordering = cast<LoadInst>(I)->getOrdering();
1648     break;
1649   case Instruction::Fence: {
1650     auto *FI = cast<FenceInst>(I);
1651     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1652       return false;
1653     Ordering = FI->getOrdering();
1654     break;
1655   }
1656   case Instruction::AtomicCmpXchg: {
1657     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1658     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1659     // Only if both are relaxed, than it can be treated as relaxed.
1660     // Otherwise it is non-relaxed.
1661     if (Success != AtomicOrdering::Unordered &&
1662         Success != AtomicOrdering::Monotonic)
1663       return true;
1664     if (Failure != AtomicOrdering::Unordered &&
1665         Failure != AtomicOrdering::Monotonic)
1666       return true;
1667     return false;
1668   }
1669   default:
1670     llvm_unreachable(
1671         "New atomic operations need to be known in the attributor.");
1672   }
1673 
1674   // Relaxed.
1675   if (Ordering == AtomicOrdering::Unordered ||
1676       Ordering == AtomicOrdering::Monotonic)
1677     return false;
1678   return true;
1679 }
1680 
1681 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1682 /// FIXME: We should ipmrove the handling of intrinsics.
1683 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1684   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1685     switch (II->getIntrinsicID()) {
1686     /// Element wise atomic memory intrinsics are can only be unordered,
1687     /// therefore nosync.
1688     case Intrinsic::memset_element_unordered_atomic:
1689     case Intrinsic::memmove_element_unordered_atomic:
1690     case Intrinsic::memcpy_element_unordered_atomic:
1691       return true;
1692     case Intrinsic::memset:
1693     case Intrinsic::memmove:
1694     case Intrinsic::memcpy:
1695       if (!cast<MemIntrinsic>(II)->isVolatile())
1696         return true;
1697       return false;
1698     default:
1699       return false;
1700     }
1701   }
1702   return false;
1703 }
1704 
1705 bool AANoSyncImpl::isVolatile(Instruction *I) {
1706   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1707          "Calls should not be checked here");
1708 
1709   switch (I->getOpcode()) {
1710   case Instruction::AtomicRMW:
1711     return cast<AtomicRMWInst>(I)->isVolatile();
1712   case Instruction::Store:
1713     return cast<StoreInst>(I)->isVolatile();
1714   case Instruction::Load:
1715     return cast<LoadInst>(I)->isVolatile();
1716   case Instruction::AtomicCmpXchg:
1717     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1718   default:
1719     return false;
1720   }
1721 }
1722 
1723 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1724 
1725   auto CheckRWInstForNoSync = [&](Instruction &I) {
1726     /// We are looking for volatile instructions or Non-Relaxed atomics.
1727     /// FIXME: We should improve the handling of intrinsics.
1728 
1729     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1730       return true;
1731 
1732     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1733       if (ICS.hasFnAttr(Attribute::NoSync))
1734         return true;
1735 
1736       const auto &NoSyncAA =
1737           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1738       if (NoSyncAA.isAssumedNoSync())
1739         return true;
1740       return false;
1741     }
1742 
1743     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1744       return true;
1745 
1746     return false;
1747   };
1748 
1749   auto CheckForNoSync = [&](Instruction &I) {
1750     // At this point we handled all read/write effects and they are all
1751     // nosync, so they can be skipped.
1752     if (I.mayReadOrWriteMemory())
1753       return true;
1754 
1755     // non-convergent and readnone imply nosync.
1756     return !ImmutableCallSite(&I).isConvergent();
1757   };
1758 
1759   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1760       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1761     return indicatePessimisticFixpoint();
1762 
1763   return ChangeStatus::UNCHANGED;
1764 }
1765 
1766 struct AANoSyncFunction final : public AANoSyncImpl {
1767   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1768 
1769   /// See AbstractAttribute::trackStatistics()
1770   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1771 };
1772 
1773 /// NoSync attribute deduction for a call sites.
1774 struct AANoSyncCallSite final : AANoSyncImpl {
1775   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1776 
1777   /// See AbstractAttribute::initialize(...).
1778   void initialize(Attributor &A) override {
1779     AANoSyncImpl::initialize(A);
1780     Function *F = getAssociatedFunction();
1781     if (!F)
1782       indicatePessimisticFixpoint();
1783   }
1784 
1785   /// See AbstractAttribute::updateImpl(...).
1786   ChangeStatus updateImpl(Attributor &A) override {
1787     // TODO: Once we have call site specific value information we can provide
1788     //       call site specific liveness information and then it makes
1789     //       sense to specialize attributes for call sites arguments instead of
1790     //       redirecting requests to the callee argument.
1791     Function *F = getAssociatedFunction();
1792     const IRPosition &FnPos = IRPosition::function(*F);
1793     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1794     return clampStateAndIndicateChange(
1795         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1796   }
1797 
1798   /// See AbstractAttribute::trackStatistics()
1799   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1800 };
1801 
1802 /// ------------------------ No-Free Attributes ----------------------------
1803 
1804 struct AANoFreeImpl : public AANoFree {
1805   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1806 
1807   /// See AbstractAttribute::updateImpl(...).
1808   ChangeStatus updateImpl(Attributor &A) override {
1809     auto CheckForNoFree = [&](Instruction &I) {
1810       ImmutableCallSite ICS(&I);
1811       if (ICS.hasFnAttr(Attribute::NoFree))
1812         return true;
1813 
1814       const auto &NoFreeAA =
1815           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1816       return NoFreeAA.isAssumedNoFree();
1817     };
1818 
1819     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1820       return indicatePessimisticFixpoint();
1821     return ChangeStatus::UNCHANGED;
1822   }
1823 
1824   /// See AbstractAttribute::getAsStr().
1825   const std::string getAsStr() const override {
1826     return getAssumed() ? "nofree" : "may-free";
1827   }
1828 };
1829 
1830 struct AANoFreeFunction final : public AANoFreeImpl {
1831   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1832 
1833   /// See AbstractAttribute::trackStatistics()
1834   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1835 };
1836 
1837 /// NoFree attribute deduction for a call sites.
1838 struct AANoFreeCallSite final : AANoFreeImpl {
1839   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1840 
1841   /// See AbstractAttribute::initialize(...).
1842   void initialize(Attributor &A) override {
1843     AANoFreeImpl::initialize(A);
1844     Function *F = getAssociatedFunction();
1845     if (!F)
1846       indicatePessimisticFixpoint();
1847   }
1848 
1849   /// See AbstractAttribute::updateImpl(...).
1850   ChangeStatus updateImpl(Attributor &A) override {
1851     // TODO: Once we have call site specific value information we can provide
1852     //       call site specific liveness information and then it makes
1853     //       sense to specialize attributes for call sites arguments instead of
1854     //       redirecting requests to the callee argument.
1855     Function *F = getAssociatedFunction();
1856     const IRPosition &FnPos = IRPosition::function(*F);
1857     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1858     return clampStateAndIndicateChange(
1859         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1860   }
1861 
1862   /// See AbstractAttribute::trackStatistics()
1863   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1864 };
1865 
1866 /// NoFree attribute for floating values.
1867 struct AANoFreeFloating : AANoFreeImpl {
1868   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1869 
1870   /// See AbstractAttribute::trackStatistics()
1871   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1872 
1873   /// See Abstract Attribute::updateImpl(...).
1874   ChangeStatus updateImpl(Attributor &A) override {
1875     const IRPosition &IRP = getIRPosition();
1876 
1877     const auto &NoFreeAA =
1878         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1879     if (NoFreeAA.isAssumedNoFree())
1880       return ChangeStatus::UNCHANGED;
1881 
1882     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1883     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1884       Instruction *UserI = cast<Instruction>(U.getUser());
1885       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1886         if (CB->isBundleOperand(&U))
1887           return false;
1888         if (!CB->isArgOperand(&U))
1889           return true;
1890         unsigned ArgNo = CB->getArgOperandNo(&U);
1891 
1892         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1893             *this, IRPosition::callsite_argument(*CB, ArgNo));
1894         return NoFreeArg.isAssumedNoFree();
1895       }
1896 
1897       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1898           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1899         Follow = true;
1900         return true;
1901       }
1902       if (isa<ReturnInst>(UserI))
1903         return true;
1904 
1905       // Unknown user.
1906       return false;
1907     };
1908     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1909       return indicatePessimisticFixpoint();
1910 
1911     return ChangeStatus::UNCHANGED;
1912   }
1913 };
1914 
1915 /// NoFree attribute for a call site argument.
1916 struct AANoFreeArgument final : AANoFreeFloating {
1917   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1918 
1919   /// See AbstractAttribute::trackStatistics()
1920   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1921 };
1922 
1923 /// NoFree attribute for call site arguments.
1924 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1925   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1926 
1927   /// See AbstractAttribute::updateImpl(...).
1928   ChangeStatus updateImpl(Attributor &A) override {
1929     // TODO: Once we have call site specific value information we can provide
1930     //       call site specific liveness information and then it makes
1931     //       sense to specialize attributes for call sites arguments instead of
1932     //       redirecting requests to the callee argument.
1933     Argument *Arg = getAssociatedArgument();
1934     if (!Arg)
1935       return indicatePessimisticFixpoint();
1936     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1937     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1938     return clampStateAndIndicateChange(
1939         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1940   }
1941 
1942   /// See AbstractAttribute::trackStatistics()
1943   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1944 };
1945 
1946 /// NoFree attribute for function return value.
1947 struct AANoFreeReturned final : AANoFreeFloating {
1948   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1949     llvm_unreachable("NoFree is not applicable to function returns!");
1950   }
1951 
1952   /// See AbstractAttribute::initialize(...).
1953   void initialize(Attributor &A) override {
1954     llvm_unreachable("NoFree is not applicable to function returns!");
1955   }
1956 
1957   /// See AbstractAttribute::updateImpl(...).
1958   ChangeStatus updateImpl(Attributor &A) override {
1959     llvm_unreachable("NoFree is not applicable to function returns!");
1960   }
1961 
1962   /// See AbstractAttribute::trackStatistics()
1963   void trackStatistics() const override {}
1964 };
1965 
1966 /// NoFree attribute deduction for a call site return value.
1967 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1968   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1969 
1970   ChangeStatus manifest(Attributor &A) override {
1971     return ChangeStatus::UNCHANGED;
1972   }
1973   /// See AbstractAttribute::trackStatistics()
1974   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1975 };
1976 
1977 /// ------------------------ NonNull Argument Attribute ------------------------
1978 static int64_t getKnownNonNullAndDerefBytesForUse(
1979     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1980     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1981   TrackUse = false;
1982 
1983   const Value *UseV = U->get();
1984   if (!UseV->getType()->isPointerTy())
1985     return 0;
1986 
1987   Type *PtrTy = UseV->getType();
1988   const Function *F = I->getFunction();
1989   bool NullPointerIsDefined =
1990       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1991   const DataLayout &DL = A.getInfoCache().getDL();
1992   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1993     if (ICS.isBundleOperand(U))
1994       return 0;
1995 
1996     if (ICS.isCallee(U)) {
1997       IsNonNull |= !NullPointerIsDefined;
1998       return 0;
1999     }
2000 
2001     unsigned ArgNo = ICS.getArgumentNo(U);
2002     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
2003     // As long as we only use known information there is no need to track
2004     // dependences here.
2005     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
2006                                                   /* TrackDependence */ false);
2007     IsNonNull |= DerefAA.isKnownNonNull();
2008     return DerefAA.getKnownDereferenceableBytes();
2009   }
2010 
2011   // We need to follow common pointer manipulation uses to the accesses they
2012   // feed into. We can try to be smart to avoid looking through things we do not
2013   // like for now, e.g., non-inbounds GEPs.
2014   if (isa<CastInst>(I)) {
2015     TrackUse = true;
2016     return 0;
2017   }
2018   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
2019     if (GEP->hasAllConstantIndices()) {
2020       TrackUse = true;
2021       return 0;
2022     }
2023 
2024   int64_t Offset;
2025   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
2026     if (Base == &AssociatedValue &&
2027         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2028       int64_t DerefBytes =
2029           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2030 
2031       IsNonNull |= !NullPointerIsDefined;
2032       return std::max(int64_t(0), DerefBytes);
2033     }
2034   }
2035 
2036   /// Corner case when an offset is 0.
2037   if (const Value *Base = getBasePointerOfAccessPointerOperand(
2038           I, Offset, DL, /*AllowNonInbounds*/ true)) {
2039     if (Offset == 0 && Base == &AssociatedValue &&
2040         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2041       int64_t DerefBytes =
2042           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2043       IsNonNull |= !NullPointerIsDefined;
2044       return std::max(int64_t(0), DerefBytes);
2045     }
2046   }
2047 
2048   return 0;
2049 }
2050 
2051 struct AANonNullImpl : AANonNull {
2052   AANonNullImpl(const IRPosition &IRP)
2053       : AANonNull(IRP),
2054         NullIsDefined(NullPointerIsDefined(
2055             getAnchorScope(),
2056             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2057 
2058   /// See AbstractAttribute::initialize(...).
2059   void initialize(Attributor &A) override {
2060     if (!NullIsDefined &&
2061         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
2062       indicateOptimisticFixpoint();
2063     else if (isa<ConstantPointerNull>(getAssociatedValue()))
2064       indicatePessimisticFixpoint();
2065     else
2066       AANonNull::initialize(A);
2067   }
2068 
2069   /// See AAFromMustBeExecutedContext
2070   bool followUse(Attributor &A, const Use *U, const Instruction *I,
2071                  AANonNull::StateType &State) {
2072     bool IsNonNull = false;
2073     bool TrackUse = false;
2074     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2075                                        IsNonNull, TrackUse);
2076     State.setKnown(IsNonNull);
2077     return TrackUse;
2078   }
2079 
2080   /// See AbstractAttribute::getAsStr().
2081   const std::string getAsStr() const override {
2082     return getAssumed() ? "nonnull" : "may-null";
2083   }
2084 
2085   /// Flag to determine if the underlying value can be null and still allow
2086   /// valid accesses.
2087   const bool NullIsDefined;
2088 };
2089 
2090 /// NonNull attribute for a floating value.
2091 struct AANonNullFloating
2092     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
2093   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
2094   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
2095 
2096   /// See AbstractAttribute::updateImpl(...).
2097   ChangeStatus updateImpl(Attributor &A) override {
2098     ChangeStatus Change = Base::updateImpl(A);
2099     if (isKnownNonNull())
2100       return Change;
2101 
2102     if (!NullIsDefined) {
2103       const auto &DerefAA =
2104           A.getAAFor<AADereferenceable>(*this, getIRPosition());
2105       if (DerefAA.getAssumedDereferenceableBytes())
2106         return Change;
2107     }
2108 
2109     const DataLayout &DL = A.getDataLayout();
2110 
2111     DominatorTree *DT = nullptr;
2112     AssumptionCache *AC = nullptr;
2113     InformationCache &InfoCache = A.getInfoCache();
2114     if (const Function *Fn = getAnchorScope()) {
2115       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2116       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2117     }
2118 
2119     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
2120                             bool Stripped) -> bool {
2121       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
2122       if (!Stripped && this == &AA) {
2123         if (!isKnownNonZero(&V, DL, 0, AC, getCtxI(), DT))
2124           T.indicatePessimisticFixpoint();
2125       } else {
2126         // Use abstract attribute information.
2127         const AANonNull::StateType &NS =
2128             static_cast<const AANonNull::StateType &>(AA.getState());
2129         T ^= NS;
2130       }
2131       return T.isValidState();
2132     };
2133 
2134     StateType T;
2135     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
2136                                                      T, VisitValueCB))
2137       return indicatePessimisticFixpoint();
2138 
2139     return clampStateAndIndicateChange(getState(), T);
2140   }
2141 
2142   /// See AbstractAttribute::trackStatistics()
2143   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2144 };
2145 
2146 /// NonNull attribute for function return value.
2147 struct AANonNullReturned final
2148     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
2149   AANonNullReturned(const IRPosition &IRP)
2150       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
2151 
2152   /// See AbstractAttribute::trackStatistics()
2153   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2154 };
2155 
2156 /// NonNull attribute for function argument.
2157 struct AANonNullArgument final
2158     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
2159                                                               AANonNullImpl> {
2160   AANonNullArgument(const IRPosition &IRP)
2161       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
2162                                                                 AANonNullImpl>(
2163             IRP) {}
2164 
2165   /// See AbstractAttribute::trackStatistics()
2166   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2167 };
2168 
2169 struct AANonNullCallSiteArgument final : AANonNullFloating {
2170   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
2171 
2172   /// See AbstractAttribute::trackStatistics()
2173   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2174 };
2175 
2176 /// NonNull attribute for a call site return position.
2177 struct AANonNullCallSiteReturned final
2178     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
2179                                                              AANonNullImpl> {
2180   AANonNullCallSiteReturned(const IRPosition &IRP)
2181       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
2182                                                                AANonNullImpl>(
2183             IRP) {}
2184 
2185   /// See AbstractAttribute::trackStatistics()
2186   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2187 };
2188 
2189 /// ------------------------ No-Recurse Attributes ----------------------------
2190 
2191 struct AANoRecurseImpl : public AANoRecurse {
2192   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
2193 
2194   /// See AbstractAttribute::getAsStr()
2195   const std::string getAsStr() const override {
2196     return getAssumed() ? "norecurse" : "may-recurse";
2197   }
2198 };
2199 
2200 struct AANoRecurseFunction final : AANoRecurseImpl {
2201   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
2202 
2203   /// See AbstractAttribute::initialize(...).
2204   void initialize(Attributor &A) override {
2205     AANoRecurseImpl::initialize(A);
2206     if (const Function *F = getAnchorScope())
2207       if (A.getInfoCache().getSccSize(*F) != 1)
2208         indicatePessimisticFixpoint();
2209   }
2210 
2211   /// See AbstractAttribute::updateImpl(...).
2212   ChangeStatus updateImpl(Attributor &A) override {
2213 
2214     // If all live call sites are known to be no-recurse, we are as well.
2215     auto CallSitePred = [&](AbstractCallSite ACS) {
2216       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2217           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2218           /* TrackDependence */ false, DepClassTy::OPTIONAL);
2219       return NoRecurseAA.isKnownNoRecurse();
2220     };
2221     bool AllCallSitesKnown;
2222     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2223       // If we know all call sites and all are known no-recurse, we are done.
2224       // If all known call sites, which might not be all that exist, are known
2225       // to be no-recurse, we are not done but we can continue to assume
2226       // no-recurse. If one of the call sites we have not visited will become
2227       // live, another update is triggered.
2228       if (AllCallSitesKnown)
2229         indicateOptimisticFixpoint();
2230       return ChangeStatus::UNCHANGED;
2231     }
2232 
2233     // If the above check does not hold anymore we look at the calls.
2234     auto CheckForNoRecurse = [&](Instruction &I) {
2235       ImmutableCallSite ICS(&I);
2236       if (ICS.hasFnAttr(Attribute::NoRecurse))
2237         return true;
2238 
2239       const auto &NoRecurseAA =
2240           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
2241       if (!NoRecurseAA.isAssumedNoRecurse())
2242         return false;
2243 
2244       // Recursion to the same function
2245       if (ICS.getCalledFunction() == getAnchorScope())
2246         return false;
2247 
2248       return true;
2249     };
2250 
2251     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
2252       return indicatePessimisticFixpoint();
2253     return ChangeStatus::UNCHANGED;
2254   }
2255 
2256   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2257 };
2258 
2259 /// NoRecurse attribute deduction for a call sites.
2260 struct AANoRecurseCallSite final : AANoRecurseImpl {
2261   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
2262 
2263   /// See AbstractAttribute::initialize(...).
2264   void initialize(Attributor &A) override {
2265     AANoRecurseImpl::initialize(A);
2266     Function *F = getAssociatedFunction();
2267     if (!F)
2268       indicatePessimisticFixpoint();
2269   }
2270 
2271   /// See AbstractAttribute::updateImpl(...).
2272   ChangeStatus updateImpl(Attributor &A) override {
2273     // TODO: Once we have call site specific value information we can provide
2274     //       call site specific liveness information and then it makes
2275     //       sense to specialize attributes for call sites arguments instead of
2276     //       redirecting requests to the callee argument.
2277     Function *F = getAssociatedFunction();
2278     const IRPosition &FnPos = IRPosition::function(*F);
2279     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
2280     return clampStateAndIndicateChange(
2281         getState(),
2282         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
2283   }
2284 
2285   /// See AbstractAttribute::trackStatistics()
2286   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2287 };
2288 
2289 /// -------------------- Undefined-Behavior Attributes ------------------------
2290 
2291 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2292   AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
2293 
2294   /// See AbstractAttribute::updateImpl(...).
2295   // through a pointer (i.e. also branches etc.)
2296   ChangeStatus updateImpl(Attributor &A) override {
2297     const size_t UBPrevSize = KnownUBInsts.size();
2298     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2299 
2300     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2301       // Skip instructions that are already saved.
2302       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2303         return true;
2304 
2305       // If we reach here, we know we have an instruction
2306       // that accesses memory through a pointer operand,
2307       // for which getPointerOperand() should give it to us.
2308       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
2309       assert(PtrOp &&
2310              "Expected pointer operand of memory accessing instruction");
2311 
2312       // A memory access through a pointer is considered UB
2313       // only if the pointer has constant null value.
2314       // TODO: Expand it to not only check constant values.
2315       if (!isa<ConstantPointerNull>(PtrOp)) {
2316         AssumedNoUBInsts.insert(&I);
2317         return true;
2318       }
2319       const Type *PtrTy = PtrOp->getType();
2320 
2321       // Because we only consider instructions inside functions,
2322       // assume that a parent function exists.
2323       const Function *F = I.getFunction();
2324 
2325       // A memory access using constant null pointer is only considered UB
2326       // if null pointer is _not_ defined for the target platform.
2327       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2328         AssumedNoUBInsts.insert(&I);
2329       else
2330         KnownUBInsts.insert(&I);
2331       return true;
2332     };
2333 
2334     auto InspectBrInstForUB = [&](Instruction &I) {
2335       // A conditional branch instruction is considered UB if it has `undef`
2336       // condition.
2337 
2338       // Skip instructions that are already saved.
2339       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2340         return true;
2341 
2342       // We know we have a branch instruction.
2343       auto BrInst = cast<BranchInst>(&I);
2344 
2345       // Unconditional branches are never considered UB.
2346       if (BrInst->isUnconditional())
2347         return true;
2348 
2349       // Either we stopped and the appropriate action was taken,
2350       // or we got back a simplified value to continue.
2351       Optional<Value *> SimplifiedCond =
2352           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2353       if (!SimplifiedCond.hasValue())
2354         return true;
2355       AssumedNoUBInsts.insert(&I);
2356       return true;
2357     };
2358 
2359     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2360                               {Instruction::Load, Instruction::Store,
2361                                Instruction::AtomicCmpXchg,
2362                                Instruction::AtomicRMW},
2363                               /* CheckBBLivenessOnly */ true);
2364     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2365                               /* CheckBBLivenessOnly */ true);
2366     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2367         UBPrevSize != KnownUBInsts.size())
2368       return ChangeStatus::CHANGED;
2369     return ChangeStatus::UNCHANGED;
2370   }
2371 
2372   bool isKnownToCauseUB(Instruction *I) const override {
2373     return KnownUBInsts.count(I);
2374   }
2375 
2376   bool isAssumedToCauseUB(Instruction *I) const override {
2377     // In simple words, if an instruction is not in the assumed to _not_
2378     // cause UB, then it is assumed UB (that includes those
2379     // in the KnownUBInsts set). The rest is boilerplate
2380     // is to ensure that it is one of the instructions we test
2381     // for UB.
2382 
2383     switch (I->getOpcode()) {
2384     case Instruction::Load:
2385     case Instruction::Store:
2386     case Instruction::AtomicCmpXchg:
2387     case Instruction::AtomicRMW:
2388       return !AssumedNoUBInsts.count(I);
2389     case Instruction::Br: {
2390       auto BrInst = cast<BranchInst>(I);
2391       if (BrInst->isUnconditional())
2392         return false;
2393       return !AssumedNoUBInsts.count(I);
2394     } break;
2395     default:
2396       return false;
2397     }
2398     return false;
2399   }
2400 
2401   ChangeStatus manifest(Attributor &A) override {
2402     if (KnownUBInsts.empty())
2403       return ChangeStatus::UNCHANGED;
2404     for (Instruction *I : KnownUBInsts)
2405       A.changeToUnreachableAfterManifest(I);
2406     return ChangeStatus::CHANGED;
2407   }
2408 
2409   /// See AbstractAttribute::getAsStr()
2410   const std::string getAsStr() const override {
2411     return getAssumed() ? "undefined-behavior" : "no-ub";
2412   }
2413 
2414   /// Note: The correctness of this analysis depends on the fact that the
2415   /// following 2 sets will stop changing after some point.
2416   /// "Change" here means that their size changes.
2417   /// The size of each set is monotonically increasing
2418   /// (we only add items to them) and it is upper bounded by the number of
2419   /// instructions in the processed function (we can never save more
2420   /// elements in either set than this number). Hence, at some point,
2421   /// they will stop increasing.
2422   /// Consequently, at some point, both sets will have stopped
2423   /// changing, effectively making the analysis reach a fixpoint.
2424 
2425   /// Note: These 2 sets are disjoint and an instruction can be considered
2426   /// one of 3 things:
2427   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2428   ///    the KnownUBInsts set.
2429   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2430   ///    has a reason to assume it).
2431   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2432   ///    could not find a reason to assume or prove that it can cause UB,
2433   ///    hence it assumes it doesn't. We have a set for these instructions
2434   ///    so that we don't reprocess them in every update.
2435   ///    Note however that instructions in this set may cause UB.
2436 
2437 protected:
2438   /// A set of all live instructions _known_ to cause UB.
2439   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2440 
2441 private:
2442   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2443   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2444 
2445   // Should be called on updates in which if we're processing an instruction
2446   // \p I that depends on a value \p V, one of the following has to happen:
2447   // - If the value is assumed, then stop.
2448   // - If the value is known but undef, then consider it UB.
2449   // - Otherwise, do specific processing with the simplified value.
2450   // We return None in the first 2 cases to signify that an appropriate
2451   // action was taken and the caller should stop.
2452   // Otherwise, we return the simplified value that the caller should
2453   // use for specific processing.
2454   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2455                                          Instruction *I) {
2456     const auto &ValueSimplifyAA =
2457         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2458     Optional<Value *> SimplifiedV =
2459         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2460     if (!ValueSimplifyAA.isKnown()) {
2461       // Don't depend on assumed values.
2462       return llvm::None;
2463     }
2464     if (!SimplifiedV.hasValue()) {
2465       // If it is known (which we tested above) but it doesn't have a value,
2466       // then we can assume `undef` and hence the instruction is UB.
2467       KnownUBInsts.insert(I);
2468       return llvm::None;
2469     }
2470     Value *Val = SimplifiedV.getValue();
2471     if (isa<UndefValue>(Val)) {
2472       KnownUBInsts.insert(I);
2473       return llvm::None;
2474     }
2475     return Val;
2476   }
2477 };
2478 
2479 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2480   AAUndefinedBehaviorFunction(const IRPosition &IRP)
2481       : AAUndefinedBehaviorImpl(IRP) {}
2482 
2483   /// See AbstractAttribute::trackStatistics()
2484   void trackStatistics() const override {
2485     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2486                "Number of instructions known to have UB");
2487     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2488         KnownUBInsts.size();
2489   }
2490 };
2491 
2492 /// ------------------------ Will-Return Attributes ----------------------------
2493 
2494 // Helper function that checks whether a function has any cycle.
2495 // TODO: Replace with more efficent code
2496 static bool containsCycle(Function &F) {
2497   SmallPtrSet<BasicBlock *, 32> Visited;
2498 
2499   // Traverse BB by dfs and check whether successor is already visited.
2500   for (BasicBlock *BB : depth_first(&F)) {
2501     Visited.insert(BB);
2502     for (auto *SuccBB : successors(BB)) {
2503       if (Visited.count(SuccBB))
2504         return true;
2505     }
2506   }
2507   return false;
2508 }
2509 
2510 // Helper function that checks the function have a loop which might become an
2511 // endless loop
2512 // FIXME: Any cycle is regarded as endless loop for now.
2513 //        We have to allow some patterns.
2514 static bool containsPossiblyEndlessLoop(Function *F) {
2515   return !F || !F->hasExactDefinition() || containsCycle(*F);
2516 }
2517 
2518 struct AAWillReturnImpl : public AAWillReturn {
2519   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2520 
2521   /// See AbstractAttribute::initialize(...).
2522   void initialize(Attributor &A) override {
2523     AAWillReturn::initialize(A);
2524 
2525     Function *F = getAssociatedFunction();
2526     if (containsPossiblyEndlessLoop(F))
2527       indicatePessimisticFixpoint();
2528   }
2529 
2530   /// See AbstractAttribute::updateImpl(...).
2531   ChangeStatus updateImpl(Attributor &A) override {
2532     auto CheckForWillReturn = [&](Instruction &I) {
2533       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2534       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2535       if (WillReturnAA.isKnownWillReturn())
2536         return true;
2537       if (!WillReturnAA.isAssumedWillReturn())
2538         return false;
2539       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2540       return NoRecurseAA.isAssumedNoRecurse();
2541     };
2542 
2543     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2544       return indicatePessimisticFixpoint();
2545 
2546     return ChangeStatus::UNCHANGED;
2547   }
2548 
2549   /// See AbstractAttribute::getAsStr()
2550   const std::string getAsStr() const override {
2551     return getAssumed() ? "willreturn" : "may-noreturn";
2552   }
2553 };
2554 
2555 struct AAWillReturnFunction final : AAWillReturnImpl {
2556   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2557 
2558   /// See AbstractAttribute::trackStatistics()
2559   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2560 };
2561 
2562 /// WillReturn attribute deduction for a call sites.
2563 struct AAWillReturnCallSite final : AAWillReturnImpl {
2564   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2565 
2566   /// See AbstractAttribute::initialize(...).
2567   void initialize(Attributor &A) override {
2568     AAWillReturnImpl::initialize(A);
2569     Function *F = getAssociatedFunction();
2570     if (!F)
2571       indicatePessimisticFixpoint();
2572   }
2573 
2574   /// See AbstractAttribute::updateImpl(...).
2575   ChangeStatus updateImpl(Attributor &A) override {
2576     // TODO: Once we have call site specific value information we can provide
2577     //       call site specific liveness information and then it makes
2578     //       sense to specialize attributes for call sites arguments instead of
2579     //       redirecting requests to the callee argument.
2580     Function *F = getAssociatedFunction();
2581     const IRPosition &FnPos = IRPosition::function(*F);
2582     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2583     return clampStateAndIndicateChange(
2584         getState(),
2585         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2586   }
2587 
2588   /// See AbstractAttribute::trackStatistics()
2589   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2590 };
2591 
2592 /// -------------------AAReachability Attribute--------------------------
2593 
2594 struct AAReachabilityImpl : AAReachability {
2595   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2596 
2597   const std::string getAsStr() const override {
2598     // TODO: Return the number of reachable queries.
2599     return "reachable";
2600   }
2601 
2602   /// See AbstractAttribute::initialize(...).
2603   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2604 
2605   /// See AbstractAttribute::updateImpl(...).
2606   ChangeStatus updateImpl(Attributor &A) override {
2607     return indicatePessimisticFixpoint();
2608   }
2609 };
2610 
2611 struct AAReachabilityFunction final : public AAReachabilityImpl {
2612   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2613 
2614   /// See AbstractAttribute::trackStatistics()
2615   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2616 };
2617 
2618 /// ------------------------ NoAlias Argument Attribute ------------------------
2619 
2620 struct AANoAliasImpl : AANoAlias {
2621   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {
2622     assert(getAssociatedType()->isPointerTy() &&
2623            "Noalias is a pointer attribute");
2624   }
2625 
2626   const std::string getAsStr() const override {
2627     return getAssumed() ? "noalias" : "may-alias";
2628   }
2629 };
2630 
2631 /// NoAlias attribute for a floating value.
2632 struct AANoAliasFloating final : AANoAliasImpl {
2633   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2634 
2635   /// See AbstractAttribute::initialize(...).
2636   void initialize(Attributor &A) override {
2637     AANoAliasImpl::initialize(A);
2638     Value *Val = &getAssociatedValue();
2639     do {
2640       CastInst *CI = dyn_cast<CastInst>(Val);
2641       if (!CI)
2642         break;
2643       Value *Base = CI->getOperand(0);
2644       if (Base->getNumUses() != 1)
2645         break;
2646       Val = Base;
2647     } while (true);
2648 
2649     if (!Val->getType()->isPointerTy()) {
2650       indicatePessimisticFixpoint();
2651       return;
2652     }
2653 
2654     if (isa<AllocaInst>(Val))
2655       indicateOptimisticFixpoint();
2656     else if (isa<ConstantPointerNull>(Val) &&
2657              !NullPointerIsDefined(getAnchorScope(),
2658                                    Val->getType()->getPointerAddressSpace()))
2659       indicateOptimisticFixpoint();
2660     else if (Val != &getAssociatedValue()) {
2661       const auto &ValNoAliasAA =
2662           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2663       if (ValNoAliasAA.isKnownNoAlias())
2664         indicateOptimisticFixpoint();
2665     }
2666   }
2667 
2668   /// See AbstractAttribute::updateImpl(...).
2669   ChangeStatus updateImpl(Attributor &A) override {
2670     // TODO: Implement this.
2671     return indicatePessimisticFixpoint();
2672   }
2673 
2674   /// See AbstractAttribute::trackStatistics()
2675   void trackStatistics() const override {
2676     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2677   }
2678 };
2679 
2680 /// NoAlias attribute for an argument.
2681 struct AANoAliasArgument final
2682     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2683   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2684   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2685 
2686   /// See AbstractAttribute::initialize(...).
2687   void initialize(Attributor &A) override {
2688     Base::initialize(A);
2689     // See callsite argument attribute and callee argument attribute.
2690     if (hasAttr({Attribute::ByVal}))
2691       indicateOptimisticFixpoint();
2692   }
2693 
2694   /// See AbstractAttribute::update(...).
2695   ChangeStatus updateImpl(Attributor &A) override {
2696     // We have to make sure no-alias on the argument does not break
2697     // synchronization when this is a callback argument, see also [1] below.
2698     // If synchronization cannot be affected, we delegate to the base updateImpl
2699     // function, otherwise we give up for now.
2700 
2701     // If the function is no-sync, no-alias cannot break synchronization.
2702     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2703         *this, IRPosition::function_scope(getIRPosition()));
2704     if (NoSyncAA.isAssumedNoSync())
2705       return Base::updateImpl(A);
2706 
2707     // If the argument is read-only, no-alias cannot break synchronization.
2708     const auto &MemBehaviorAA =
2709         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2710     if (MemBehaviorAA.isAssumedReadOnly())
2711       return Base::updateImpl(A);
2712 
2713     // If the argument is never passed through callbacks, no-alias cannot break
2714     // synchronization.
2715     bool AllCallSitesKnown;
2716     if (A.checkForAllCallSites(
2717             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2718             true, AllCallSitesKnown))
2719       return Base::updateImpl(A);
2720 
2721     // TODO: add no-alias but make sure it doesn't break synchronization by
2722     // introducing fake uses. See:
2723     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2724     //     International Workshop on OpenMP 2018,
2725     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2726 
2727     return indicatePessimisticFixpoint();
2728   }
2729 
2730   /// See AbstractAttribute::trackStatistics()
2731   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2732 };
2733 
2734 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2735   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2736 
2737   /// See AbstractAttribute::initialize(...).
2738   void initialize(Attributor &A) override {
2739     // See callsite argument attribute and callee argument attribute.
2740     ImmutableCallSite ICS(&getAnchorValue());
2741     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2742       indicateOptimisticFixpoint();
2743     Value &Val = getAssociatedValue();
2744     if (isa<ConstantPointerNull>(Val) &&
2745         !NullPointerIsDefined(getAnchorScope(),
2746                               Val.getType()->getPointerAddressSpace()))
2747       indicateOptimisticFixpoint();
2748   }
2749 
2750   /// Determine if the underlying value may alias with the call site argument
2751   /// \p OtherArgNo of \p ICS (= the underlying call site).
2752   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2753                             const AAMemoryBehavior &MemBehaviorAA,
2754                             ImmutableCallSite ICS, unsigned OtherArgNo) {
2755     // We do not need to worry about aliasing with the underlying IRP.
2756     if (this->getArgNo() == (int)OtherArgNo)
2757       return false;
2758 
2759     // If it is not a pointer or pointer vector we do not alias.
2760     const Value *ArgOp = ICS.getArgOperand(OtherArgNo);
2761     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2762       return false;
2763 
2764     auto &ICSArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2765         *this, IRPosition::callsite_argument(ICS, OtherArgNo),
2766         /* TrackDependence */ false);
2767 
2768     // If the argument is readnone, there is no read-write aliasing.
2769     if (ICSArgMemBehaviorAA.isAssumedReadNone()) {
2770       A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2771       return false;
2772     }
2773 
2774     // If the argument is readonly and the underlying value is readonly, there
2775     // is no read-write aliasing.
2776     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2777     if (ICSArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2778       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2779       A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2780       return false;
2781     }
2782 
2783     // We have to utilize actual alias analysis queries so we need the object.
2784     if (!AAR)
2785       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2786 
2787     // Try to rule it out at the call site.
2788     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2789     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2790                          "callsite arguments: "
2791                       << getAssociatedValue() << " " << *ArgOp << " => "
2792                       << (IsAliasing ? "" : "no-") << "alias \n");
2793 
2794     return IsAliasing;
2795   }
2796 
2797   bool
2798   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2799                                          const AAMemoryBehavior &MemBehaviorAA,
2800                                          const AANoAlias &NoAliasAA) {
2801     // We can deduce "noalias" if the following conditions hold.
2802     // (i)   Associated value is assumed to be noalias in the definition.
2803     // (ii)  Associated value is assumed to be no-capture in all the uses
2804     //       possibly executed before this callsite.
2805     // (iii) There is no other pointer argument which could alias with the
2806     //       value.
2807 
2808     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2809     if (!AssociatedValueIsNoAliasAtDef) {
2810       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2811                         << " is not no-alias at the definition\n");
2812       return false;
2813     }
2814 
2815     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2816     auto &NoCaptureAA =
2817         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2818     // Check whether the value is captured in the scope using AANoCapture.
2819     // FIXME: This is conservative though, it is better to look at CFG and
2820     //        check only uses possibly executed before this callsite.
2821     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2822       LLVM_DEBUG(
2823           dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2824                  << " cannot be noalias as it is potentially captured\n");
2825       return false;
2826     }
2827     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2828 
2829     // Check there is no other pointer argument which could alias with the
2830     // value passed at this call site.
2831     // TODO: AbstractCallSite
2832     ImmutableCallSite ICS(&getAnchorValue());
2833     for (unsigned OtherArgNo = 0; OtherArgNo < ICS.getNumArgOperands();
2834          OtherArgNo++)
2835       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, ICS, OtherArgNo))
2836         return false;
2837 
2838     return true;
2839   }
2840 
2841   /// See AbstractAttribute::updateImpl(...).
2842   ChangeStatus updateImpl(Attributor &A) override {
2843     // If the argument is readnone we are done as there are no accesses via the
2844     // argument.
2845     auto &MemBehaviorAA =
2846         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2847                                      /* TrackDependence */ false);
2848     if (MemBehaviorAA.isAssumedReadNone()) {
2849       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2850       return ChangeStatus::UNCHANGED;
2851     }
2852 
2853     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2854     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2855                                                   /* TrackDependence */ false);
2856 
2857     AAResults *AAR = nullptr;
2858     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2859                                                NoAliasAA)) {
2860       LLVM_DEBUG(
2861           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2862       return ChangeStatus::UNCHANGED;
2863     }
2864 
2865     return indicatePessimisticFixpoint();
2866   }
2867 
2868   /// See AbstractAttribute::trackStatistics()
2869   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2870 };
2871 
2872 /// NoAlias attribute for function return value.
2873 struct AANoAliasReturned final : AANoAliasImpl {
2874   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2875 
2876   /// See AbstractAttribute::updateImpl(...).
2877   virtual ChangeStatus updateImpl(Attributor &A) override {
2878 
2879     auto CheckReturnValue = [&](Value &RV) -> bool {
2880       if (Constant *C = dyn_cast<Constant>(&RV))
2881         if (C->isNullValue() || isa<UndefValue>(C))
2882           return true;
2883 
2884       /// For now, we can only deduce noalias if we have call sites.
2885       /// FIXME: add more support.
2886       ImmutableCallSite ICS(&RV);
2887       if (!ICS)
2888         return false;
2889 
2890       const IRPosition &RVPos = IRPosition::value(RV);
2891       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2892       if (!NoAliasAA.isAssumedNoAlias())
2893         return false;
2894 
2895       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2896       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2897     };
2898 
2899     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2900       return indicatePessimisticFixpoint();
2901 
2902     return ChangeStatus::UNCHANGED;
2903   }
2904 
2905   /// See AbstractAttribute::trackStatistics()
2906   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2907 };
2908 
2909 /// NoAlias attribute deduction for a call site return value.
2910 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2911   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2912 
2913   /// See AbstractAttribute::initialize(...).
2914   void initialize(Attributor &A) override {
2915     AANoAliasImpl::initialize(A);
2916     Function *F = getAssociatedFunction();
2917     if (!F)
2918       indicatePessimisticFixpoint();
2919   }
2920 
2921   /// See AbstractAttribute::updateImpl(...).
2922   ChangeStatus updateImpl(Attributor &A) override {
2923     // TODO: Once we have call site specific value information we can provide
2924     //       call site specific liveness information and then it makes
2925     //       sense to specialize attributes for call sites arguments instead of
2926     //       redirecting requests to the callee argument.
2927     Function *F = getAssociatedFunction();
2928     const IRPosition &FnPos = IRPosition::returned(*F);
2929     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2930     return clampStateAndIndicateChange(
2931         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2932   }
2933 
2934   /// See AbstractAttribute::trackStatistics()
2935   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2936 };
2937 
2938 /// -------------------AAIsDead Function Attribute-----------------------
2939 
2940 struct AAIsDeadValueImpl : public AAIsDead {
2941   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2942 
2943   /// See AAIsDead::isAssumedDead().
2944   bool isAssumedDead() const override { return getAssumed(); }
2945 
2946   /// See AAIsDead::isKnownDead().
2947   bool isKnownDead() const override { return getKnown(); }
2948 
2949   /// See AAIsDead::isAssumedDead(BasicBlock *).
2950   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2951 
2952   /// See AAIsDead::isKnownDead(BasicBlock *).
2953   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2954 
2955   /// See AAIsDead::isAssumedDead(Instruction *I).
2956   bool isAssumedDead(const Instruction *I) const override {
2957     return I == getCtxI() && isAssumedDead();
2958   }
2959 
2960   /// See AAIsDead::isKnownDead(Instruction *I).
2961   bool isKnownDead(const Instruction *I) const override {
2962     return isAssumedDead(I) && getKnown();
2963   }
2964 
2965   /// See AbstractAttribute::getAsStr().
2966   const std::string getAsStr() const override {
2967     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2968   }
2969 
2970   /// Check if all uses are assumed dead.
2971   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2972     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2973     // Explicitly set the dependence class to required because we want a long
2974     // chain of N dependent instructions to be considered live as soon as one is
2975     // without going through N update cycles. This is not required for
2976     // correctness.
2977     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2978   }
2979 
2980   /// Determine if \p I is assumed to be side-effect free.
2981   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2982     if (!I || wouldInstructionBeTriviallyDead(I))
2983       return true;
2984 
2985     auto *CB = dyn_cast<CallBase>(I);
2986     if (!CB || isa<IntrinsicInst>(CB))
2987       return false;
2988 
2989     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2990     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2991     if (!NoUnwindAA.isAssumedNoUnwind())
2992       return false;
2993 
2994     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, CallIRP);
2995     if (!MemBehaviorAA.isAssumedReadOnly())
2996       return false;
2997 
2998     return true;
2999   }
3000 };
3001 
3002 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3003   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
3004 
3005   /// See AbstractAttribute::initialize(...).
3006   void initialize(Attributor &A) override {
3007     if (isa<UndefValue>(getAssociatedValue())) {
3008       indicatePessimisticFixpoint();
3009       return;
3010     }
3011 
3012     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3013     if (!isAssumedSideEffectFree(A, I))
3014       indicatePessimisticFixpoint();
3015   }
3016 
3017   /// See AbstractAttribute::updateImpl(...).
3018   ChangeStatus updateImpl(Attributor &A) override {
3019     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3020     if (!isAssumedSideEffectFree(A, I))
3021       return indicatePessimisticFixpoint();
3022 
3023     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3024       return indicatePessimisticFixpoint();
3025     return ChangeStatus::UNCHANGED;
3026   }
3027 
3028   /// See AbstractAttribute::manifest(...).
3029   ChangeStatus manifest(Attributor &A) override {
3030     Value &V = getAssociatedValue();
3031     if (auto *I = dyn_cast<Instruction>(&V)) {
3032       // If we get here we basically know the users are all dead. We check if
3033       // isAssumedSideEffectFree returns true here again because it might not be
3034       // the case and only the users are dead but the instruction (=call) is
3035       // still needed.
3036       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
3037         A.deleteAfterManifest(*I);
3038         return ChangeStatus::CHANGED;
3039       }
3040     }
3041     if (V.use_empty())
3042       return ChangeStatus::UNCHANGED;
3043 
3044     bool UsedAssumedInformation = false;
3045     Optional<Constant *> C =
3046         getAssumedConstant(A, V, *this, UsedAssumedInformation);
3047     if (C.hasValue() && C.getValue())
3048       return ChangeStatus::UNCHANGED;
3049 
3050     UndefValue &UV = *UndefValue::get(V.getType());
3051     bool AnyChange = A.changeValueAfterManifest(V, UV);
3052     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3053   }
3054 
3055   /// See AbstractAttribute::trackStatistics()
3056   void trackStatistics() const override {
3057     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3058   }
3059 };
3060 
3061 struct AAIsDeadArgument : public AAIsDeadFloating {
3062   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
3063 
3064   /// See AbstractAttribute::initialize(...).
3065   void initialize(Attributor &A) override {
3066     if (!getAssociatedFunction()->hasExactDefinition())
3067       indicatePessimisticFixpoint();
3068   }
3069 
3070   /// See AbstractAttribute::manifest(...).
3071   ChangeStatus manifest(Attributor &A) override {
3072     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3073     Argument &Arg = *getAssociatedArgument();
3074     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3075       if (A.registerFunctionSignatureRewrite(
3076               Arg, /* ReplacementTypes */ {},
3077               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3078               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
3079         return ChangeStatus::CHANGED;
3080     return Changed;
3081   }
3082 
3083   /// See AbstractAttribute::trackStatistics()
3084   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3085 };
3086 
3087 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3088   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
3089 
3090   /// See AbstractAttribute::initialize(...).
3091   void initialize(Attributor &A) override {
3092     if (isa<UndefValue>(getAssociatedValue()))
3093       indicatePessimisticFixpoint();
3094   }
3095 
3096   /// See AbstractAttribute::updateImpl(...).
3097   ChangeStatus updateImpl(Attributor &A) override {
3098     // TODO: Once we have call site specific value information we can provide
3099     //       call site specific liveness information and then it makes
3100     //       sense to specialize attributes for call sites arguments instead of
3101     //       redirecting requests to the callee argument.
3102     Argument *Arg = getAssociatedArgument();
3103     if (!Arg)
3104       return indicatePessimisticFixpoint();
3105     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3106     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
3107     return clampStateAndIndicateChange(
3108         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
3109   }
3110 
3111   /// See AbstractAttribute::manifest(...).
3112   ChangeStatus manifest(Attributor &A) override {
3113     CallBase &CB = cast<CallBase>(getAnchorValue());
3114     Use &U = CB.getArgOperandUse(getArgNo());
3115     assert(!isa<UndefValue>(U.get()) &&
3116            "Expected undef values to be filtered out!");
3117     UndefValue &UV = *UndefValue::get(U->getType());
3118     if (A.changeUseAfterManifest(U, UV))
3119       return ChangeStatus::CHANGED;
3120     return ChangeStatus::UNCHANGED;
3121   }
3122 
3123   /// See AbstractAttribute::trackStatistics()
3124   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3125 };
3126 
3127 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3128   AAIsDeadCallSiteReturned(const IRPosition &IRP)
3129       : AAIsDeadFloating(IRP), IsAssumedSideEffectFree(true) {}
3130 
3131   /// See AAIsDead::isAssumedDead().
3132   bool isAssumedDead() const override {
3133     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3134   }
3135 
3136   /// See AbstractAttribute::initialize(...).
3137   void initialize(Attributor &A) override {
3138     if (isa<UndefValue>(getAssociatedValue())) {
3139       indicatePessimisticFixpoint();
3140       return;
3141     }
3142 
3143     // We track this separately as a secondary state.
3144     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3145   }
3146 
3147   /// See AbstractAttribute::updateImpl(...).
3148   ChangeStatus updateImpl(Attributor &A) override {
3149     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3150     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3151       IsAssumedSideEffectFree = false;
3152       Changed = ChangeStatus::CHANGED;
3153     }
3154 
3155     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3156       return indicatePessimisticFixpoint();
3157     return Changed;
3158   }
3159 
3160   /// See AbstractAttribute::manifest(...).
3161   ChangeStatus manifest(Attributor &A) override {
3162     if (auto *CI = dyn_cast<CallInst>(&getAssociatedValue()))
3163       if (CI->isMustTailCall())
3164         return ChangeStatus::UNCHANGED;
3165     return AAIsDeadFloating::manifest(A);
3166   }
3167 
3168   /// See AbstractAttribute::trackStatistics()
3169   void trackStatistics() const override {
3170     if (IsAssumedSideEffectFree)
3171       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3172     else
3173       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3174   }
3175 
3176   /// See AbstractAttribute::getAsStr().
3177   const std::string getAsStr() const override {
3178     return isAssumedDead()
3179                ? "assumed-dead"
3180                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3181   }
3182 
3183 private:
3184   bool IsAssumedSideEffectFree;
3185 };
3186 
3187 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3188   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
3189 
3190   /// See AbstractAttribute::updateImpl(...).
3191   ChangeStatus updateImpl(Attributor &A) override {
3192 
3193     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3194                               {Instruction::Ret});
3195 
3196     auto PredForCallSite = [&](AbstractCallSite ACS) {
3197       if (ACS.isCallbackCall() || !ACS.getInstruction())
3198         return false;
3199       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3200     };
3201 
3202     bool AllCallSitesKnown;
3203     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3204                                 AllCallSitesKnown))
3205       return indicatePessimisticFixpoint();
3206 
3207     return ChangeStatus::UNCHANGED;
3208   }
3209 
3210   /// See AbstractAttribute::manifest(...).
3211   ChangeStatus manifest(Attributor &A) override {
3212     // TODO: Rewrite the signature to return void?
3213     bool AnyChange = false;
3214     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3215     auto RetInstPred = [&](Instruction &I) {
3216       ReturnInst &RI = cast<ReturnInst>(I);
3217       if (auto *CI = dyn_cast<CallInst>(RI.getReturnValue()))
3218         if (CI->isMustTailCall())
3219           return true;
3220       if (!isa<UndefValue>(RI.getReturnValue()))
3221         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3222       return true;
3223     };
3224     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3225     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3226   }
3227 
3228   /// See AbstractAttribute::trackStatistics()
3229   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3230 };
3231 
3232 struct AAIsDeadFunction : public AAIsDead {
3233   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
3234 
3235   /// See AbstractAttribute::initialize(...).
3236   void initialize(Attributor &A) override {
3237     const Function *F = getAssociatedFunction();
3238     if (F && !F->isDeclaration()) {
3239       ToBeExploredFrom.insert(&F->getEntryBlock().front());
3240       assumeLive(A, F->getEntryBlock());
3241     }
3242   }
3243 
3244   /// See AbstractAttribute::getAsStr().
3245   const std::string getAsStr() const override {
3246     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3247            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
3248            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3249            std::to_string(KnownDeadEnds.size()) + "]";
3250   }
3251 
3252   /// See AbstractAttribute::manifest(...).
3253   ChangeStatus manifest(Attributor &A) override {
3254     assert(getState().isValidState() &&
3255            "Attempted to manifest an invalid state!");
3256 
3257     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3258     Function &F = *getAssociatedFunction();
3259 
3260     if (AssumedLiveBlocks.empty()) {
3261       A.deleteAfterManifest(F);
3262       return ChangeStatus::CHANGED;
3263     }
3264 
3265     // Flag to determine if we can change an invoke to a call assuming the
3266     // callee is nounwind. This is not possible if the personality of the
3267     // function allows to catch asynchronous exceptions.
3268     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3269 
3270     KnownDeadEnds.set_union(ToBeExploredFrom);
3271     for (const Instruction *DeadEndI : KnownDeadEnds) {
3272       auto *CB = dyn_cast<CallBase>(DeadEndI);
3273       if (!CB)
3274         continue;
3275       const auto &NoReturnAA =
3276           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
3277       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3278       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3279         continue;
3280 
3281       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3282         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3283       else
3284         A.changeToUnreachableAfterManifest(
3285             const_cast<Instruction *>(DeadEndI->getNextNode()));
3286       HasChanged = ChangeStatus::CHANGED;
3287     }
3288 
3289     for (BasicBlock &BB : F)
3290       if (!AssumedLiveBlocks.count(&BB))
3291         A.deleteAfterManifest(BB);
3292 
3293     return HasChanged;
3294   }
3295 
3296   /// See AbstractAttribute::updateImpl(...).
3297   ChangeStatus updateImpl(Attributor &A) override;
3298 
3299   /// See AbstractAttribute::trackStatistics()
3300   void trackStatistics() const override {}
3301 
3302   /// Returns true if the function is assumed dead.
3303   bool isAssumedDead() const override { return false; }
3304 
3305   /// See AAIsDead::isKnownDead().
3306   bool isKnownDead() const override { return false; }
3307 
3308   /// See AAIsDead::isAssumedDead(BasicBlock *).
3309   bool isAssumedDead(const BasicBlock *BB) const override {
3310     assert(BB->getParent() == getAssociatedFunction() &&
3311            "BB must be in the same anchor scope function.");
3312 
3313     if (!getAssumed())
3314       return false;
3315     return !AssumedLiveBlocks.count(BB);
3316   }
3317 
3318   /// See AAIsDead::isKnownDead(BasicBlock *).
3319   bool isKnownDead(const BasicBlock *BB) const override {
3320     return getKnown() && isAssumedDead(BB);
3321   }
3322 
3323   /// See AAIsDead::isAssumed(Instruction *I).
3324   bool isAssumedDead(const Instruction *I) const override {
3325     assert(I->getParent()->getParent() == getAssociatedFunction() &&
3326            "Instruction must be in the same anchor scope function.");
3327 
3328     if (!getAssumed())
3329       return false;
3330 
3331     // If it is not in AssumedLiveBlocks then it for sure dead.
3332     // Otherwise, it can still be after noreturn call in a live block.
3333     if (!AssumedLiveBlocks.count(I->getParent()))
3334       return true;
3335 
3336     // If it is not after a liveness barrier it is live.
3337     const Instruction *PrevI = I->getPrevNode();
3338     while (PrevI) {
3339       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3340         return true;
3341       PrevI = PrevI->getPrevNode();
3342     }
3343     return false;
3344   }
3345 
3346   /// See AAIsDead::isKnownDead(Instruction *I).
3347   bool isKnownDead(const Instruction *I) const override {
3348     return getKnown() && isAssumedDead(I);
3349   }
3350 
3351   /// Determine if \p F might catch asynchronous exceptions.
3352   static bool mayCatchAsynchronousExceptions(const Function &F) {
3353     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
3354   }
3355 
3356   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3357   /// that internal function called from \p BB should now be looked at.
3358   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3359     if (!AssumedLiveBlocks.insert(&BB).second)
3360       return false;
3361 
3362     // We assume that all of BB is (probably) live now and if there are calls to
3363     // internal functions we will assume that those are now live as well. This
3364     // is a performance optimization for blocks with calls to a lot of internal
3365     // functions. It can however cause dead functions to be treated as live.
3366     for (const Instruction &I : BB)
3367       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
3368         if (const Function *F = ICS.getCalledFunction())
3369           if (F->hasLocalLinkage())
3370             A.markLiveInternalFunction(*F);
3371     return true;
3372   }
3373 
3374   /// Collection of instructions that need to be explored again, e.g., we
3375   /// did assume they do not transfer control to (one of their) successors.
3376   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3377 
3378   /// Collection of instructions that are known to not transfer control.
3379   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3380 
3381   /// Collection of all assumed live BasicBlocks.
3382   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3383 };
3384 
3385 static bool
3386 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3387                         AbstractAttribute &AA,
3388                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3389   const IRPosition &IPos = IRPosition::callsite_function(CB);
3390 
3391   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3392   if (NoReturnAA.isAssumedNoReturn())
3393     return !NoReturnAA.isKnownNoReturn();
3394   if (CB.isTerminator())
3395     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3396   else
3397     AliveSuccessors.push_back(CB.getNextNode());
3398   return false;
3399 }
3400 
3401 static bool
3402 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3403                         AbstractAttribute &AA,
3404                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3405   bool UsedAssumedInformation =
3406       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3407 
3408   // First, determine if we can change an invoke to a call assuming the
3409   // callee is nounwind. This is not possible if the personality of the
3410   // function allows to catch asynchronous exceptions.
3411   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3412     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3413   } else {
3414     const IRPosition &IPos = IRPosition::callsite_function(II);
3415     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3416     if (AANoUnw.isAssumedNoUnwind()) {
3417       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3418     } else {
3419       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3420     }
3421   }
3422   return UsedAssumedInformation;
3423 }
3424 
3425 static bool
3426 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3427                         AbstractAttribute &AA,
3428                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3429   bool UsedAssumedInformation = false;
3430   if (BI.getNumSuccessors() == 1) {
3431     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3432   } else {
3433     Optional<ConstantInt *> CI = getAssumedConstantInt(
3434         A, *BI.getCondition(), AA, UsedAssumedInformation);
3435     if (!CI.hasValue()) {
3436       // No value yet, assume both edges are dead.
3437     } else if (CI.getValue()) {
3438       const BasicBlock *SuccBB =
3439           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3440       AliveSuccessors.push_back(&SuccBB->front());
3441     } else {
3442       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3443       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3444       UsedAssumedInformation = false;
3445     }
3446   }
3447   return UsedAssumedInformation;
3448 }
3449 
3450 static bool
3451 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3452                         AbstractAttribute &AA,
3453                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3454   bool UsedAssumedInformation = false;
3455   Optional<ConstantInt *> CI =
3456       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3457   if (!CI.hasValue()) {
3458     // No value yet, assume all edges are dead.
3459   } else if (CI.getValue()) {
3460     for (auto &CaseIt : SI.cases()) {
3461       if (CaseIt.getCaseValue() == CI.getValue()) {
3462         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3463         return UsedAssumedInformation;
3464       }
3465     }
3466     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3467     return UsedAssumedInformation;
3468   } else {
3469     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3470       AliveSuccessors.push_back(&SuccBB->front());
3471   }
3472   return UsedAssumedInformation;
3473 }
3474 
3475 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3476   ChangeStatus Change = ChangeStatus::UNCHANGED;
3477 
3478   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3479                     << getAssociatedFunction()->size() << "] BBs and "
3480                     << ToBeExploredFrom.size() << " exploration points and "
3481                     << KnownDeadEnds.size() << " known dead ends\n");
3482 
3483   // Copy and clear the list of instructions we need to explore from. It is
3484   // refilled with instructions the next update has to look at.
3485   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3486                                                ToBeExploredFrom.end());
3487   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3488 
3489   SmallVector<const Instruction *, 8> AliveSuccessors;
3490   while (!Worklist.empty()) {
3491     const Instruction *I = Worklist.pop_back_val();
3492     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3493 
3494     AliveSuccessors.clear();
3495 
3496     bool UsedAssumedInformation = false;
3497     switch (I->getOpcode()) {
3498     // TODO: look for (assumed) UB to backwards propagate "deadness".
3499     default:
3500       if (I->isTerminator()) {
3501         for (const BasicBlock *SuccBB : successors(I->getParent()))
3502           AliveSuccessors.push_back(&SuccBB->front());
3503       } else {
3504         AliveSuccessors.push_back(I->getNextNode());
3505       }
3506       break;
3507     case Instruction::Call:
3508       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3509                                                        *this, AliveSuccessors);
3510       break;
3511     case Instruction::Invoke:
3512       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3513                                                        *this, AliveSuccessors);
3514       break;
3515     case Instruction::Br:
3516       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3517                                                        *this, AliveSuccessors);
3518       break;
3519     case Instruction::Switch:
3520       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3521                                                        *this, AliveSuccessors);
3522       break;
3523     }
3524 
3525     if (UsedAssumedInformation) {
3526       NewToBeExploredFrom.insert(I);
3527     } else {
3528       Change = ChangeStatus::CHANGED;
3529       if (AliveSuccessors.empty() ||
3530           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3531         KnownDeadEnds.insert(I);
3532     }
3533 
3534     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3535                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3536                       << UsedAssumedInformation << "\n");
3537 
3538     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3539       if (!I->isTerminator()) {
3540         assert(AliveSuccessors.size() == 1 &&
3541                "Non-terminator expected to have a single successor!");
3542         Worklist.push_back(AliveSuccessor);
3543       } else {
3544         if (assumeLive(A, *AliveSuccessor->getParent()))
3545           Worklist.push_back(AliveSuccessor);
3546       }
3547     }
3548   }
3549 
3550   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3551 
3552   // If we know everything is live there is no need to query for liveness.
3553   // Instead, indicating a pessimistic fixpoint will cause the state to be
3554   // "invalid" and all queries to be answered conservatively without lookups.
3555   // To be in this state we have to (1) finished the exploration and (3) not
3556   // discovered any non-trivial dead end and (2) not ruled unreachable code
3557   // dead.
3558   if (ToBeExploredFrom.empty() &&
3559       getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
3560       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3561         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3562       }))
3563     return indicatePessimisticFixpoint();
3564   return Change;
3565 }
3566 
3567 /// Liveness information for a call sites.
3568 struct AAIsDeadCallSite final : AAIsDeadFunction {
3569   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3570 
3571   /// See AbstractAttribute::initialize(...).
3572   void initialize(Attributor &A) override {
3573     // TODO: Once we have call site specific value information we can provide
3574     //       call site specific liveness information and then it makes
3575     //       sense to specialize attributes for call sites instead of
3576     //       redirecting requests to the callee.
3577     llvm_unreachable("Abstract attributes for liveness are not "
3578                      "supported for call sites yet!");
3579   }
3580 
3581   /// See AbstractAttribute::updateImpl(...).
3582   ChangeStatus updateImpl(Attributor &A) override {
3583     return indicatePessimisticFixpoint();
3584   }
3585 
3586   /// See AbstractAttribute::trackStatistics()
3587   void trackStatistics() const override {}
3588 };
3589 
3590 /// -------------------- Dereferenceable Argument Attribute --------------------
3591 
3592 template <>
3593 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3594                                                      const DerefState &R) {
3595   ChangeStatus CS0 =
3596       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3597   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3598   return CS0 | CS1;
3599 }
3600 
3601 struct AADereferenceableImpl : AADereferenceable {
3602   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
3603   using StateType = DerefState;
3604 
3605   void initialize(Attributor &A) override {
3606     SmallVector<Attribute, 4> Attrs;
3607     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3608              Attrs);
3609     for (const Attribute &Attr : Attrs)
3610       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3611 
3612     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3613                                        /* TrackDependence */ false);
3614 
3615     const IRPosition &IRP = this->getIRPosition();
3616     bool IsFnInterface = IRP.isFnInterfaceKind();
3617     const Function *FnScope = IRP.getAnchorScope();
3618     if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
3619       indicatePessimisticFixpoint();
3620   }
3621 
3622   /// See AbstractAttribute::getState()
3623   /// {
3624   StateType &getState() override { return *this; }
3625   const StateType &getState() const override { return *this; }
3626   /// }
3627 
3628   /// Helper function for collecting accessed bytes in must-be-executed-context
3629   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3630                               DerefState &State) {
3631     const Value *UseV = U->get();
3632     if (!UseV->getType()->isPointerTy())
3633       return;
3634 
3635     Type *PtrTy = UseV->getType();
3636     const DataLayout &DL = A.getDataLayout();
3637     int64_t Offset;
3638     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3639             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3640       if (Base == &getAssociatedValue() &&
3641           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3642         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3643         State.addAccessedBytes(Offset, Size);
3644       }
3645     }
3646     return;
3647   }
3648 
3649   /// See AAFromMustBeExecutedContext
3650   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3651                  AADereferenceable::StateType &State) {
3652     bool IsNonNull = false;
3653     bool TrackUse = false;
3654     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3655         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3656 
3657     addAccessedBytesForUse(A, U, I, State);
3658     State.takeKnownDerefBytesMaximum(DerefBytes);
3659     return TrackUse;
3660   }
3661 
3662   /// See AbstractAttribute::manifest(...).
3663   ChangeStatus manifest(Attributor &A) override {
3664     ChangeStatus Change = AADereferenceable::manifest(A);
3665     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3666       removeAttrs({Attribute::DereferenceableOrNull});
3667       return ChangeStatus::CHANGED;
3668     }
3669     return Change;
3670   }
3671 
3672   void getDeducedAttributes(LLVMContext &Ctx,
3673                             SmallVectorImpl<Attribute> &Attrs) const override {
3674     // TODO: Add *_globally support
3675     if (isAssumedNonNull())
3676       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3677           Ctx, getAssumedDereferenceableBytes()));
3678     else
3679       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3680           Ctx, getAssumedDereferenceableBytes()));
3681   }
3682 
3683   /// See AbstractAttribute::getAsStr().
3684   const std::string getAsStr() const override {
3685     if (!getAssumedDereferenceableBytes())
3686       return "unknown-dereferenceable";
3687     return std::string("dereferenceable") +
3688            (isAssumedNonNull() ? "" : "_or_null") +
3689            (isAssumedGlobal() ? "_globally" : "") + "<" +
3690            std::to_string(getKnownDereferenceableBytes()) + "-" +
3691            std::to_string(getAssumedDereferenceableBytes()) + ">";
3692   }
3693 };
3694 
3695 /// Dereferenceable attribute for a floating value.
3696 struct AADereferenceableFloating
3697     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3698   using Base =
3699       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3700   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
3701 
3702   /// See AbstractAttribute::updateImpl(...).
3703   ChangeStatus updateImpl(Attributor &A) override {
3704     ChangeStatus Change = Base::updateImpl(A);
3705 
3706     const DataLayout &DL = A.getDataLayout();
3707 
3708     auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
3709       unsigned IdxWidth =
3710           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3711       APInt Offset(IdxWidth, 0);
3712       const Value *Base =
3713           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3714 
3715       const auto &AA =
3716           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3717       int64_t DerefBytes = 0;
3718       if (!Stripped && this == &AA) {
3719         // Use IR information if we did not strip anything.
3720         // TODO: track globally.
3721         bool CanBeNull;
3722         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3723         T.GlobalState.indicatePessimisticFixpoint();
3724       } else {
3725         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3726         DerefBytes = DS.DerefBytesState.getAssumed();
3727         T.GlobalState &= DS.GlobalState;
3728       }
3729 
3730       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3731 
3732       // For now we do not try to "increase" dereferenceability due to negative
3733       // indices as we first have to come up with code to deal with loops and
3734       // for overflows of the dereferenceable bytes.
3735       int64_t OffsetSExt = Offset.getSExtValue();
3736       if (OffsetSExt < 0)
3737         OffsetSExt = 0;
3738 
3739       T.takeAssumedDerefBytesMinimum(
3740           std::max(int64_t(0), DerefBytes - OffsetSExt));
3741 
3742       if (this == &AA) {
3743         if (!Stripped) {
3744           // If nothing was stripped IR information is all we got.
3745           T.takeKnownDerefBytesMaximum(
3746               std::max(int64_t(0), DerefBytes - OffsetSExt));
3747           T.indicatePessimisticFixpoint();
3748         } else if (OffsetSExt > 0) {
3749           // If something was stripped but there is circular reasoning we look
3750           // for the offset. If it is positive we basically decrease the
3751           // dereferenceable bytes in a circluar loop now, which will simply
3752           // drive them down to the known value in a very slow way which we
3753           // can accelerate.
3754           T.indicatePessimisticFixpoint();
3755         }
3756       }
3757 
3758       return T.isValidState();
3759     };
3760 
3761     DerefState T;
3762     if (!genericValueTraversal<AADereferenceable, DerefState>(
3763             A, getIRPosition(), *this, T, VisitValueCB))
3764       return indicatePessimisticFixpoint();
3765 
3766     return Change | clampStateAndIndicateChange(getState(), T);
3767   }
3768 
3769   /// See AbstractAttribute::trackStatistics()
3770   void trackStatistics() const override {
3771     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3772   }
3773 };
3774 
3775 /// Dereferenceable attribute for a return value.
3776 struct AADereferenceableReturned final
3777     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3778   AADereferenceableReturned(const IRPosition &IRP)
3779       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3780             IRP) {}
3781 
3782   /// See AbstractAttribute::trackStatistics()
3783   void trackStatistics() const override {
3784     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3785   }
3786 };
3787 
3788 /// Dereferenceable attribute for an argument
3789 struct AADereferenceableArgument final
3790     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3791           AADereferenceable, AADereferenceableImpl> {
3792   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3793       AADereferenceable, AADereferenceableImpl>;
3794   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3795 
3796   /// See AbstractAttribute::trackStatistics()
3797   void trackStatistics() const override {
3798     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3799   }
3800 };
3801 
3802 /// Dereferenceable attribute for a call site argument.
3803 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3804   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3805       : AADereferenceableFloating(IRP) {}
3806 
3807   /// See AbstractAttribute::trackStatistics()
3808   void trackStatistics() const override {
3809     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3810   }
3811 };
3812 
3813 /// Dereferenceable attribute deduction for a call site return value.
3814 struct AADereferenceableCallSiteReturned final
3815     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3816           AADereferenceable, AADereferenceableImpl> {
3817   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3818       AADereferenceable, AADereferenceableImpl>;
3819   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3820 
3821   /// See AbstractAttribute::trackStatistics()
3822   void trackStatistics() const override {
3823     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3824   }
3825 };
3826 
3827 // ------------------------ Align Argument Attribute ------------------------
3828 
3829 static unsigned int getKnownAlignForUse(Attributor &A,
3830                                         AbstractAttribute &QueryingAA,
3831                                         Value &AssociatedValue, const Use *U,
3832                                         const Instruction *I, bool &TrackUse) {
3833   // We need to follow common pointer manipulation uses to the accesses they
3834   // feed into.
3835   if (isa<CastInst>(I)) {
3836     // Follow all but ptr2int casts.
3837     TrackUse = !isa<PtrToIntInst>(I);
3838     return 0;
3839   }
3840   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3841     if (GEP->hasAllConstantIndices()) {
3842       TrackUse = true;
3843       return 0;
3844     }
3845   }
3846 
3847   unsigned Alignment = 0;
3848   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3849     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3850       return 0;
3851 
3852     unsigned ArgNo = ICS.getArgumentNo(U);
3853     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3854     // As long as we only use known information there is no need to track
3855     // dependences here.
3856     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3857                                         /* TrackDependence */ false);
3858     Alignment = AlignAA.getKnownAlign();
3859   }
3860 
3861   const Value *UseV = U->get();
3862   if (auto *SI = dyn_cast<StoreInst>(I)) {
3863     if (SI->getPointerOperand() == UseV)
3864       Alignment = SI->getAlignment();
3865   } else if (auto *LI = dyn_cast<LoadInst>(I))
3866     Alignment = LI->getAlignment();
3867 
3868   if (Alignment <= 1)
3869     return 0;
3870 
3871   auto &DL = A.getDataLayout();
3872   int64_t Offset;
3873 
3874   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3875     if (Base == &AssociatedValue) {
3876       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3877       // So we can say that the maximum power of two which is a divisor of
3878       // gcd(Offset, Alignment) is an alignment.
3879 
3880       uint32_t gcd =
3881           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3882       Alignment = llvm::PowerOf2Floor(gcd);
3883     }
3884   }
3885 
3886   return Alignment;
3887 }
3888 struct AAAlignImpl : AAAlign {
3889   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3890 
3891   /// See AbstractAttribute::initialize(...).
3892   void initialize(Attributor &A) override {
3893     SmallVector<Attribute, 4> Attrs;
3894     getAttrs({Attribute::Alignment}, Attrs);
3895     for (const Attribute &Attr : Attrs)
3896       takeKnownMaximum(Attr.getValueAsInt());
3897 
3898     if (getIRPosition().isFnInterfaceKind() &&
3899         (!getAssociatedFunction() ||
3900          !getAssociatedFunction()->hasExactDefinition()))
3901       indicatePessimisticFixpoint();
3902   }
3903 
3904   /// See AbstractAttribute::manifest(...).
3905   ChangeStatus manifest(Attributor &A) override {
3906     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3907 
3908     // Check for users that allow alignment annotations.
3909     Value &AssociatedValue = getAssociatedValue();
3910     for (const Use &U : AssociatedValue.uses()) {
3911       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3912         if (SI->getPointerOperand() == &AssociatedValue)
3913           if (SI->getAlignment() < getAssumedAlign()) {
3914             STATS_DECLTRACK(AAAlign, Store,
3915                             "Number of times alignment added to a store");
3916             SI->setAlignment(Align(getAssumedAlign()));
3917             LoadStoreChanged = ChangeStatus::CHANGED;
3918           }
3919       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3920         if (LI->getPointerOperand() == &AssociatedValue)
3921           if (LI->getAlignment() < getAssumedAlign()) {
3922             LI->setAlignment(Align(getAssumedAlign()));
3923             STATS_DECLTRACK(AAAlign, Load,
3924                             "Number of times alignment added to a load");
3925             LoadStoreChanged = ChangeStatus::CHANGED;
3926           }
3927       }
3928     }
3929 
3930     ChangeStatus Changed = AAAlign::manifest(A);
3931 
3932     MaybeAlign InheritAlign =
3933         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3934     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3935       return LoadStoreChanged;
3936     return Changed | LoadStoreChanged;
3937   }
3938 
3939   // TODO: Provide a helper to determine the implied ABI alignment and check in
3940   //       the existing manifest method and a new one for AAAlignImpl that value
3941   //       to avoid making the alignment explicit if it did not improve.
3942 
3943   /// See AbstractAttribute::getDeducedAttributes
3944   virtual void
3945   getDeducedAttributes(LLVMContext &Ctx,
3946                        SmallVectorImpl<Attribute> &Attrs) const override {
3947     if (getAssumedAlign() > 1)
3948       Attrs.emplace_back(
3949           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3950   }
3951   /// See AAFromMustBeExecutedContext
3952   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3953                  AAAlign::StateType &State) {
3954     bool TrackUse = false;
3955 
3956     unsigned int KnownAlign =
3957         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3958     State.takeKnownMaximum(KnownAlign);
3959 
3960     return TrackUse;
3961   }
3962 
3963   /// See AbstractAttribute::getAsStr().
3964   const std::string getAsStr() const override {
3965     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3966                                 "-" + std::to_string(getAssumedAlign()) + ">")
3967                              : "unknown-align";
3968   }
3969 };
3970 
3971 /// Align attribute for a floating value.
3972 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3973   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3974   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3975 
3976   /// See AbstractAttribute::updateImpl(...).
3977   ChangeStatus updateImpl(Attributor &A) override {
3978     Base::updateImpl(A);
3979 
3980     const DataLayout &DL = A.getDataLayout();
3981 
3982     auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
3983                             bool Stripped) -> bool {
3984       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3985       if (!Stripped && this == &AA) {
3986         // Use only IR information if we did not strip anything.
3987         const MaybeAlign PA = V.getPointerAlignment(DL);
3988         T.takeKnownMaximum(PA ? PA->value() : 0);
3989         T.indicatePessimisticFixpoint();
3990       } else {
3991         // Use abstract attribute information.
3992         const AAAlign::StateType &DS =
3993             static_cast<const AAAlign::StateType &>(AA.getState());
3994         T ^= DS;
3995       }
3996       return T.isValidState();
3997     };
3998 
3999     StateType T;
4000     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
4001                                                    VisitValueCB))
4002       return indicatePessimisticFixpoint();
4003 
4004     // TODO: If we know we visited all incoming values, thus no are assumed
4005     // dead, we can take the known information from the state T.
4006     return clampStateAndIndicateChange(getState(), T);
4007   }
4008 
4009   /// See AbstractAttribute::trackStatistics()
4010   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4011 };
4012 
4013 /// Align attribute for function return value.
4014 struct AAAlignReturned final
4015     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4016   AAAlignReturned(const IRPosition &IRP)
4017       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
4018 
4019   /// See AbstractAttribute::trackStatistics()
4020   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4021 };
4022 
4023 /// Align attribute for function argument.
4024 struct AAAlignArgument final
4025     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
4026                                                               AAAlignImpl> {
4027   AAAlignArgument(const IRPosition &IRP)
4028       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
4029                                                                 AAAlignImpl>(
4030             IRP) {}
4031 
4032   /// See AbstractAttribute::trackStatistics()
4033   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4034 };
4035 
4036 struct AAAlignCallSiteArgument final : AAAlignFloating {
4037   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
4038 
4039   /// See AbstractAttribute::manifest(...).
4040   ChangeStatus manifest(Attributor &A) override {
4041     ChangeStatus Changed = AAAlignImpl::manifest(A);
4042     MaybeAlign InheritAlign =
4043         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4044     if (InheritAlign.valueOrOne() >= getAssumedAlign())
4045       Changed = ChangeStatus::UNCHANGED;
4046     return Changed;
4047   }
4048 
4049   /// See AbstractAttribute::updateImpl(Attributor &A).
4050   ChangeStatus updateImpl(Attributor &A) override {
4051     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4052     if (Argument *Arg = getAssociatedArgument()) {
4053       // We only take known information from the argument
4054       // so we do not need to track a dependence.
4055       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4056           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
4057       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4058     }
4059     return Changed;
4060   }
4061 
4062   /// See AbstractAttribute::trackStatistics()
4063   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4064 };
4065 
4066 /// Align attribute deduction for a call site return value.
4067 struct AAAlignCallSiteReturned final
4068     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
4069                                                              AAAlignImpl> {
4070   using Base =
4071       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
4072                                                              AAAlignImpl>;
4073   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
4074 
4075   /// See AbstractAttribute::initialize(...).
4076   void initialize(Attributor &A) override {
4077     Base::initialize(A);
4078     Function *F = getAssociatedFunction();
4079     if (!F)
4080       indicatePessimisticFixpoint();
4081   }
4082 
4083   /// See AbstractAttribute::trackStatistics()
4084   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4085 };
4086 
4087 /// ------------------ Function No-Return Attribute ----------------------------
4088 struct AANoReturnImpl : public AANoReturn {
4089   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
4090 
4091   /// See AbstractAttribute::initialize(...).
4092   void initialize(Attributor &A) override {
4093     AANoReturn::initialize(A);
4094     Function *F = getAssociatedFunction();
4095     if (!F)
4096       indicatePessimisticFixpoint();
4097   }
4098 
4099   /// See AbstractAttribute::getAsStr().
4100   const std::string getAsStr() const override {
4101     return getAssumed() ? "noreturn" : "may-return";
4102   }
4103 
4104   /// See AbstractAttribute::updateImpl(Attributor &A).
4105   virtual ChangeStatus updateImpl(Attributor &A) override {
4106     auto CheckForNoReturn = [](Instruction &) { return false; };
4107     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4108                                    {(unsigned)Instruction::Ret}))
4109       return indicatePessimisticFixpoint();
4110     return ChangeStatus::UNCHANGED;
4111   }
4112 };
4113 
4114 struct AANoReturnFunction final : AANoReturnImpl {
4115   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
4116 
4117   /// See AbstractAttribute::trackStatistics()
4118   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4119 };
4120 
4121 /// NoReturn attribute deduction for a call sites.
4122 struct AANoReturnCallSite final : AANoReturnImpl {
4123   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
4124 
4125   /// See AbstractAttribute::updateImpl(...).
4126   ChangeStatus updateImpl(Attributor &A) override {
4127     // TODO: Once we have call site specific value information we can provide
4128     //       call site specific liveness information and then it makes
4129     //       sense to specialize attributes for call sites arguments instead of
4130     //       redirecting requests to the callee argument.
4131     Function *F = getAssociatedFunction();
4132     const IRPosition &FnPos = IRPosition::function(*F);
4133     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4134     return clampStateAndIndicateChange(
4135         getState(),
4136         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
4137   }
4138 
4139   /// See AbstractAttribute::trackStatistics()
4140   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4141 };
4142 
4143 /// ----------------------- Variable Capturing ---------------------------------
4144 
4145 /// A class to hold the state of for no-capture attributes.
4146 struct AANoCaptureImpl : public AANoCapture {
4147   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
4148 
4149   /// See AbstractAttribute::initialize(...).
4150   void initialize(Attributor &A) override {
4151     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4152       indicateOptimisticFixpoint();
4153       return;
4154     }
4155     Function *AnchorScope = getAnchorScope();
4156     if (isFnInterfaceKind() &&
4157         (!AnchorScope || !AnchorScope->hasExactDefinition())) {
4158       indicatePessimisticFixpoint();
4159       return;
4160     }
4161 
4162     // You cannot "capture" null in the default address space.
4163     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4164         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4165       indicateOptimisticFixpoint();
4166       return;
4167     }
4168 
4169     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
4170 
4171     // Check what state the associated function can actually capture.
4172     if (F)
4173       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4174     else
4175       indicatePessimisticFixpoint();
4176   }
4177 
4178   /// See AbstractAttribute::updateImpl(...).
4179   ChangeStatus updateImpl(Attributor &A) override;
4180 
4181   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4182   virtual void
4183   getDeducedAttributes(LLVMContext &Ctx,
4184                        SmallVectorImpl<Attribute> &Attrs) const override {
4185     if (!isAssumedNoCaptureMaybeReturned())
4186       return;
4187 
4188     if (getArgNo() >= 0) {
4189       if (isAssumedNoCapture())
4190         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4191       else if (ManifestInternal)
4192         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4193     }
4194   }
4195 
4196   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4197   /// depending on the ability of the function associated with \p IRP to capture
4198   /// state in memory and through "returning/throwing", respectively.
4199   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4200                                                    const Function &F,
4201                                                    BitIntegerState &State) {
4202     // TODO: Once we have memory behavior attributes we should use them here.
4203 
4204     // If we know we cannot communicate or write to memory, we do not care about
4205     // ptr2int anymore.
4206     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4207         F.getReturnType()->isVoidTy()) {
4208       State.addKnownBits(NO_CAPTURE);
4209       return;
4210     }
4211 
4212     // A function cannot capture state in memory if it only reads memory, it can
4213     // however return/throw state and the state might be influenced by the
4214     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4215     if (F.onlyReadsMemory())
4216       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4217 
4218     // A function cannot communicate state back if it does not through
4219     // exceptions and doesn not return values.
4220     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4221       State.addKnownBits(NOT_CAPTURED_IN_RET);
4222 
4223     // Check existing "returned" attributes.
4224     int ArgNo = IRP.getArgNo();
4225     if (F.doesNotThrow() && ArgNo >= 0) {
4226       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4227         if (F.hasParamAttribute(u, Attribute::Returned)) {
4228           if (u == unsigned(ArgNo))
4229             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4230           else if (F.onlyReadsMemory())
4231             State.addKnownBits(NO_CAPTURE);
4232           else
4233             State.addKnownBits(NOT_CAPTURED_IN_RET);
4234           break;
4235         }
4236     }
4237   }
4238 
4239   /// See AbstractState::getAsStr().
4240   const std::string getAsStr() const override {
4241     if (isKnownNoCapture())
4242       return "known not-captured";
4243     if (isAssumedNoCapture())
4244       return "assumed not-captured";
4245     if (isKnownNoCaptureMaybeReturned())
4246       return "known not-captured-maybe-returned";
4247     if (isAssumedNoCaptureMaybeReturned())
4248       return "assumed not-captured-maybe-returned";
4249     return "assumed-captured";
4250   }
4251 };
4252 
4253 /// Attributor-aware capture tracker.
4254 struct AACaptureUseTracker final : public CaptureTracker {
4255 
4256   /// Create a capture tracker that can lookup in-flight abstract attributes
4257   /// through the Attributor \p A.
4258   ///
4259   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4260   /// search is stopped. If a use leads to a return instruction,
4261   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4262   /// If a use leads to a ptr2int which may capture the value,
4263   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4264   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4265   /// set. All values in \p PotentialCopies are later tracked as well. For every
4266   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4267   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4268   /// conservatively set to true.
4269   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4270                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4271                       SmallVectorImpl<const Value *> &PotentialCopies,
4272                       unsigned &RemainingUsesToExplore)
4273       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4274         PotentialCopies(PotentialCopies),
4275         RemainingUsesToExplore(RemainingUsesToExplore) {}
4276 
4277   /// Determine if \p V maybe captured. *Also updates the state!*
4278   bool valueMayBeCaptured(const Value *V) {
4279     if (V->getType()->isPointerTy()) {
4280       PointerMayBeCaptured(V, this);
4281     } else {
4282       State.indicatePessimisticFixpoint();
4283     }
4284     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4285   }
4286 
4287   /// See CaptureTracker::tooManyUses().
4288   void tooManyUses() override {
4289     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4290   }
4291 
4292   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4293     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4294       return true;
4295     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4296         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4297         DepClassTy::OPTIONAL);
4298     return DerefAA.getAssumedDereferenceableBytes();
4299   }
4300 
4301   /// See CaptureTracker::captured(...).
4302   bool captured(const Use *U) override {
4303     Instruction *UInst = cast<Instruction>(U->getUser());
4304     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4305                       << "\n");
4306 
4307     // Because we may reuse the tracker multiple times we keep track of the
4308     // number of explored uses ourselves as well.
4309     if (RemainingUsesToExplore-- == 0) {
4310       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4311       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4312                           /* Return */ true);
4313     }
4314 
4315     // Deal with ptr2int by following uses.
4316     if (isa<PtrToIntInst>(UInst)) {
4317       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4318       return valueMayBeCaptured(UInst);
4319     }
4320 
4321     // Explicitly catch return instructions.
4322     if (isa<ReturnInst>(UInst))
4323       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4324                           /* Return */ true);
4325 
4326     // For now we only use special logic for call sites. However, the tracker
4327     // itself knows about a lot of other non-capturing cases already.
4328     CallSite CS(UInst);
4329     if (!CS || !CS.isArgOperand(U))
4330       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4331                           /* Return */ true);
4332 
4333     unsigned ArgNo = CS.getArgumentNo(U);
4334     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
4335     // If we have a abstract no-capture attribute for the argument we can use
4336     // it to justify a non-capture attribute here. This allows recursion!
4337     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4338     if (ArgNoCaptureAA.isAssumedNoCapture())
4339       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4340                           /* Return */ false);
4341     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4342       addPotentialCopy(CS);
4343       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4344                           /* Return */ false);
4345     }
4346 
4347     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4348     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4349                         /* Return */ true);
4350   }
4351 
4352   /// Register \p CS as potential copy of the value we are checking.
4353   void addPotentialCopy(CallSite CS) {
4354     PotentialCopies.push_back(CS.getInstruction());
4355   }
4356 
4357   /// See CaptureTracker::shouldExplore(...).
4358   bool shouldExplore(const Use *U) override {
4359     // Check liveness.
4360     return !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4361   }
4362 
4363   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4364   /// \p CapturedInRet, then return the appropriate value for use in the
4365   /// CaptureTracker::captured() interface.
4366   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4367                     bool CapturedInRet) {
4368     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4369                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4370     if (CapturedInMem)
4371       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4372     if (CapturedInInt)
4373       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4374     if (CapturedInRet)
4375       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4376     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4377   }
4378 
4379 private:
4380   /// The attributor providing in-flight abstract attributes.
4381   Attributor &A;
4382 
4383   /// The abstract attribute currently updated.
4384   AANoCapture &NoCaptureAA;
4385 
4386   /// The abstract liveness state.
4387   const AAIsDead &IsDeadAA;
4388 
4389   /// The state currently updated.
4390   AANoCapture::StateType &State;
4391 
4392   /// Set of potential copies of the tracked value.
4393   SmallVectorImpl<const Value *> &PotentialCopies;
4394 
4395   /// Global counter to limit the number of explored uses.
4396   unsigned &RemainingUsesToExplore;
4397 };
4398 
4399 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4400   const IRPosition &IRP = getIRPosition();
4401   const Value *V =
4402       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4403   if (!V)
4404     return indicatePessimisticFixpoint();
4405 
4406   const Function *F =
4407       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4408   assert(F && "Expected a function!");
4409   const IRPosition &FnPos = IRPosition::function(*F);
4410   const auto &IsDeadAA =
4411       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4412 
4413   AANoCapture::StateType T;
4414 
4415   // Readonly means we cannot capture through memory.
4416   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
4417       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4418   if (FnMemAA.isAssumedReadOnly()) {
4419     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4420     if (FnMemAA.isKnownReadOnly())
4421       addKnownBits(NOT_CAPTURED_IN_MEM);
4422   }
4423 
4424   // Make sure all returned values are different than the underlying value.
4425   // TODO: we could do this in a more sophisticated way inside
4426   //       AAReturnedValues, e.g., track all values that escape through returns
4427   //       directly somehow.
4428   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4429     bool SeenConstant = false;
4430     for (auto &It : RVAA.returned_values()) {
4431       if (isa<Constant>(It.first)) {
4432         if (SeenConstant)
4433           return false;
4434         SeenConstant = true;
4435       } else if (!isa<Argument>(It.first) ||
4436                  It.first == getAssociatedArgument())
4437         return false;
4438     }
4439     return true;
4440   };
4441 
4442   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4443       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4444   if (NoUnwindAA.isAssumedNoUnwind()) {
4445     bool IsVoidTy = F->getReturnType()->isVoidTy();
4446     const AAReturnedValues *RVAA =
4447         IsVoidTy ? nullptr
4448                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4449                                                  /* TrackDependence */ true,
4450                                                  DepClassTy::OPTIONAL);
4451     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4452       T.addKnownBits(NOT_CAPTURED_IN_RET);
4453       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4454         return ChangeStatus::UNCHANGED;
4455       if (NoUnwindAA.isKnownNoUnwind() &&
4456           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4457         addKnownBits(NOT_CAPTURED_IN_RET);
4458         if (isKnown(NOT_CAPTURED_IN_MEM))
4459           return indicateOptimisticFixpoint();
4460       }
4461     }
4462   }
4463 
4464   // Use the CaptureTracker interface and logic with the specialized tracker,
4465   // defined in AACaptureUseTracker, that can look at in-flight abstract
4466   // attributes and directly updates the assumed state.
4467   SmallVector<const Value *, 4> PotentialCopies;
4468   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
4469   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4470                               RemainingUsesToExplore);
4471 
4472   // Check all potential copies of the associated value until we can assume
4473   // none will be captured or we have to assume at least one might be.
4474   unsigned Idx = 0;
4475   PotentialCopies.push_back(V);
4476   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4477     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4478 
4479   AANoCapture::StateType &S = getState();
4480   auto Assumed = S.getAssumed();
4481   S.intersectAssumedBits(T.getAssumed());
4482   if (!isAssumedNoCaptureMaybeReturned())
4483     return indicatePessimisticFixpoint();
4484   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4485                                    : ChangeStatus::CHANGED;
4486 }
4487 
4488 /// NoCapture attribute for function arguments.
4489 struct AANoCaptureArgument final : AANoCaptureImpl {
4490   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4491 
4492   /// See AbstractAttribute::trackStatistics()
4493   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4494 };
4495 
4496 /// NoCapture attribute for call site arguments.
4497 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4498   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4499 
4500   /// See AbstractAttribute::initialize(...).
4501   void initialize(Attributor &A) override {
4502     if (Argument *Arg = getAssociatedArgument())
4503       if (Arg->hasByValAttr())
4504         indicateOptimisticFixpoint();
4505     AANoCaptureImpl::initialize(A);
4506   }
4507 
4508   /// See AbstractAttribute::updateImpl(...).
4509   ChangeStatus updateImpl(Attributor &A) override {
4510     // TODO: Once we have call site specific value information we can provide
4511     //       call site specific liveness information and then it makes
4512     //       sense to specialize attributes for call sites arguments instead of
4513     //       redirecting requests to the callee argument.
4514     Argument *Arg = getAssociatedArgument();
4515     if (!Arg)
4516       return indicatePessimisticFixpoint();
4517     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4518     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4519     return clampStateAndIndicateChange(
4520         getState(),
4521         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4522   }
4523 
4524   /// See AbstractAttribute::trackStatistics()
4525   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4526 };
4527 
4528 /// NoCapture attribute for floating values.
4529 struct AANoCaptureFloating final : AANoCaptureImpl {
4530   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4531 
4532   /// See AbstractAttribute::trackStatistics()
4533   void trackStatistics() const override {
4534     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4535   }
4536 };
4537 
4538 /// NoCapture attribute for function return value.
4539 struct AANoCaptureReturned final : AANoCaptureImpl {
4540   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4541     llvm_unreachable("NoCapture is not applicable to function returns!");
4542   }
4543 
4544   /// See AbstractAttribute::initialize(...).
4545   void initialize(Attributor &A) override {
4546     llvm_unreachable("NoCapture is not applicable to function returns!");
4547   }
4548 
4549   /// See AbstractAttribute::updateImpl(...).
4550   ChangeStatus updateImpl(Attributor &A) override {
4551     llvm_unreachable("NoCapture is not applicable to function returns!");
4552   }
4553 
4554   /// See AbstractAttribute::trackStatistics()
4555   void trackStatistics() const override {}
4556 };
4557 
4558 /// NoCapture attribute deduction for a call site return value.
4559 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4560   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4561 
4562   /// See AbstractAttribute::trackStatistics()
4563   void trackStatistics() const override {
4564     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4565   }
4566 };
4567 
4568 /// ------------------ Value Simplify Attribute ----------------------------
4569 struct AAValueSimplifyImpl : AAValueSimplify {
4570   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
4571 
4572   /// See AbstractAttribute::initialize(...).
4573   void initialize(Attributor &A) override {
4574     if (getAssociatedValue().getType()->isVoidTy())
4575       indicatePessimisticFixpoint();
4576   }
4577 
4578   /// See AbstractAttribute::getAsStr().
4579   const std::string getAsStr() const override {
4580     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4581                         : "not-simple";
4582   }
4583 
4584   /// See AbstractAttribute::trackStatistics()
4585   void trackStatistics() const override {}
4586 
4587   /// See AAValueSimplify::getAssumedSimplifiedValue()
4588   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4589     if (!getAssumed())
4590       return const_cast<Value *>(&getAssociatedValue());
4591     return SimplifiedAssociatedValue;
4592   }
4593 
4594   /// Helper function for querying AAValueSimplify and updating candicate.
4595   /// \param QueryingValue Value trying to unify with SimplifiedValue
4596   /// \param AccumulatedSimplifiedValue Current simplification result.
4597   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4598                              Value &QueryingValue,
4599                              Optional<Value *> &AccumulatedSimplifiedValue) {
4600     // FIXME: Add a typecast support.
4601 
4602     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4603         QueryingAA, IRPosition::value(QueryingValue));
4604 
4605     Optional<Value *> QueryingValueSimplified =
4606         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4607 
4608     if (!QueryingValueSimplified.hasValue())
4609       return true;
4610 
4611     if (!QueryingValueSimplified.getValue())
4612       return false;
4613 
4614     Value &QueryingValueSimplifiedUnwrapped =
4615         *QueryingValueSimplified.getValue();
4616 
4617     if (AccumulatedSimplifiedValue.hasValue() &&
4618         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4619         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4620       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4621     if (AccumulatedSimplifiedValue.hasValue() &&
4622         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4623       return true;
4624 
4625     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4626                       << " is assumed to be "
4627                       << QueryingValueSimplifiedUnwrapped << "\n");
4628 
4629     AccumulatedSimplifiedValue = QueryingValueSimplified;
4630     return true;
4631   }
4632 
4633   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4634     if (!getAssociatedValue().getType()->isIntegerTy())
4635       return false;
4636 
4637     const auto &ValueConstantRangeAA =
4638         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4639 
4640     Optional<ConstantInt *> COpt =
4641         ValueConstantRangeAA.getAssumedConstantInt(A);
4642     if (COpt.hasValue()) {
4643       if (auto *C = COpt.getValue())
4644         SimplifiedAssociatedValue = C;
4645       else
4646         return false;
4647     } else {
4648       SimplifiedAssociatedValue = llvm::None;
4649     }
4650     return true;
4651   }
4652 
4653   /// See AbstractAttribute::manifest(...).
4654   ChangeStatus manifest(Attributor &A) override {
4655     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4656 
4657     if (SimplifiedAssociatedValue.hasValue() &&
4658         !SimplifiedAssociatedValue.getValue())
4659       return Changed;
4660 
4661     Value &V = getAssociatedValue();
4662     auto *C = SimplifiedAssociatedValue.hasValue()
4663                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4664                   : UndefValue::get(V.getType());
4665     if (C) {
4666       // We can replace the AssociatedValue with the constant.
4667       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4668         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4669                           << " :: " << *this << "\n");
4670         if (A.changeValueAfterManifest(V, *C))
4671           Changed = ChangeStatus::CHANGED;
4672       }
4673     }
4674 
4675     return Changed | AAValueSimplify::manifest(A);
4676   }
4677 
4678   /// See AbstractState::indicatePessimisticFixpoint(...).
4679   ChangeStatus indicatePessimisticFixpoint() override {
4680     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4681     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4682     SimplifiedAssociatedValue = &getAssociatedValue();
4683     indicateOptimisticFixpoint();
4684     return ChangeStatus::CHANGED;
4685   }
4686 
4687 protected:
4688   // An assumed simplified value. Initially, it is set to Optional::None, which
4689   // means that the value is not clear under current assumption. If in the
4690   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4691   // returns orignal associated value.
4692   Optional<Value *> SimplifiedAssociatedValue;
4693 };
4694 
4695 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4696   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4697 
4698   void initialize(Attributor &A) override {
4699     AAValueSimplifyImpl::initialize(A);
4700     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
4701       indicatePessimisticFixpoint();
4702     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4703                 /* IgnoreSubsumingPositions */ true))
4704       indicatePessimisticFixpoint();
4705 
4706     // FIXME: This is a hack to prevent us from propagating function poiner in
4707     // the new pass manager CGSCC pass as it creates call edges the
4708     // CallGraphUpdater cannot handle yet.
4709     Value &V = getAssociatedValue();
4710     if (V.getType()->isPointerTy() &&
4711         V.getType()->getPointerElementType()->isFunctionTy() &&
4712         !A.isModulePass())
4713       indicatePessimisticFixpoint();
4714   }
4715 
4716   /// See AbstractAttribute::updateImpl(...).
4717   ChangeStatus updateImpl(Attributor &A) override {
4718     // Byval is only replacable if it is readonly otherwise we would write into
4719     // the replaced value and not the copy that byval creates implicitly.
4720     Argument *Arg = getAssociatedArgument();
4721     if (Arg->hasByValAttr()) {
4722       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4723       //       there is no race by not copying a constant byval.
4724       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4725       if (!MemAA.isAssumedReadOnly())
4726         return indicatePessimisticFixpoint();
4727     }
4728 
4729     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4730 
4731     auto PredForCallSite = [&](AbstractCallSite ACS) {
4732       const IRPosition &ACSArgPos =
4733           IRPosition::callsite_argument(ACS, getArgNo());
4734       // Check if a coresponding argument was found or if it is on not
4735       // associated (which can happen for callback calls).
4736       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4737         return false;
4738 
4739       // We can only propagate thread independent values through callbacks.
4740       // This is different to direct/indirect call sites because for them we
4741       // know the thread executing the caller and callee is the same. For
4742       // callbacks this is not guaranteed, thus a thread dependent value could
4743       // be different for the caller and callee, making it invalid to propagate.
4744       Value &ArgOp = ACSArgPos.getAssociatedValue();
4745       if (ACS.isCallbackCall())
4746         if (auto *C = dyn_cast<Constant>(&ArgOp))
4747           if (C->isThreadDependent())
4748             return false;
4749       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4750     };
4751 
4752     bool AllCallSitesKnown;
4753     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4754                                 AllCallSitesKnown))
4755       if (!askSimplifiedValueForAAValueConstantRange(A))
4756         return indicatePessimisticFixpoint();
4757 
4758     // If a candicate was found in this update, return CHANGED.
4759     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4760                ? ChangeStatus::UNCHANGED
4761                : ChangeStatus ::CHANGED;
4762   }
4763 
4764   /// See AbstractAttribute::trackStatistics()
4765   void trackStatistics() const override {
4766     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4767   }
4768 };
4769 
4770 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4771   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4772 
4773   /// See AbstractAttribute::updateImpl(...).
4774   ChangeStatus updateImpl(Attributor &A) override {
4775     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4776 
4777     auto PredForReturned = [&](Value &V) {
4778       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4779     };
4780 
4781     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4782       if (!askSimplifiedValueForAAValueConstantRange(A))
4783         return indicatePessimisticFixpoint();
4784 
4785     // If a candicate was found in this update, return CHANGED.
4786     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4787                ? ChangeStatus::UNCHANGED
4788                : ChangeStatus ::CHANGED;
4789   }
4790 
4791   ChangeStatus manifest(Attributor &A) override {
4792     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4793 
4794     if (SimplifiedAssociatedValue.hasValue() &&
4795         !SimplifiedAssociatedValue.getValue())
4796       return Changed;
4797 
4798     Value &V = getAssociatedValue();
4799     auto *C = SimplifiedAssociatedValue.hasValue()
4800                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4801                   : UndefValue::get(V.getType());
4802     if (C) {
4803       auto PredForReturned =
4804           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4805             // We can replace the AssociatedValue with the constant.
4806             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4807               return true;
4808             if (auto *CI = dyn_cast<CallInst>(&V))
4809               if (CI->isMustTailCall())
4810                 return true;
4811 
4812             for (ReturnInst *RI : RetInsts) {
4813               if (RI->getFunction() != getAnchorScope())
4814                 continue;
4815               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4816                                 << " in " << *RI << " :: " << *this << "\n");
4817               if (A.changeUseAfterManifest(RI->getOperandUse(0), *C))
4818                 Changed = ChangeStatus::CHANGED;
4819             }
4820             return true;
4821           };
4822       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4823     }
4824 
4825     return Changed | AAValueSimplify::manifest(A);
4826   }
4827 
4828   /// See AbstractAttribute::trackStatistics()
4829   void trackStatistics() const override {
4830     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4831   }
4832 };
4833 
4834 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4835   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4836 
4837   /// See AbstractAttribute::initialize(...).
4838   void initialize(Attributor &A) override {
4839     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4840     //        Needs investigation.
4841     // AAValueSimplifyImpl::initialize(A);
4842     Value &V = getAnchorValue();
4843 
4844     // TODO: add other stuffs
4845     if (isa<Constant>(V))
4846       indicatePessimisticFixpoint();
4847   }
4848 
4849   /// See AbstractAttribute::updateImpl(...).
4850   ChangeStatus updateImpl(Attributor &A) override {
4851     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4852 
4853     auto VisitValueCB = [&](Value &V, bool &, bool Stripped) -> bool {
4854       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4855       if (!Stripped && this == &AA) {
4856         // TODO: Look the instruction and check recursively.
4857 
4858         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4859                           << "\n");
4860         return false;
4861       }
4862       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4863     };
4864 
4865     bool Dummy = false;
4866     if (!genericValueTraversal<AAValueSimplify, bool>(A, getIRPosition(), *this,
4867                                                       Dummy, VisitValueCB))
4868       if (!askSimplifiedValueForAAValueConstantRange(A))
4869         return indicatePessimisticFixpoint();
4870 
4871     // If a candicate was found in this update, return CHANGED.
4872 
4873     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4874                ? ChangeStatus::UNCHANGED
4875                : ChangeStatus ::CHANGED;
4876   }
4877 
4878   /// See AbstractAttribute::trackStatistics()
4879   void trackStatistics() const override {
4880     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4881   }
4882 };
4883 
4884 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4885   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4886 
4887   /// See AbstractAttribute::initialize(...).
4888   void initialize(Attributor &A) override {
4889     SimplifiedAssociatedValue = &getAnchorValue();
4890     indicateOptimisticFixpoint();
4891   }
4892   /// See AbstractAttribute::initialize(...).
4893   ChangeStatus updateImpl(Attributor &A) override {
4894     llvm_unreachable(
4895         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4896   }
4897   /// See AbstractAttribute::trackStatistics()
4898   void trackStatistics() const override {
4899     STATS_DECLTRACK_FN_ATTR(value_simplify)
4900   }
4901 };
4902 
4903 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4904   AAValueSimplifyCallSite(const IRPosition &IRP)
4905       : AAValueSimplifyFunction(IRP) {}
4906   /// See AbstractAttribute::trackStatistics()
4907   void trackStatistics() const override {
4908     STATS_DECLTRACK_CS_ATTR(value_simplify)
4909   }
4910 };
4911 
4912 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4913   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4914       : AAValueSimplifyReturned(IRP) {}
4915 
4916   /// See AbstractAttribute::manifest(...).
4917   ChangeStatus manifest(Attributor &A) override {
4918     if (auto *CI = dyn_cast<CallInst>(&getAssociatedValue()))
4919       if (CI->isMustTailCall())
4920         return ChangeStatus::UNCHANGED;
4921     return AAValueSimplifyImpl::manifest(A);
4922   }
4923 
4924   void trackStatistics() const override {
4925     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4926   }
4927 };
4928 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4929   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4930       : AAValueSimplifyFloating(IRP) {}
4931 
4932   void trackStatistics() const override {
4933     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4934   }
4935 };
4936 
4937 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4938 struct AAHeapToStackImpl : public AAHeapToStack {
4939   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4940 
4941   const std::string getAsStr() const override {
4942     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4943   }
4944 
4945   ChangeStatus manifest(Attributor &A) override {
4946     assert(getState().isValidState() &&
4947            "Attempted to manifest an invalid state!");
4948 
4949     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4950     Function *F = getAssociatedFunction();
4951     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4952 
4953     for (Instruction *MallocCall : MallocCalls) {
4954       // This malloc cannot be replaced.
4955       if (BadMallocCalls.count(MallocCall))
4956         continue;
4957 
4958       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4959         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4960         A.deleteAfterManifest(*FreeCall);
4961         HasChanged = ChangeStatus::CHANGED;
4962       }
4963 
4964       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4965                         << "\n");
4966 
4967       Constant *Size;
4968       if (isCallocLikeFn(MallocCall, TLI)) {
4969         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4970         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4971         APInt TotalSize = SizeT->getValue() * Num->getValue();
4972         Size =
4973             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4974       } else {
4975         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4976       }
4977 
4978       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4979       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4980                                        Size, "", MallocCall->getNextNode());
4981 
4982       if (AI->getType() != MallocCall->getType())
4983         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4984                              AI->getNextNode());
4985 
4986       A.changeValueAfterManifest(*MallocCall, *AI);
4987 
4988       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4989         auto *NBB = II->getNormalDest();
4990         BranchInst::Create(NBB, MallocCall->getParent());
4991         A.deleteAfterManifest(*MallocCall);
4992       } else {
4993         A.deleteAfterManifest(*MallocCall);
4994       }
4995 
4996       if (isCallocLikeFn(MallocCall, TLI)) {
4997         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4998                                    AI->getNextNode());
4999         Value *Ops[] = {
5000             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5001             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5002 
5003         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5004         Module *M = F->getParent();
5005         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5006         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5007       }
5008       HasChanged = ChangeStatus::CHANGED;
5009     }
5010 
5011     return HasChanged;
5012   }
5013 
5014   /// Collection of all malloc calls in a function.
5015   SmallSetVector<Instruction *, 4> MallocCalls;
5016 
5017   /// Collection of malloc calls that cannot be converted.
5018   DenseSet<const Instruction *> BadMallocCalls;
5019 
5020   /// A map for each malloc call to the set of associated free calls.
5021   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5022 
5023   ChangeStatus updateImpl(Attributor &A) override;
5024 };
5025 
5026 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5027   const Function *F = getAssociatedFunction();
5028   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5029 
5030   MustBeExecutedContextExplorer &Explorer =
5031       A.getInfoCache().getMustBeExecutedContextExplorer();
5032 
5033   auto FreeCheck = [&](Instruction &I) {
5034     const auto &Frees = FreesForMalloc.lookup(&I);
5035     if (Frees.size() != 1)
5036       return false;
5037     Instruction *UniqueFree = *Frees.begin();
5038     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5039   };
5040 
5041   auto UsesCheck = [&](Instruction &I) {
5042     bool ValidUsesOnly = true;
5043     bool MustUse = true;
5044     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5045       Instruction *UserI = cast<Instruction>(U.getUser());
5046       if (isa<LoadInst>(UserI))
5047         return true;
5048       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5049         if (SI->getValueOperand() == U.get()) {
5050           LLVM_DEBUG(dbgs()
5051                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5052           ValidUsesOnly = false;
5053         } else {
5054           // A store into the malloc'ed memory is fine.
5055         }
5056         return true;
5057       }
5058       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5059         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5060           return true;
5061         // Record malloc.
5062         if (isFreeCall(UserI, TLI)) {
5063           if (MustUse) {
5064             FreesForMalloc[&I].insert(UserI);
5065           } else {
5066             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5067                               << *UserI << "\n");
5068             ValidUsesOnly = false;
5069           }
5070           return true;
5071         }
5072 
5073         unsigned ArgNo = CB->getArgOperandNo(&U);
5074 
5075         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5076             *this, IRPosition::callsite_argument(*CB, ArgNo));
5077 
5078         // If a callsite argument use is nofree, we are fine.
5079         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5080             *this, IRPosition::callsite_argument(*CB, ArgNo));
5081 
5082         if (!NoCaptureAA.isAssumedNoCapture() ||
5083             !ArgNoFreeAA.isAssumedNoFree()) {
5084           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5085           ValidUsesOnly = false;
5086         }
5087         return true;
5088       }
5089 
5090       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5091           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5092         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5093         Follow = true;
5094         return true;
5095       }
5096       // Unknown user for which we can not track uses further (in a way that
5097       // makes sense).
5098       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5099       ValidUsesOnly = false;
5100       return true;
5101     };
5102     A.checkForAllUses(Pred, *this, I);
5103     return ValidUsesOnly;
5104   };
5105 
5106   auto MallocCallocCheck = [&](Instruction &I) {
5107     if (BadMallocCalls.count(&I))
5108       return true;
5109 
5110     bool IsMalloc = isMallocLikeFn(&I, TLI);
5111     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5112     if (!IsMalloc && !IsCalloc) {
5113       BadMallocCalls.insert(&I);
5114       return true;
5115     }
5116 
5117     if (IsMalloc) {
5118       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5119         if (Size->getValue().ule(MaxHeapToStackSize))
5120           if (UsesCheck(I) || FreeCheck(I)) {
5121             MallocCalls.insert(&I);
5122             return true;
5123           }
5124     } else if (IsCalloc) {
5125       bool Overflow = false;
5126       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5127         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5128           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5129                   .ule(MaxHeapToStackSize))
5130             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5131               MallocCalls.insert(&I);
5132               return true;
5133             }
5134     }
5135 
5136     BadMallocCalls.insert(&I);
5137     return true;
5138   };
5139 
5140   size_t NumBadMallocs = BadMallocCalls.size();
5141 
5142   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5143 
5144   if (NumBadMallocs != BadMallocCalls.size())
5145     return ChangeStatus::CHANGED;
5146 
5147   return ChangeStatus::UNCHANGED;
5148 }
5149 
5150 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5151   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
5152 
5153   /// See AbstractAttribute::trackStatistics()
5154   void trackStatistics() const override {
5155     STATS_DECL(MallocCalls, Function,
5156                "Number of malloc calls converted to allocas");
5157     for (auto *C : MallocCalls)
5158       if (!BadMallocCalls.count(C))
5159         ++BUILD_STAT_NAME(MallocCalls, Function);
5160   }
5161 };
5162 
5163 /// ----------------------- Privatizable Pointers ------------------------------
5164 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5165   AAPrivatizablePtrImpl(const IRPosition &IRP)
5166       : AAPrivatizablePtr(IRP), PrivatizableType(llvm::None) {}
5167 
5168   ChangeStatus indicatePessimisticFixpoint() override {
5169     AAPrivatizablePtr::indicatePessimisticFixpoint();
5170     PrivatizableType = nullptr;
5171     return ChangeStatus::CHANGED;
5172   }
5173 
5174   /// Identify the type we can chose for a private copy of the underlying
5175   /// argument. None means it is not clear yet, nullptr means there is none.
5176   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5177 
5178   /// Return a privatizable type that encloses both T0 and T1.
5179   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5180   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5181     if (!T0.hasValue())
5182       return T1;
5183     if (!T1.hasValue())
5184       return T0;
5185     if (T0 == T1)
5186       return T0;
5187     return nullptr;
5188   }
5189 
5190   Optional<Type *> getPrivatizableType() const override {
5191     return PrivatizableType;
5192   }
5193 
5194   const std::string getAsStr() const override {
5195     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5196   }
5197 
5198 protected:
5199   Optional<Type *> PrivatizableType;
5200 };
5201 
5202 // TODO: Do this for call site arguments (probably also other values) as well.
5203 
5204 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5205   AAPrivatizablePtrArgument(const IRPosition &IRP)
5206       : AAPrivatizablePtrImpl(IRP) {}
5207 
5208   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5209   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5210     // If this is a byval argument and we know all the call sites (so we can
5211     // rewrite them), there is no need to check them explicitly.
5212     bool AllCallSitesKnown;
5213     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5214         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5215                                true, AllCallSitesKnown))
5216       return getAssociatedValue().getType()->getPointerElementType();
5217 
5218     Optional<Type *> Ty;
5219     unsigned ArgNo = getIRPosition().getArgNo();
5220 
5221     // Make sure the associated call site argument has the same type at all call
5222     // sites and it is an allocation we know is safe to privatize, for now that
5223     // means we only allow alloca instructions.
5224     // TODO: We can additionally analyze the accesses in the callee to  create
5225     //       the type from that information instead. That is a little more
5226     //       involved and will be done in a follow up patch.
5227     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5228       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5229       // Check if a coresponding argument was found or if it is one not
5230       // associated (which can happen for callback calls).
5231       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5232         return false;
5233 
5234       // Check that all call sites agree on a type.
5235       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
5236       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5237 
5238       LLVM_DEBUG({
5239         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5240         if (CSTy.hasValue() && CSTy.getValue())
5241           CSTy.getValue()->print(dbgs());
5242         else if (CSTy.hasValue())
5243           dbgs() << "<nullptr>";
5244         else
5245           dbgs() << "<none>";
5246       });
5247 
5248       Ty = combineTypes(Ty, CSTy);
5249 
5250       LLVM_DEBUG({
5251         dbgs() << " : New Type: ";
5252         if (Ty.hasValue() && Ty.getValue())
5253           Ty.getValue()->print(dbgs());
5254         else if (Ty.hasValue())
5255           dbgs() << "<nullptr>";
5256         else
5257           dbgs() << "<none>";
5258         dbgs() << "\n";
5259       });
5260 
5261       return !Ty.hasValue() || Ty.getValue();
5262     };
5263 
5264     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5265       return nullptr;
5266     return Ty;
5267   }
5268 
5269   /// See AbstractAttribute::updateImpl(...).
5270   ChangeStatus updateImpl(Attributor &A) override {
5271     PrivatizableType = identifyPrivatizableType(A);
5272     if (!PrivatizableType.hasValue())
5273       return ChangeStatus::UNCHANGED;
5274     if (!PrivatizableType.getValue())
5275       return indicatePessimisticFixpoint();
5276 
5277     // Avoid arguments with padding for now.
5278     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5279         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5280                                                 A.getInfoCache().getDL())) {
5281       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5282       return indicatePessimisticFixpoint();
5283     }
5284 
5285     // Verify callee and caller agree on how the promoted argument would be
5286     // passed.
5287     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5288     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5289     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5290     Function &Fn = *getIRPosition().getAnchorScope();
5291     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5292     ArgsToPromote.insert(getAssociatedArgument());
5293     const auto *TTI =
5294         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5295     if (!TTI ||
5296         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5297             Fn, *TTI, ArgsToPromote, Dummy) ||
5298         ArgsToPromote.empty()) {
5299       LLVM_DEBUG(
5300           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5301                  << Fn.getName() << "\n");
5302       return indicatePessimisticFixpoint();
5303     }
5304 
5305     // Collect the types that will replace the privatizable type in the function
5306     // signature.
5307     SmallVector<Type *, 16> ReplacementTypes;
5308     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5309 
5310     // Register a rewrite of the argument.
5311     Argument *Arg = getAssociatedArgument();
5312     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5313       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5314       return indicatePessimisticFixpoint();
5315     }
5316 
5317     unsigned ArgNo = Arg->getArgNo();
5318 
5319     // Helper to check if for the given call site the associated argument is
5320     // passed to a callback where the privatization would be different.
5321     auto IsCompatiblePrivArgOfCallback = [&](CallSite CS) {
5322       SmallVector<const Use *, 4> CBUses;
5323       AbstractCallSite::getCallbackUses(CS, CBUses);
5324       for (const Use *U : CBUses) {
5325         AbstractCallSite CBACS(U);
5326         assert(CBACS && CBACS.isCallbackCall());
5327         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5328           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5329 
5330           LLVM_DEBUG({
5331             dbgs()
5332                 << "[AAPrivatizablePtr] Argument " << *Arg
5333                 << "check if can be privatized in the context of its parent ("
5334                 << Arg->getParent()->getName()
5335                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5336                    "callback ("
5337                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5338                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5339                 << CBACS.getCallArgOperand(CBArg) << " vs "
5340                 << CS.getArgOperand(ArgNo) << "\n"
5341                 << "[AAPrivatizablePtr] " << CBArg << " : "
5342                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5343           });
5344 
5345           if (CBArgNo != int(ArgNo))
5346             continue;
5347           const auto &CBArgPrivAA =
5348               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5349           if (CBArgPrivAA.isValidState()) {
5350             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5351             if (!CBArgPrivTy.hasValue())
5352               continue;
5353             if (CBArgPrivTy.getValue() == PrivatizableType)
5354               continue;
5355           }
5356 
5357           LLVM_DEBUG({
5358             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5359                    << " cannot be privatized in the context of its parent ("
5360                    << Arg->getParent()->getName()
5361                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5362                       "callback ("
5363                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5364                    << ").\n[AAPrivatizablePtr] for which the argument "
5365                       "privatization is not compatible.\n";
5366           });
5367           return false;
5368         }
5369       }
5370       return true;
5371     };
5372 
5373     // Helper to check if for the given call site the associated argument is
5374     // passed to a direct call where the privatization would be different.
5375     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5376       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5377       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5378       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5379              "Expected a direct call operand for callback call operand");
5380 
5381       LLVM_DEBUG({
5382         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5383                << " check if be privatized in the context of its parent ("
5384                << Arg->getParent()->getName()
5385                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5386                   "direct call of ("
5387                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5388                << ").\n";
5389       });
5390 
5391       Function *DCCallee = DC->getCalledFunction();
5392       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5393         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5394             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5395         if (DCArgPrivAA.isValidState()) {
5396           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5397           if (!DCArgPrivTy.hasValue())
5398             return true;
5399           if (DCArgPrivTy.getValue() == PrivatizableType)
5400             return true;
5401         }
5402       }
5403 
5404       LLVM_DEBUG({
5405         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5406                << " cannot be privatized in the context of its parent ("
5407                << Arg->getParent()->getName()
5408                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5409                   "direct call of ("
5410                << ACS.getCallSite().getCalledFunction()->getName()
5411                << ").\n[AAPrivatizablePtr] for which the argument "
5412                   "privatization is not compatible.\n";
5413       });
5414       return false;
5415     };
5416 
5417     // Helper to check if the associated argument is used at the given abstract
5418     // call site in a way that is incompatible with the privatization assumed
5419     // here.
5420     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5421       if (ACS.isDirectCall())
5422         return IsCompatiblePrivArgOfCallback(ACS.getCallSite());
5423       if (ACS.isCallbackCall())
5424         return IsCompatiblePrivArgOfDirectCS(ACS);
5425       return false;
5426     };
5427 
5428     bool AllCallSitesKnown;
5429     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5430                                 AllCallSitesKnown))
5431       return indicatePessimisticFixpoint();
5432 
5433     return ChangeStatus::UNCHANGED;
5434   }
5435 
5436   /// Given a type to private \p PrivType, collect the constituates (which are
5437   /// used) in \p ReplacementTypes.
5438   static void
5439   identifyReplacementTypes(Type *PrivType,
5440                            SmallVectorImpl<Type *> &ReplacementTypes) {
5441     // TODO: For now we expand the privatization type to the fullest which can
5442     //       lead to dead arguments that need to be removed later.
5443     assert(PrivType && "Expected privatizable type!");
5444 
5445     // Traverse the type, extract constituate types on the outermost level.
5446     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5447       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5448         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5449     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5450       ReplacementTypes.append(PrivArrayType->getNumElements(),
5451                               PrivArrayType->getElementType());
5452     } else {
5453       ReplacementTypes.push_back(PrivType);
5454     }
5455   }
5456 
5457   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5458   /// The values needed are taken from the arguments of \p F starting at
5459   /// position \p ArgNo.
5460   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5461                                    unsigned ArgNo, Instruction &IP) {
5462     assert(PrivType && "Expected privatizable type!");
5463 
5464     IRBuilder<NoFolder> IRB(&IP);
5465     const DataLayout &DL = F.getParent()->getDataLayout();
5466 
5467     // Traverse the type, build GEPs and stores.
5468     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5469       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5470       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5471         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5472         Value *Ptr = constructPointer(
5473             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5474         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5475       }
5476     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5477       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5478       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5479       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5480         Value *Ptr =
5481             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5482         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5483       }
5484     } else {
5485       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5486     }
5487   }
5488 
5489   /// Extract values from \p Base according to the type \p PrivType at the
5490   /// call position \p ACS. The values are appended to \p ReplacementValues.
5491   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5492                                Value *Base,
5493                                SmallVectorImpl<Value *> &ReplacementValues) {
5494     assert(Base && "Expected base value!");
5495     assert(PrivType && "Expected privatizable type!");
5496     Instruction *IP = ACS.getInstruction();
5497 
5498     IRBuilder<NoFolder> IRB(IP);
5499     const DataLayout &DL = IP->getModule()->getDataLayout();
5500 
5501     if (Base->getType()->getPointerElementType() != PrivType)
5502       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5503                                                  "", ACS.getInstruction());
5504 
5505     // TODO: Improve the alignment of the loads.
5506     // Traverse the type, build GEPs and loads.
5507     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5508       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5509       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5510         Type *PointeeTy = PrivStructType->getElementType(u);
5511         Value *Ptr =
5512             constructPointer(PointeeTy->getPointerTo(), Base,
5513                              PrivStructLayout->getElementOffset(u), IRB, DL);
5514         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5515         L->setAlignment(MaybeAlign(1));
5516         ReplacementValues.push_back(L);
5517       }
5518     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5519       Type *PointeeTy = PrivArrayType->getElementType();
5520       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5521       Type *PointeePtrTy = PointeeTy->getPointerTo();
5522       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5523         Value *Ptr =
5524             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5525         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5526         L->setAlignment(MaybeAlign(1));
5527         ReplacementValues.push_back(L);
5528       }
5529     } else {
5530       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5531       L->setAlignment(MaybeAlign(1));
5532       ReplacementValues.push_back(L);
5533     }
5534   }
5535 
5536   /// See AbstractAttribute::manifest(...)
5537   ChangeStatus manifest(Attributor &A) override {
5538     if (!PrivatizableType.hasValue())
5539       return ChangeStatus::UNCHANGED;
5540     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5541 
5542     // Collect all tail calls in the function as we cannot allow new allocas to
5543     // escape into tail recursion.
5544     // TODO: Be smarter about new allocas escaping into tail calls.
5545     SmallVector<CallInst *, 16> TailCalls;
5546     if (!A.checkForAllInstructions(
5547             [&](Instruction &I) {
5548               CallInst &CI = cast<CallInst>(I);
5549               if (CI.isTailCall())
5550                 TailCalls.push_back(&CI);
5551               return true;
5552             },
5553             *this, {Instruction::Call}))
5554       return ChangeStatus::UNCHANGED;
5555 
5556     Argument *Arg = getAssociatedArgument();
5557 
5558     // Callback to repair the associated function. A new alloca is placed at the
5559     // beginning and initialized with the values passed through arguments. The
5560     // new alloca replaces the use of the old pointer argument.
5561     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5562         [=](const Attributor::ArgumentReplacementInfo &ARI,
5563             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5564           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5565           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5566           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5567                                     Arg->getName() + ".priv", IP);
5568           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5569                                ArgIt->getArgNo(), *IP);
5570           Arg->replaceAllUsesWith(AI);
5571 
5572           for (CallInst *CI : TailCalls)
5573             CI->setTailCall(false);
5574         };
5575 
5576     // Callback to repair a call site of the associated function. The elements
5577     // of the privatizable type are loaded prior to the call and passed to the
5578     // new function version.
5579     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5580         [=](const Attributor::ArgumentReplacementInfo &ARI,
5581             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5582           createReplacementValues(
5583               PrivatizableType.getValue(), ACS,
5584               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5585                                   NewArgOperands);
5586         };
5587 
5588     // Collect the types that will replace the privatizable type in the function
5589     // signature.
5590     SmallVector<Type *, 16> ReplacementTypes;
5591     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5592 
5593     // Register a rewrite of the argument.
5594     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5595                                            std::move(FnRepairCB),
5596                                            std::move(ACSRepairCB)))
5597       return ChangeStatus::CHANGED;
5598     return ChangeStatus::UNCHANGED;
5599   }
5600 
5601   /// See AbstractAttribute::trackStatistics()
5602   void trackStatistics() const override {
5603     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5604   }
5605 };
5606 
5607 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5608   AAPrivatizablePtrFloating(const IRPosition &IRP)
5609       : AAPrivatizablePtrImpl(IRP) {}
5610 
5611   /// See AbstractAttribute::initialize(...).
5612   virtual void initialize(Attributor &A) override {
5613     // TODO: We can privatize more than arguments.
5614     indicatePessimisticFixpoint();
5615   }
5616 
5617   ChangeStatus updateImpl(Attributor &A) override {
5618     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5619                      "updateImpl will not be called");
5620   }
5621 
5622   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5623   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5624     Value *Obj =
5625         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5626     if (!Obj) {
5627       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5628       return nullptr;
5629     }
5630 
5631     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5632       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5633         if (CI->isOne())
5634           return Obj->getType()->getPointerElementType();
5635     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5636       auto &PrivArgAA =
5637           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5638       if (PrivArgAA.isAssumedPrivatizablePtr())
5639         return Obj->getType()->getPointerElementType();
5640     }
5641 
5642     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5643                          "alloca nor privatizable argument: "
5644                       << *Obj << "!\n");
5645     return nullptr;
5646   }
5647 
5648   /// See AbstractAttribute::trackStatistics()
5649   void trackStatistics() const override {
5650     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5651   }
5652 };
5653 
5654 struct AAPrivatizablePtrCallSiteArgument final
5655     : public AAPrivatizablePtrFloating {
5656   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP)
5657       : AAPrivatizablePtrFloating(IRP) {}
5658 
5659   /// See AbstractAttribute::initialize(...).
5660   void initialize(Attributor &A) override {
5661     if (getIRPosition().hasAttr(Attribute::ByVal))
5662       indicateOptimisticFixpoint();
5663   }
5664 
5665   /// See AbstractAttribute::updateImpl(...).
5666   ChangeStatus updateImpl(Attributor &A) override {
5667     PrivatizableType = identifyPrivatizableType(A);
5668     if (!PrivatizableType.hasValue())
5669       return ChangeStatus::UNCHANGED;
5670     if (!PrivatizableType.getValue())
5671       return indicatePessimisticFixpoint();
5672 
5673     const IRPosition &IRP = getIRPosition();
5674     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5675     if (!NoCaptureAA.isAssumedNoCapture()) {
5676       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5677       return indicatePessimisticFixpoint();
5678     }
5679 
5680     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5681     if (!NoAliasAA.isAssumedNoAlias()) {
5682       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5683       return indicatePessimisticFixpoint();
5684     }
5685 
5686     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5687     if (!MemBehaviorAA.isAssumedReadOnly()) {
5688       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5689       return indicatePessimisticFixpoint();
5690     }
5691 
5692     return ChangeStatus::UNCHANGED;
5693   }
5694 
5695   /// See AbstractAttribute::trackStatistics()
5696   void trackStatistics() const override {
5697     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5698   }
5699 };
5700 
5701 struct AAPrivatizablePtrCallSiteReturned final
5702     : public AAPrivatizablePtrFloating {
5703   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP)
5704       : AAPrivatizablePtrFloating(IRP) {}
5705 
5706   /// See AbstractAttribute::initialize(...).
5707   void initialize(Attributor &A) override {
5708     // TODO: We can privatize more than arguments.
5709     indicatePessimisticFixpoint();
5710   }
5711 
5712   /// See AbstractAttribute::trackStatistics()
5713   void trackStatistics() const override {
5714     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5715   }
5716 };
5717 
5718 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5719   AAPrivatizablePtrReturned(const IRPosition &IRP)
5720       : AAPrivatizablePtrFloating(IRP) {}
5721 
5722   /// See AbstractAttribute::initialize(...).
5723   void initialize(Attributor &A) override {
5724     // TODO: We can privatize more than arguments.
5725     indicatePessimisticFixpoint();
5726   }
5727 
5728   /// See AbstractAttribute::trackStatistics()
5729   void trackStatistics() const override {
5730     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5731   }
5732 };
5733 
5734 /// -------------------- Memory Behavior Attributes ----------------------------
5735 /// Includes read-none, read-only, and write-only.
5736 /// ----------------------------------------------------------------------------
5737 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5738   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
5739 
5740   /// See AbstractAttribute::initialize(...).
5741   void initialize(Attributor &A) override {
5742     intersectAssumedBits(BEST_STATE);
5743     getKnownStateFromValue(getIRPosition(), getState());
5744     IRAttribute::initialize(A);
5745   }
5746 
5747   /// Return the memory behavior information encoded in the IR for \p IRP.
5748   static void getKnownStateFromValue(const IRPosition &IRP,
5749                                      BitIntegerState &State,
5750                                      bool IgnoreSubsumingPositions = false) {
5751     SmallVector<Attribute, 2> Attrs;
5752     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5753     for (const Attribute &Attr : Attrs) {
5754       switch (Attr.getKindAsEnum()) {
5755       case Attribute::ReadNone:
5756         State.addKnownBits(NO_ACCESSES);
5757         break;
5758       case Attribute::ReadOnly:
5759         State.addKnownBits(NO_WRITES);
5760         break;
5761       case Attribute::WriteOnly:
5762         State.addKnownBits(NO_READS);
5763         break;
5764       default:
5765         llvm_unreachable("Unexpected attribute!");
5766       }
5767     }
5768 
5769     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5770       if (!I->mayReadFromMemory())
5771         State.addKnownBits(NO_READS);
5772       if (!I->mayWriteToMemory())
5773         State.addKnownBits(NO_WRITES);
5774     }
5775   }
5776 
5777   /// See AbstractAttribute::getDeducedAttributes(...).
5778   void getDeducedAttributes(LLVMContext &Ctx,
5779                             SmallVectorImpl<Attribute> &Attrs) const override {
5780     assert(Attrs.size() == 0);
5781     if (isAssumedReadNone())
5782       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5783     else if (isAssumedReadOnly())
5784       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5785     else if (isAssumedWriteOnly())
5786       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5787     assert(Attrs.size() <= 1);
5788   }
5789 
5790   /// See AbstractAttribute::manifest(...).
5791   ChangeStatus manifest(Attributor &A) override {
5792     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5793       return ChangeStatus::UNCHANGED;
5794 
5795     const IRPosition &IRP = getIRPosition();
5796 
5797     // Check if we would improve the existing attributes first.
5798     SmallVector<Attribute, 4> DeducedAttrs;
5799     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5800     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5801           return IRP.hasAttr(Attr.getKindAsEnum(),
5802                              /* IgnoreSubsumingPositions */ true);
5803         }))
5804       return ChangeStatus::UNCHANGED;
5805 
5806     // Clear existing attributes.
5807     IRP.removeAttrs(AttrKinds);
5808 
5809     // Use the generic manifest method.
5810     return IRAttribute::manifest(A);
5811   }
5812 
5813   /// See AbstractState::getAsStr().
5814   const std::string getAsStr() const override {
5815     if (isAssumedReadNone())
5816       return "readnone";
5817     if (isAssumedReadOnly())
5818       return "readonly";
5819     if (isAssumedWriteOnly())
5820       return "writeonly";
5821     return "may-read/write";
5822   }
5823 
5824   /// The set of IR attributes AAMemoryBehavior deals with.
5825   static const Attribute::AttrKind AttrKinds[3];
5826 };
5827 
5828 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5829     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5830 
5831 /// Memory behavior attribute for a floating value.
5832 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5833   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5834 
5835   /// See AbstractAttribute::initialize(...).
5836   void initialize(Attributor &A) override {
5837     AAMemoryBehaviorImpl::initialize(A);
5838     // Initialize the use vector with all direct uses of the associated value.
5839     for (const Use &U : getAssociatedValue().uses())
5840       Uses.insert(&U);
5841   }
5842 
5843   /// See AbstractAttribute::updateImpl(...).
5844   ChangeStatus updateImpl(Attributor &A) override;
5845 
5846   /// See AbstractAttribute::trackStatistics()
5847   void trackStatistics() const override {
5848     if (isAssumedReadNone())
5849       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5850     else if (isAssumedReadOnly())
5851       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5852     else if (isAssumedWriteOnly())
5853       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5854   }
5855 
5856 private:
5857   /// Return true if users of \p UserI might access the underlying
5858   /// variable/location described by \p U and should therefore be analyzed.
5859   bool followUsersOfUseIn(Attributor &A, const Use *U,
5860                           const Instruction *UserI);
5861 
5862   /// Update the state according to the effect of use \p U in \p UserI.
5863   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5864 
5865 protected:
5866   /// Container for (transitive) uses of the associated argument.
5867   SetVector<const Use *> Uses;
5868 };
5869 
5870 /// Memory behavior attribute for function argument.
5871 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5872   AAMemoryBehaviorArgument(const IRPosition &IRP)
5873       : AAMemoryBehaviorFloating(IRP) {}
5874 
5875   /// See AbstractAttribute::initialize(...).
5876   void initialize(Attributor &A) override {
5877     intersectAssumedBits(BEST_STATE);
5878     const IRPosition &IRP = getIRPosition();
5879     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5880     // can query it when we use has/getAttr. That would allow us to reuse the
5881     // initialize of the base class here.
5882     bool HasByVal =
5883         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5884     getKnownStateFromValue(IRP, getState(),
5885                            /* IgnoreSubsumingPositions */ HasByVal);
5886 
5887     // Initialize the use vector with all direct uses of the associated value.
5888     Argument *Arg = getAssociatedArgument();
5889     if (!Arg || !Arg->getParent()->hasExactDefinition()) {
5890       indicatePessimisticFixpoint();
5891     } else {
5892       // Initialize the use vector with all direct uses of the associated value.
5893       for (const Use &U : Arg->uses())
5894         Uses.insert(&U);
5895     }
5896   }
5897 
5898   ChangeStatus manifest(Attributor &A) override {
5899     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5900     if (!getAssociatedValue().getType()->isPointerTy())
5901       return ChangeStatus::UNCHANGED;
5902 
5903     // TODO: From readattrs.ll: "inalloca parameters are always
5904     //                           considered written"
5905     if (hasAttr({Attribute::InAlloca})) {
5906       removeKnownBits(NO_WRITES);
5907       removeAssumedBits(NO_WRITES);
5908     }
5909     return AAMemoryBehaviorFloating::manifest(A);
5910   }
5911 
5912   /// See AbstractAttribute::trackStatistics()
5913   void trackStatistics() const override {
5914     if (isAssumedReadNone())
5915       STATS_DECLTRACK_ARG_ATTR(readnone)
5916     else if (isAssumedReadOnly())
5917       STATS_DECLTRACK_ARG_ATTR(readonly)
5918     else if (isAssumedWriteOnly())
5919       STATS_DECLTRACK_ARG_ATTR(writeonly)
5920   }
5921 };
5922 
5923 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5924   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
5925       : AAMemoryBehaviorArgument(IRP) {}
5926 
5927   /// See AbstractAttribute::initialize(...).
5928   void initialize(Attributor &A) override {
5929     if (Argument *Arg = getAssociatedArgument()) {
5930       if (Arg->hasByValAttr()) {
5931         addKnownBits(NO_WRITES);
5932         removeKnownBits(NO_READS);
5933         removeAssumedBits(NO_READS);
5934       }
5935     } else {
5936     }
5937     AAMemoryBehaviorArgument::initialize(A);
5938   }
5939 
5940   /// See AbstractAttribute::updateImpl(...).
5941   ChangeStatus updateImpl(Attributor &A) override {
5942     // TODO: Once we have call site specific value information we can provide
5943     //       call site specific liveness liveness information and then it makes
5944     //       sense to specialize attributes for call sites arguments instead of
5945     //       redirecting requests to the callee argument.
5946     Argument *Arg = getAssociatedArgument();
5947     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5948     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5949     return clampStateAndIndicateChange(
5950         getState(),
5951         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5952   }
5953 
5954   /// See AbstractAttribute::trackStatistics()
5955   void trackStatistics() const override {
5956     if (isAssumedReadNone())
5957       STATS_DECLTRACK_CSARG_ATTR(readnone)
5958     else if (isAssumedReadOnly())
5959       STATS_DECLTRACK_CSARG_ATTR(readonly)
5960     else if (isAssumedWriteOnly())
5961       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5962   }
5963 };
5964 
5965 /// Memory behavior attribute for a call site return position.
5966 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5967   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
5968       : AAMemoryBehaviorFloating(IRP) {}
5969 
5970   /// See AbstractAttribute::manifest(...).
5971   ChangeStatus manifest(Attributor &A) override {
5972     // We do not annotate returned values.
5973     return ChangeStatus::UNCHANGED;
5974   }
5975 
5976   /// See AbstractAttribute::trackStatistics()
5977   void trackStatistics() const override {}
5978 };
5979 
5980 /// An AA to represent the memory behavior function attributes.
5981 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5982   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5983 
5984   /// See AbstractAttribute::updateImpl(Attributor &A).
5985   virtual ChangeStatus updateImpl(Attributor &A) override;
5986 
5987   /// See AbstractAttribute::manifest(...).
5988   ChangeStatus manifest(Attributor &A) override {
5989     Function &F = cast<Function>(getAnchorValue());
5990     if (isAssumedReadNone()) {
5991       F.removeFnAttr(Attribute::ArgMemOnly);
5992       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5993       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5994     }
5995     return AAMemoryBehaviorImpl::manifest(A);
5996   }
5997 
5998   /// See AbstractAttribute::trackStatistics()
5999   void trackStatistics() const override {
6000     if (isAssumedReadNone())
6001       STATS_DECLTRACK_FN_ATTR(readnone)
6002     else if (isAssumedReadOnly())
6003       STATS_DECLTRACK_FN_ATTR(readonly)
6004     else if (isAssumedWriteOnly())
6005       STATS_DECLTRACK_FN_ATTR(writeonly)
6006   }
6007 };
6008 
6009 /// AAMemoryBehavior attribute for call sites.
6010 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6011   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
6012 
6013   /// See AbstractAttribute::initialize(...).
6014   void initialize(Attributor &A) override {
6015     AAMemoryBehaviorImpl::initialize(A);
6016     Function *F = getAssociatedFunction();
6017     if (!F || !F->hasExactDefinition())
6018       indicatePessimisticFixpoint();
6019   }
6020 
6021   /// See AbstractAttribute::updateImpl(...).
6022   ChangeStatus updateImpl(Attributor &A) override {
6023     // TODO: Once we have call site specific value information we can provide
6024     //       call site specific liveness liveness information and then it makes
6025     //       sense to specialize attributes for call sites arguments instead of
6026     //       redirecting requests to the callee argument.
6027     Function *F = getAssociatedFunction();
6028     const IRPosition &FnPos = IRPosition::function(*F);
6029     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
6030     return clampStateAndIndicateChange(
6031         getState(),
6032         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
6033   }
6034 
6035   /// See AbstractAttribute::trackStatistics()
6036   void trackStatistics() const override {
6037     if (isAssumedReadNone())
6038       STATS_DECLTRACK_CS_ATTR(readnone)
6039     else if (isAssumedReadOnly())
6040       STATS_DECLTRACK_CS_ATTR(readonly)
6041     else if (isAssumedWriteOnly())
6042       STATS_DECLTRACK_CS_ATTR(writeonly)
6043   }
6044 };
6045 
6046 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6047 
6048   // The current assumed state used to determine a change.
6049   auto AssumedState = getAssumed();
6050 
6051   auto CheckRWInst = [&](Instruction &I) {
6052     // If the instruction has an own memory behavior state, use it to restrict
6053     // the local state. No further analysis is required as the other memory
6054     // state is as optimistic as it gets.
6055     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
6056       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6057           *this, IRPosition::callsite_function(ICS));
6058       intersectAssumedBits(MemBehaviorAA.getAssumed());
6059       return !isAtFixpoint();
6060     }
6061 
6062     // Remove access kind modifiers if necessary.
6063     if (I.mayReadFromMemory())
6064       removeAssumedBits(NO_READS);
6065     if (I.mayWriteToMemory())
6066       removeAssumedBits(NO_WRITES);
6067     return !isAtFixpoint();
6068   };
6069 
6070   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6071     return indicatePessimisticFixpoint();
6072 
6073   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6074                                         : ChangeStatus::UNCHANGED;
6075 }
6076 
6077 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6078 
6079   const IRPosition &IRP = getIRPosition();
6080   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6081   AAMemoryBehavior::StateType &S = getState();
6082 
6083   // First, check the function scope. We take the known information and we avoid
6084   // work if the assumed information implies the current assumed information for
6085   // this attribute. This is a valid for all but byval arguments.
6086   Argument *Arg = IRP.getAssociatedArgument();
6087   AAMemoryBehavior::base_t FnMemAssumedState =
6088       AAMemoryBehavior::StateType::getWorstState();
6089   if (!Arg || !Arg->hasByValAttr()) {
6090     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
6091         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6092     FnMemAssumedState = FnMemAA.getAssumed();
6093     S.addKnownBits(FnMemAA.getKnown());
6094     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6095       return ChangeStatus::UNCHANGED;
6096   }
6097 
6098   // Make sure the value is not captured (except through "return"), if
6099   // it is, any information derived would be irrelevant anyway as we cannot
6100   // check the potential aliases introduced by the capture. However, no need
6101   // to fall back to anythign less optimistic than the function state.
6102   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6103       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6104   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6105     S.intersectAssumedBits(FnMemAssumedState);
6106     return ChangeStatus::CHANGED;
6107   }
6108 
6109   // The current assumed state used to determine a change.
6110   auto AssumedState = S.getAssumed();
6111 
6112   // Liveness information to exclude dead users.
6113   // TODO: Take the FnPos once we have call site specific liveness information.
6114   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6115       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6116       /* TrackDependence */ false);
6117 
6118   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6119   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6120     const Use *U = Uses[i];
6121     Instruction *UserI = cast<Instruction>(U->getUser());
6122     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6123                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6124                       << "]\n");
6125     if (A.isAssumedDead(*U, this, &LivenessAA))
6126       continue;
6127 
6128     // Check if the users of UserI should also be visited.
6129     if (followUsersOfUseIn(A, U, UserI))
6130       for (const Use &UserIUse : UserI->uses())
6131         Uses.insert(&UserIUse);
6132 
6133     // If UserI might touch memory we analyze the use in detail.
6134     if (UserI->mayReadOrWriteMemory())
6135       analyzeUseIn(A, U, UserI);
6136   }
6137 
6138   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6139                                         : ChangeStatus::UNCHANGED;
6140 }
6141 
6142 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6143                                                   const Instruction *UserI) {
6144   // The loaded value is unrelated to the pointer argument, no need to
6145   // follow the users of the load.
6146   if (isa<LoadInst>(UserI))
6147     return false;
6148 
6149   // By default we follow all uses assuming UserI might leak information on U,
6150   // we have special handling for call sites operands though.
6151   ImmutableCallSite ICS(UserI);
6152   if (!ICS || !ICS.isArgOperand(U))
6153     return true;
6154 
6155   // If the use is a call argument known not to be captured, the users of
6156   // the call do not need to be visited because they have to be unrelated to
6157   // the input. Note that this check is not trivial even though we disallow
6158   // general capturing of the underlying argument. The reason is that the
6159   // call might the argument "through return", which we allow and for which we
6160   // need to check call users.
6161   if (U->get()->getType()->isPointerTy()) {
6162     unsigned ArgNo = ICS.getArgumentNo(U);
6163     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6164         *this, IRPosition::callsite_argument(ICS, ArgNo),
6165         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6166     return !ArgNoCaptureAA.isAssumedNoCapture();
6167   }
6168 
6169   return true;
6170 }
6171 
6172 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6173                                             const Instruction *UserI) {
6174   assert(UserI->mayReadOrWriteMemory());
6175 
6176   switch (UserI->getOpcode()) {
6177   default:
6178     // TODO: Handle all atomics and other side-effect operations we know of.
6179     break;
6180   case Instruction::Load:
6181     // Loads cause the NO_READS property to disappear.
6182     removeAssumedBits(NO_READS);
6183     return;
6184 
6185   case Instruction::Store:
6186     // Stores cause the NO_WRITES property to disappear if the use is the
6187     // pointer operand. Note that we do assume that capturing was taken care of
6188     // somewhere else.
6189     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6190       removeAssumedBits(NO_WRITES);
6191     return;
6192 
6193   case Instruction::Call:
6194   case Instruction::CallBr:
6195   case Instruction::Invoke: {
6196     // For call sites we look at the argument memory behavior attribute (this
6197     // could be recursive!) in order to restrict our own state.
6198     ImmutableCallSite ICS(UserI);
6199 
6200     // Give up on operand bundles.
6201     if (ICS.isBundleOperand(U)) {
6202       indicatePessimisticFixpoint();
6203       return;
6204     }
6205 
6206     // Calling a function does read the function pointer, maybe write it if the
6207     // function is self-modifying.
6208     if (ICS.isCallee(U)) {
6209       removeAssumedBits(NO_READS);
6210       break;
6211     }
6212 
6213     // Adjust the possible access behavior based on the information on the
6214     // argument.
6215     IRPosition Pos;
6216     if (U->get()->getType()->isPointerTy())
6217       Pos = IRPosition::callsite_argument(ICS, ICS.getArgumentNo(U));
6218     else
6219       Pos = IRPosition::callsite_function(ICS);
6220     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6221         *this, Pos,
6222         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6223     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6224     // and at least "known".
6225     intersectAssumedBits(MemBehaviorAA.getAssumed());
6226     return;
6227   }
6228   };
6229 
6230   // Generally, look at the "may-properties" and adjust the assumed state if we
6231   // did not trigger special handling before.
6232   if (UserI->mayReadFromMemory())
6233     removeAssumedBits(NO_READS);
6234   if (UserI->mayWriteToMemory())
6235     removeAssumedBits(NO_WRITES);
6236 }
6237 
6238 } // namespace
6239 
6240 /// -------------------- Memory Locations Attributes ---------------------------
6241 /// Includes read-none, argmemonly, inaccessiblememonly,
6242 /// inaccessiblememorargmemonly
6243 /// ----------------------------------------------------------------------------
6244 
6245 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6246     AAMemoryLocation::MemoryLocationsKind MLK) {
6247   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6248     return "all memory";
6249   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6250     return "no memory";
6251   std::string S = "memory:";
6252   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6253     S += "stack,";
6254   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6255     S += "constant,";
6256   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6257     S += "internal global,";
6258   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6259     S += "external global,";
6260   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6261     S += "argument,";
6262   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6263     S += "inaccessible,";
6264   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6265     S += "malloced,";
6266   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6267     S += "unknown,";
6268   S.pop_back();
6269   return S;
6270 }
6271 
6272 namespace {
6273 
6274 struct AAMemoryLocationImpl : public AAMemoryLocation {
6275 
6276   AAMemoryLocationImpl(const IRPosition &IRP) : AAMemoryLocation(IRP) {}
6277 
6278   /// See AbstractAttribute::initialize(...).
6279   void initialize(Attributor &A) override {
6280     intersectAssumedBits(BEST_STATE);
6281     getKnownStateFromValue(getIRPosition(), getState());
6282     IRAttribute::initialize(A);
6283   }
6284 
6285   /// Return the memory behavior information encoded in the IR for \p IRP.
6286   static void getKnownStateFromValue(const IRPosition &IRP,
6287                                      BitIntegerState &State,
6288                                      bool IgnoreSubsumingPositions = false) {
6289     SmallVector<Attribute, 2> Attrs;
6290     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6291     for (const Attribute &Attr : Attrs) {
6292       switch (Attr.getKindAsEnum()) {
6293       case Attribute::ReadNone:
6294         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6295         break;
6296       case Attribute::InaccessibleMemOnly:
6297         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6298         break;
6299       case Attribute::ArgMemOnly:
6300         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6301         break;
6302       case Attribute::InaccessibleMemOrArgMemOnly:
6303         State.addKnownBits(
6304             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6305         break;
6306       default:
6307         llvm_unreachable("Unexpected attribute!");
6308       }
6309     }
6310   }
6311 
6312   /// See AbstractAttribute::getDeducedAttributes(...).
6313   void getDeducedAttributes(LLVMContext &Ctx,
6314                             SmallVectorImpl<Attribute> &Attrs) const override {
6315     assert(Attrs.size() == 0);
6316     if (isAssumedReadNone()) {
6317       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6318     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6319       if (isAssumedInaccessibleMemOnly())
6320         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6321       else if (isAssumedArgMemOnly())
6322         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6323       else if (isAssumedInaccessibleOrArgMemOnly())
6324         Attrs.push_back(
6325             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6326     }
6327     assert(Attrs.size() <= 1);
6328   }
6329 
6330   /// See AbstractAttribute::manifest(...).
6331   ChangeStatus manifest(Attributor &A) override {
6332     const IRPosition &IRP = getIRPosition();
6333 
6334     // Check if we would improve the existing attributes first.
6335     SmallVector<Attribute, 4> DeducedAttrs;
6336     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6337     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6338           return IRP.hasAttr(Attr.getKindAsEnum(),
6339                              /* IgnoreSubsumingPositions */ true);
6340         }))
6341       return ChangeStatus::UNCHANGED;
6342 
6343     // Clear existing attributes.
6344     IRP.removeAttrs(AttrKinds);
6345     if (isAssumedReadNone())
6346       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6347 
6348     // Use the generic manifest method.
6349     return IRAttribute::manifest(A);
6350   }
6351 
6352   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6353   bool checkForAllAccessesToMemoryKind(
6354       const function_ref<bool(const Instruction *, const Value *, AccessKind,
6355                               MemoryLocationsKind)> &Pred,
6356       MemoryLocationsKind RequestedMLK) const override {
6357     if (!isValidState())
6358       return false;
6359 
6360     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6361     if (AssumedMLK == NO_LOCATIONS)
6362       return true;
6363 
6364     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6365       if (CurMLK & RequestedMLK)
6366         continue;
6367 
6368       const auto &Accesses = AccessKindAccessesMap.lookup(CurMLK);
6369       for (const AccessInfo &AI : Accesses) {
6370         if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6371           return false;
6372       }
6373     }
6374 
6375     return true;
6376   }
6377 
6378   ChangeStatus indicatePessimisticFixpoint() override {
6379     // If we give up and indicate a pessimistic fixpoint this instruction will
6380     // become an access for all potential access kinds:
6381     // TODO: Add pointers for argmemonly and globals to improve the results of
6382     //       checkForAllAccessesToMemoryKind.
6383     bool Changed = false;
6384     MemoryLocationsKind KnownMLK = getKnown();
6385     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6386     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6387       if (!(CurMLK & KnownMLK))
6388         updateStateAndAccessesMap(getState(), AccessKindAccessesMap, CurMLK, I,
6389                                   nullptr, Changed);
6390     return AAMemoryLocation::indicatePessimisticFixpoint();
6391   }
6392 
6393 protected:
6394   /// Helper struct to tie together an instruction that has a read or write
6395   /// effect with the pointer it accesses (if any).
6396   struct AccessInfo {
6397 
6398     /// The instruction that caused the access.
6399     const Instruction *I;
6400 
6401     /// The base pointer that is accessed, or null if unknown.
6402     const Value *Ptr;
6403 
6404     /// The kind of access (read/write/read+write).
6405     AccessKind Kind;
6406 
6407     bool operator==(const AccessInfo &RHS) const {
6408       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6409     }
6410     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6411       if (LHS.I != RHS.I)
6412         return LHS.I < RHS.I;
6413       if (LHS.Ptr != RHS.Ptr)
6414         return LHS.Ptr < RHS.Ptr;
6415       if (LHS.Kind != RHS.Kind)
6416         return LHS.Kind < RHS.Kind;
6417       return false;
6418     }
6419   };
6420 
6421   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6422   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6423   using AccessKindAccessesMapTy =
6424       DenseMap<unsigned, SmallSet<AccessInfo, 8, AccessInfo>>;
6425   AccessKindAccessesMapTy AccessKindAccessesMap;
6426 
6427   /// Return the kind(s) of location that may be accessed by \p V.
6428   AAMemoryLocation::MemoryLocationsKind
6429   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6430 
6431   /// Update the state \p State and the AccessKindAccessesMap given that \p I is
6432   /// an access to a \p MLK memory location with the access pointer \p Ptr.
6433   static void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6434                                         AccessKindAccessesMapTy &AccessMap,
6435                                         MemoryLocationsKind MLK,
6436                                         const Instruction *I, const Value *Ptr,
6437                                         bool &Changed) {
6438     // TODO: The kind should be determined at the call sites based on the
6439     // information we have there.
6440     AccessKind Kind = READ_WRITE;
6441     if (I) {
6442       Kind = I->mayReadFromMemory() ? READ : NONE;
6443       Kind = AccessKind(Kind | (I->mayWriteToMemory() ? WRITE : NONE));
6444     }
6445 
6446     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6447     Changed |= AccessMap[MLK].insert(AccessInfo{I, Ptr, Kind}).second;
6448     State.removeAssumedBits(MLK);
6449   }
6450 
6451   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6452   /// arguments, and update the state and access map accordingly.
6453   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6454                           AAMemoryLocation::StateType &State, bool &Changed);
6455 
6456   /// The set of IR attributes AAMemoryLocation deals with.
6457   static const Attribute::AttrKind AttrKinds[4];
6458 };
6459 
6460 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6461     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6462     Attribute::InaccessibleMemOrArgMemOnly};
6463 
6464 void AAMemoryLocationImpl::categorizePtrValue(
6465     Attributor &A, const Instruction &I, const Value &Ptr,
6466     AAMemoryLocation::StateType &State, bool &Changed) {
6467   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6468                     << Ptr << " ["
6469                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6470 
6471   auto StripGEPCB = [](Value *V) -> Value * {
6472     auto *GEP = dyn_cast<GEPOperator>(V);
6473     while (GEP) {
6474       V = GEP->getPointerOperand();
6475       GEP = dyn_cast<GEPOperator>(V);
6476     }
6477     return V;
6478   };
6479 
6480   auto VisitValueCB = [&](Value &V, AAMemoryLocation::StateType &T,
6481                           bool Stripped) -> bool {
6482     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6483     if (isa<UndefValue>(V))
6484       return true;
6485     if (auto *Arg = dyn_cast<Argument>(&V)) {
6486       if (Arg->hasByValAttr())
6487         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I,
6488                                   &V, Changed);
6489       else
6490         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_ARGUMENT_MEM, &I,
6491                                   &V, Changed);
6492       return true;
6493     }
6494     if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6495       if (GV->hasLocalLinkage())
6496         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6497                                   NO_GLOBAL_INTERNAL_MEM, &I, &V, Changed);
6498       else
6499         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6500                                   NO_GLOBAL_EXTERNAL_MEM, &I, &V, Changed);
6501       return true;
6502     }
6503     if (isa<AllocaInst>(V)) {
6504       updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I, &V,
6505                                 Changed);
6506       return true;
6507     }
6508     if (ImmutableCallSite ICS = ImmutableCallSite(&V)) {
6509       const auto &NoAliasAA =
6510           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
6511       if (NoAliasAA.isAssumedNoAlias()) {
6512         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_MALLOCED_MEM, &I,
6513                                   &V, Changed);
6514         return true;
6515       }
6516     }
6517 
6518     updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_UNKOWN_MEM, &I, &V,
6519                               Changed);
6520     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6521                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6522                       << "\n");
6523     return true;
6524   };
6525 
6526   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6527           A, IRPosition::value(Ptr), *this, State, VisitValueCB,
6528           /* MaxValues */ 32, StripGEPCB)) {
6529     LLVM_DEBUG(
6530         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6531     updateStateAndAccessesMap(State, AccessKindAccessesMap, NO_UNKOWN_MEM, &I,
6532                               nullptr, Changed);
6533   } else {
6534     LLVM_DEBUG(
6535         dbgs()
6536         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6537         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6538   }
6539 }
6540 
6541 AAMemoryLocation::MemoryLocationsKind
6542 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6543                                                   bool &Changed) {
6544   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6545                     << I << "\n");
6546 
6547   AAMemoryLocation::StateType AccessedLocs;
6548   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6549 
6550   if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
6551 
6552     // First check if we assume any memory is access is visible.
6553     const auto &ICSMemLocationAA =
6554         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(ICS));
6555     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6556                       << " [" << ICSMemLocationAA << "]\n");
6557 
6558     if (ICSMemLocationAA.isAssumedReadNone())
6559       return NO_LOCATIONS;
6560 
6561     if (ICSMemLocationAA.isAssumedInaccessibleMemOnly()) {
6562       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap,
6563                                 NO_INACCESSIBLE_MEM, &I, nullptr, Changed);
6564       return AccessedLocs.getAssumed();
6565     }
6566 
6567     uint32_t ICSAssumedNotAccessedLocs =
6568         ICSMemLocationAA.getAssumedNotAccessedLocation();
6569 
6570     // Set the argmemonly and global bit as we handle them separately below.
6571     uint32_t ICSAssumedNotAccessedLocsNoArgMem =
6572         ICSAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6573 
6574     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6575       if (ICSAssumedNotAccessedLocsNoArgMem & CurMLK)
6576         continue;
6577       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, CurMLK, &I,
6578                                 nullptr, Changed);
6579     }
6580 
6581     // Now handle global memory if it might be accessed.
6582     bool HasGlobalAccesses = !(ICSAssumedNotAccessedLocs & NO_GLOBAL_MEM);
6583     if (HasGlobalAccesses) {
6584       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6585                             AccessKind Kind, MemoryLocationsKind MLK) {
6586         updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, MLK, &I,
6587                                   Ptr, Changed);
6588         return true;
6589       };
6590       if (!ICSMemLocationAA.checkForAllAccessesToMemoryKind(
6591               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6592         return AccessedLocs.getWorstState();
6593     }
6594 
6595     LLVM_DEBUG(
6596         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6597                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6598 
6599     // Now handle argument memory if it might be accessed.
6600     bool HasArgAccesses = !(ICSAssumedNotAccessedLocs & NO_ARGUMENT_MEM);
6601     if (HasArgAccesses) {
6602       for (unsigned ArgNo = 0, e = ICS.getNumArgOperands(); ArgNo < e;
6603            ++ArgNo) {
6604 
6605         // Skip non-pointer arguments.
6606         const Value *ArgOp = ICS.getArgOperand(ArgNo);
6607         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6608           continue;
6609 
6610         // Skip readnone arguments.
6611         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(ICS, ArgNo);
6612         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6613             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6614 
6615         if (ArgOpMemLocationAA.isAssumedReadNone())
6616           continue;
6617 
6618         // Categorize potentially accessed pointer arguments as if there was an
6619         // access instruction with them as pointer.
6620         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6621       }
6622     }
6623 
6624     LLVM_DEBUG(
6625         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6626                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6627 
6628     return AccessedLocs.getAssumed();
6629   }
6630 
6631   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6632     LLVM_DEBUG(
6633         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6634                << I << " [" << *Ptr << "]\n");
6635     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6636     return AccessedLocs.getAssumed();
6637   }
6638 
6639   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6640                     << I << "\n");
6641   updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, NO_UNKOWN_MEM,
6642                             &I, nullptr, Changed);
6643   return AccessedLocs.getAssumed();
6644 }
6645 
6646 /// An AA to represent the memory behavior function attributes.
6647 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6648   AAMemoryLocationFunction(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6649 
6650   /// See AbstractAttribute::updateImpl(Attributor &A).
6651   virtual ChangeStatus updateImpl(Attributor &A) override {
6652 
6653     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6654         *this, getIRPosition(), /* TrackDependence */ false);
6655     if (MemBehaviorAA.isAssumedReadNone()) {
6656       if (MemBehaviorAA.isKnownReadNone())
6657         return indicateOptimisticFixpoint();
6658       assert(isAssumedReadNone() &&
6659              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6660       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6661       return ChangeStatus::UNCHANGED;
6662     }
6663 
6664     // The current assumed state used to determine a change.
6665     auto AssumedState = getAssumed();
6666     bool Changed = false;
6667 
6668     auto CheckRWInst = [&](Instruction &I) {
6669       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6670       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6671                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6672       removeAssumedBits(inverseLocation(MLK, false, false));
6673       return true;
6674     };
6675 
6676     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6677       return indicatePessimisticFixpoint();
6678 
6679     Changed |= AssumedState != getAssumed();
6680     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6681   }
6682 
6683   /// See AbstractAttribute::trackStatistics()
6684   void trackStatistics() const override {
6685     if (isAssumedReadNone())
6686       STATS_DECLTRACK_FN_ATTR(readnone)
6687     else if (isAssumedArgMemOnly())
6688       STATS_DECLTRACK_FN_ATTR(argmemonly)
6689     else if (isAssumedInaccessibleMemOnly())
6690       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6691     else if (isAssumedInaccessibleOrArgMemOnly())
6692       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6693   }
6694 };
6695 
6696 /// AAMemoryLocation attribute for call sites.
6697 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6698   AAMemoryLocationCallSite(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6699 
6700   /// See AbstractAttribute::initialize(...).
6701   void initialize(Attributor &A) override {
6702     AAMemoryLocationImpl::initialize(A);
6703     Function *F = getAssociatedFunction();
6704     if (!F || !F->hasExactDefinition())
6705       indicatePessimisticFixpoint();
6706   }
6707 
6708   /// See AbstractAttribute::updateImpl(...).
6709   ChangeStatus updateImpl(Attributor &A) override {
6710     // TODO: Once we have call site specific value information we can provide
6711     //       call site specific liveness liveness information and then it makes
6712     //       sense to specialize attributes for call sites arguments instead of
6713     //       redirecting requests to the callee argument.
6714     Function *F = getAssociatedFunction();
6715     const IRPosition &FnPos = IRPosition::function(*F);
6716     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6717     bool Changed = false;
6718     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6719                           AccessKind Kind, MemoryLocationsKind MLK) {
6720       updateStateAndAccessesMap(getState(), AccessKindAccessesMap, MLK, I, Ptr,
6721                                 Changed);
6722       return true;
6723     };
6724     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6725       return indicatePessimisticFixpoint();
6726     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6727   }
6728 
6729   /// See AbstractAttribute::trackStatistics()
6730   void trackStatistics() const override {
6731     if (isAssumedReadNone())
6732       STATS_DECLTRACK_CS_ATTR(readnone)
6733   }
6734 };
6735 
6736 /// ------------------ Value Constant Range Attribute -------------------------
6737 
6738 struct AAValueConstantRangeImpl : AAValueConstantRange {
6739   using StateType = IntegerRangeState;
6740   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
6741 
6742   /// See AbstractAttribute::getAsStr().
6743   const std::string getAsStr() const override {
6744     std::string Str;
6745     llvm::raw_string_ostream OS(Str);
6746     OS << "range(" << getBitWidth() << ")<";
6747     getKnown().print(OS);
6748     OS << " / ";
6749     getAssumed().print(OS);
6750     OS << ">";
6751     return OS.str();
6752   }
6753 
6754   /// Helper function to get a SCEV expr for the associated value at program
6755   /// point \p I.
6756   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6757     if (!getAnchorScope())
6758       return nullptr;
6759 
6760     ScalarEvolution *SE =
6761         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6762             *getAnchorScope());
6763 
6764     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6765         *getAnchorScope());
6766 
6767     if (!SE || !LI)
6768       return nullptr;
6769 
6770     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6771     if (!I)
6772       return S;
6773 
6774     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6775   }
6776 
6777   /// Helper function to get a range from SCEV for the associated value at
6778   /// program point \p I.
6779   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6780                                          const Instruction *I = nullptr) const {
6781     if (!getAnchorScope())
6782       return getWorstState(getBitWidth());
6783 
6784     ScalarEvolution *SE =
6785         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6786             *getAnchorScope());
6787 
6788     const SCEV *S = getSCEV(A, I);
6789     if (!SE || !S)
6790       return getWorstState(getBitWidth());
6791 
6792     return SE->getUnsignedRange(S);
6793   }
6794 
6795   /// Helper function to get a range from LVI for the associated value at
6796   /// program point \p I.
6797   ConstantRange
6798   getConstantRangeFromLVI(Attributor &A,
6799                           const Instruction *CtxI = nullptr) const {
6800     if (!getAnchorScope())
6801       return getWorstState(getBitWidth());
6802 
6803     LazyValueInfo *LVI =
6804         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6805             *getAnchorScope());
6806 
6807     if (!LVI || !CtxI)
6808       return getWorstState(getBitWidth());
6809     return LVI->getConstantRange(&getAssociatedValue(),
6810                                  const_cast<BasicBlock *>(CtxI->getParent()),
6811                                  const_cast<Instruction *>(CtxI));
6812   }
6813 
6814   /// See AAValueConstantRange::getKnownConstantRange(..).
6815   ConstantRange
6816   getKnownConstantRange(Attributor &A,
6817                         const Instruction *CtxI = nullptr) const override {
6818     if (!CtxI || CtxI == getCtxI())
6819       return getKnown();
6820 
6821     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6822     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6823     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6824   }
6825 
6826   /// See AAValueConstantRange::getAssumedConstantRange(..).
6827   ConstantRange
6828   getAssumedConstantRange(Attributor &A,
6829                           const Instruction *CtxI = nullptr) const override {
6830     // TODO: Make SCEV use Attributor assumption.
6831     //       We may be able to bound a variable range via assumptions in
6832     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6833     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6834 
6835     if (!CtxI || CtxI == getCtxI())
6836       return getAssumed();
6837 
6838     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6839     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6840     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6841   }
6842 
6843   /// See AbstractAttribute::initialize(..).
6844   void initialize(Attributor &A) override {
6845     // Intersect a range given by SCEV.
6846     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6847 
6848     // Intersect a range given by LVI.
6849     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6850   }
6851 
6852   /// Helper function to create MDNode for range metadata.
6853   static MDNode *
6854   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6855                             const ConstantRange &AssumedConstantRange) {
6856     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6857                                   Ty, AssumedConstantRange.getLower())),
6858                               ConstantAsMetadata::get(ConstantInt::get(
6859                                   Ty, AssumedConstantRange.getUpper()))};
6860     return MDNode::get(Ctx, LowAndHigh);
6861   }
6862 
6863   /// Return true if \p Assumed is included in \p KnownRanges.
6864   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6865 
6866     if (Assumed.isFullSet())
6867       return false;
6868 
6869     if (!KnownRanges)
6870       return true;
6871 
6872     // If multiple ranges are annotated in IR, we give up to annotate assumed
6873     // range for now.
6874 
6875     // TODO:  If there exists a known range which containts assumed range, we
6876     // can say assumed range is better.
6877     if (KnownRanges->getNumOperands() > 2)
6878       return false;
6879 
6880     ConstantInt *Lower =
6881         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6882     ConstantInt *Upper =
6883         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6884 
6885     ConstantRange Known(Lower->getValue(), Upper->getValue());
6886     return Known.contains(Assumed) && Known != Assumed;
6887   }
6888 
6889   /// Helper function to set range metadata.
6890   static bool
6891   setRangeMetadataIfisBetterRange(Instruction *I,
6892                                   const ConstantRange &AssumedConstantRange) {
6893     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6894     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6895       if (!AssumedConstantRange.isEmptySet()) {
6896         I->setMetadata(LLVMContext::MD_range,
6897                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6898                                                  AssumedConstantRange));
6899         return true;
6900       }
6901     }
6902     return false;
6903   }
6904 
6905   /// See AbstractAttribute::manifest()
6906   ChangeStatus manifest(Attributor &A) override {
6907     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6908     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6909     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6910 
6911     auto &V = getAssociatedValue();
6912     if (!AssumedConstantRange.isEmptySet() &&
6913         !AssumedConstantRange.isSingleElement()) {
6914       if (Instruction *I = dyn_cast<Instruction>(&V))
6915         if (isa<CallInst>(I) || isa<LoadInst>(I))
6916           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6917             Changed = ChangeStatus::CHANGED;
6918     }
6919 
6920     return Changed;
6921   }
6922 };
6923 
6924 struct AAValueConstantRangeArgument final
6925     : AAArgumentFromCallSiteArguments<
6926           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6927   AAValueConstantRangeArgument(const IRPosition &IRP)
6928       : AAArgumentFromCallSiteArguments<
6929             AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>(
6930             IRP) {}
6931 
6932   /// See AbstractAttribute::trackStatistics()
6933   void trackStatistics() const override {
6934     STATS_DECLTRACK_ARG_ATTR(value_range)
6935   }
6936 };
6937 
6938 struct AAValueConstantRangeReturned
6939     : AAReturnedFromReturnedValues<AAValueConstantRange,
6940                                    AAValueConstantRangeImpl> {
6941   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6942                                             AAValueConstantRangeImpl>;
6943   AAValueConstantRangeReturned(const IRPosition &IRP) : Base(IRP) {}
6944 
6945   /// See AbstractAttribute::initialize(...).
6946   void initialize(Attributor &A) override {}
6947 
6948   /// See AbstractAttribute::trackStatistics()
6949   void trackStatistics() const override {
6950     STATS_DECLTRACK_FNRET_ATTR(value_range)
6951   }
6952 };
6953 
6954 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6955   AAValueConstantRangeFloating(const IRPosition &IRP)
6956       : AAValueConstantRangeImpl(IRP) {}
6957 
6958   /// See AbstractAttribute::initialize(...).
6959   void initialize(Attributor &A) override {
6960     AAValueConstantRangeImpl::initialize(A);
6961     Value &V = getAssociatedValue();
6962 
6963     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6964       unionAssumed(ConstantRange(C->getValue()));
6965       indicateOptimisticFixpoint();
6966       return;
6967     }
6968 
6969     if (isa<UndefValue>(&V)) {
6970       // Collapse the undef state to 0.
6971       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6972       indicateOptimisticFixpoint();
6973       return;
6974     }
6975 
6976     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6977       return;
6978     // If it is a load instruction with range metadata, use it.
6979     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6980       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6981         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6982         return;
6983       }
6984 
6985     // We can work with PHI and select instruction as we traverse their operands
6986     // during update.
6987     if (isa<SelectInst>(V) || isa<PHINode>(V))
6988       return;
6989 
6990     // Otherwise we give up.
6991     indicatePessimisticFixpoint();
6992 
6993     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6994                       << getAssociatedValue() << "\n");
6995   }
6996 
6997   bool calculateBinaryOperator(
6998       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6999       Instruction *CtxI,
7000       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7001     Value *LHS = BinOp->getOperand(0);
7002     Value *RHS = BinOp->getOperand(1);
7003     // TODO: Allow non integers as well.
7004     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7005       return false;
7006 
7007     auto &LHSAA =
7008         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7009     QuerriedAAs.push_back(&LHSAA);
7010     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7011 
7012     auto &RHSAA =
7013         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7014     QuerriedAAs.push_back(&RHSAA);
7015     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7016 
7017     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7018 
7019     T.unionAssumed(AssumedRange);
7020 
7021     // TODO: Track a known state too.
7022 
7023     return T.isValidState();
7024   }
7025 
7026   bool calculateCastInst(
7027       Attributor &A, CastInst *CastI, IntegerRangeState &T, Instruction *CtxI,
7028       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7029     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7030     // TODO: Allow non integers as well.
7031     Value &OpV = *CastI->getOperand(0);
7032     if (!OpV.getType()->isIntegerTy())
7033       return false;
7034 
7035     auto &OpAA =
7036         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
7037     QuerriedAAs.push_back(&OpAA);
7038     T.unionAssumed(
7039         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7040     return T.isValidState();
7041   }
7042 
7043   bool
7044   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7045                    Instruction *CtxI,
7046                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7047     Value *LHS = CmpI->getOperand(0);
7048     Value *RHS = CmpI->getOperand(1);
7049     // TODO: Allow non integers as well.
7050     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7051       return false;
7052 
7053     auto &LHSAA =
7054         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7055     QuerriedAAs.push_back(&LHSAA);
7056     auto &RHSAA =
7057         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7058     QuerriedAAs.push_back(&RHSAA);
7059 
7060     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7061     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7062 
7063     // If one of them is empty set, we can't decide.
7064     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7065       return true;
7066 
7067     bool MustTrue = false, MustFalse = false;
7068 
7069     auto AllowedRegion =
7070         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7071 
7072     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7073         CmpI->getPredicate(), RHSAARange);
7074 
7075     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7076       MustFalse = true;
7077 
7078     if (SatisfyingRegion.contains(LHSAARange))
7079       MustTrue = true;
7080 
7081     assert((!MustTrue || !MustFalse) &&
7082            "Either MustTrue or MustFalse should be false!");
7083 
7084     if (MustTrue)
7085       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7086     else if (MustFalse)
7087       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7088     else
7089       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7090 
7091     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7092                       << " " << RHSAA << "\n");
7093 
7094     // TODO: Track a known state too.
7095     return T.isValidState();
7096   }
7097 
7098   /// See AbstractAttribute::updateImpl(...).
7099   ChangeStatus updateImpl(Attributor &A) override {
7100     Instruction *CtxI = getCtxI();
7101     auto VisitValueCB = [&](Value &V, IntegerRangeState &T,
7102                             bool Stripped) -> bool {
7103       Instruction *I = dyn_cast<Instruction>(&V);
7104       if (!I) {
7105 
7106         // If the value is not instruction, we query AA to Attributor.
7107         const auto &AA =
7108             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
7109 
7110         // Clamp operator is not used to utilize a program point CtxI.
7111         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7112 
7113         return T.isValidState();
7114       }
7115 
7116       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7117       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7118         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7119           return false;
7120       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7121         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7122           return false;
7123       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7124         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7125           return false;
7126       } else {
7127         // Give up with other instructions.
7128         // TODO: Add other instructions
7129 
7130         T.indicatePessimisticFixpoint();
7131         return false;
7132       }
7133 
7134       // Catch circular reasoning in a pessimistic way for now.
7135       // TODO: Check how the range evolves and if we stripped anything, see also
7136       //       AADereferenceable or AAAlign for similar situations.
7137       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7138         if (QueriedAA != this)
7139           continue;
7140         // If we are in a stady state we do not need to worry.
7141         if (T.getAssumed() == getState().getAssumed())
7142           continue;
7143         T.indicatePessimisticFixpoint();
7144       }
7145 
7146       return T.isValidState();
7147     };
7148 
7149     IntegerRangeState T(getBitWidth());
7150 
7151     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7152             A, getIRPosition(), *this, T, VisitValueCB))
7153       return indicatePessimisticFixpoint();
7154 
7155     return clampStateAndIndicateChange(getState(), T);
7156   }
7157 
7158   /// See AbstractAttribute::trackStatistics()
7159   void trackStatistics() const override {
7160     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7161   }
7162 };
7163 
7164 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7165   AAValueConstantRangeFunction(const IRPosition &IRP)
7166       : AAValueConstantRangeImpl(IRP) {}
7167 
7168   /// See AbstractAttribute::initialize(...).
7169   ChangeStatus updateImpl(Attributor &A) override {
7170     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7171                      "not be called");
7172   }
7173 
7174   /// See AbstractAttribute::trackStatistics()
7175   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7176 };
7177 
7178 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7179   AAValueConstantRangeCallSite(const IRPosition &IRP)
7180       : AAValueConstantRangeFunction(IRP) {}
7181 
7182   /// See AbstractAttribute::trackStatistics()
7183   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7184 };
7185 
7186 struct AAValueConstantRangeCallSiteReturned
7187     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7188                                      AAValueConstantRangeImpl> {
7189   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
7190       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7191                                        AAValueConstantRangeImpl>(IRP) {}
7192 
7193   /// See AbstractAttribute::initialize(...).
7194   void initialize(Attributor &A) override {
7195     // If it is a load instruction with range metadata, use the metadata.
7196     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7197       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7198         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7199 
7200     AAValueConstantRangeImpl::initialize(A);
7201   }
7202 
7203   /// See AbstractAttribute::trackStatistics()
7204   void trackStatistics() const override {
7205     STATS_DECLTRACK_CSRET_ATTR(value_range)
7206   }
7207 };
7208 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7209   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
7210       : AAValueConstantRangeFloating(IRP) {}
7211 
7212   /// See AbstractAttribute::trackStatistics()
7213   void trackStatistics() const override {
7214     STATS_DECLTRACK_CSARG_ATTR(value_range)
7215   }
7216 };
7217 
7218 } // namespace
7219 /// ----------------------------------------------------------------------------
7220 ///                               Attributor
7221 /// ----------------------------------------------------------------------------
7222 
7223 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
7224                                const AAIsDead *FnLivenessAA,
7225                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7226   const IRPosition &IRP = AA.getIRPosition();
7227   if (!Functions.count(IRP.getAnchorScope()))
7228     return false;
7229   return isAssumedDead(IRP, &AA, FnLivenessAA, CheckBBLivenessOnly, DepClass);
7230 }
7231 
7232 bool Attributor::isAssumedDead(const Use &U,
7233                                const AbstractAttribute *QueryingAA,
7234                                const AAIsDead *FnLivenessAA,
7235                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7236   Instruction *UserI = dyn_cast<Instruction>(U.getUser());
7237   if (!UserI)
7238     return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
7239                          CheckBBLivenessOnly, DepClass);
7240 
7241   if (CallSite CS = CallSite(UserI)) {
7242     // For call site argument uses we can check if the argument is
7243     // unused/dead.
7244     if (CS.isArgOperand(&U)) {
7245       const IRPosition &CSArgPos =
7246           IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
7247       return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
7248                            CheckBBLivenessOnly, DepClass);
7249     }
7250   } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
7251     const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
7252     return isAssumedDead(RetPos, QueryingAA, FnLivenessAA, CheckBBLivenessOnly,
7253                          DepClass);
7254   } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
7255     BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
7256     return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
7257                          CheckBBLivenessOnly, DepClass);
7258   }
7259 
7260   return isAssumedDead(IRPosition::value(*UserI), QueryingAA, FnLivenessAA,
7261                        CheckBBLivenessOnly, DepClass);
7262 }
7263 
7264 bool Attributor::isAssumedDead(const Instruction &I,
7265                                const AbstractAttribute *QueryingAA,
7266                                const AAIsDead *FnLivenessAA,
7267                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7268   if (!FnLivenessAA)
7269     FnLivenessAA = lookupAAFor<AAIsDead>(IRPosition::function(*I.getFunction()),
7270                                          QueryingAA,
7271                                          /* TrackDependence */ false);
7272 
7273   // If we have a context instruction and a liveness AA we use it.
7274   if (FnLivenessAA &&
7275       FnLivenessAA->getIRPosition().getAnchorScope() == I.getFunction() &&
7276       FnLivenessAA->isAssumedDead(&I)) {
7277     if (QueryingAA)
7278       recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
7279     return true;
7280   }
7281 
7282   if (CheckBBLivenessOnly)
7283     return false;
7284 
7285   const AAIsDead &IsDeadAA = getOrCreateAAFor<AAIsDead>(
7286       IRPosition::value(I), QueryingAA, /* TrackDependence */ false);
7287   // Don't check liveness for AAIsDead.
7288   if (QueryingAA == &IsDeadAA)
7289     return false;
7290 
7291   if (IsDeadAA.isAssumedDead()) {
7292     if (QueryingAA)
7293       recordDependence(IsDeadAA, *QueryingAA, DepClass);
7294     return true;
7295   }
7296 
7297   return false;
7298 }
7299 
7300 bool Attributor::isAssumedDead(const IRPosition &IRP,
7301                                const AbstractAttribute *QueryingAA,
7302                                const AAIsDead *FnLivenessAA,
7303                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
7304   Instruction *CtxI = IRP.getCtxI();
7305   if (CtxI &&
7306       isAssumedDead(*CtxI, QueryingAA, FnLivenessAA,
7307                     /* CheckBBLivenessOnly */ true,
7308                     CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
7309     return true;
7310 
7311   if (CheckBBLivenessOnly)
7312     return false;
7313 
7314   // If we haven't succeeded we query the specific liveness info for the IRP.
7315   const AAIsDead *IsDeadAA;
7316   if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
7317     IsDeadAA = &getOrCreateAAFor<AAIsDead>(
7318         IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
7319         QueryingAA, /* TrackDependence */ false);
7320   else
7321     IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA,
7322                                            /* TrackDependence */ false);
7323   // Don't check liveness for AAIsDead.
7324   if (QueryingAA == IsDeadAA)
7325     return false;
7326 
7327   if (IsDeadAA->isAssumedDead()) {
7328     if (QueryingAA)
7329       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
7330     return true;
7331   }
7332 
7333   return false;
7334 }
7335 
7336 bool Attributor::checkForAllUses(
7337     const function_ref<bool(const Use &, bool &)> &Pred,
7338     const AbstractAttribute &QueryingAA, const Value &V,
7339     DepClassTy LivenessDepClass) {
7340 
7341   // Check the trivial case first as it catches void values.
7342   if (V.use_empty())
7343     return true;
7344 
7345   // If the value is replaced by another one, for now a constant, we do not have
7346   // uses. Note that this requires users of `checkForAllUses` to not recurse but
7347   // instead use the `follow` callback argument to look at transitive users,
7348   // however, that should be clear from the presence of the argument.
7349   bool UsedAssumedInformation = false;
7350   Optional<Constant *> C =
7351       getAssumedConstant(*this, V, QueryingAA, UsedAssumedInformation);
7352   if (C.hasValue() && C.getValue()) {
7353     LLVM_DEBUG(dbgs() << "[Attributor] Value is simplified, uses skipped: " << V
7354                       << " -> " << *C.getValue() << "\n");
7355     return true;
7356   }
7357 
7358   const IRPosition &IRP = QueryingAA.getIRPosition();
7359   SmallVector<const Use *, 16> Worklist;
7360   SmallPtrSet<const Use *, 16> Visited;
7361 
7362   for (const Use &U : V.uses())
7363     Worklist.push_back(&U);
7364 
7365   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
7366                     << " initial uses to check\n");
7367 
7368   const Function *ScopeFn = IRP.getAnchorScope();
7369   const auto *LivenessAA =
7370       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
7371                                     /* TrackDependence */ false)
7372               : nullptr;
7373 
7374   while (!Worklist.empty()) {
7375     const Use *U = Worklist.pop_back_val();
7376     if (!Visited.insert(U).second)
7377       continue;
7378     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << " in "
7379                       << *U->getUser() << "\n");
7380     if (isAssumedDead(*U, &QueryingAA, LivenessAA,
7381                       /* CheckBBLivenessOnly */ false, LivenessDepClass)) {
7382       LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
7383       continue;
7384     }
7385 
7386     bool Follow = false;
7387     if (!Pred(*U, Follow))
7388       return false;
7389     if (!Follow)
7390       continue;
7391     for (const Use &UU : U->getUser()->uses())
7392       Worklist.push_back(&UU);
7393   }
7394 
7395   return true;
7396 }
7397 
7398 bool Attributor::checkForAllCallSites(
7399     const function_ref<bool(AbstractCallSite)> &Pred,
7400     const AbstractAttribute &QueryingAA, bool RequireAllCallSites,
7401     bool &AllCallSitesKnown) {
7402   // We can try to determine information from
7403   // the call sites. However, this is only possible all call sites are known,
7404   // hence the function has internal linkage.
7405   const IRPosition &IRP = QueryingAA.getIRPosition();
7406   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7407   if (!AssociatedFunction) {
7408     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
7409                       << "\n");
7410     AllCallSitesKnown = false;
7411     return false;
7412   }
7413 
7414   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
7415                               &QueryingAA, AllCallSitesKnown);
7416 }
7417 
7418 bool Attributor::checkForAllCallSites(
7419     const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
7420     bool RequireAllCallSites, const AbstractAttribute *QueryingAA,
7421     bool &AllCallSitesKnown) {
7422   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
7423     LLVM_DEBUG(
7424         dbgs()
7425         << "[Attributor] Function " << Fn.getName()
7426         << " has no internal linkage, hence not all call sites are known\n");
7427     AllCallSitesKnown = false;
7428     return false;
7429   }
7430 
7431   // If we do not require all call sites we might not see all.
7432   AllCallSitesKnown = RequireAllCallSites;
7433 
7434   SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
7435   for (unsigned u = 0; u < Uses.size(); ++u) {
7436     const Use &U = *Uses[u];
7437     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << *U << " in "
7438                       << *U.getUser() << "\n");
7439     if (isAssumedDead(U, QueryingAA, nullptr, /* CheckBBLivenessOnly */ true)) {
7440       LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
7441       continue;
7442     }
7443     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
7444       if (CE->isCast() && CE->getType()->isPointerTy() &&
7445           CE->getType()->getPointerElementType()->isFunctionTy()) {
7446         for (const Use &CEU : CE->uses())
7447           Uses.push_back(&CEU);
7448         continue;
7449       }
7450     }
7451 
7452     AbstractCallSite ACS(&U);
7453     if (!ACS) {
7454       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
7455                         << " has non call site use " << *U.get() << " in "
7456                         << *U.getUser() << "\n");
7457       // BlockAddress users are allowed.
7458       if (isa<BlockAddress>(U.getUser()))
7459         continue;
7460       return false;
7461     }
7462 
7463     const Use *EffectiveUse =
7464         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
7465     if (!ACS.isCallee(EffectiveUse)) {
7466       if (!RequireAllCallSites)
7467         continue;
7468       LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
7469                         << " is an invalid use of " << Fn.getName() << "\n");
7470       return false;
7471     }
7472 
7473     // Make sure the arguments that can be matched between the call site and the
7474     // callee argee on their type. It is unlikely they do not and it doesn't
7475     // make sense for all attributes to know/care about this.
7476     assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
7477     unsigned MinArgsParams =
7478         std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
7479     for (unsigned u = 0; u < MinArgsParams; ++u) {
7480       Value *CSArgOp = ACS.getCallArgOperand(u);
7481       if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
7482         LLVM_DEBUG(
7483             dbgs() << "[Attributor] Call site / callee argument type mismatch ["
7484                    << u << "@" << Fn.getName() << ": "
7485                    << *Fn.getArg(u)->getType() << " vs. "
7486                    << *ACS.getCallArgOperand(u)->getType() << "\n");
7487         return false;
7488       }
7489     }
7490 
7491     if (Pred(ACS))
7492       continue;
7493 
7494     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
7495                       << *ACS.getInstruction() << "\n");
7496     return false;
7497   }
7498 
7499   return true;
7500 }
7501 
7502 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
7503     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
7504         &Pred,
7505     const AbstractAttribute &QueryingAA) {
7506 
7507   const IRPosition &IRP = QueryingAA.getIRPosition();
7508   // Since we need to provide return instructions we have to have an exact
7509   // definition.
7510   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7511   if (!AssociatedFunction)
7512     return false;
7513 
7514   // If this is a call site query we use the call site specific return values
7515   // and liveness information.
7516   // TODO: use the function scope once we have call site AAReturnedValues.
7517   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7518   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
7519   if (!AARetVal.getState().isValidState())
7520     return false;
7521 
7522   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
7523 }
7524 
7525 bool Attributor::checkForAllReturnedValues(
7526     const function_ref<bool(Value &)> &Pred,
7527     const AbstractAttribute &QueryingAA) {
7528 
7529   const IRPosition &IRP = QueryingAA.getIRPosition();
7530   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7531   if (!AssociatedFunction)
7532     return false;
7533 
7534   // TODO: use the function scope once we have call site AAReturnedValues.
7535   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7536   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
7537   if (!AARetVal.getState().isValidState())
7538     return false;
7539 
7540   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
7541       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
7542         return Pred(RV);
7543       });
7544 }
7545 
7546 static bool checkForAllInstructionsImpl(
7547     Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
7548     const function_ref<bool(Instruction &)> &Pred,
7549     const AbstractAttribute *QueryingAA, const AAIsDead *LivenessAA,
7550     const ArrayRef<unsigned> &Opcodes, bool CheckBBLivenessOnly = false) {
7551   for (unsigned Opcode : Opcodes) {
7552     for (Instruction *I : OpcodeInstMap[Opcode]) {
7553       // Skip dead instructions.
7554       if (A && A->isAssumedDead(IRPosition::value(*I), QueryingAA, LivenessAA,
7555                                 CheckBBLivenessOnly))
7556         continue;
7557 
7558       if (!Pred(*I))
7559         return false;
7560     }
7561   }
7562   return true;
7563 }
7564 
7565 bool Attributor::checkForAllInstructions(
7566     const llvm::function_ref<bool(Instruction &)> &Pred,
7567     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes,
7568     bool CheckBBLivenessOnly) {
7569 
7570   const IRPosition &IRP = QueryingAA.getIRPosition();
7571   // Since we need to provide instructions we have to have an exact definition.
7572   const Function *AssociatedFunction = IRP.getAssociatedFunction();
7573   if (!AssociatedFunction)
7574     return false;
7575 
7576   // TODO: use the function scope once we have call site AAReturnedValues.
7577   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7578   const auto &LivenessAA =
7579       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
7580 
7581   auto &OpcodeInstMap =
7582       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
7583   if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
7584                                    &LivenessAA, Opcodes, CheckBBLivenessOnly))
7585     return false;
7586 
7587   return true;
7588 }
7589 
7590 bool Attributor::checkForAllReadWriteInstructions(
7591     const llvm::function_ref<bool(Instruction &)> &Pred,
7592     AbstractAttribute &QueryingAA) {
7593 
7594   const Function *AssociatedFunction =
7595       QueryingAA.getIRPosition().getAssociatedFunction();
7596   if (!AssociatedFunction)
7597     return false;
7598 
7599   // TODO: use the function scope once we have call site AAReturnedValues.
7600   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
7601   const auto &LivenessAA =
7602       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
7603 
7604   for (Instruction *I :
7605        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
7606     // Skip dead instructions.
7607     if (isAssumedDead(IRPosition::value(*I), &QueryingAA, &LivenessAA))
7608       continue;
7609 
7610     if (!Pred(*I))
7611       return false;
7612   }
7613 
7614   return true;
7615 }
7616 
7617 ChangeStatus Attributor::run() {
7618   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
7619                     << AllAbstractAttributes.size()
7620                     << " abstract attributes.\n");
7621 
7622   // Now that all abstract attributes are collected and initialized we start
7623   // the abstract analysis.
7624 
7625   unsigned IterationCounter = 1;
7626 
7627   SmallVector<AbstractAttribute *, 64> ChangedAAs;
7628   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
7629   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
7630 
7631   bool RecomputeDependences = false;
7632 
7633   do {
7634     // Remember the size to determine new attributes.
7635     size_t NumAAs = AllAbstractAttributes.size();
7636     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
7637                       << ", Worklist size: " << Worklist.size() << "\n");
7638 
7639     // For invalid AAs we can fix dependent AAs that have a required dependence,
7640     // thereby folding long dependence chains in a single step without the need
7641     // to run updates.
7642     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
7643       AbstractAttribute *InvalidAA = InvalidAAs[u];
7644       auto &QuerriedAAs = QueryMap[InvalidAA];
7645       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
7646                         << QuerriedAAs.RequiredAAs.size() << "/"
7647                         << QuerriedAAs.OptionalAAs.size()
7648                         << " required/optional dependences\n");
7649       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
7650         AbstractState &DOIAAState = DepOnInvalidAA->getState();
7651         DOIAAState.indicatePessimisticFixpoint();
7652         ++NumAttributesFixedDueToRequiredDependences;
7653         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
7654         if (!DOIAAState.isValidState())
7655           InvalidAAs.insert(DepOnInvalidAA);
7656         else
7657           ChangedAAs.push_back(DepOnInvalidAA);
7658       }
7659       if (!RecomputeDependences)
7660         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
7661                         QuerriedAAs.OptionalAAs.end());
7662     }
7663 
7664     // If dependences (=QueryMap) are recomputed we have to look at all abstract
7665     // attributes again, regardless of what changed in the last iteration.
7666     if (RecomputeDependences) {
7667       LLVM_DEBUG(
7668           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
7669       QueryMap.clear();
7670       ChangedAAs.clear();
7671       Worklist.insert(AllAbstractAttributes.begin(),
7672                       AllAbstractAttributes.end());
7673     }
7674 
7675     // Add all abstract attributes that are potentially dependent on one that
7676     // changed to the work list.
7677     for (AbstractAttribute *ChangedAA : ChangedAAs) {
7678       auto &QuerriedAAs = QueryMap[ChangedAA];
7679       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
7680                       QuerriedAAs.OptionalAAs.end());
7681       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
7682                       QuerriedAAs.RequiredAAs.end());
7683     }
7684 
7685     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
7686                       << ", Worklist+Dependent size: " << Worklist.size()
7687                       << "\n");
7688 
7689     // Reset the changed and invalid set.
7690     ChangedAAs.clear();
7691     InvalidAAs.clear();
7692 
7693     // Update all abstract attribute in the work list and record the ones that
7694     // changed.
7695     for (AbstractAttribute *AA : Worklist)
7696       if (!AA->getState().isAtFixpoint() &&
7697           !isAssumedDead(*AA, nullptr, /* CheckBBLivenessOnly */ true)) {
7698         QueriedNonFixAA = false;
7699         if (AA->update(*this) == ChangeStatus::CHANGED) {
7700           ChangedAAs.push_back(AA);
7701           if (!AA->getState().isValidState())
7702             InvalidAAs.insert(AA);
7703         } else if (!QueriedNonFixAA) {
7704           // If the attribute did not query any non-fix information, the state
7705           // will not change and we can indicate that right away.
7706           AA->getState().indicateOptimisticFixpoint();
7707         }
7708       }
7709 
7710     // Check if we recompute the dependences in the next iteration.
7711     RecomputeDependences = (DepRecomputeInterval > 0 &&
7712                             IterationCounter % DepRecomputeInterval == 0);
7713 
7714     // Add attributes to the changed set if they have been created in the last
7715     // iteration.
7716     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
7717                       AllAbstractAttributes.end());
7718 
7719     // Reset the work list and repopulate with the changed abstract attributes.
7720     // Note that dependent ones are added above.
7721     Worklist.clear();
7722     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
7723 
7724   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
7725                                  VerifyMaxFixpointIterations));
7726 
7727   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
7728                     << IterationCounter << "/" << MaxFixpointIterations
7729                     << " iterations\n");
7730 
7731   size_t NumFinalAAs = AllAbstractAttributes.size();
7732 
7733   // Reset abstract arguments not settled in a sound fixpoint by now. This
7734   // happens when we stopped the fixpoint iteration early. Note that only the
7735   // ones marked as "changed" *and* the ones transitively depending on them
7736   // need to be reverted to a pessimistic state. Others might not be in a
7737   // fixpoint state but we can use the optimistic results for them anyway.
7738   SmallPtrSet<AbstractAttribute *, 32> Visited;
7739   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
7740     AbstractAttribute *ChangedAA = ChangedAAs[u];
7741     if (!Visited.insert(ChangedAA).second)
7742       continue;
7743 
7744     AbstractState &State = ChangedAA->getState();
7745     if (!State.isAtFixpoint()) {
7746       State.indicatePessimisticFixpoint();
7747 
7748       NumAttributesTimedOut++;
7749     }
7750 
7751     auto &QuerriedAAs = QueryMap[ChangedAA];
7752     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
7753                       QuerriedAAs.OptionalAAs.end());
7754     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
7755                       QuerriedAAs.RequiredAAs.end());
7756   }
7757 
7758   LLVM_DEBUG({
7759     if (!Visited.empty())
7760       dbgs() << "\n[Attributor] Finalized " << Visited.size()
7761              << " abstract attributes.\n";
7762   });
7763 
7764   unsigned NumManifested = 0;
7765   unsigned NumAtFixpoint = 0;
7766   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
7767   for (AbstractAttribute *AA : AllAbstractAttributes) {
7768     AbstractState &State = AA->getState();
7769 
7770     // If there is not already a fixpoint reached, we can now take the
7771     // optimistic state. This is correct because we enforced a pessimistic one
7772     // on abstract attributes that were transitively dependent on a changed one
7773     // already above.
7774     if (!State.isAtFixpoint())
7775       State.indicateOptimisticFixpoint();
7776 
7777     // If the state is invalid, we do not try to manifest it.
7778     if (!State.isValidState())
7779       continue;
7780 
7781     // Skip dead code.
7782     if (isAssumedDead(*AA, nullptr, /* CheckBBLivenessOnly */ true))
7783       continue;
7784     // Manifest the state and record if we changed the IR.
7785     ChangeStatus LocalChange = AA->manifest(*this);
7786     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
7787       AA->trackStatistics();
7788     LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
7789                       << "\n");
7790 
7791     ManifestChange = ManifestChange | LocalChange;
7792 
7793     NumAtFixpoint++;
7794     NumManifested += (LocalChange == ChangeStatus::CHANGED);
7795   }
7796 
7797   (void)NumManifested;
7798   (void)NumAtFixpoint;
7799   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
7800                     << " arguments while " << NumAtFixpoint
7801                     << " were in a valid fixpoint state\n");
7802 
7803   NumAttributesManifested += NumManifested;
7804   NumAttributesValidFixpoint += NumAtFixpoint;
7805 
7806   (void)NumFinalAAs;
7807   if (NumFinalAAs != AllAbstractAttributes.size()) {
7808     for (unsigned u = NumFinalAAs; u < AllAbstractAttributes.size(); ++u)
7809       errs() << "Unexpected abstract attribute: " << *AllAbstractAttributes[u]
7810              << " :: "
7811              << AllAbstractAttributes[u]->getIRPosition().getAssociatedValue()
7812              << "\n";
7813     llvm_unreachable("Expected the final number of abstract attributes to "
7814                      "remain unchanged!");
7815   }
7816 
7817   // Delete stuff at the end to avoid invalid references and a nice order.
7818   {
7819     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
7820                       << ToBeDeletedFunctions.size() << " functions and "
7821                       << ToBeDeletedBlocks.size() << " blocks and "
7822                       << ToBeDeletedInsts.size() << " instructions and "
7823                       << ToBeChangedUses.size() << " uses\n");
7824 
7825     SmallVector<WeakTrackingVH, 32> DeadInsts;
7826     SmallVector<Instruction *, 32> TerminatorsToFold;
7827 
7828     for (auto &It : ToBeChangedUses) {
7829       Use *U = It.first;
7830       Value *NewV = It.second;
7831       Value *OldV = U->get();
7832       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
7833                         << " instead of " << *OldV << "\n");
7834       U->set(NewV);
7835       // Do not modify call instructions outside the SCC.
7836       if (auto *CB = dyn_cast<CallBase>(OldV))
7837         if (!Functions.count(CB->getCaller()))
7838           continue;
7839       if (Instruction *I = dyn_cast<Instruction>(OldV)) {
7840         CGModifiedFunctions.insert(I->getFunction());
7841         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
7842             isInstructionTriviallyDead(I))
7843           DeadInsts.push_back(I);
7844       }
7845       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
7846         Instruction *UserI = cast<Instruction>(U->getUser());
7847         if (isa<UndefValue>(NewV)) {
7848           ToBeChangedToUnreachableInsts.insert(UserI);
7849         } else {
7850           TerminatorsToFold.push_back(UserI);
7851         }
7852       }
7853     }
7854     for (auto &V : InvokeWithDeadSuccessor)
7855       if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
7856         bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
7857         bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
7858         bool Invoke2CallAllowed =
7859             !AAIsDeadFunction::mayCatchAsynchronousExceptions(
7860                 *II->getFunction());
7861         assert((UnwindBBIsDead || NormalBBIsDead) &&
7862                "Invoke does not have dead successors!");
7863         BasicBlock *BB = II->getParent();
7864         BasicBlock *NormalDestBB = II->getNormalDest();
7865         if (UnwindBBIsDead) {
7866           Instruction *NormalNextIP = &NormalDestBB->front();
7867           if (Invoke2CallAllowed) {
7868             changeToCall(II);
7869             NormalNextIP = BB->getTerminator();
7870           }
7871           if (NormalBBIsDead)
7872             ToBeChangedToUnreachableInsts.insert(NormalNextIP);
7873         } else {
7874           assert(NormalBBIsDead && "Broken invariant!");
7875           if (!NormalDestBB->getUniquePredecessor())
7876             NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
7877           ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
7878         }
7879       }
7880     for (Instruction *I : TerminatorsToFold) {
7881       CGModifiedFunctions.insert(I->getFunction());
7882       ConstantFoldTerminator(I->getParent());
7883     }
7884     for (auto &V : ToBeChangedToUnreachableInsts)
7885       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
7886         CGModifiedFunctions.insert(I->getFunction());
7887         changeToUnreachable(I, /* UseLLVMTrap */ false);
7888       }
7889 
7890     for (auto &V : ToBeDeletedInsts) {
7891       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
7892         CGModifiedFunctions.insert(I->getFunction());
7893         if (!I->getType()->isVoidTy())
7894           I->replaceAllUsesWith(UndefValue::get(I->getType()));
7895         if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
7896           DeadInsts.push_back(I);
7897         else
7898           I->eraseFromParent();
7899       }
7900     }
7901 
7902     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
7903 
7904     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
7905       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
7906       ToBeDeletedBBs.reserve(NumDeadBlocks);
7907       for (BasicBlock *BB : ToBeDeletedBlocks) {
7908         CGModifiedFunctions.insert(BB->getParent());
7909         ToBeDeletedBBs.push_back(BB);
7910       }
7911       // Actually we do not delete the blocks but squash them into a single
7912       // unreachable but untangling branches that jump here is something we need
7913       // to do in a more generic way.
7914       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
7915       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
7916       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
7917     }
7918 
7919     // Identify dead internal functions and delete them. This happens outside
7920     // the other fixpoint analysis as we might treat potentially dead functions
7921     // as live to lower the number of iterations. If they happen to be dead, the
7922     // below fixpoint loop will identify and eliminate them.
7923     SmallVector<Function *, 8> InternalFns;
7924     for (Function *F : Functions)
7925       if (F->hasLocalLinkage())
7926         InternalFns.push_back(F);
7927 
7928     bool FoundDeadFn = true;
7929     while (FoundDeadFn) {
7930       FoundDeadFn = false;
7931       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
7932         Function *F = InternalFns[u];
7933         if (!F)
7934           continue;
7935 
7936         bool AllCallSitesKnown;
7937         if (!checkForAllCallSites(
7938                 [this](AbstractCallSite ACS) {
7939                   return ToBeDeletedFunctions.count(
7940                       ACS.getInstruction()->getFunction());
7941                 },
7942                 *F, true, nullptr, AllCallSitesKnown))
7943           continue;
7944 
7945         ToBeDeletedFunctions.insert(F);
7946         InternalFns[u] = nullptr;
7947         FoundDeadFn = true;
7948       }
7949     }
7950   }
7951 
7952   // Rewrite the functions as requested during manifest.
7953   ManifestChange =
7954       ManifestChange | rewriteFunctionSignatures(CGModifiedFunctions);
7955 
7956   for (Function *Fn : CGModifiedFunctions)
7957     CGUpdater.reanalyzeFunction(*Fn);
7958 
7959   STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
7960   BUILD_STAT_NAME(AAIsDead, Function) += ToBeDeletedFunctions.size();
7961 
7962   for (Function *Fn : ToBeDeletedFunctions)
7963     CGUpdater.removeFunction(*Fn);
7964 
7965   if (VerifyMaxFixpointIterations &&
7966       IterationCounter != MaxFixpointIterations) {
7967     errs() << "\n[Attributor] Fixpoint iteration done after: "
7968            << IterationCounter << "/" << MaxFixpointIterations
7969            << " iterations\n";
7970     llvm_unreachable("The fixpoint was not reached with exactly the number of "
7971                      "specified iterations!");
7972   }
7973 
7974   return ManifestChange;
7975 }
7976 
7977 bool Attributor::isValidFunctionSignatureRewrite(
7978     Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
7979 
7980   auto CallSiteCanBeChanged = [](AbstractCallSite ACS) {
7981     // Forbid must-tail calls for now.
7982     return !ACS.isCallbackCall() && !ACS.getCallSite().isMustTailCall();
7983   };
7984 
7985   Function *Fn = Arg.getParent();
7986   // Avoid var-arg functions for now.
7987   if (Fn->isVarArg()) {
7988     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
7989     return false;
7990   }
7991 
7992   // Avoid functions with complicated argument passing semantics.
7993   AttributeList FnAttributeList = Fn->getAttributes();
7994   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
7995       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
7996       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca)) {
7997     LLVM_DEBUG(
7998         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
7999     return false;
8000   }
8001 
8002   // Avoid callbacks for now.
8003   bool AllCallSitesKnown;
8004   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
8005                             AllCallSitesKnown)) {
8006     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
8007     return false;
8008   }
8009 
8010   auto InstPred = [](Instruction &I) {
8011     if (auto *CI = dyn_cast<CallInst>(&I))
8012       return !CI->isMustTailCall();
8013     return true;
8014   };
8015 
8016   // Forbid must-tail calls for now.
8017   // TODO:
8018   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
8019   if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
8020                                    nullptr, {Instruction::Call})) {
8021     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
8022     return false;
8023   }
8024 
8025   return true;
8026 }
8027 
8028 bool Attributor::registerFunctionSignatureRewrite(
8029     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
8030     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
8031     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
8032   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
8033                     << Arg.getParent()->getName() << " with "
8034                     << ReplacementTypes.size() << " replacements\n");
8035   assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
8036          "Cannot register an invalid rewrite");
8037 
8038   Function *Fn = Arg.getParent();
8039   SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = ArgumentReplacementMap[Fn];
8040   if (ARIs.empty())
8041     ARIs.resize(Fn->arg_size());
8042 
8043   // If we have a replacement already with less than or equal new arguments,
8044   // ignore this request.
8045   ArgumentReplacementInfo *&ARI = ARIs[Arg.getArgNo()];
8046   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
8047     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
8048     return false;
8049   }
8050 
8051   // If we have a replacement already but we like the new one better, delete
8052   // the old.
8053   if (ARI)
8054     delete ARI;
8055 
8056   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
8057                     << Arg.getParent()->getName() << " with "
8058                     << ReplacementTypes.size() << " replacements\n");
8059 
8060   // Remember the replacement.
8061   ARI = new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
8062                                     std::move(CalleeRepairCB),
8063                                     std::move(ACSRepairCB));
8064 
8065   return true;
8066 }
8067 
8068 ChangeStatus Attributor::rewriteFunctionSignatures(
8069     SmallPtrSetImpl<Function *> &ModifiedFns) {
8070   ChangeStatus Changed = ChangeStatus::UNCHANGED;
8071 
8072   for (auto &It : ArgumentReplacementMap) {
8073     Function *OldFn = It.getFirst();
8074 
8075     // Deleted functions do not require rewrites.
8076     if (ToBeDeletedFunctions.count(OldFn))
8077       continue;
8078 
8079     const SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = It.getSecond();
8080     assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
8081 
8082     SmallVector<Type *, 16> NewArgumentTypes;
8083     SmallVector<AttributeSet, 16> NewArgumentAttributes;
8084 
8085     // Collect replacement argument types and copy over existing attributes.
8086     AttributeList OldFnAttributeList = OldFn->getAttributes();
8087     for (Argument &Arg : OldFn->args()) {
8088       if (ArgumentReplacementInfo *ARI = ARIs[Arg.getArgNo()]) {
8089         NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
8090                                 ARI->ReplacementTypes.end());
8091         NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
8092                                      AttributeSet());
8093       } else {
8094         NewArgumentTypes.push_back(Arg.getType());
8095         NewArgumentAttributes.push_back(
8096             OldFnAttributeList.getParamAttributes(Arg.getArgNo()));
8097       }
8098     }
8099 
8100     FunctionType *OldFnTy = OldFn->getFunctionType();
8101     Type *RetTy = OldFnTy->getReturnType();
8102 
8103     // Construct the new function type using the new arguments types.
8104     FunctionType *NewFnTy =
8105         FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
8106 
8107     LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
8108                       << "' from " << *OldFn->getFunctionType() << " to "
8109                       << *NewFnTy << "\n");
8110 
8111     // Create the new function body and insert it into the module.
8112     Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
8113                                        OldFn->getAddressSpace(), "");
8114     OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
8115     NewFn->takeName(OldFn);
8116     NewFn->copyAttributesFrom(OldFn);
8117 
8118     // Patch the pointer to LLVM function in debug info descriptor.
8119     NewFn->setSubprogram(OldFn->getSubprogram());
8120     OldFn->setSubprogram(nullptr);
8121 
8122     // Recompute the parameter attributes list based on the new arguments for
8123     // the function.
8124     LLVMContext &Ctx = OldFn->getContext();
8125     NewFn->setAttributes(AttributeList::get(
8126         Ctx, OldFnAttributeList.getFnAttributes(),
8127         OldFnAttributeList.getRetAttributes(), NewArgumentAttributes));
8128 
8129     // Since we have now created the new function, splice the body of the old
8130     // function right into the new function, leaving the old rotting hulk of the
8131     // function empty.
8132     NewFn->getBasicBlockList().splice(NewFn->begin(),
8133                                       OldFn->getBasicBlockList());
8134 
8135     // Set of all "call-like" instructions that invoke the old function mapped
8136     // to their new replacements.
8137     SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
8138 
8139     // Callback to create a new "call-like" instruction for a given one.
8140     auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
8141       CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
8142       const AttributeList &OldCallAttributeList = OldCB->getAttributes();
8143 
8144       // Collect the new argument operands for the replacement call site.
8145       SmallVector<Value *, 16> NewArgOperands;
8146       SmallVector<AttributeSet, 16> NewArgOperandAttributes;
8147       for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
8148         unsigned NewFirstArgNum = NewArgOperands.size();
8149         (void)NewFirstArgNum; // only used inside assert.
8150         if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
8151           if (ARI->ACSRepairCB)
8152             ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
8153           assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
8154                      NewArgOperands.size() &&
8155                  "ACS repair callback did not provide as many operand as new "
8156                  "types were registered!");
8157           // TODO: Exose the attribute set to the ACS repair callback
8158           NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
8159                                          AttributeSet());
8160         } else {
8161           NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
8162           NewArgOperandAttributes.push_back(
8163               OldCallAttributeList.getParamAttributes(OldArgNum));
8164         }
8165       }
8166 
8167       assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
8168              "Mismatch # argument operands vs. # argument operand attributes!");
8169       assert(NewArgOperands.size() == NewFn->arg_size() &&
8170              "Mismatch # argument operands vs. # function arguments!");
8171 
8172       SmallVector<OperandBundleDef, 4> OperandBundleDefs;
8173       OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
8174 
8175       // Create a new call or invoke instruction to replace the old one.
8176       CallBase *NewCB;
8177       if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
8178         NewCB =
8179             InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
8180                                NewArgOperands, OperandBundleDefs, "", OldCB);
8181       } else {
8182         auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
8183                                        "", OldCB);
8184         NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
8185         NewCB = NewCI;
8186       }
8187 
8188       // Copy over various properties and the new attributes.
8189       uint64_t W;
8190       if (OldCB->extractProfTotalWeight(W))
8191         NewCB->setProfWeight(W);
8192       NewCB->setCallingConv(OldCB->getCallingConv());
8193       NewCB->setDebugLoc(OldCB->getDebugLoc());
8194       NewCB->takeName(OldCB);
8195       NewCB->setAttributes(AttributeList::get(
8196           Ctx, OldCallAttributeList.getFnAttributes(),
8197           OldCallAttributeList.getRetAttributes(), NewArgOperandAttributes));
8198 
8199       CallSitePairs.push_back({OldCB, NewCB});
8200       return true;
8201     };
8202 
8203     // Use the CallSiteReplacementCreator to create replacement call sites.
8204     bool AllCallSitesKnown;
8205     bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
8206                                         true, nullptr, AllCallSitesKnown);
8207     (void)Success;
8208     assert(Success && "Assumed call site replacement to succeed!");
8209 
8210     // Rewire the arguments.
8211     auto OldFnArgIt = OldFn->arg_begin();
8212     auto NewFnArgIt = NewFn->arg_begin();
8213     for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
8214          ++OldArgNum, ++OldFnArgIt) {
8215       if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
8216         if (ARI->CalleeRepairCB)
8217           ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
8218         NewFnArgIt += ARI->ReplacementTypes.size();
8219       } else {
8220         NewFnArgIt->takeName(&*OldFnArgIt);
8221         OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
8222         ++NewFnArgIt;
8223       }
8224     }
8225 
8226     // Eliminate the instructions *after* we visited all of them.
8227     for (auto &CallSitePair : CallSitePairs) {
8228       CallBase &OldCB = *CallSitePair.first;
8229       CallBase &NewCB = *CallSitePair.second;
8230       // We do not modify the call graph here but simply reanalyze the old
8231       // function. This should be revisited once the old PM is gone.
8232       ModifiedFns.insert(OldCB.getFunction());
8233       OldCB.replaceAllUsesWith(&NewCB);
8234       OldCB.eraseFromParent();
8235     }
8236 
8237     // Replace the function in the call graph (if any).
8238     CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
8239 
8240     // If the old function was modified and needed to be reanalyzed, the new one
8241     // does now.
8242     if (ModifiedFns.erase(OldFn))
8243       ModifiedFns.insert(NewFn);
8244 
8245     Changed = ChangeStatus::CHANGED;
8246   }
8247 
8248   return Changed;
8249 }
8250 
8251 void Attributor::initializeInformationCache(Function &F) {
8252 
8253   // Walk all instructions to find interesting instructions that might be
8254   // queried by abstract attributes during their initialization or update.
8255   // This has to happen before we create attributes.
8256   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
8257   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
8258 
8259   for (Instruction &I : instructions(&F)) {
8260     bool IsInterestingOpcode = false;
8261 
8262     // To allow easy access to all instructions in a function with a given
8263     // opcode we store them in the InfoCache. As not all opcodes are interesting
8264     // to concrete attributes we only cache the ones that are as identified in
8265     // the following switch.
8266     // Note: There are no concrete attributes now so this is initially empty.
8267     switch (I.getOpcode()) {
8268     default:
8269       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
8270              "New call site/base instruction type needs to be known int the "
8271              "Attributor.");
8272       break;
8273     case Instruction::Load:
8274       // The alignment of a pointer is interesting for loads.
8275     case Instruction::Store:
8276       // The alignment of a pointer is interesting for stores.
8277     case Instruction::Call:
8278     case Instruction::CallBr:
8279     case Instruction::Invoke:
8280     case Instruction::CleanupRet:
8281     case Instruction::CatchSwitch:
8282     case Instruction::AtomicRMW:
8283     case Instruction::AtomicCmpXchg:
8284     case Instruction::Br:
8285     case Instruction::Resume:
8286     case Instruction::Ret:
8287       IsInterestingOpcode = true;
8288     }
8289     if (IsInterestingOpcode)
8290       InstOpcodeMap[I.getOpcode()].push_back(&I);
8291     if (I.mayReadOrWriteMemory())
8292       ReadOrWriteInsts.push_back(&I);
8293   }
8294 }
8295 
8296 void Attributor::recordDependence(const AbstractAttribute &FromAA,
8297                                   const AbstractAttribute &ToAA,
8298                                   DepClassTy DepClass) {
8299   if (FromAA.getState().isAtFixpoint())
8300     return;
8301 
8302   if (DepClass == DepClassTy::REQUIRED)
8303     QueryMap[&FromAA].RequiredAAs.insert(
8304         const_cast<AbstractAttribute *>(&ToAA));
8305   else
8306     QueryMap[&FromAA].OptionalAAs.insert(
8307         const_cast<AbstractAttribute *>(&ToAA));
8308   QueriedNonFixAA = true;
8309 }
8310 
8311 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
8312   if (!VisitedFunctions.insert(&F).second)
8313     return;
8314   if (F.isDeclaration())
8315     return;
8316 
8317   IRPosition FPos = IRPosition::function(F);
8318 
8319   // Check for dead BasicBlocks in every function.
8320   // We need dead instruction detection because we do not want to deal with
8321   // broken IR in which SSA rules do not apply.
8322   getOrCreateAAFor<AAIsDead>(FPos);
8323 
8324   // Every function might be "will-return".
8325   getOrCreateAAFor<AAWillReturn>(FPos);
8326 
8327   // Every function might contain instructions that cause "undefined behavior".
8328   getOrCreateAAFor<AAUndefinedBehavior>(FPos);
8329 
8330   // Every function can be nounwind.
8331   getOrCreateAAFor<AANoUnwind>(FPos);
8332 
8333   // Every function might be marked "nosync"
8334   getOrCreateAAFor<AANoSync>(FPos);
8335 
8336   // Every function might be "no-free".
8337   getOrCreateAAFor<AANoFree>(FPos);
8338 
8339   // Every function might be "no-return".
8340   getOrCreateAAFor<AANoReturn>(FPos);
8341 
8342   // Every function might be "no-recurse".
8343   getOrCreateAAFor<AANoRecurse>(FPos);
8344 
8345   // Every function might be "readnone/readonly/writeonly/...".
8346   getOrCreateAAFor<AAMemoryBehavior>(FPos);
8347 
8348   // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
8349   getOrCreateAAFor<AAMemoryLocation>(FPos);
8350 
8351   // Every function might be applicable for Heap-To-Stack conversion.
8352   if (EnableHeapToStack)
8353     getOrCreateAAFor<AAHeapToStack>(FPos);
8354 
8355   // Return attributes are only appropriate if the return type is non void.
8356   Type *ReturnType = F.getReturnType();
8357   if (!ReturnType->isVoidTy()) {
8358     // Argument attribute "returned" --- Create only one per function even
8359     // though it is an argument attribute.
8360     getOrCreateAAFor<AAReturnedValues>(FPos);
8361 
8362     IRPosition RetPos = IRPosition::returned(F);
8363 
8364     // Every returned value might be dead.
8365     getOrCreateAAFor<AAIsDead>(RetPos);
8366 
8367     // Every function might be simplified.
8368     getOrCreateAAFor<AAValueSimplify>(RetPos);
8369 
8370     if (ReturnType->isPointerTy()) {
8371 
8372       // Every function with pointer return type might be marked align.
8373       getOrCreateAAFor<AAAlign>(RetPos);
8374 
8375       // Every function with pointer return type might be marked nonnull.
8376       getOrCreateAAFor<AANonNull>(RetPos);
8377 
8378       // Every function with pointer return type might be marked noalias.
8379       getOrCreateAAFor<AANoAlias>(RetPos);
8380 
8381       // Every function with pointer return type might be marked
8382       // dereferenceable.
8383       getOrCreateAAFor<AADereferenceable>(RetPos);
8384     }
8385   }
8386 
8387   for (Argument &Arg : F.args()) {
8388     IRPosition ArgPos = IRPosition::argument(Arg);
8389 
8390     // Every argument might be simplified.
8391     getOrCreateAAFor<AAValueSimplify>(ArgPos);
8392 
8393     // Every argument might be dead.
8394     getOrCreateAAFor<AAIsDead>(ArgPos);
8395 
8396     if (Arg.getType()->isPointerTy()) {
8397       // Every argument with pointer type might be marked nonnull.
8398       getOrCreateAAFor<AANonNull>(ArgPos);
8399 
8400       // Every argument with pointer type might be marked noalias.
8401       getOrCreateAAFor<AANoAlias>(ArgPos);
8402 
8403       // Every argument with pointer type might be marked dereferenceable.
8404       getOrCreateAAFor<AADereferenceable>(ArgPos);
8405 
8406       // Every argument with pointer type might be marked align.
8407       getOrCreateAAFor<AAAlign>(ArgPos);
8408 
8409       // Every argument with pointer type might be marked nocapture.
8410       getOrCreateAAFor<AANoCapture>(ArgPos);
8411 
8412       // Every argument with pointer type might be marked
8413       // "readnone/readonly/writeonly/..."
8414       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
8415 
8416       // Every argument with pointer type might be marked nofree.
8417       getOrCreateAAFor<AANoFree>(ArgPos);
8418 
8419       // Every argument with pointer type might be privatizable (or promotable)
8420       getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
8421     }
8422   }
8423 
8424   auto CallSitePred = [&](Instruction &I) -> bool {
8425     CallSite CS(&I);
8426     IRPosition CSRetPos = IRPosition::callsite_returned(CS);
8427 
8428     // Call sites might be dead if they do not have side effects and no live
8429     // users. The return value might be dead if there are no live users.
8430     getOrCreateAAFor<AAIsDead>(CSRetPos);
8431 
8432     if (Function *Callee = CS.getCalledFunction()) {
8433       // Skip declerations except if annotations on their call sites were
8434       // explicitly requested.
8435       if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
8436           !Callee->hasMetadata(LLVMContext::MD_callback))
8437         return true;
8438 
8439       if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
8440 
8441         IRPosition CSRetPos = IRPosition::callsite_returned(CS);
8442 
8443         // Call site return integer values might be limited by a constant range.
8444         if (Callee->getReturnType()->isIntegerTy())
8445           getOrCreateAAFor<AAValueConstantRange>(CSRetPos);
8446       }
8447 
8448       for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {
8449 
8450         IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
8451 
8452         // Every call site argument might be dead.
8453         getOrCreateAAFor<AAIsDead>(CSArgPos);
8454 
8455         // Call site argument might be simplified.
8456         getOrCreateAAFor<AAValueSimplify>(CSArgPos);
8457 
8458         if (!CS.getArgument(i)->getType()->isPointerTy())
8459           continue;
8460 
8461         // Call site argument attribute "non-null".
8462         getOrCreateAAFor<AANonNull>(CSArgPos);
8463 
8464         // Call site argument attribute "no-alias".
8465         getOrCreateAAFor<AANoAlias>(CSArgPos);
8466 
8467         // Call site argument attribute "dereferenceable".
8468         getOrCreateAAFor<AADereferenceable>(CSArgPos);
8469 
8470         // Call site argument attribute "align".
8471         getOrCreateAAFor<AAAlign>(CSArgPos);
8472 
8473         // Call site argument attribute
8474         // "readnone/readonly/writeonly/..."
8475         getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);
8476 
8477         // Call site argument attribute "nofree".
8478         getOrCreateAAFor<AANoFree>(CSArgPos);
8479       }
8480     }
8481     return true;
8482   };
8483 
8484   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
8485   bool Success;
8486   Success = checkForAllInstructionsImpl(
8487       nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
8488       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
8489        (unsigned)Instruction::Call});
8490   (void)Success;
8491   assert(Success && "Expected the check call to be successful!");
8492 
8493   auto LoadStorePred = [&](Instruction &I) -> bool {
8494     if (isa<LoadInst>(I))
8495       getOrCreateAAFor<AAAlign>(
8496           IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
8497     else
8498       getOrCreateAAFor<AAAlign>(
8499           IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
8500     return true;
8501   };
8502   Success = checkForAllInstructionsImpl(
8503       nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
8504       {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
8505   (void)Success;
8506   assert(Success && "Expected the check call to be successful!");
8507 }
8508 
8509 /// Helpers to ease debugging through output streams and print calls.
8510 ///
8511 ///{
8512 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
8513   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
8514 }
8515 
8516 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
8517   switch (AP) {
8518   case IRPosition::IRP_INVALID:
8519     return OS << "inv";
8520   case IRPosition::IRP_FLOAT:
8521     return OS << "flt";
8522   case IRPosition::IRP_RETURNED:
8523     return OS << "fn_ret";
8524   case IRPosition::IRP_CALL_SITE_RETURNED:
8525     return OS << "cs_ret";
8526   case IRPosition::IRP_FUNCTION:
8527     return OS << "fn";
8528   case IRPosition::IRP_CALL_SITE:
8529     return OS << "cs";
8530   case IRPosition::IRP_ARGUMENT:
8531     return OS << "arg";
8532   case IRPosition::IRP_CALL_SITE_ARGUMENT:
8533     return OS << "cs_arg";
8534   }
8535   llvm_unreachable("Unknown attribute position!");
8536 }
8537 
8538 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
8539   const Value &AV = Pos.getAssociatedValue();
8540   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
8541             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
8542 }
8543 
8544 template <typename base_ty, base_ty BestState, base_ty WorstState>
8545 raw_ostream &
8546 llvm::operator<<(raw_ostream &OS,
8547                  const IntegerStateBase<base_ty, BestState, WorstState> &S) {
8548   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
8549             << static_cast<const AbstractState &>(S);
8550 }
8551 
8552 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
8553   OS << "range-state(" << S.getBitWidth() << ")<";
8554   S.getKnown().print(OS);
8555   OS << " / ";
8556   S.getAssumed().print(OS);
8557   OS << ">";
8558 
8559   return OS << static_cast<const AbstractState &>(S);
8560 }
8561 
8562 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
8563   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
8564 }
8565 
8566 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
8567   AA.print(OS);
8568   return OS;
8569 }
8570 
8571 void AbstractAttribute::print(raw_ostream &OS) const {
8572   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
8573      << "]";
8574 }
8575 ///}
8576 
8577 /// ----------------------------------------------------------------------------
8578 ///                       Pass (Manager) Boilerplate
8579 /// ----------------------------------------------------------------------------
8580 
8581 static bool runAttributorOnFunctions(InformationCache &InfoCache,
8582                                      SetVector<Function *> &Functions,
8583                                      AnalysisGetter &AG,
8584                                      CallGraphUpdater &CGUpdater) {
8585   if (DisableAttributor || Functions.empty())
8586     return false;
8587 
8588   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << Functions.size()
8589                     << " functions.\n");
8590 
8591   // Create an Attributor and initially empty information cache that is filled
8592   // while we identify default attribute opportunities.
8593   Attributor A(Functions, InfoCache, CGUpdater, DepRecInterval);
8594 
8595   for (Function *F : Functions)
8596     A.initializeInformationCache(*F);
8597 
8598   for (Function *F : Functions) {
8599     if (F->hasExactDefinition())
8600       NumFnWithExactDefinition++;
8601     else
8602       NumFnWithoutExactDefinition++;
8603 
8604     // We look at internal functions only on-demand but if any use is not a
8605     // direct call or outside the current set of analyzed functions, we have to
8606     // do it eagerly.
8607     if (F->hasLocalLinkage()) {
8608       if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
8609             ImmutableCallSite ICS(U.getUser());
8610             return ICS && ICS.isCallee(&U) &&
8611                    Functions.count(const_cast<Function *>(ICS.getCaller()));
8612           }))
8613         continue;
8614     }
8615 
8616     // Populate the Attributor with abstract attribute opportunities in the
8617     // function and the information cache with IR information.
8618     A.identifyDefaultAbstractAttributes(*F);
8619   }
8620 
8621   ChangeStatus Changed = A.run();
8622   assert(!verifyModule(*Functions.front()->getParent(), &errs()) &&
8623          "Module verification failed!");
8624   LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
8625                     << " functions, result: " << Changed << ".\n");
8626   return Changed == ChangeStatus::CHANGED;
8627 }
8628 
8629 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
8630   FunctionAnalysisManager &FAM =
8631       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
8632   AnalysisGetter AG(FAM);
8633 
8634   SetVector<Function *> Functions;
8635   for (Function &F : M)
8636     Functions.insert(&F);
8637 
8638   CallGraphUpdater CGUpdater;
8639   InformationCache InfoCache(M, AG, /* CGSCC */ nullptr);
8640   if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater)) {
8641     // FIXME: Think about passes we will preserve and add them here.
8642     return PreservedAnalyses::none();
8643   }
8644   return PreservedAnalyses::all();
8645 }
8646 
8647 PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
8648                                            CGSCCAnalysisManager &AM,
8649                                            LazyCallGraph &CG,
8650                                            CGSCCUpdateResult &UR) {
8651   FunctionAnalysisManager &FAM =
8652       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
8653   AnalysisGetter AG(FAM);
8654 
8655   SetVector<Function *> Functions;
8656   for (LazyCallGraph::Node &N : C)
8657     Functions.insert(&N.getFunction());
8658 
8659   if (Functions.empty())
8660     return PreservedAnalyses::all();
8661 
8662   Module &M = *Functions.back()->getParent();
8663   CallGraphUpdater CGUpdater;
8664   CGUpdater.initialize(CG, C, AM, UR);
8665   InformationCache InfoCache(M, AG, /* CGSCC */ &Functions);
8666   if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater)) {
8667     // FIXME: Think about passes we will preserve and add them here.
8668     return PreservedAnalyses::none();
8669   }
8670   return PreservedAnalyses::all();
8671 }
8672 
8673 namespace {
8674 
8675 struct AttributorLegacyPass : public ModulePass {
8676   static char ID;
8677 
8678   AttributorLegacyPass() : ModulePass(ID) {
8679     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
8680   }
8681 
8682   bool runOnModule(Module &M) override {
8683     if (skipModule(M))
8684       return false;
8685 
8686     AnalysisGetter AG;
8687     SetVector<Function *> Functions;
8688     for (Function &F : M)
8689       Functions.insert(&F);
8690 
8691     CallGraphUpdater CGUpdater;
8692     InformationCache InfoCache(M, AG, /* CGSCC */ nullptr);
8693     return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater);
8694   }
8695 
8696   void getAnalysisUsage(AnalysisUsage &AU) const override {
8697     // FIXME: Think about passes we will preserve and add them here.
8698     AU.addRequired<TargetLibraryInfoWrapperPass>();
8699   }
8700 };
8701 
8702 struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
8703   CallGraphUpdater CGUpdater;
8704   static char ID;
8705 
8706   AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
8707     initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
8708   }
8709 
8710   bool runOnSCC(CallGraphSCC &SCC) override {
8711     if (skipSCC(SCC))
8712       return false;
8713 
8714     SetVector<Function *> Functions;
8715     for (CallGraphNode *CGN : SCC)
8716       if (Function *Fn = CGN->getFunction())
8717         if (!Fn->isDeclaration())
8718           Functions.insert(Fn);
8719 
8720     if (Functions.empty())
8721       return false;
8722 
8723     AnalysisGetter AG;
8724     CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
8725     CGUpdater.initialize(CG, SCC);
8726     Module &M = *Functions.back()->getParent();
8727     InformationCache InfoCache(M, AG, /* CGSCC */ &Functions);
8728     return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater);
8729   }
8730 
8731   bool doFinalization(CallGraph &CG) override { return CGUpdater.finalize(); }
8732 
8733   void getAnalysisUsage(AnalysisUsage &AU) const override {
8734     // FIXME: Think about passes we will preserve and add them here.
8735     AU.addRequired<TargetLibraryInfoWrapperPass>();
8736     CallGraphSCCPass::getAnalysisUsage(AU);
8737   }
8738 };
8739 
8740 } // end anonymous namespace
8741 
8742 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
8743 Pass *llvm::createAttributorCGSCCLegacyPass() {
8744   return new AttributorCGSCCLegacyPass();
8745 }
8746 
8747 char AttributorLegacyPass::ID = 0;
8748 char AttributorCGSCCLegacyPass::ID = 0;
8749 
8750 const char AAReturnedValues::ID = 0;
8751 const char AANoUnwind::ID = 0;
8752 const char AANoSync::ID = 0;
8753 const char AANoFree::ID = 0;
8754 const char AANonNull::ID = 0;
8755 const char AANoRecurse::ID = 0;
8756 const char AAWillReturn::ID = 0;
8757 const char AAUndefinedBehavior::ID = 0;
8758 const char AANoAlias::ID = 0;
8759 const char AAReachability::ID = 0;
8760 const char AANoReturn::ID = 0;
8761 const char AAIsDead::ID = 0;
8762 const char AADereferenceable::ID = 0;
8763 const char AAAlign::ID = 0;
8764 const char AANoCapture::ID = 0;
8765 const char AAValueSimplify::ID = 0;
8766 const char AAHeapToStack::ID = 0;
8767 const char AAPrivatizablePtr::ID = 0;
8768 const char AAMemoryBehavior::ID = 0;
8769 const char AAMemoryLocation::ID = 0;
8770 const char AAValueConstantRange::ID = 0;
8771 
8772 // Macro magic to create the static generator function for attributes that
8773 // follow the naming scheme.
8774 
8775 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8776   case IRPosition::PK:                                                         \
8777     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8778 
8779 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8780   case IRPosition::PK:                                                         \
8781     AA = new CLASS##SUFFIX(IRP);                                               \
8782     break;
8783 
8784 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8785   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8786     CLASS *AA = nullptr;                                                       \
8787     switch (IRP.getPositionKind()) {                                           \
8788       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8789       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8790       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8791       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8792       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8793       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8794       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8795       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8796     }                                                                          \
8797     return *AA;                                                                \
8798   }
8799 
8800 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8801   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8802     CLASS *AA = nullptr;                                                       \
8803     switch (IRP.getPositionKind()) {                                           \
8804       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8805       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8806       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8807       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8808       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8809       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8810       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8811       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8812     }                                                                          \
8813     return *AA;                                                                \
8814   }
8815 
8816 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8817   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8818     CLASS *AA = nullptr;                                                       \
8819     switch (IRP.getPositionKind()) {                                           \
8820       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8821       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8822       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8823       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8824       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8825       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8826       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8827       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8828     }                                                                          \
8829     return *AA;                                                                \
8830   }
8831 
8832 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8833   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8834     CLASS *AA = nullptr;                                                       \
8835     switch (IRP.getPositionKind()) {                                           \
8836       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8837       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8838       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8839       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8840       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8841       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8842       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8843       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8844     }                                                                          \
8845     return *AA;                                                                \
8846   }
8847 
8848 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8849   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8850     CLASS *AA = nullptr;                                                       \
8851     switch (IRP.getPositionKind()) {                                           \
8852       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8853       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8854       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8855       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8856       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8857       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8858       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8859       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8860     }                                                                          \
8861     return *AA;                                                                \
8862   }
8863 
8864 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8865 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8866 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8867 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8868 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8869 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8870 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8871 
8872 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8873 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8874 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8875 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8876 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8877 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8878 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8879 
8880 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8881 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8882 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8883 
8884 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8885 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8886 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8887 
8888 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8889 
8890 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8891 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8892 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8893 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8894 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8895 #undef SWITCH_PK_CREATE
8896 #undef SWITCH_PK_INV
8897 
8898 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
8899                       "Deduce and propagate attributes", false, false)
8900 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
8901 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
8902                     "Deduce and propagate attributes", false, false)
8903 INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
8904                       "Deduce and propagate attributes (CGSCC pass)", false,
8905                       false)
8906 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
8907 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
8908 INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
8909                     "Deduce and propagate attributes (CGSCC pass)", false,
8910                     false)
8911