1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This file implements an interprocedural pass that deduces and/or propagates
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/LazyValueInfo.h"
27 #include "llvm/Analysis/Loads.h"
28 #include "llvm/Analysis/MemoryBuiltins.h"
29 #include "llvm/Analysis/ScalarEvolution.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/CFG.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/InstIterator.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/Verifier.h"
38 #include "llvm/InitializePasses.h"
39 #include "llvm/IR/NoFolder.h"
40 #include "llvm/Support/CommandLine.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/Local.h"
46 
47 #include <cassert>
48 
49 using namespace llvm;
50 
51 #define DEBUG_TYPE "attributor"
52 
// Statistics about the deduction process itself; per-attribute manifestation
// counts are tracked separately by the STATS_* machinery below.
STATISTIC(NumFnWithExactDefinition,
          "Number of function with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of function without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
STATISTIC(NumAttributesFixedDueToRequiredDependences,
          "Number of abstract attributes fixed due to required dependences");
65 
// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
//
// Build the human-readable statistic message for an IR attribute.
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
// Build the (unique) statistic variable name for attribute NAME at position
// kind TYPE.
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
// Declare the statistic counter for NAME/TYPE.
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
// Increment a previously declared statistic counter.
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
// Declare and increment in one go; the braces make this usable as a single
// statement inside a function body (see the trackStatistics example above).
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
// Convenience wrappers, one per IR position kind.
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
109 
// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
// Each PIPE_OPERATOR(CLASS) emits an operator<< that simply dispatches to the
// AbstractAttribute base class printer.
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)

// Keep the helper macro local to this file.
#undef PIPE_OPERATOR
} // namespace llvm
140 
// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more evolved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

// Note: the pass is disabled by default and must be opted into.
static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));

// Toggle for the heap-to-stack conversion (see AAHeapToStack above).
static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

// Size limit for heap-to-stack conversion; presumably bytes — confirm at the
// use site in the AAHeapToStack implementation.
static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);
182 
183 /// Logic operators for the change status enum class.
184 ///
185 ///{
186 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
187   return l == ChangeStatus::CHANGED ? l : r;
188 }
189 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
190   return l == ChangeStatus::UNCHANGED ? l : r;
191 }
192 ///}
193 
/// Return the Argument associated with this position, or nullptr.
///
/// For an IRP_ARGUMENT position this is the anchored Argument itself. For a
/// call site argument position we first try to map the operand into a
/// callback callee argument via abstract call sites; only if no unique
/// callback argument exists do we fall back to the directly called function.
Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  Optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CBUses;
  ImmutableCallSite ICS(&getAnchorValue());
  AbstractCallSite::getCallbackUses(ICS, CBUses);
  for (const Use *U : CBUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      // A second candidate makes the mapping ambiguous; remember that with a
      // present-but-null optional (distinct from "no candidate at all").
      if (CBCandidateArg.hasValue()) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
    return CBCandidateArg.getValue();

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  const Function *Callee = ICS.getCalledFunction();
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}
247 
248 /// For calls (and invokes) we will only replace instruction uses to not disturb
249 /// the old style call graph.
250 /// TODO: Remove this once we get rid of the old PM.
251 static void replaceAllInstructionUsesWith(Value &Old, Value &New) {
252   if (!isa<CallBase>(Old))
253     return Old.replaceAllUsesWith(&New);
254   SmallVector<Use *, 8> Uses;
255   for (Use &U : Old.uses())
256     if (isa<Instruction>(U.getUser()))
257       Uses.push_back(&U);
258   for (Use *U : Uses)
259     U->set(&New);
260 }
261 
/// Try to simplify \p V to a ConstantInt using the AAValueSimplify attribute.
///
/// Returns llvm::None if no verdict is possible yet, that is if the
/// simplification is still pending or simplifies to undef. Otherwise the
/// ConstantInt is returned, or nullptr if \p V does not simplify to a constant
/// integer. \p UsedAssumedInformation is set if the verdict rests on assumed
/// (not known) information; dependences on the simplify AA are recorded so
/// \p AA is re-run when the assumption changes.
static Optional<ConstantInt *>
getAssumedConstant(Attributor &A, const Value &V, const AbstractAttribute &AA,
                   bool &UsedAssumedInformation) {
  const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
      AA, IRPosition::value(V), /* TrackDependence */ false);
  Optional<Value *> SimplifiedV = ValueSimplifyAA.getAssumedSimplifiedValue(A);
  bool IsKnown = ValueSimplifyAA.isKnown();
  UsedAssumedInformation |= !IsKnown;
  if (!SimplifiedV.hasValue()) {
    // Simplification has not settled yet; wait for it.
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
    // Undef could still become any constant; no verdict yet.
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  ConstantInt *CI = dyn_cast_or_null<ConstantInt>(SimplifiedV.getValue());
  if (CI)
    A.recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
  return CI;
}
283 
284 /// Get pointer operand of memory accessing instruction. If \p I is
285 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
286 /// is set to false and the instruction is volatile, return nullptr.
287 static const Value *getPointerOperand(const Instruction *I,
288                                       bool AllowVolatile) {
289   if (auto *LI = dyn_cast<LoadInst>(I)) {
290     if (!AllowVolatile && LI->isVolatile())
291       return nullptr;
292     return LI->getPointerOperand();
293   }
294 
295   if (auto *SI = dyn_cast<StoreInst>(I)) {
296     if (!AllowVolatile && SI->isVolatile())
297       return nullptr;
298     return SI->getPointerOperand();
299   }
300 
301   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
302     if (!AllowVolatile && CXI->isVolatile())
303       return nullptr;
304     return CXI->getPointerOperand();
305   }
306 
307   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
308     if (!AllowVolatile && RMWI->isVolatile())
309       return nullptr;
310     return RMWI->getPointerOperand();
311   }
312 
313   return nullptr;
314 }
315 
/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelement pointer instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  // Peel off struct/pointer layers as long as the remaining offset lands
  // inside the current type; each layer contributes one GEP index.
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      // Offset points past this struct; fall back to byte-wise adjustment.
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
384 
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
///
/// Returns false if the traversal was aborted (limit reached or the callback
/// returned false), true if all reachable values were visited.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  // Liveness lets us skip phi operands coming from dead blocks; it is only
  // available when the position is anchored in a function.
  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        // Skip operands from (assumed) dead predecessor blocks, but remember
        // that we relied on liveness information.
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    // "Iteration > 1" indicates that at least one look-through step happened.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
477 
478 /// Return true if \p New is equal or worse than \p Old.
479 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
480   if (!Old.isIntAttribute())
481     return true;
482 
483   return Old.getValueAsInt() >= New.getValueAsInt();
484 }
485 
/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p PK and \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    // Nothing to do if an equal-or-better attribute is already in place.
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    // NOTE(review): the existing integer attribute is removed before the new
    // one is added — presumably because adding an int attribute with a
    // different value on top of an existing one is not allowed; confirm
    // against AttributeList semantics.
    Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}
520 
521 static const Value *
522 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
523                                      const DataLayout &DL,
524                                      bool AllowNonInbounds = false) {
525   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
526   if (!Ptr)
527     return nullptr;
528 
529   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
530                                           AllowNonInbounds);
531 }
532 
533 ChangeStatus AbstractAttribute::update(Attributor &A) {
534   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
535   if (getState().isAtFixpoint())
536     return HasChanged;
537 
538   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
539 
540   HasChanged = updateImpl(A);
541 
542   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
543                     << "\n");
544 
545   return HasChanged;
546 }
547 
/// Manifest the attributes in \p DeducedAttrs at position \p IRP in the IR.
/// Returns CHANGED iff at least one attribute was added or improved.
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following some generic code that will manifest attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.

  // Fetch the attribute list of the IR entity the position is anchored at;
  // float/invalid positions have no IR spot to attach attributes to.
  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  // Merge the deduced attributes into the list; only count real improvements.
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  // Write the extended attribute list back to where it came from.
  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}
605 
// Sentinel positions (empty/tombstone keys for DenseMap-style hashing,
// judging by the names); 255/256 lie outside the range of valid position
// kinds and argument numbers.
const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);
608 
/// Collect \p IRP and all positions that subsume it, e.g., for a call site
/// argument also the callee argument, the callee function, and the value
/// position. The queried position itself is always placed first.
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    // Arguments and return values are subsumed by their function.
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
661 
662 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
663                          bool IgnoreSubsumingPositions) const {
664   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
665     for (Attribute::AttrKind AK : AKs)
666       if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
667         return true;
668     // The first position returned by the SubsumingPositionIterator is
669     // always the position itself. If we ignore subsuming positions we
670     // are done after the first iteration.
671     if (IgnoreSubsumingPositions)
672       break;
673   }
674   return false;
675 }
676 
677 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
678                           SmallVectorImpl<Attribute> &Attrs,
679                           bool IgnoreSubsumingPositions) const {
680   for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
681     for (Attribute::AttrKind AK : AKs) {
682       const Attribute &Attr = EquivIRP.getAttr(AK);
683       if (Attr.getKindAsEnum() == AK)
684         Attrs.push_back(Attr);
685     }
686     // The first position returned by the SubsumingPositionIterator is
687     // always the position itself. If we ignore subsuming positions we
688     // are done after the first iteration.
689     if (IgnoreSubsumingPositions)
690       break;
691   }
692 }
693 
/// Check the internal invariants of this position. All checks are asserts,
/// so this is a no-op in release (NDEBUG) builds.
void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    // Non-negative values that are no special kind encode (call site)
    // argument numbers.
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (isa<Argument>(AnchorVal)) {
      assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
             "Associated value mismatch!");
    } else {
      assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
                 &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}
743 
744 namespace {
745 /// Helper function to clamp a state \p S of type \p StateType with the
746 /// information in \p R and indicate/return if \p S did change (as-in update is
747 /// required to be run again).
748 template <typename StateType>
749 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
750   auto Assumed = S.getAssumed();
751   S ^= R;
752   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
753                                    : ChangeStatus::CHANGED;
754 }
755 
/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
///
/// If any returned value has an invalid state, or not all returned values can
/// be inspected, \p S is pessimistically fixed.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    // Join this value's state into the accumulator (first value initializes
    // it).
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    // Abort early once the joined state became invalid.
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
796 
797 /// Helper class to compose two generic deduction
798 template <typename AAType, typename Base, typename StateType,
799           template <typename...> class F, template <typename...> class G>
800 struct AAComposeTwoGenericDeduction
801     : public F<AAType, G<AAType, Base, StateType>, StateType> {
802   AAComposeTwoGenericDeduction(const IRPosition &IRP)
803       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
804 
805   /// See AbstractAttribute::updateImpl(...).
806   ChangeStatus updateImpl(Attributor &A) override {
807     ChangeStatus ChangedF =
808         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
809     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
810     return ChangedF | ChangedG;
811   }
812 };
813 
814 /// Helper class for generic deduction: return value -> returned position.
815 template <typename AAType, typename Base,
816           typename StateType = typename Base::StateType>
817 struct AAReturnedFromReturnedValues : public Base {
818   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
819 
820   /// See AbstractAttribute::updateImpl(...).
821   ChangeStatus updateImpl(Attributor &A) override {
822     StateType S(StateType::getBestState(this->getState()));
823     clampReturnedValueStates<AAType, StateType>(A, *this, S);
824     // TODO: If we know we visited all returned values, thus no are assumed
825     // dead, we can take the known information from the state T.
826     return clampStateAndIndicateChange<StateType>(this->getState(), S);
827   }
828 };
829 
/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
///
/// For every (abstract) call site, the state deduced for the corresponding
/// call site argument is joined into \p S. If not all call sites are known or
/// one could not be analyzed, \p S is moved to a pessimistic fixpoint.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and we
  // want to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    // Query the attribute deduced for this particular call site argument.
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    // The first call site initializes T, subsequent ones are joined in.
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    // Abort the traversal as soon as the joined state became invalid.
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    // Join the accumulated state into the caller-provided state.
    S ^= *T;
}
876 
877 /// Helper class for generic deduction: call site argument -> argument position.
878 template <typename AAType, typename Base,
879           typename StateType = typename AAType::StateType>
880 struct AAArgumentFromCallSiteArguments : public Base {
881   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
882 
883   /// See AbstractAttribute::updateImpl(...).
884   ChangeStatus updateImpl(Attributor &A) override {
885     StateType S(StateType::getBestState(this->getState()));
886     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
887     // TODO: If we know we visited all incoming values, thus no are assumed
888     // dead, we can take the known information from the state T.
889     return clampStateAndIndicateChange<StateType>(this->getState(), S);
890   }
891 };
892 
893 /// Helper class for generic replication: function returned -> cs returned.
894 template <typename AAType, typename Base,
895           typename StateType = typename Base::StateType>
896 struct AACallSiteReturnedFromReturned : public Base {
897   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
898 
899   /// See AbstractAttribute::updateImpl(...).
900   ChangeStatus updateImpl(Attributor &A) override {
901     assert(this->getIRPosition().getPositionKind() ==
902                IRPosition::IRP_CALL_SITE_RETURNED &&
903            "Can only wrap function returned positions for call site returned "
904            "positions!");
905     auto &S = this->getState();
906 
907     const Function *AssociatedFunction =
908         this->getIRPosition().getAssociatedFunction();
909     if (!AssociatedFunction)
910       return S.indicatePessimisticFixpoint();
911 
912     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
913     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
914     return clampStateAndIndicateChange(
915         S, static_cast<const StateType &>(AA.getState()));
916   }
917 };
918 
/// Helper class for generic deduction using must-be-executed-context
/// Base class is required to have `followUse` method.

/// bool followUse(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// `followUse` returns true if the value should be tracked transitively.

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::initialize(...).
  ///
  /// Seeds \p Uses with all direct uses of the associated value, provided a
  /// context instruction is available.
  void initialize(Attributor &A) override {
    Base::initialize(A);
    const IRPosition &IRP = this->getIRPosition();
    Instruction *CtxI = IRP.getCtxI();

    // Without a context instruction there is no must-be-executed context to
    // explore.
    if (!CtxI)
      return;

    for (const Use &U : IRP.getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Snapshot the state so we can detect modifications made by followUse.
    auto BeforeState = this->getState();
    auto &S = this->getState();
    Instruction *CtxI = this->getIRPosition().getCtxI();
    if (!CtxI)
      return ChangeStatus::UNCHANGED;

    MustBeExecutedContextExplorer &Explorer =
        A.getInfoCache().getMustBeExecutedContextExplorer();

    auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
    // Iterate by index, not by iterator: the body below may append new uses
    // to \p Uses while we traverse it.
    for (unsigned u = 0; u < Uses.size(); ++u) {
      const Use *U = Uses[u];
      if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
        bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
        // If the user is executed in the must-be-executed context and
        // followUse asks for transitive tracking, queue the user's own uses.
        if (Found && Base::followUse(A, U, UserI))
          for (const Use &Us : UserI->uses())
            Uses.insert(&Us);
      }
    }

    // Report a change iff the state was modified by the followUse callbacks.
    return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

private:
  /// Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
};
973 
/// Deduction for an argument position that combines the
/// must-be-executed-context scheme with clamping of all call site argument
/// states.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AAArgumentFromCallSiteArguments>;

/// Deduction for a call site returned position that combines the
/// must-be-executed-context scheme with replication of the callee's returned
/// position state.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AACallSiteReturnedFromReturned>;
987 
988 /// -----------------------NoUnwind Function Attribute--------------------------
989 
990 struct AANoUnwindImpl : AANoUnwind {
991   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
992 
993   const std::string getAsStr() const override {
994     return getAssumed() ? "nounwind" : "may-unwind";
995   }
996 
997   /// See AbstractAttribute::updateImpl(...).
998   ChangeStatus updateImpl(Attributor &A) override {
999     auto Opcodes = {
1000         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1001         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1002         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1003 
1004     auto CheckForNoUnwind = [&](Instruction &I) {
1005       if (!I.mayThrow())
1006         return true;
1007 
1008       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1009         const auto &NoUnwindAA =
1010             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
1011         return NoUnwindAA.isAssumedNoUnwind();
1012       }
1013       return false;
1014     };
1015 
1016     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
1017       return indicatePessimisticFixpoint();
1018 
1019     return ChangeStatus::UNCHANGED;
1020   }
1021 };
1022 
/// NoUnwind attribute deduction for a function.
struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};
1029 
1030 /// NoUnwind attribute deduction for a call sites.
1031 struct AANoUnwindCallSite final : AANoUnwindImpl {
1032   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
1033 
1034   /// See AbstractAttribute::initialize(...).
1035   void initialize(Attributor &A) override {
1036     AANoUnwindImpl::initialize(A);
1037     Function *F = getAssociatedFunction();
1038     if (!F)
1039       indicatePessimisticFixpoint();
1040   }
1041 
1042   /// See AbstractAttribute::updateImpl(...).
1043   ChangeStatus updateImpl(Attributor &A) override {
1044     // TODO: Once we have call site specific value information we can provide
1045     //       call site specific liveness information and then it makes
1046     //       sense to specialize attributes for call sites arguments instead of
1047     //       redirecting requests to the callee argument.
1048     Function *F = getAssociatedFunction();
1049     const IRPosition &FnPos = IRPosition::function(*F);
1050     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
1051     return clampStateAndIndicateChange(
1052         getState(),
1053         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
1054   }
1055 
1056   /// See AbstractAttribute::trackStatistics()
1057   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1058 };
1059 
/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    // Without an associated function nothing can be deduced.
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        // A "returned" argument is returned by every return instruction;
        // record that and fix the state optimistically.
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    // Without an exact definition the body cannot be analyzed reliably.
    if (!F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};
1184 
1185 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1186   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1187 
1188   // Bookkeeping.
1189   assert(isValidState());
1190   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1191                   "Number of function with known return values");
1192 
1193   // Check if we have an assumed unique return value that we could manifest.
1194   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1195 
1196   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1197     return Changed;
1198 
1199   // Bookkeeping.
1200   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1201                   "Number of function with unique return");
1202 
1203   // Callback to replace the uses of CB with the constant C.
1204   auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
1205     if (CB.getNumUses() == 0 || CB.isMustTailCall())
1206       return ChangeStatus::UNCHANGED;
1207     replaceAllInstructionUsesWith(CB, C);
1208     return ChangeStatus::CHANGED;
1209   };
1210 
1211   // If the assumed unique return value is an argument, annotate it.
1212   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1213     // TODO: This should be handled differently!
1214     this->AnchorVal = UniqueRVArg;
1215     this->KindOrArgNo = UniqueRVArg->getArgNo();
1216     Changed = IRAttribute::manifest(A);
1217   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
1218     // We can replace the returned value with the unique returned constant.
1219     Value &AnchorValue = getAnchorValue();
1220     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
1221       for (const Use &U : F->uses())
1222         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
1223           if (CB->isCallee(&U)) {
1224             Constant *RVCCast =
1225                 CB->getType() == RVC->getType()
1226                     ? RVC
1227                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
1228             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
1229           }
1230     } else {
1231       assert(isa<CallBase>(AnchorValue) &&
1232              "Expcected a function or call base anchor!");
1233       Constant *RVCCast =
1234           AnchorValue.getType() == RVC->getType()
1235               ? RVC
1236               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
1237       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
1238     }
1239     if (Changed == ChangeStatus::CHANGED)
1240       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1241                       "Number of function returns replaced by constant return");
1242   }
1243 
1244   return Changed;
1245 }
1246 
1247 const std::string AAReturnedValuesImpl::getAsStr() const {
1248   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1249          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1250          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1251 }
1252 
Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  // If we cannot visit all returned values we must conservatively deny a
  // unique one.
  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}
1284 
1285 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1286     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
1287         &Pred) const {
1288   if (!isValidState())
1289     return false;
1290 
1291   // Check all returned values but ignore call sites as long as we have not
1292   // encountered an overdefined one during an update.
1293   for (auto &It : ReturnedValues) {
1294     Value *RV = It.first;
1295 
1296     CallBase *CB = dyn_cast<CallBase>(RV);
1297     if (CB && !UnresolvedCalls.count(CB))
1298       continue;
1299 
1300     if (!Pred(*RV, It.second))
1301       return false;
1302   }
1303 
1304   return true;
1305 }
1306 
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  // Remember the count so we can detect newly discovered unresolved calls.
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    // Record the association Val -> return instructions; a growth of the set
    // signals a change.
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live returned instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    // Only returned calls that are not already marked unresolved need work.
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    // Indirect calls cannot be resolved; mark them unresolved permanently.
    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible, thus
    // if all return value can be represented in the current scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      // Arguments, calls, and constants are translatable below; anything else
      // is not.
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    // Avoid rescanning a callee whose returned value count did not change.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the traversal
        // again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need to
        // do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  // Newly discovered unresolved calls also constitute a change.
  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}
1462 
/// Returned values information for a function.
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  /// Tracked as an argument attribute because a unique returned value is
  /// manifested as a "returned" argument attribute (see manifest()).
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
1469 
/// Returned values information for call sites.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1492 
/// ------------------------ NoSync Function Attribute -------------------------

/// Shared implementation of the nosync deduction for functions and call sites.
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if intrinsic is volatile (memcpy, memmove,
  /// memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};
1517 
1518 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1519   if (!I->isAtomic())
1520     return false;
1521 
1522   AtomicOrdering Ordering;
1523   switch (I->getOpcode()) {
1524   case Instruction::AtomicRMW:
1525     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1526     break;
1527   case Instruction::Store:
1528     Ordering = cast<StoreInst>(I)->getOrdering();
1529     break;
1530   case Instruction::Load:
1531     Ordering = cast<LoadInst>(I)->getOrdering();
1532     break;
1533   case Instruction::Fence: {
1534     auto *FI = cast<FenceInst>(I);
1535     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1536       return false;
1537     Ordering = FI->getOrdering();
1538     break;
1539   }
1540   case Instruction::AtomicCmpXchg: {
1541     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1542     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1543     // Only if both are relaxed, than it can be treated as relaxed.
1544     // Otherwise it is non-relaxed.
1545     if (Success != AtomicOrdering::Unordered &&
1546         Success != AtomicOrdering::Monotonic)
1547       return true;
1548     if (Failure != AtomicOrdering::Unordered &&
1549         Failure != AtomicOrdering::Monotonic)
1550       return true;
1551     return false;
1552   }
1553   default:
1554     llvm_unreachable(
1555         "New atomic operations need to be known in the attributor.");
1556   }
1557 
1558   // Relaxed.
1559   if (Ordering == AtomicOrdering::Unordered ||
1560       Ordering == AtomicOrdering::Monotonic)
1561     return false;
1562   return true;
1563 }
1564 
1565 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1566 /// FIXME: We should ipmrove the handling of intrinsics.
1567 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1568   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1569     switch (II->getIntrinsicID()) {
1570     /// Element wise atomic memory intrinsics are can only be unordered,
1571     /// therefore nosync.
1572     case Intrinsic::memset_element_unordered_atomic:
1573     case Intrinsic::memmove_element_unordered_atomic:
1574     case Intrinsic::memcpy_element_unordered_atomic:
1575       return true;
1576     case Intrinsic::memset:
1577     case Intrinsic::memmove:
1578     case Intrinsic::memcpy:
1579       if (!cast<MemIntrinsic>(II)->isVolatile())
1580         return true;
1581       return false;
1582     default:
1583       return false;
1584     }
1585   }
1586   return false;
1587 }
1588 
1589 bool AANoSyncImpl::isVolatile(Instruction *I) {
1590   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1591          "Calls should not be checked here");
1592 
1593   switch (I->getOpcode()) {
1594   case Instruction::AtomicRMW:
1595     return cast<AtomicRMWInst>(I)->isVolatile();
1596   case Instruction::Store:
1597     return cast<StoreInst>(I)->isVolatile();
1598   case Instruction::Load:
1599     return cast<LoadInst>(I)->isVolatile();
1600   case Instruction::AtomicCmpXchg:
1601     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1602   default:
1603     return false;
1604   }
1605 }
1606 
ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  // Predicate for every read/write instruction in the scope: it must be
  // known or assumed not to synchronize with other threads.
  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or Non-Relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    // Known nosync intrinsics (e.g., non-volatile mem* calls) are fine.
    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    // For call sites, rely on an explicit nosync attribute or the callee's
    // AANoSync state.
    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
      if (ICS.hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    // Non-call accesses synchronize only if volatile or non-relaxed atomic.
    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  // Predicate for every call-like instruction that does not access memory.
  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !ImmutableCallSite(&I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}
1649 
/// NoSync attribute deduction for a function; all logic lives in the shared
/// AANoSyncImpl, this class only contributes statistics tracking.
struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
1656 
1657 /// NoSync attribute deduction for a call sites.
1658 struct AANoSyncCallSite final : AANoSyncImpl {
1659   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1660 
1661   /// See AbstractAttribute::initialize(...).
1662   void initialize(Attributor &A) override {
1663     AANoSyncImpl::initialize(A);
1664     Function *F = getAssociatedFunction();
1665     if (!F)
1666       indicatePessimisticFixpoint();
1667   }
1668 
1669   /// See AbstractAttribute::updateImpl(...).
1670   ChangeStatus updateImpl(Attributor &A) override {
1671     // TODO: Once we have call site specific value information we can provide
1672     //       call site specific liveness information and then it makes
1673     //       sense to specialize attributes for call sites arguments instead of
1674     //       redirecting requests to the callee argument.
1675     Function *F = getAssociatedFunction();
1676     const IRPosition &FnPos = IRPosition::function(*F);
1677     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1678     return clampStateAndIndicateChange(
1679         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1680   }
1681 
1682   /// See AbstractAttribute::trackStatistics()
1683   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1684 };
1685 
1686 /// ------------------------ No-Free Attributes ----------------------------
1687 
1688 struct AANoFreeImpl : public AANoFree {
1689   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1690 
1691   /// See AbstractAttribute::updateImpl(...).
1692   ChangeStatus updateImpl(Attributor &A) override {
1693     auto CheckForNoFree = [&](Instruction &I) {
1694       ImmutableCallSite ICS(&I);
1695       if (ICS.hasFnAttr(Attribute::NoFree))
1696         return true;
1697 
1698       const auto &NoFreeAA =
1699           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1700       return NoFreeAA.isAssumedNoFree();
1701     };
1702 
1703     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1704       return indicatePessimisticFixpoint();
1705     return ChangeStatus::UNCHANGED;
1706   }
1707 
1708   /// See AbstractAttribute::getAsStr().
1709   const std::string getAsStr() const override {
1710     return getAssumed() ? "nofree" : "may-free";
1711   }
1712 };
1713 
/// NoFree attribute deduction for a function; all logic lives in the shared
/// AANoFreeImpl, this class only contributes statistics tracking.
struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
1720 
1721 /// NoFree attribute deduction for a call sites.
1722 struct AANoFreeCallSite final : AANoFreeImpl {
1723   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1724 
1725   /// See AbstractAttribute::initialize(...).
1726   void initialize(Attributor &A) override {
1727     AANoFreeImpl::initialize(A);
1728     Function *F = getAssociatedFunction();
1729     if (!F)
1730       indicatePessimisticFixpoint();
1731   }
1732 
1733   /// See AbstractAttribute::updateImpl(...).
1734   ChangeStatus updateImpl(Attributor &A) override {
1735     // TODO: Once we have call site specific value information we can provide
1736     //       call site specific liveness information and then it makes
1737     //       sense to specialize attributes for call sites arguments instead of
1738     //       redirecting requests to the callee argument.
1739     Function *F = getAssociatedFunction();
1740     const IRPosition &FnPos = IRPosition::function(*F);
1741     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1742     return clampStateAndIndicateChange(
1743         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1744   }
1745 
1746   /// See AbstractAttribute::trackStatistics()
1747   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1748 };
1749 
1750 /// NoFree attribute for floating values.
1751 struct AANoFreeFloating : AANoFreeImpl {
1752   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1753 
1754   /// See AbstractAttribute::trackStatistics()
1755   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1756 
1757   /// See Abstract Attribute::updateImpl(...).
1758   ChangeStatus updateImpl(Attributor &A) override {
1759     const IRPosition &IRP = getIRPosition();
1760 
1761     const auto &NoFreeAA =
1762         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1763     if (NoFreeAA.isAssumedNoFree())
1764       return ChangeStatus::UNCHANGED;
1765 
1766     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1767     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1768       Instruction *UserI = cast<Instruction>(U.getUser());
1769       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1770         if (CB->isBundleOperand(&U))
1771           return false;
1772         if (!CB->isArgOperand(&U))
1773           return true;
1774         unsigned ArgNo = CB->getArgOperandNo(&U);
1775 
1776         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1777             *this, IRPosition::callsite_argument(*CB, ArgNo));
1778         return NoFreeArg.isAssumedNoFree();
1779       }
1780 
1781       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1782           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1783         Follow = true;
1784         return true;
1785       }
1786 
1787       // Unknown user.
1788       return false;
1789     };
1790     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1791       return indicatePessimisticFixpoint();
1792 
1793     return ChangeStatus::UNCHANGED;
1794   }
1795 };
1796 
/// NoFree attribute for a function argument. (Deduction is inherited from
/// AANoFreeFloating; this class only contributes the argument statistic.)
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};
1804 
1805 /// NoFree attribute for call site arguments.
1806 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1807   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1808 
1809   /// See AbstractAttribute::updateImpl(...).
1810   ChangeStatus updateImpl(Attributor &A) override {
1811     // TODO: Once we have call site specific value information we can provide
1812     //       call site specific liveness information and then it makes
1813     //       sense to specialize attributes for call sites arguments instead of
1814     //       redirecting requests to the callee argument.
1815     Argument *Arg = getAssociatedArgument();
1816     if (!Arg)
1817       return indicatePessimisticFixpoint();
1818     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1819     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1820     return clampStateAndIndicateChange(
1821         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1822   }
1823 
1824   /// See AbstractAttribute::trackStatistics()
1825   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1826 };
1827 
/// NoFree attribute for function return value.
///
/// Placeholder only: the llvm_unreachable calls below indicate this position
/// is never expected to be instantiated or queried.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
1847 
1848 /// NoFree attribute deduction for a call site return value.
1849 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1850   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1851 
1852   ChangeStatus manifest(Attributor &A) override {
1853     return ChangeStatus::UNCHANGED;
1854   }
1855   /// See AbstractAttribute::trackStatistics()
1856   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1857 };
1858 
1859 /// ------------------------ NonNull Argument Attribute ------------------------
/// Determine the number of bytes known to be dereferenceable because of the
/// use \p U of \p AssociatedValue in instruction \p I. \p IsNonNull is set
/// when the use additionally implies the pointer is nonnull; \p TrackUse is
/// set when \p I merely forwards the pointer and its own uses should be
/// inspected instead.
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  // Only pointer-typed values can be nonnull/dereferenceable.
  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  // Whether null is a valid memory location in this address space; without a
  // function context we conservatively assume it is.
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    // Operand bundle uses tell us nothing.
    if (ICS.isBundleOperand(U))
      return 0;

    // A called function pointer must be nonnull (unless null is defined).
    if (ICS.isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    // For call arguments, forward the query to the call site argument
    // position's dereferenceability AA.
    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
                                                  /* TrackDependence */ false);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart to avoid looking through things we do not
  // like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllConstantIndices()) {
      TrackUse = true;
      return 0;
    }

  int64_t Offset;
  // A real (inbounds) access through the pointer dereferences
  // Offset + store-size bytes and, if null is not defined, implies nonnull.
  if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
    if (Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;

      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  /// Corner case when an offset is 0.
  // Even through a non-inbounds base we can conclude dereferenceability when
  // the access hits exactly offset 0 of the associated value.
  if (const Value *Base = getBasePointerOfAccessPointerOperand(
          I, Offset, DL, /*AllowNonInbounds*/ true)) {
    if (Offset == 0 && Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  return 0;
}
1932 
1933 struct AANonNullImpl : AANonNull {
1934   AANonNullImpl(const IRPosition &IRP)
1935       : AANonNull(IRP),
1936         NullIsDefined(NullPointerIsDefined(
1937             getAnchorScope(),
1938             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1939 
1940   /// See AbstractAttribute::initialize(...).
1941   void initialize(Attributor &A) override {
1942     if (!NullIsDefined &&
1943         hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
1944       indicateOptimisticFixpoint();
1945     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1946       indicatePessimisticFixpoint();
1947     else
1948       AANonNull::initialize(A);
1949   }
1950 
1951   /// See AAFromMustBeExecutedContext
1952   bool followUse(Attributor &A, const Use *U, const Instruction *I) {
1953     bool IsNonNull = false;
1954     bool TrackUse = false;
1955     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1956                                        IsNonNull, TrackUse);
1957     setKnown(IsNonNull);
1958     return TrackUse;
1959   }
1960 
1961   /// See AbstractAttribute::getAsStr().
1962   const std::string getAsStr() const override {
1963     return getAssumed() ? "nonnull" : "may-null";
1964   }
1965 
1966   /// Flag to determine if the underlying value can be null and still allow
1967   /// valid accesses.
1968   const bool NullIsDefined;
1969 };
1970 
1971 /// NonNull attribute for a floating value.
1972 struct AANonNullFloating
1973     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1974   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1975   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1976 
1977   /// See AbstractAttribute::updateImpl(...).
1978   ChangeStatus updateImpl(Attributor &A) override {
1979     ChangeStatus Change = Base::updateImpl(A);
1980     if (isKnownNonNull())
1981       return Change;
1982 
1983     if (!NullIsDefined) {
1984       const auto &DerefAA =
1985           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1986       if (DerefAA.getAssumedDereferenceableBytes())
1987         return Change;
1988     }
1989 
1990     const DataLayout &DL = A.getDataLayout();
1991 
1992     DominatorTree *DT = nullptr;
1993     InformationCache &InfoCache = A.getInfoCache();
1994     if (const Function *Fn = getAnchorScope())
1995       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1996 
1997     auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
1998                             bool Stripped) -> bool {
1999       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
2000       if (!Stripped && this == &AA) {
2001         if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, getCtxI(), DT))
2002           T.indicatePessimisticFixpoint();
2003       } else {
2004         // Use abstract attribute information.
2005         const AANonNull::StateType &NS =
2006             static_cast<const AANonNull::StateType &>(AA.getState());
2007         T ^= NS;
2008       }
2009       return T.isValidState();
2010     };
2011 
2012     StateType T;
2013     if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
2014                                                      T, VisitValueCB))
2015       return indicatePessimisticFixpoint();
2016 
2017     return clampStateAndIndicateChange(getState(), T);
2018   }
2019 
2020   /// See AbstractAttribute::trackStatistics()
2021   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2022 };
2023 
2024 /// NonNull attribute for function return value.
2025 struct AANonNullReturned final
2026     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
2027   AANonNullReturned(const IRPosition &IRP)
2028       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
2029 
2030   /// See AbstractAttribute::trackStatistics()
2031   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2032 };
2033 
2034 /// NonNull attribute for function argument.
2035 struct AANonNullArgument final
2036     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
2037                                                               AANonNullImpl> {
2038   AANonNullArgument(const IRPosition &IRP)
2039       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
2040                                                                 AANonNullImpl>(
2041             IRP) {}
2042 
2043   /// See AbstractAttribute::trackStatistics()
2044   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2045 };
2046 
/// NonNull attribute for a call site argument; deduction is inherited from
/// AANonNullFloating, this class only contributes statistics tracking.
struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
2053 
2054 /// NonNull attribute for a call site return position.
2055 struct AANonNullCallSiteReturned final
2056     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
2057                                                              AANonNullImpl> {
2058   AANonNullCallSiteReturned(const IRPosition &IRP)
2059       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
2060                                                                AANonNullImpl>(
2061             IRP) {}
2062 
2063   /// See AbstractAttribute::trackStatistics()
2064   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2065 };
2066 
2067 /// ------------------------ No-Recurse Attributes ----------------------------
2068 
/// Shared implementation of the "norecurse" abstract attribute.
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};
2077 
/// NoRecurse attribute deduction for a function.
struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    // A function in a non-trivial SCC is (mutually) recursive, give up early.
    if (const Function *F = getAnchorScope())
      if (A.getInfoCache().getSccSize(*F) != 1)
        indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
    auto CallSitePred = [&](AbstractCallSite ACS) {
      // Query the *calling* function's norecurse state; only known (not
      // assumed) information is used, hence no dependence is tracked.
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          /* TrackDependence */ false, DepClassTy::OPTIONAL);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (AllCallSitesKnown)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    // If the above check does not hold anymore we look at the calls.
    auto CheckForNoRecurse = [&](Instruction &I) {
      ImmutableCallSite ICS(&I);
      if (ICS.hasFnAttr(Attribute::NoRecurse))
        return true;

      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
      if (!NoRecurseAA.isAssumedNoRecurse())
        return false;

      // Recursion to the same function
      if (ICS.getCalledFunction() == getAnchorScope())
        return false;

      return true;
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};
2136 
2137 /// NoRecurse attribute deduction for a call sites.
2138 struct AANoRecurseCallSite final : AANoRecurseImpl {
2139   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
2140 
2141   /// See AbstractAttribute::initialize(...).
2142   void initialize(Attributor &A) override {
2143     AANoRecurseImpl::initialize(A);
2144     Function *F = getAssociatedFunction();
2145     if (!F)
2146       indicatePessimisticFixpoint();
2147   }
2148 
2149   /// See AbstractAttribute::updateImpl(...).
2150   ChangeStatus updateImpl(Attributor &A) override {
2151     // TODO: Once we have call site specific value information we can provide
2152     //       call site specific liveness information and then it makes
2153     //       sense to specialize attributes for call sites arguments instead of
2154     //       redirecting requests to the callee argument.
2155     Function *F = getAssociatedFunction();
2156     const IRPosition &FnPos = IRPosition::function(*F);
2157     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
2158     return clampStateAndIndicateChange(
2159         getState(),
2160         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
2161   }
2162 
2163   /// See AbstractAttribute::trackStatistics()
2164   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2165 };
2166 
2167 /// -------------------- Undefined-Behavior Attributes ------------------------
2168 
2169 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2170   AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
2171 
  /// See AbstractAttribute::updateImpl(...).
  ///
  /// Inspect memory accesses through a pointer as well as conditional
  /// branches, and classify each inspected instruction as known-UB or
  /// assumed-not-UB.
  ChangeStatus updateImpl(Attributor &A) override {
    // Remember the previous set sizes so we can report whether this update
    // changed anything.
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOp)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOp->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    // Only the opcodes listed below can exhibit the UB patterns checked by
    // the two predicates above.
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW});
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br});
    // CHANGED iff either set grew during this update (sets only ever grow).
    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }
2247 
2248   bool isKnownToCauseUB(Instruction *I) const override {
2249     return KnownUBInsts.count(I);
2250   }
2251 
2252   bool isAssumedToCauseUB(Instruction *I) const override {
2253     // In simple words, if an instruction is not in the assumed to _not_
2254     // cause UB, then it is assumed UB (that includes those
2255     // in the KnownUBInsts set). The rest is boilerplate
2256     // is to ensure that it is one of the instructions we test
2257     // for UB.
2258 
2259     switch (I->getOpcode()) {
2260     case Instruction::Load:
2261     case Instruction::Store:
2262     case Instruction::AtomicCmpXchg:
2263     case Instruction::AtomicRMW:
2264       return !AssumedNoUBInsts.count(I);
2265     case Instruction::Br: {
2266       auto BrInst = cast<BranchInst>(I);
2267       if (BrInst->isUnconditional())
2268         return false;
2269       return !AssumedNoUBInsts.count(I);
2270     } break;
2271     default:
2272       return false;
2273     }
2274     return false;
2275   }
2276 
2277   ChangeStatus manifest(Attributor &A) override {
2278     if (KnownUBInsts.empty())
2279       return ChangeStatus::UNCHANGED;
2280     for (Instruction *I : KnownUBInsts)
2281       A.changeToUnreachableAfterManifest(I);
2282     return ChangeStatus::CHANGED;
2283   }
2284 
  /// See AbstractAttribute::getAsStr()
  // Debug-output string reflecting the currently assumed state.
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }
2289 
2290   /// Note: The correctness of this analysis depends on the fact that the
2291   /// following 2 sets will stop changing after some point.
2292   /// "Change" here means that their size changes.
2293   /// The size of each set is monotonically increasing
2294   /// (we only add items to them) and it is upper bounded by the number of
2295   /// instructions in the processed function (we can never save more
2296   /// elements in either set than this number). Hence, at some point,
2297   /// they will stop increasing.
2298   /// Consequently, at some point, both sets will have stopped
2299   /// changing, effectively making the analysis reach a fixpoint.
2300 
2301   /// Note: These 2 sets are disjoint and an instruction can be considered
2302   /// one of 3 things:
2303   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2304   ///    the KnownUBInsts set.
2305   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2306   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2308   ///    could not find a reason to assume or prove that it can cause UB,
2309   ///    hence it assumes it doesn't. We have a set for these instructions
2310   ///    so that we don't reprocess them in every update.
2311   ///    Note however that instructions in this set may cause UB.
2312 
2313 protected:
2314   /// A set of all live instructions _known_ to cause UB.
2315   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2316 
2317 private:
2318   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2319   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2320 
  // Should be called on updates in which we're processing an instruction
  // \p I that depends on a value \p V. One of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
2330   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2331                                          Instruction *I) {
2332     const auto &ValueSimplifyAA =
2333         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2334     Optional<Value *> SimplifiedV =
2335         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2336     if (!ValueSimplifyAA.isKnown()) {
2337       // Don't depend on assumed values.
2338       return llvm::None;
2339     }
2340     if (!SimplifiedV.hasValue()) {
2341       // If it is known (which we tested above) but it doesn't have a value,
2342       // then we can assume `undef` and hence the instruction is UB.
2343       KnownUBInsts.insert(I);
2344       return llvm::None;
2345     }
2346     Value *Val = SimplifiedV.getValue();
2347     if (isa<UndefValue>(Val)) {
2348       KnownUBInsts.insert(I);
2349       return llvm::None;
2350     }
2351     return Val;
2352   }
2353 };
2354 
/// UndefinedBehavior deduction for a function position.
struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP)
      : AAUndefinedBehaviorImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Count every instruction proven to cause UB; KnownUBInsts is maintained
    // by the AAUndefinedBehaviorImpl base class.
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
2367 
2368 /// ------------------------ Will-Return Attributes ----------------------------
2369 
2370 // Helper function that checks whether a function has any cycle.
2371 // TODO: Replace with more efficent code
2372 static bool containsCycle(Function &F) {
2373   SmallPtrSet<BasicBlock *, 32> Visited;
2374 
2375   // Traverse BB by dfs and check whether successor is already visited.
2376   for (BasicBlock *BB : depth_first(&F)) {
2377     Visited.insert(BB);
2378     for (auto *SuccBB : successors(BB)) {
2379       if (Visited.count(SuccBB))
2380         return true;
2381     }
2382   }
2383   return false;
2384 }
2385 
2386 // Helper function that checks the function have a loop which might become an
2387 // endless loop
2388 // FIXME: Any cycle is regarded as endless loop for now.
2389 //        We have to allow some patterns.
2390 static bool containsPossiblyEndlessLoop(Function *F) {
2391   return !F || !F->hasExactDefinition() || containsCycle(*F);
2392 }
2393 
2394 struct AAWillReturnImpl : public AAWillReturn {
2395   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2396 
2397   /// See AbstractAttribute::initialize(...).
2398   void initialize(Attributor &A) override {
2399     AAWillReturn::initialize(A);
2400 
2401     Function *F = getAssociatedFunction();
2402     if (containsPossiblyEndlessLoop(F))
2403       indicatePessimisticFixpoint();
2404   }
2405 
2406   /// See AbstractAttribute::updateImpl(...).
2407   ChangeStatus updateImpl(Attributor &A) override {
2408     auto CheckForWillReturn = [&](Instruction &I) {
2409       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2410       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2411       if (WillReturnAA.isKnownWillReturn())
2412         return true;
2413       if (!WillReturnAA.isAssumedWillReturn())
2414         return false;
2415       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2416       return NoRecurseAA.isAssumedNoRecurse();
2417     };
2418 
2419     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2420       return indicatePessimisticFixpoint();
2421 
2422     return ChangeStatus::UNCHANGED;
2423   }
2424 
2425   /// See AbstractAttribute::getAsStr()
2426   const std::string getAsStr() const override {
2427     return getAssumed() ? "willreturn" : "may-noreturn";
2428   }
2429 };
2430 
/// WillReturn attribute deduction for a function.
struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};
2437 
2438 /// WillReturn attribute deduction for a call sites.
2439 struct AAWillReturnCallSite final : AAWillReturnImpl {
2440   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2441 
2442   /// See AbstractAttribute::initialize(...).
2443   void initialize(Attributor &A) override {
2444     AAWillReturnImpl::initialize(A);
2445     Function *F = getAssociatedFunction();
2446     if (!F)
2447       indicatePessimisticFixpoint();
2448   }
2449 
2450   /// See AbstractAttribute::updateImpl(...).
2451   ChangeStatus updateImpl(Attributor &A) override {
2452     // TODO: Once we have call site specific value information we can provide
2453     //       call site specific liveness information and then it makes
2454     //       sense to specialize attributes for call sites arguments instead of
2455     //       redirecting requests to the callee argument.
2456     Function *F = getAssociatedFunction();
2457     const IRPosition &FnPos = IRPosition::function(*F);
2458     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2459     return clampStateAndIndicateChange(
2460         getState(),
2461         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2462   }
2463 
2464   /// See AbstractAttribute::trackStatistics()
2465   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2466 };
2467 
2468 /// -------------------AAReachability Attribute--------------------------
2469 
struct AAReachabilityImpl : AAReachability {
  AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    // TODO: Return the number of reachable queries.
    return "reachable";
  }

  /// See AbstractAttribute::initialize(...).
  /// Reachability is not deduced yet, so we immediately fall into the
  /// pessimistic fixpoint and stay there.
  void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }
};
2486 
/// Reachability information for a function position.
struct AAReachabilityFunction final : public AAReachabilityImpl {
  AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
};
2493 
2494 /// ------------------------ NoAlias Argument Attribute ------------------------
2495 
2496 struct AANoAliasImpl : AANoAlias {
2497   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}
2498 
2499   const std::string getAsStr() const override {
2500     return getAssumed() ? "noalias" : "may-alias";
2501   }
2502 };
2503 
2504 /// NoAlias attribute for a floating value.
2505 struct AANoAliasFloating final : AANoAliasImpl {
2506   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2507 
2508   /// See AbstractAttribute::initialize(...).
2509   void initialize(Attributor &A) override {
2510     AANoAliasImpl::initialize(A);
2511     Value &Val = getAssociatedValue();
2512     if (isa<AllocaInst>(Val))
2513       indicateOptimisticFixpoint();
2514     else if (isa<ConstantPointerNull>(Val) &&
2515              !NullPointerIsDefined(getAnchorScope(),
2516                                    Val.getType()->getPointerAddressSpace()))
2517       indicateOptimisticFixpoint();
2518   }
2519 
2520   /// See AbstractAttribute::updateImpl(...).
2521   ChangeStatus updateImpl(Attributor &A) override {
2522     // TODO: Implement this.
2523     return indicatePessimisticFixpoint();
2524   }
2525 
2526   /// See AbstractAttribute::trackStatistics()
2527   void trackStatistics() const override {
2528     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2529   }
2530 };
2531 
2532 /// NoAlias attribute for an argument.
2533 struct AANoAliasArgument final
2534     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2535   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2536   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2537 
2538   /// See AbstractAttribute::initialize(...).
2539   void initialize(Attributor &A) override {
2540     Base::initialize(A);
2541     // See callsite argument attribute and callee argument attribute.
2542     if (hasAttr({Attribute::ByVal}))
2543       indicateOptimisticFixpoint();
2544   }
2545 
2546   /// See AbstractAttribute::update(...).
2547   ChangeStatus updateImpl(Attributor &A) override {
2548     // We have to make sure no-alias on the argument does not break
2549     // synchronization when this is a callback argument, see also [1] below.
2550     // If synchronization cannot be affected, we delegate to the base updateImpl
2551     // function, otherwise we give up for now.
2552 
2553     // If the function is no-sync, no-alias cannot break synchronization.
2554     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2555         *this, IRPosition::function_scope(getIRPosition()));
2556     if (NoSyncAA.isAssumedNoSync())
2557       return Base::updateImpl(A);
2558 
2559     // If the argument is read-only, no-alias cannot break synchronization.
2560     const auto &MemBehaviorAA =
2561         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2562     if (MemBehaviorAA.isAssumedReadOnly())
2563       return Base::updateImpl(A);
2564 
2565     // If the argument is never passed through callbacks, no-alias cannot break
2566     // synchronization.
2567     bool AllCallSitesKnown;
2568     if (A.checkForAllCallSites(
2569             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2570             true, AllCallSitesKnown))
2571       return Base::updateImpl(A);
2572 
2573     // TODO: add no-alias but make sure it doesn't break synchronization by
2574     // introducing fake uses. See:
2575     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2576     //     International Workshop on OpenMP 2018,
2577     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2578 
2579     return indicatePessimisticFixpoint();
2580   }
2581 
2582   /// See AbstractAttribute::trackStatistics()
2583   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2584 };
2585 
/// NoAlias attribute for a call site argument.
struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    ImmutableCallSite ICS(&getAnchorValue());
    // An explicit noalias attribute on the call site settles the question.
    if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    // A null pointer is noalias if null is not a defined address in the
    // pointer's address space.
    Value &Val = getAssociatedValue();
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p ICS (= the underlying call site).
  /// \p AAR is a lazily-created alias analysis object shared with the caller;
  /// it is only constructed if the cheap attribute-based checks fail.
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            ImmutableCallSite ICS, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = ICS.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    // Query without tracking a dependence; dependences are recorded manually
    // below, only on the paths that actually use the information.
    auto &ICSArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(ICS, OtherArgNo),
        /* TrackDependence */ false);

    // If the argument is readnone, there is no read-write aliasing.
    if (ICSArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (ICSArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site.
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias \n");

    return IsAliasing;
  }

  bool
  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
                                         const AAMemoryBehavior &MemBehaviorAA,
                                         const AANoAlias &NoAliasAA) {
    // We can deduce "noalias" if the following conditions hold.
    // (i)   Associated value is assumed to be noalias in the definition.
    // (ii)  Associated value is assumed to be no-capture in all the uses
    //       possibly executed before this callsite.
    // (iii) There is no other pointer argument which could alias with the
    //       value.

    bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
    if (!AssociatedValueIsNoAliasAtDef) {
      LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
                        << " is not no-alias at the definition\n");
      return false;
    }

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    auto &NoCaptureAA =
        A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
    // Check whether the value is captured in the scope using AANoCapture.
    // FIXME: This is conservative though, it is better to look at CFG and
    //        check only uses possibly executed before this callsite.
    if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      LLVM_DEBUG(
          dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
                 << " cannot be noalias as it is potentially captured\n");
      return false;
    }
    // The no-capture information was used, record the dependence now.
    A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);

    // Check there is no other pointer argument which could alias with the
    // value passed at this call site.
    // TODO: AbstractCallSite
    ImmutableCallSite ICS(&getAnchorValue());
    for (unsigned OtherArgNo = 0; OtherArgNo < ICS.getNumArgOperands();
         OtherArgNo++)
      if (mayAliasWithArgument(A, AAR, MemBehaviorAA, ICS, OtherArgNo))
        return false;

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // If the argument is readnone we are done as there are no accesses via the
    // argument.
    auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
                                     /* TrackDependence */ false);
    if (MemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
                                                  /* TrackDependence */ false);

    // Lazily-created alias analysis results, shared by the helpers above.
    AAResults *AAR = nullptr;
    if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
                                               NoAliasAA)) {
      LLVM_DEBUG(
          dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
      return ChangeStatus::UNCHANGED;
    }

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};
2723 
2724 /// NoAlias attribute for function return value.
2725 struct AANoAliasReturned final : AANoAliasImpl {
2726   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2727 
2728   /// See AbstractAttribute::updateImpl(...).
2729   virtual ChangeStatus updateImpl(Attributor &A) override {
2730 
2731     auto CheckReturnValue = [&](Value &RV) -> bool {
2732       if (Constant *C = dyn_cast<Constant>(&RV))
2733         if (C->isNullValue() || isa<UndefValue>(C))
2734           return true;
2735 
2736       /// For now, we can only deduce noalias if we have call sites.
2737       /// FIXME: add more support.
2738       ImmutableCallSite ICS(&RV);
2739       if (!ICS)
2740         return false;
2741 
2742       const IRPosition &RVPos = IRPosition::value(RV);
2743       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2744       if (!NoAliasAA.isAssumedNoAlias())
2745         return false;
2746 
2747       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2748       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2749     };
2750 
2751     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2752       return indicatePessimisticFixpoint();
2753 
2754     return ChangeStatus::UNCHANGED;
2755   }
2756 
2757   /// See AbstractAttribute::trackStatistics()
2758   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2759 };
2760 
2761 /// NoAlias attribute deduction for a call site return value.
2762 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2763   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2764 
2765   /// See AbstractAttribute::initialize(...).
2766   void initialize(Attributor &A) override {
2767     AANoAliasImpl::initialize(A);
2768     Function *F = getAssociatedFunction();
2769     if (!F)
2770       indicatePessimisticFixpoint();
2771   }
2772 
2773   /// See AbstractAttribute::updateImpl(...).
2774   ChangeStatus updateImpl(Attributor &A) override {
2775     // TODO: Once we have call site specific value information we can provide
2776     //       call site specific liveness information and then it makes
2777     //       sense to specialize attributes for call sites arguments instead of
2778     //       redirecting requests to the callee argument.
2779     Function *F = getAssociatedFunction();
2780     const IRPosition &FnPos = IRPosition::returned(*F);
2781     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2782     return clampStateAndIndicateChange(
2783         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2784   }
2785 
2786   /// See AbstractAttribute::trackStatistics()
2787   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2788 };
2789 
2790 /// -------------------AAIsDead Function Attribute-----------------------
2791 
2792 struct AAIsDeadValueImpl : public AAIsDead {
2793   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2794 
2795   /// See AAIsDead::isAssumedDead().
2796   bool isAssumedDead() const override { return getAssumed(); }
2797 
2798   /// See AAIsDead::isKnownDead().
2799   bool isKnownDead() const override { return getKnown(); }
2800 
2801   /// See AAIsDead::isAssumedDead(BasicBlock *).
2802   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2803 
2804   /// See AAIsDead::isKnownDead(BasicBlock *).
2805   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2806 
2807   /// See AAIsDead::isAssumedDead(Instruction *I).
2808   bool isAssumedDead(const Instruction *I) const override {
2809     return I == getCtxI() && isAssumedDead();
2810   }
2811 
2812   /// See AAIsDead::isKnownDead(Instruction *I).
2813   bool isKnownDead(const Instruction *I) const override {
2814     return I == getCtxI() && getKnown();
2815   }
2816 
2817   /// See AbstractAttribute::getAsStr().
2818   const std::string getAsStr() const override {
2819     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2820   }
2821 };
2822 
2823 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2824   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2825 
2826   /// See AbstractAttribute::initialize(...).
2827   void initialize(Attributor &A) override {
2828     if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
2829       if (!wouldInstructionBeTriviallyDead(I))
2830         indicatePessimisticFixpoint();
2831     if (isa<UndefValue>(getAssociatedValue()))
2832       indicatePessimisticFixpoint();
2833   }
2834 
2835   /// See AbstractAttribute::updateImpl(...).
2836   ChangeStatus updateImpl(Attributor &A) override {
2837     auto UsePred = [&](const Use &U, bool &Follow) {
2838       Instruction *UserI = cast<Instruction>(U.getUser());
2839       if (CallSite CS = CallSite(UserI)) {
2840         if (!CS.isArgOperand(&U))
2841           return false;
2842         const IRPosition &CSArgPos =
2843             IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
2844         const auto &CSArgIsDead = A.getAAFor<AAIsDead>(*this, CSArgPos);
2845         return CSArgIsDead.isAssumedDead();
2846       }
2847       if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
2848         const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
2849         const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, RetPos);
2850         return RetIsDeadAA.isAssumedDead();
2851       }
2852       Follow = true;
2853       return wouldInstructionBeTriviallyDead(UserI);
2854     };
2855 
2856     if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
2857       return indicatePessimisticFixpoint();
2858     return ChangeStatus::UNCHANGED;
2859   }
2860 
2861   /// See AbstractAttribute::manifest(...).
2862   ChangeStatus manifest(Attributor &A) override {
2863     Value &V = getAssociatedValue();
2864     if (auto *I = dyn_cast<Instruction>(&V))
2865       if (wouldInstructionBeTriviallyDead(I)) {
2866         A.deleteAfterManifest(*I);
2867         return ChangeStatus::CHANGED;
2868       }
2869 
2870     if (V.use_empty())
2871       return ChangeStatus::UNCHANGED;
2872 
2873     UndefValue &UV = *UndefValue::get(V.getType());
2874     bool AnyChange = A.changeValueAfterManifest(V, UV);
2875     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2876   }
2877 
2878   /// See AbstractAttribute::trackStatistics()
2879   void trackStatistics() const override {
2880     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2881   }
2882 };
2883 
2884 struct AAIsDeadArgument : public AAIsDeadFloating {
2885   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2886 
2887   /// See AbstractAttribute::initialize(...).
2888   void initialize(Attributor &A) override {
2889     if (!getAssociatedFunction()->hasExactDefinition())
2890       indicatePessimisticFixpoint();
2891   }
2892 
2893   /// See AbstractAttribute::manifest(...).
2894   ChangeStatus manifest(Attributor &A) override {
2895     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2896     Argument &Arg = *getAssociatedArgument();
2897     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2898       if (A.registerFunctionSignatureRewrite(
2899               Arg, /* ReplacementTypes */ {},
2900               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2901               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
2902         return ChangeStatus::CHANGED;
2903     return Changed;
2904   }
2905 
2906   /// See AbstractAttribute::trackStatistics()
2907   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2908 };
2909 
2910 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2911   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2912 
2913   /// See AbstractAttribute::initialize(...).
2914   void initialize(Attributor &A) override {
2915     if (isa<UndefValue>(getAssociatedValue()))
2916       indicatePessimisticFixpoint();
2917   }
2918 
2919   /// See AbstractAttribute::updateImpl(...).
2920   ChangeStatus updateImpl(Attributor &A) override {
2921     // TODO: Once we have call site specific value information we can provide
2922     //       call site specific liveness information and then it makes
2923     //       sense to specialize attributes for call sites arguments instead of
2924     //       redirecting requests to the callee argument.
2925     Argument *Arg = getAssociatedArgument();
2926     if (!Arg)
2927       return indicatePessimisticFixpoint();
2928     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2929     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2930     return clampStateAndIndicateChange(
2931         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2932   }
2933 
2934   /// See AbstractAttribute::manifest(...).
2935   ChangeStatus manifest(Attributor &A) override {
2936     CallBase &CB = cast<CallBase>(getAnchorValue());
2937     Use &U = CB.getArgOperandUse(getArgNo());
2938     assert(!isa<UndefValue>(U.get()) &&
2939            "Expected undef values to be filtered out!");
2940     UndefValue &UV = *UndefValue::get(U->getType());
2941     if (A.changeUseAfterManifest(U, UV))
2942       return ChangeStatus::CHANGED;
2943     return ChangeStatus::UNCHANGED;
2944   }
2945 
2946   /// See AbstractAttribute::trackStatistics()
2947   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2948 };
2949 
2950 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2951   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2952 
2953   /// See AbstractAttribute::updateImpl(...).
2954   ChangeStatus updateImpl(Attributor &A) override {
2955 
2956     bool AllKnownDead = true;
2957     auto PredForCallSite = [&](AbstractCallSite ACS) {
2958       if (ACS.isCallbackCall())
2959         return false;
2960       const IRPosition &CSRetPos =
2961           IRPosition::callsite_returned(ACS.getCallSite());
2962       const auto &RetIsDeadAA = A.getAAFor<AAIsDead>(*this, CSRetPos);
2963       AllKnownDead &= RetIsDeadAA.isKnownDead();
2964       return RetIsDeadAA.isAssumedDead();
2965     };
2966 
2967     bool AllCallSitesKnown;
2968     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2969                                 AllCallSitesKnown))
2970       return indicatePessimisticFixpoint();
2971 
2972     if (AllCallSitesKnown && AllKnownDead)
2973       indicateOptimisticFixpoint();
2974 
2975     return ChangeStatus::UNCHANGED;
2976   }
2977 
2978   /// See AbstractAttribute::manifest(...).
2979   ChangeStatus manifest(Attributor &A) override {
2980     // TODO: Rewrite the signature to return void?
2981     bool AnyChange = false;
2982     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2983     auto RetInstPred = [&](Instruction &I) {
2984       ReturnInst &RI = cast<ReturnInst>(I);
2985       if (!isa<UndefValue>(RI.getReturnValue()))
2986         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2987       return true;
2988     };
2989     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2990     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2991   }
2992 
2993   /// See AbstractAttribute::trackStatistics()
2994   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2995 };
2996 
struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
  AAIsDeadCallSiteReturned(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  /// Deliberately empty: this skips AAIsDeadFloating::initialize, which would
  /// pessimize values that are not trivially dead instructions — presumably
  /// because a call site return is judged via its uses only.
  /// NOTE(review): confirm the intent of bypassing the base initialization.
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(IsDead) }
};
3006 
3007 struct AAIsDeadFunction : public AAIsDead {
3008   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
3009 
3010   /// See AbstractAttribute::initialize(...).
3011   void initialize(Attributor &A) override {
3012     const Function *F = getAssociatedFunction();
3013     if (F && !F->isDeclaration()) {
3014       ToBeExploredFrom.insert(&F->getEntryBlock().front());
3015       assumeLive(A, F->getEntryBlock());
3016     }
3017   }
3018 
3019   /// See AbstractAttribute::getAsStr().
3020   const std::string getAsStr() const override {
3021     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3022            std::to_string(getAssociatedFunction()->size()) + "][#TBEP " +
3023            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3024            std::to_string(KnownDeadEnds.size()) + "]";
3025   }
3026 
  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAssociatedFunction();

    // If no block was ever assumed live, the whole function is dead.
    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows to catch asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    // Unexplored frontier points are treated as known dead ends as well.
    KnownDeadEnds.set_union(ToBeExploredFrom);
    for (const Instruction *DeadEndI : KnownDeadEnds) {
      auto *CB = dyn_cast<CallBase>(DeadEndI);
      if (!CB)
        continue;
      const auto &NoReturnAA =
          A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
      bool MayReturn = !NoReturnAA.isAssumedNoReturn();
      // Only act on no-return calls, or invokes we are allowed to rewrite.
      if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
        A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
      else
        // Code following a no-return call can never execute.
        A.changeToUnreachableAfterManifest(
            const_cast<Instruction *>(DeadEndI->getNextNode()));
      HasChanged = ChangeStatus::CHANGED;
    }

    // Remove every block that was never assumed live.
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB))
        A.deleteAfterManifest(BB);

    return HasChanged;
  }
3070 
3071   /// See AbstractAttribute::updateImpl(...).
3072   ChangeStatus updateImpl(Attributor &A) override;
3073 
3074   /// See AbstractAttribute::trackStatistics()
3075   void trackStatistics() const override {}
3076 
3077   /// Returns true if the function is assumed dead.
3078   bool isAssumedDead() const override { return false; }
3079 
3080   /// See AAIsDead::isKnownDead().
3081   bool isKnownDead() const override { return false; }
3082 
3083   /// See AAIsDead::isAssumedDead(BasicBlock *).
3084   bool isAssumedDead(const BasicBlock *BB) const override {
3085     assert(BB->getParent() == getAssociatedFunction() &&
3086            "BB must be in the same anchor scope function.");
3087 
3088     if (!getAssumed())
3089       return false;
3090     return !AssumedLiveBlocks.count(BB);
3091   }
3092 
3093   /// See AAIsDead::isKnownDead(BasicBlock *).
3094   bool isKnownDead(const BasicBlock *BB) const override {
3095     return getKnown() && isAssumedDead(BB);
3096   }
3097 
3098   /// See AAIsDead::isAssumed(Instruction *I).
3099   bool isAssumedDead(const Instruction *I) const override {
3100     assert(I->getParent()->getParent() == getAssociatedFunction() &&
3101            "Instruction must be in the same anchor scope function.");
3102 
3103     if (!getAssumed())
3104       return false;
3105 
3106     // If it is not in AssumedLiveBlocks then it for sure dead.
3107     // Otherwise, it can still be after noreturn call in a live block.
3108     if (!AssumedLiveBlocks.count(I->getParent()))
3109       return true;
3110 
3111     // If it is not after a liveness barrier it is live.
3112     const Instruction *PrevI = I->getPrevNode();
3113     while (PrevI) {
3114       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3115         return true;
3116       PrevI = PrevI->getPrevNode();
3117     }
3118     return false;
3119   }
3120 
3121   /// See AAIsDead::isKnownDead(Instruction *I).
3122   bool isKnownDead(const Instruction *I) const override {
3123     return getKnown() && isAssumedDead(I);
3124   }
3125 
3126   /// Determine if \p F might catch asynchronous exceptions.
3127   static bool mayCatchAsynchronousExceptions(const Function &F) {
3128     return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
3129   }
3130 
3131   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3132   /// that internal function called from \p BB should now be looked at.
3133   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3134     if (!AssumedLiveBlocks.insert(&BB).second)
3135       return false;
3136 
3137     // We assume that all of BB is (probably) live now and if there are calls to
3138     // internal functions we will assume that those are now live as well. This
3139     // is a performance optimization for blocks with calls to a lot of internal
3140     // functions. It can however cause dead functions to be treated as live.
3141     for (const Instruction &I : BB)
3142       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
3143         if (const Function *F = ICS.getCalledFunction())
3144           if (F->hasLocalLinkage())
3145             A.markLiveInternalFunction(*F);
3146     return true;
3147   }
3148 
3149   /// Collection of instructions that need to be explored again, e.g., we
3150   /// did assume they do not transfer control to (one of their) successors.
3151   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3152 
3153   /// Collection of instructions that are known to not transfer control.
3154   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3155 
3156   /// Collection of all assumed live BasicBlocks.
3157   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3158 };
3159 
3160 static bool
3161 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3162                         AbstractAttribute &AA,
3163                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3164   const IRPosition &IPos = IRPosition::callsite_function(CB);
3165 
3166   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3167   if (NoReturnAA.isAssumedNoReturn())
3168     return !NoReturnAA.isKnownNoReturn();
3169   if (CB.isTerminator())
3170     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3171   else
3172     AliveSuccessors.push_back(CB.getNextNode());
3173   return false;
3174 }
3175 
3176 static bool
3177 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3178                         AbstractAttribute &AA,
3179                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3180   bool UsedAssumedInformation =
3181       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3182 
3183   // First, determine if we can change an invoke to a call assuming the
3184   // callee is nounwind. This is not possible if the personality of the
3185   // function allows to catch asynchronous exceptions.
3186   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3187     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3188   } else {
3189     const IRPosition &IPos = IRPosition::callsite_function(II);
3190     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3191     if (AANoUnw.isAssumedNoUnwind()) {
3192       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3193     } else {
3194       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3195     }
3196   }
3197   return UsedAssumedInformation;
3198 }
3199 
3200 static bool
3201 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3202                         AbstractAttribute &AA,
3203                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3204   bool UsedAssumedInformation = false;
3205   if (BI.getNumSuccessors() == 1) {
3206     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3207   } else {
3208     Optional<ConstantInt *> CI =
3209         getAssumedConstant(A, *BI.getCondition(), AA, UsedAssumedInformation);
3210     if (!CI.hasValue()) {
3211       // No value yet, assume both edges are dead.
3212     } else if (CI.getValue()) {
3213       const BasicBlock *SuccBB =
3214           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3215       AliveSuccessors.push_back(&SuccBB->front());
3216     } else {
3217       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3218       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3219       UsedAssumedInformation = false;
3220     }
3221   }
3222   return UsedAssumedInformation;
3223 }
3224 
3225 static bool
3226 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3227                         AbstractAttribute &AA,
3228                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3229   bool UsedAssumedInformation = false;
3230   Optional<ConstantInt *> CI =
3231       getAssumedConstant(A, *SI.getCondition(), AA, UsedAssumedInformation);
3232   if (!CI.hasValue()) {
3233     // No value yet, assume all edges are dead.
3234   } else if (CI.getValue()) {
3235     for (auto &CaseIt : SI.cases()) {
3236       if (CaseIt.getCaseValue() == CI.getValue()) {
3237         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3238         return UsedAssumedInformation;
3239       }
3240     }
3241     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3242     return UsedAssumedInformation;
3243   } else {
3244     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3245       AliveSuccessors.push_back(&SuccBB->front());
3246   }
3247   return UsedAssumedInformation;
3248 }
3249 
ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAssociatedFunction()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    // Determine which successors (next instruction or successor block
    // fronts) are alive given the current assumed information.
    AliveSuccessors.clear();

    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
    default:
      if (I->isTerminator()) {
        for (const BasicBlock *SuccBB : successors(I->getParent()))
          AliveSuccessors.push_back(&SuccBB->front());
      } else {
        AliveSuccessors.push_back(I->getNextNode());
      }
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      // The conclusion rests on assumed facts; revisit this instruction in
      // the next update.
      NewToBeExploredFrom.insert(I);
    } else {
      Change = ChangeStatus::CHANGED;
      // Remember dead ends (no alive successor at all) and terminators with
      // at least one dead successor edge.
      if (AliveSuccessors.empty() ||
          (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
        KnownDeadEnds.insert(I);
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Only continue into successor blocks not assumed live before.
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  ToBeExploredFrom = std::move(NewToBeExploredFrom);

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) finished the exploration, (2) not
  // discovered any non-trivial dead end, and (3) not ruled unreachable code
  // dead.
  if (ToBeExploredFrom.empty() &&
      getAssociatedFunction()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}
3341 
3342 /// Liveness information for a call sites.
3343 struct AAIsDeadCallSite final : AAIsDeadFunction {
3344   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3345 
3346   /// See AbstractAttribute::initialize(...).
3347   void initialize(Attributor &A) override {
3348     // TODO: Once we have call site specific value information we can provide
3349     //       call site specific liveness information and then it makes
3350     //       sense to specialize attributes for call sites instead of
3351     //       redirecting requests to the callee.
3352     llvm_unreachable("Abstract attributes for liveness are not "
3353                      "supported for call sites yet!");
3354   }
3355 
3356   /// See AbstractAttribute::updateImpl(...).
3357   ChangeStatus updateImpl(Attributor &A) override {
3358     return indicatePessimisticFixpoint();
3359   }
3360 
3361   /// See AbstractAttribute::trackStatistics()
3362   void trackStatistics() const override {}
3363 };
3364 
3365 /// -------------------- Dereferenceable Argument Attribute --------------------
3366 
3367 template <>
3368 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3369                                                      const DerefState &R) {
3370   ChangeStatus CS0 =
3371       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3372   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3373   return CS0 | CS1;
3374 }
3375 
/// Shared implementation for all dereferenceable attribute positions.
struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Seed the known dereferenceable bytes from existing IR attributes.
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    // Cache the nonnull AA for this position; queried without dependence
    // tracking.
    NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
                                       /* TrackDependence */ false);

    // Function-interface positions without an exact definition cannot be
    // analyzed reliably; give up on them immediately.
    const IRPosition &IRP = this->getIRPosition();
    bool IsFnInterface = IRP.isFnInterfaceKind();
    const Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// Helper function for collecting accessed bytes in must-be-executed-context
  void addAccessedBytesForUse(Attributor &A, const Use *U,
                              const Instruction *I) {
    const Value *UseV = U->get();
    if (!UseV->getType()->isPointerTy())
      return;

    Type *PtrTy = UseV->getType();
    const DataLayout &DL = A.getDataLayout();
    int64_t Offset;
    // Record the accessed byte range if \p I accesses memory through the
    // associated value (possibly at a constant offset).
    if (const Value *Base = getBasePointerOfAccessPointerOperand(
            I, Offset, DL, /*AllowNonInbounds*/ true)) {
      if (Base == &getAssociatedValue() &&
          getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
        uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
        addAccessedBytes(Offset, Size);
      }
    }
    return;
  }

  /// See AAFromMustBeExecutedContext
  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
    bool IsNonNull = false;
    bool TrackUse = false;
    int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
        A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);

    addAccessedBytesForUse(A, U, I);
    takeKnownDerefBytesMaximum(DerefBytes);
    return TrackUse;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    // If the pointer is (assumed) nonnull, getDeducedAttributes emits a plain
    // dereferenceable attribute; drop a now-redundant
    // dereferenceable_or_null.
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
3468 
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating
    : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
  using Base =
      AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Let the must-be-executed-context machinery update the state first.
    ChangeStatus Change = Base::updateImpl(A);

    const DataLayout &DL = A.getDataLayout();

    // Combine the dereferenceability of each visited underlying value into
    // \p T, adjusting for in-bounds constant offsets that were stripped.
    auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // TODO: Use `AAConstantRange` to infer dereferenceable bytes.

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // for overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // for the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return Change | clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
3548 
3549 /// Dereferenceable attribute for a return value.
3550 struct AADereferenceableReturned final
3551     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3552   AADereferenceableReturned(const IRPosition &IRP)
3553       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3554             IRP) {}
3555 
3556   /// See AbstractAttribute::trackStatistics()
3557   void trackStatistics() const override {
3558     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3559   }
3560 };
3561 
3562 /// Dereferenceable attribute for an argument
3563 struct AADereferenceableArgument final
3564     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3565           AADereferenceable, AADereferenceableImpl> {
3566   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3567       AADereferenceable, AADereferenceableImpl>;
3568   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3569 
3570   /// See AbstractAttribute::trackStatistics()
3571   void trackStatistics() const override {
3572     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3573   }
3574 };
3575 
3576 /// Dereferenceable attribute for a call site argument.
3577 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3578   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3579       : AADereferenceableFloating(IRP) {}
3580 
3581   /// See AbstractAttribute::trackStatistics()
3582   void trackStatistics() const override {
3583     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3584   }
3585 };
3586 
3587 /// Dereferenceable attribute deduction for a call site return value.
3588 struct AADereferenceableCallSiteReturned final
3589     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3590           AADereferenceable, AADereferenceableImpl> {
3591   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3592       AADereferenceable, AADereferenceableImpl>;
3593   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3594 
3595   /// See AbstractAttribute::trackStatistics()
3596   void trackStatistics() const override {
3597     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3598   }
3599 };
3600 
// ------------------------ Align Argument Attribute ------------------------

/// Determine the alignment of \p AssociatedValue implied by its use \p U in
/// instruction \p I. Returns 0 when no alignment can be derived. Sets
/// \p TrackUse when the users of \p I should be followed as well (casts and
/// constant-index GEPs).
static unsigned int getKnownAlignForUse(Attributor &A,
                                        AbstractAttribute &QueryingAA,
                                        Value &AssociatedValue, const Use *U,
                                        const Instruction *I, bool &TrackUse) {
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into.
  if (isa<CastInst>(I)) {
    // Follow all but ptr2int casts.
    TrackUse = !isa<PtrToIntInst>(I);
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    if (GEP->hasAllConstantIndices()) {
      TrackUse = true;
      return 0;
    }
  }

  unsigned Alignment = 0;
  if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
    // Callee and bundle operands do not carry alignment information.
    if (ICS.isBundleOperand(U) || ICS.isCallee(U))
      return 0;

    unsigned ArgNo = ICS.getArgumentNo(U);
    IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
                                        /* TrackDependence */ false);
    Alignment = AlignAA.getKnownAlign();
  }

  // Loads/stores through the value reveal their access alignment.
  const Value *UseV = U->get();
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (SI->getPointerOperand() == UseV)
      Alignment = SI->getAlignment();
  } else if (auto *LI = dyn_cast<LoadInst>(I))
    Alignment = LI->getAlignment();

  // Alignment 1 (or unknown) gives no information.
  if (Alignment <= 1)
    return 0;

  auto &DL = A.getDataLayout();
  int64_t Offset;

  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
    if (Base == &AssociatedValue) {
      // BasePointerAddr + Offset = Alignment * Q for some integer Q.
      // So we can say that the maximum power of two which is a divisor of
      // gcd(Offset, Alignment) is an alignment.

      // NOTE(review): Offset is int64_t but is truncated to int32_t before
      // abs/gcd below; an offset beyond 32 bits could yield a wrong result
      // here — TODO confirm whether such offsets can reach this code.
      uint32_t gcd =
          greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
      Alignment = llvm::PowerOf2Floor(gcd);
    }
  }

  return Alignment;
}
/// Shared implementation for all align attribute positions.
struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Seed the known alignment from existing IR attributes.
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());

    // Function-interface positions without an exact definition cannot be
    // analyzed reliably; give up on them immediately.
    if (getIRPosition().isFnInterfaceKind() &&
        (!getAssociatedFunction() ||
         !getAssociatedFunction()->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AnchorVal = getIRPosition().getAnchorValue();
    for (const Use &U : AnchorVal.uses()) {
      // Raise the alignment of loads/stores that access the anchor value
      // with a weaker alignment than the one deduced here.
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        if (SI->getPointerOperand() == &AnchorVal)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(Align(getAssumedAlign()));
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        if (LI->getPointerOperand() == &AnchorVal)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(Align(getAssumedAlign()));
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      }
    }

    ChangeStatus Changed = AAAlign::manifest(A);

    // Do not report the attribute manifestation as a change if the value
    // already carries an alignment that is at least as strong.
    MaybeAlign InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign.valueOrOne() >= getAssumedAlign())
      return LoadStoreChanged;
    return Changed | LoadStoreChanged;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check in
  //       the existing manifest method and a new one for AAAlignImpl that value
  //       to avoid making the alignment explicit if it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    // An alignment of 1 is a no-op and not worth materializing.
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(
          Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
  }

  /// See AAFromMustBeExecutedContext
  bool followUse(Attributor &A, const Use *U, const Instruction *I) {
    bool TrackUse = false;

    unsigned int KnownAlign =
        getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
    takeKnownMaximum(KnownAlign);

    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};
3743 
/// Align attribute for a floating value.
struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
  using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
  AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Let the must-be-executed-context machinery update the state first.
    Base::updateImpl(A);

    const DataLayout &DL = A.getDataLayout();

    // Combine the alignment of every underlying value visited by
    // genericValueTraversal into \p T.
    auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // Use only IR information if we did not strip anything.
        const MaybeAlign PA = V.getPointerAlignment(DL);
        T.takeKnownMaximum(PA ? PA->value() : 0);
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS =
            static_cast<const AAAlign::StateType &>(AA.getState());
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
                                                   VisitValueCB))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, thus no are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};
3785 
3786 /// Align attribute for function return value.
3787 struct AAAlignReturned final
3788     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3789   AAAlignReturned(const IRPosition &IRP)
3790       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3791 
3792   /// See AbstractAttribute::trackStatistics()
3793   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3794 };
3795 
3796 /// Align attribute for function argument.
3797 struct AAAlignArgument final
3798     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3799                                                               AAAlignImpl> {
3800   AAAlignArgument(const IRPosition &IRP)
3801       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3802                                                                 AAAlignImpl>(
3803             IRP) {}
3804 
3805   /// See AbstractAttribute::trackStatistics()
3806   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3807 };
3808 
3809 struct AAAlignCallSiteArgument final : AAAlignFloating {
3810   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3811 
3812   /// See AbstractAttribute::manifest(...).
3813   ChangeStatus manifest(Attributor &A) override {
3814     ChangeStatus Changed = AAAlignImpl::manifest(A);
3815     MaybeAlign InheritAlign =
3816         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3817     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3818       Changed = ChangeStatus::UNCHANGED;
3819     return Changed;
3820   }
3821 
3822   /// See AbstractAttribute::updateImpl(Attributor &A).
3823   ChangeStatus updateImpl(Attributor &A) override {
3824     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3825     if (Argument *Arg = getAssociatedArgument()) {
3826       // We only take known information from the argument
3827       // so we do not need to track a dependence.
3828       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3829           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3830       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3831     }
3832     return Changed;
3833   }
3834 
3835   /// See AbstractAttribute::trackStatistics()
3836   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3837 };
3838 
3839 /// Align attribute deduction for a call site return value.
3840 struct AAAlignCallSiteReturned final
3841     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3842                                                              AAAlignImpl> {
3843   using Base =
3844       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3845                                                              AAAlignImpl>;
3846   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3847 
3848   /// See AbstractAttribute::initialize(...).
3849   void initialize(Attributor &A) override {
3850     Base::initialize(A);
3851     Function *F = getAssociatedFunction();
3852     if (!F)
3853       indicatePessimisticFixpoint();
3854   }
3855 
3856   /// See AbstractAttribute::trackStatistics()
3857   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3858 };
3859 
3860 /// ------------------ Function No-Return Attribute ----------------------------
3861 struct AANoReturnImpl : public AANoReturn {
3862   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3863 
3864   /// See AbstractAttribute::initialize(...).
3865   void initialize(Attributor &A) override {
3866     AANoReturn::initialize(A);
3867     Function *F = getAssociatedFunction();
3868     if (!F)
3869       indicatePessimisticFixpoint();
3870   }
3871 
3872   /// See AbstractAttribute::getAsStr().
3873   const std::string getAsStr() const override {
3874     return getAssumed() ? "noreturn" : "may-return";
3875   }
3876 
3877   /// See AbstractAttribute::updateImpl(Attributor &A).
3878   virtual ChangeStatus updateImpl(Attributor &A) override {
3879     auto CheckForNoReturn = [](Instruction &) { return false; };
3880     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3881                                    {(unsigned)Instruction::Ret}))
3882       return indicatePessimisticFixpoint();
3883     return ChangeStatus::UNCHANGED;
3884   }
3885 };
3886 
3887 struct AANoReturnFunction final : AANoReturnImpl {
3888   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3889 
3890   /// See AbstractAttribute::trackStatistics()
3891   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3892 };
3893 
3894 /// NoReturn attribute deduction for a call sites.
3895 struct AANoReturnCallSite final : AANoReturnImpl {
3896   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3897 
3898   /// See AbstractAttribute::updateImpl(...).
3899   ChangeStatus updateImpl(Attributor &A) override {
3900     // TODO: Once we have call site specific value information we can provide
3901     //       call site specific liveness information and then it makes
3902     //       sense to specialize attributes for call sites arguments instead of
3903     //       redirecting requests to the callee argument.
3904     Function *F = getAssociatedFunction();
3905     const IRPosition &FnPos = IRPosition::function(*F);
3906     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3907     return clampStateAndIndicateChange(
3908         getState(),
3909         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3910   }
3911 
3912   /// See AbstractAttribute::trackStatistics()
3913   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3914 };
3915 
3916 /// ----------------------- Variable Capturing ---------------------------------
3917 
/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If the attribute is already present on this exact position we are done.
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    // Function-interface positions need an exact definition; otherwise the
    // deduced attribute could be invalidated by a different callee body.
    if (isFnInterfaceKind() &&
        (!AnchorScope || !AnchorScope->hasExactDefinition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    // For argument positions the relevant function is the callee, otherwise
    // the anchor scope.
    const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    // Attributes are only manifested on argument positions.
    if (getArgNo() >= 0) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care about
    // ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            // This argument itself is returned; it may escape that way.
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            // A different argument is returned and memory is read-only, so
            // this argument cannot be communicated back at all.
            State.addKnownBits(NO_CAPTURE);
          else
            // A different argument carries the "returned" attribute, so this
            // one is not captured through the return value.
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};
4025 
/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V maybe captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      // Non-pointer values (e.g., the result of a ptr2int) cannot be tracked
      // by the capture tracker; give up on the whole state.
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  /// Consult the in-flight dereferenceability AA before falling back to the
  /// default, IR-based check of the base class.
  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
        DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    CallSite CS(UInst);
    if (!CS || !CS.isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CS.getArgumentNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    // If we have a abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      // The callee may return the value; remember the call itself as a
      // potential copy that needs to be tracked as well.
      addPotentialCopy(CS);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed so we don't.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
  void addPotentialCopy(CallSite CS) {
    PotentialCopies.push_back(CS.getInstruction());
  }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness, if it is used to stop exploring we need a dependence.
    if (IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()))) {
      A.recordDependence(IsDeadAA, NoCaptureAA, DepClassTy::OPTIONAL);
      return false;
    }
    return true;
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    // Returning true stops the search in PointerMayBeCaptured.
    return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  AANoCapture::StateType &State;

  /// Set of potential copies of the tracked value.
  SmallVectorImpl<const Value *> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};
4175 
ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  // For argument positions track the IR argument itself, otherwise the
  // position's associated value.
  const Value *V =
      getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const IRPosition &FnPos = IRPosition::function(*F);
  // The dependence on liveness is recorded lazily by the use tracker, only
  // when liveness actually prunes a use.
  const auto &IsDeadAA =
      A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);

  AANoCapture::StateType T;

  // Readonly means we cannot capture through memory.
  const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
      *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (FnMemAA.isAssumedReadOnly()) {
    T.addKnownBits(NOT_CAPTURED_IN_MEM);
    if (FnMemAA.isKnownReadOnly())
      addKnownBits(NOT_CAPTURED_IN_MEM);
  }

  // Make sure all returned values are different than the underlying value.
  // TODO: we could do this in a more sophisticated way inside
  //       AAReturnedValues, e.g., track all values that escape through returns
  //       directly somehow.
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    bool SeenConstant = false;
    for (auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        // At most one returned constant is tolerated; a second one makes the
        // set of returned values too imprecise to reason about.
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        return false;
    }
    return true;
  };

  const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
      *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 /* TrackDependence */ true,
                                                 DepClassTy::OPTIONAL);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      // Nothing can be captured anymore, no need to explore uses below.
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      // Promote "assumed" to "known" if the prerequisites reached a fixpoint.
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly updates the assumed state.
  SmallVector<const Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.push_back(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  // Intersect the traversal result into our own state and report a change iff
  // any assumed bit was lost.
  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
4264 
4265 /// NoCapture attribute for function arguments.
4266 struct AANoCaptureArgument final : AANoCaptureImpl {
4267   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4268 
4269   /// See AbstractAttribute::trackStatistics()
4270   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4271 };
4272 
4273 /// NoCapture attribute for call site arguments.
4274 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4275   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4276 
4277   /// See AbstractAttribute::initialize(...).
4278   void initialize(Attributor &A) override {
4279     if (Argument *Arg = getAssociatedArgument())
4280       if (Arg->hasByValAttr())
4281         indicateOptimisticFixpoint();
4282     AANoCaptureImpl::initialize(A);
4283   }
4284 
4285   /// See AbstractAttribute::updateImpl(...).
4286   ChangeStatus updateImpl(Attributor &A) override {
4287     // TODO: Once we have call site specific value information we can provide
4288     //       call site specific liveness information and then it makes
4289     //       sense to specialize attributes for call sites arguments instead of
4290     //       redirecting requests to the callee argument.
4291     Argument *Arg = getAssociatedArgument();
4292     if (!Arg)
4293       return indicatePessimisticFixpoint();
4294     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4295     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4296     return clampStateAndIndicateChange(
4297         getState(),
4298         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4299   }
4300 
4301   /// See AbstractAttribute::trackStatistics()
4302   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4303 };
4304 
4305 /// NoCapture attribute for floating values.
4306 struct AANoCaptureFloating final : AANoCaptureImpl {
4307   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4308 
4309   /// See AbstractAttribute::trackStatistics()
4310   void trackStatistics() const override {
4311     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4312   }
4313 };
4314 
4315 /// NoCapture attribute for function return value.
4316 struct AANoCaptureReturned final : AANoCaptureImpl {
4317   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4318     llvm_unreachable("NoCapture is not applicable to function returns!");
4319   }
4320 
4321   /// See AbstractAttribute::initialize(...).
4322   void initialize(Attributor &A) override {
4323     llvm_unreachable("NoCapture is not applicable to function returns!");
4324   }
4325 
4326   /// See AbstractAttribute::updateImpl(...).
4327   ChangeStatus updateImpl(Attributor &A) override {
4328     llvm_unreachable("NoCapture is not applicable to function returns!");
4329   }
4330 
4331   /// See AbstractAttribute::trackStatistics()
4332   void trackStatistics() const override {}
4333 };
4334 
4335 /// NoCapture attribute deduction for a call site return value.
4336 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4337   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4338 
4339   /// See AbstractAttribute::trackStatistics()
4340   void trackStatistics() const override {
4341     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4342   }
4343 };
4344 
4345 /// ------------------ Value Simplify Attribute ----------------------------
/// Shared implementation of value simplification for all position kinds.
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Void values cannot be simplified to anything.
    if (getAssociatedValue().getType()->isVoidTy())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
                        : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    // In the invalid state the associated value itself is the best answer.
    if (!getAssumed())
      return const_cast<Value *>(&getAssociatedValue());
    return SimplifiedAssociatedValue;
  }

  /// Helper function for querying AAValueSimplify and updating candidate.
  /// \param QueryingValue Value trying to unify with SimplifiedValue
  /// \param AccumulatedSimplifiedValue Current simplification result.
  static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                             Value &QueryingValue,
                             Optional<Value *> &AccumulatedSimplifiedValue) {
    // FIXME: Add a typecast support.

    auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
        QueryingAA, IRPosition::value(QueryingValue));

    Optional<Value *> QueryingValueSimplified =
        ValueSimplifyAA.getAssumedSimplifiedValue(A);

    // No candidate known yet for the queried value; optimistically assume the
    // unification will work out.
    if (!QueryingValueSimplified.hasValue())
      return true;

    // A null candidate means simplification failed for the queried value.
    if (!QueryingValueSimplified.getValue())
      return false;

    Value &QueryingValueSimplifiedUnwrapped =
        *QueryingValueSimplified.getValue();

    // Undef unifies with any accumulated candidate.
    if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
      return true;

    // A candidate exists already; unification only succeeds on equality.
    if (AccumulatedSimplifiedValue.hasValue())
      return AccumulatedSimplifiedValue == QueryingValueSimplified;

    LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
                      << " is assumed to be "
                      << QueryingValueSimplifiedUnwrapped << "\n");

    AccumulatedSimplifiedValue = QueryingValueSimplified;
    return true;
  }

  /// Try to derive a simplified (constant) value for integer positions via
  /// AAValueConstantRange. Returns true if a candidate was recorded or kept.
  bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    const auto &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(*this, getIRPosition());

    Optional<ConstantInt *> COpt =
        ValueConstantRangeAA.getAssumedConstantInt(A);
    if (COpt.hasValue()) {
      if (auto *C = COpt.getValue())
        SimplifiedAssociatedValue = C;
      else
        return false;
    } else {
      // FIXME: It should be llvm::None but if you set llvm::None,
      //        values are mistakenly inferred as `undef` now.
      SimplifiedAssociatedValue = &getAssociatedValue();
    }
    return true;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (!SimplifiedAssociatedValue.hasValue() ||
        !SimplifiedAssociatedValue.getValue())
      return Changed;

    if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
      // We can replace the AssociatedValue with the constant.
      Value &V = getAssociatedValue();
      if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
        LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
                          << " :: " << *this << "\n");
        A.changeValueAfterManifest(V, *C);
        Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
    SimplifiedAssociatedValue = &getAssociatedValue();
    indicateOptimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
  Optional<Value *> SimplifiedAssociatedValue;
};
4468 
4469 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4470   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4471 
4472   void initialize(Attributor &A) override {
4473     AAValueSimplifyImpl::initialize(A);
4474     if (!getAssociatedFunction() || getAssociatedFunction()->isDeclaration())
4475       indicatePessimisticFixpoint();
4476     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4477                 /* IgnoreSubsumingPositions */ true))
4478       indicatePessimisticFixpoint();
4479   }
4480 
4481   /// See AbstractAttribute::updateImpl(...).
4482   ChangeStatus updateImpl(Attributor &A) override {
4483     // Byval is only replacable if it is readonly otherwise we would write into
4484     // the replaced value and not the copy that byval creates implicitly.
4485     Argument *Arg = getAssociatedArgument();
4486     if (Arg->hasByValAttr()) {
4487       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4488       if (!MemAA.isAssumedReadOnly())
4489         return indicatePessimisticFixpoint();
4490     }
4491 
4492     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4493 
4494     auto PredForCallSite = [&](AbstractCallSite ACS) {
4495       // Check if we have an associated argument or not (which can happen for
4496       // callback calls).
4497       Value *ArgOp = ACS.getCallArgOperand(getArgNo());
4498       if (!ArgOp)
4499         return false;
4500       // We can only propagate thread independent values through callbacks.
4501       // This is different to direct/indirect call sites because for them we
4502       // know the thread executing the caller and callee is the same. For
4503       // callbacks this is not guaranteed, thus a thread dependent value could
4504       // be different for the caller and callee, making it invalid to propagate.
4505       if (ACS.isCallbackCall())
4506         if (auto *C = dyn_cast<Constant>(ArgOp))
4507           if (C->isThreadDependent())
4508             return false;
4509       return checkAndUpdate(A, *this, *ArgOp, SimplifiedAssociatedValue);
4510     };
4511 
4512     bool AllCallSitesKnown;
4513     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4514                                 AllCallSitesKnown))
4515       if (!askSimplifiedValueForAAValueConstantRange(A))
4516         return indicatePessimisticFixpoint();
4517 
4518     // If a candicate was found in this update, return CHANGED.
4519     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4520                ? ChangeStatus::UNCHANGED
4521                : ChangeStatus ::CHANGED;
4522   }
4523 
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Count arguments for which a simplified value was deduced.
    STATS_DECLTRACK_ARG_ATTR(value_simplify)
  }
4528 };
4529 
4530 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4531   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4532 
4533   /// See AbstractAttribute::updateImpl(...).
4534   ChangeStatus updateImpl(Attributor &A) override {
4535     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4536 
4537     auto PredForReturned = [&](Value &V) {
4538       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4539     };
4540 
4541     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4542       if (!askSimplifiedValueForAAValueConstantRange(A))
4543         return indicatePessimisticFixpoint();
4544 
4545     // If a candicate was found in this update, return CHANGED.
4546     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4547                ? ChangeStatus::UNCHANGED
4548                : ChangeStatus ::CHANGED;
4549   }
4550   /// See AbstractAttribute::trackStatistics()
4551   void trackStatistics() const override {
4552     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4553   }
4554 };
4555 
4556 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4557   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4558 
4559   /// See AbstractAttribute::initialize(...).
4560   void initialize(Attributor &A) override {
4561     Value &V = getAnchorValue();
4562 
4563     // TODO: add other stuffs
4564     if (isa<Constant>(V))
4565       indicatePessimisticFixpoint();
4566   }
4567 
4568   /// See AbstractAttribute::updateImpl(...).
4569   ChangeStatus updateImpl(Attributor &A) override {
4570     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4571 
4572     auto VisitValueCB = [&](Value &V, bool &, bool Stripped) -> bool {
4573       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4574       if (!Stripped && this == &AA) {
4575         // TODO: Look the instruction and check recursively.
4576 
4577         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4578                           << "\n");
4579         return false;
4580       }
4581       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4582     };
4583 
4584     bool Dummy = false;
4585     if (!genericValueTraversal<AAValueSimplify, bool>(A, getIRPosition(), *this,
4586                                                       Dummy, VisitValueCB))
4587       if (!askSimplifiedValueForAAValueConstantRange(A))
4588         return indicatePessimisticFixpoint();
4589 
4590     // If a candicate was found in this update, return CHANGED.
4591 
4592     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4593                ? ChangeStatus::UNCHANGED
4594                : ChangeStatus ::CHANGED;
4595   }
4596 
4597   /// See AbstractAttribute::trackStatistics()
4598   void trackStatistics() const override {
4599     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4600   }
4601 };
4602 
struct AAValueSimplifyFunction : AAValueSimplifyImpl {
  AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // A function position cannot be simplified to anything other than the
    // function itself, so fix the result immediately.
    SimplifiedAssociatedValue = &getAnchorValue();
    indicateOptimisticFixpoint();
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // initialize() reached a fixpoint, so no update can ever be scheduled.
    llvm_unreachable(
        "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(value_simplify)
  }
};
4621 
struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP)
      : AAValueSimplifyFunction(IRP) {}
  // Inherits the function behavior (value fixed to the anchor in
  // initialize()); only the tracked statistic differs.
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};
4630 
struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
      : AAValueSimplifyReturned(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};
struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
      : AAValueSimplifyFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};
4647 
4648 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4649 struct AAHeapToStackImpl : public AAHeapToStack {
4650   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4651 
4652   const std::string getAsStr() const override {
4653     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4654   }
4655 
4656   ChangeStatus manifest(Attributor &A) override {
4657     assert(getState().isValidState() &&
4658            "Attempted to manifest an invalid state!");
4659 
4660     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4661     Function *F = getAssociatedFunction();
4662     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4663 
4664     for (Instruction *MallocCall : MallocCalls) {
4665       // This malloc cannot be replaced.
4666       if (BadMallocCalls.count(MallocCall))
4667         continue;
4668 
4669       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4670         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4671         A.deleteAfterManifest(*FreeCall);
4672         HasChanged = ChangeStatus::CHANGED;
4673       }
4674 
4675       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4676                         << "\n");
4677 
4678       Constant *Size;
4679       if (isCallocLikeFn(MallocCall, TLI)) {
4680         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4681         auto *SizeT = dyn_cast<ConstantInt>(MallocCall->getOperand(1));
4682         APInt TotalSize = SizeT->getValue() * Num->getValue();
4683         Size =
4684             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4685       } else {
4686         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4687       }
4688 
4689       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4690       Instruction *AI = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
4691                                        Size, "", MallocCall->getNextNode());
4692 
4693       if (AI->getType() != MallocCall->getType())
4694         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4695                              AI->getNextNode());
4696 
4697       replaceAllInstructionUsesWith(*MallocCall, *AI);
4698 
4699       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4700         auto *NBB = II->getNormalDest();
4701         BranchInst::Create(NBB, MallocCall->getParent());
4702         A.deleteAfterManifest(*MallocCall);
4703       } else {
4704         A.deleteAfterManifest(*MallocCall);
4705       }
4706 
4707       if (isCallocLikeFn(MallocCall, TLI)) {
4708         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4709                                    AI->getNextNode());
4710         Value *Ops[] = {
4711             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4712             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4713 
4714         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4715         Module *M = F->getParent();
4716         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4717         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4718       }
4719       HasChanged = ChangeStatus::CHANGED;
4720     }
4721 
4722     return HasChanged;
4723   }
4724 
4725   /// Collection of all malloc calls in a function.
4726   SmallSetVector<Instruction *, 4> MallocCalls;
4727 
4728   /// Collection of malloc calls that cannot be converted.
4729   DenseSet<const Instruction *> BadMallocCalls;
4730 
4731   /// A map for each malloc call to the set of associated free calls.
4732   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4733 
4734   ChangeStatus updateImpl(Attributor &A) override;
4735 };
4736 
ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
  const Function *F = getAssociatedFunction();
  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  // An allocation with exactly one recorded free is convertible if that free
  // is found in the must-be-executed context following the allocation, i.e.,
  // it is always executed once the allocation happened.
  auto FreeCheck = [&](Instruction &I) {
    const auto &Frees = FreesForMalloc.lookup(&I);
    if (Frees.size() != 1)
      return false;
    Instruction *UniqueFree = *Frees.begin();
    return Explorer.findInContextOf(UniqueFree, I.getNextNode());
  };

  // Inspect all (transitive) uses of the allocation \p I. Frees are recorded
  // in FreesForMalloc; any use that could let the pointer escape, or free a
  // potentially different allocation, clears ValidUsesOnly.
  auto UsesCheck = [&](Instruction &I) {
    bool ValidUsesOnly = true;
    // Stays true while the visited use is known to be the allocation itself;
    // PHIs and selects turn it into a "maybe" (see below).
    bool MustUse = true;
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      // Loading from the allocated memory is always fine.
      if (isa<LoadInst>(UserI))
        return true;
      if (auto *SI = dyn_cast<StoreInst>(UserI)) {
        if (SI->getValueOperand() == U.get()) {
          // Storing the pointer itself makes it escape.
          LLVM_DEBUG(dbgs()
                     << "[H2S] escaping store to memory: " << *UserI << "\n");
          ValidUsesOnly = false;
        } else {
          // A store into the malloc'ed memory is fine.
        }
        return true;
      }
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
          return true;
        // Record free calls so manifest can erase them with the allocation.
        if (isFreeCall(UserI, TLI)) {
          if (MustUse) {
            FreesForMalloc[&I].insert(UserI);
          } else {
            // The freed pointer came through a PHI/select, so it might
            // actually free a different allocation.
            LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
                              << *UserI << "\n");
            ValidUsesOnly = false;
          }
          return true;
        }

        unsigned ArgNo = CB->getArgOperandNo(&U);

        // Passing the pointer to a call is acceptable if the callee neither
        // captures it ...
        const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));

        // If a callsite argument use is nofree, we are fine.
        const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));

        if (!NoCaptureAA.isAssumedNoCapture() ||
            !ArgNoFreeAA.isAssumedNoFree()) {
          LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
          ValidUsesOnly = false;
        }
        return true;
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        // Follow derived pointers; after a PHI/select the value is no longer
        // guaranteed to be the allocation itself.
        MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
        Follow = true;
        return true;
      }
      // Unknown user for which we can not track uses further (in a way that
      // makes sense).
      LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
      ValidUsesOnly = false;
      return true;
    };
    A.checkForAllUses(Pred, *this, I);
    return ValidUsesOnly;
  };

  // Classify a call-like instruction: convertible mallocs/callocs go to
  // MallocCalls, everything else to BadMallocCalls.
  auto MallocCallocCheck = [&](Instruction &I) {
    if (BadMallocCalls.count(&I))
      return true;

    bool IsMalloc = isMallocLikeFn(&I, TLI);
    bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
    if (!IsMalloc && !IsCalloc) {
      BadMallocCalls.insert(&I);
      return true;
    }

    // Only allocations with a constant, non-overflowing size below the
    // threshold qualify; they must also pass the use check or have a
    // must-be-executed unique free.
    if (IsMalloc) {
      if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
        if (Size->getValue().ule(MaxHeapToStackSize))
          if (UsesCheck(I) || FreeCheck(I)) {
            MallocCalls.insert(&I);
            return true;
          }
    } else if (IsCalloc) {
      bool Overflow = false;
      if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
        if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
          if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
                  .ule(MaxHeapToStackSize))
            if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
              MallocCalls.insert(&I);
              return true;
            }
    }

    BadMallocCalls.insert(&I);
    return true;
  };

  // The bad-call set only ever grows; any growth is a state change.
  size_t NumBadMallocs = BadMallocCalls.size();

  A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);

  if (NumBadMallocs != BadMallocCalls.size())
    return ChangeStatus::CHANGED;

  return ChangeStatus::UNCHANGED;
}
4860 
4861 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4862   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4863 
4864   /// See AbstractAttribute::trackStatistics()
4865   void trackStatistics() const override {
4866     STATS_DECL(MallocCalls, Function,
4867                "Number of malloc calls converted to allocas");
4868     for (auto *C : MallocCalls)
4869       if (!BadMallocCalls.count(C))
4870         ++BUILD_STAT_NAME(MallocCalls, Function);
4871   }
4872 };
4873 
4874 /// ----------------------- Privatizable Pointers ------------------------------
4875 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4876   AAPrivatizablePtrImpl(const IRPosition &IRP)
4877       : AAPrivatizablePtr(IRP), PrivatizableType(llvm::None) {}
4878 
4879   ChangeStatus indicatePessimisticFixpoint() override {
4880     AAPrivatizablePtr::indicatePessimisticFixpoint();
4881     PrivatizableType = nullptr;
4882     return ChangeStatus::CHANGED;
4883   }
4884 
4885   /// Identify the type we can chose for a private copy of the underlying
4886   /// argument. None means it is not clear yet, nullptr means there is none.
4887   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4888 
4889   /// Return a privatizable type that encloses both T0 and T1.
4890   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4891   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4892     if (!T0.hasValue())
4893       return T1;
4894     if (!T1.hasValue())
4895       return T0;
4896     if (T0 == T1)
4897       return T0;
4898     return nullptr;
4899   }
4900 
4901   Optional<Type *> getPrivatizableType() const override {
4902     return PrivatizableType;
4903   }
4904 
4905   const std::string getAsStr() const override {
4906     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4907   }
4908 
4909 protected:
4910   Optional<Type *> PrivatizableType;
4911 };
4912 
4913 // TODO: Do this for call site arguments (probably also other values) as well.
4914 
4915 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
  /// Construct the argument-position privatizable-pointer attribute.
  AAPrivatizablePtrArgument(const IRPosition &IRP)
      : AAPrivatizablePtrImpl(IRP) {}
4918 
4919   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4920   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4921     // If this is a byval argument and we know all the call sites (so we can
4922     // rewrite them), there is no need to check them explicitly.
4923     bool AllCallSitesKnown;
4924     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4925         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4926                                true, AllCallSitesKnown))
4927       return getAssociatedValue().getType()->getPointerElementType();
4928 
4929     Optional<Type *> Ty;
4930     unsigned ArgNo = getIRPosition().getArgNo();
4931 
4932     // Make sure the associated call site argument has the same type at all call
4933     // sites and it is an allocation we know is safe to privatize, for now that
4934     // means we only allow alloca instructions.
4935     // TODO: We can additionally analyze the accesses in the callee to  create
4936     //       the type from that information instead. That is a little more
4937     //       involved and will be done in a follow up patch.
4938     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4939       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4940       // Check if a coresponding argument was found or if it is one not
4941       // associated (which can happen for callback calls).
4942       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4943         return false;
4944 
4945       // Check that all call sites agree on a type.
4946       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4947       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4948 
4949       LLVM_DEBUG({
4950         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4951         if (CSTy.hasValue() && CSTy.getValue())
4952           CSTy.getValue()->print(dbgs());
4953         else if (CSTy.hasValue())
4954           dbgs() << "<nullptr>";
4955         else
4956           dbgs() << "<none>";
4957       });
4958 
4959       Ty = combineTypes(Ty, CSTy);
4960 
4961       LLVM_DEBUG({
4962         dbgs() << " : New Type: ";
4963         if (Ty.hasValue() && Ty.getValue())
4964           Ty.getValue()->print(dbgs());
4965         else if (Ty.hasValue())
4966           dbgs() << "<nullptr>";
4967         else
4968           dbgs() << "<none>";
4969         dbgs() << "\n";
4970       });
4971 
4972       return !Ty.hasValue() || Ty.getValue();
4973     };
4974 
4975     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4976       return nullptr;
4977     return Ty;
4978   }
4979 
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // None means undecided (report no change), nullptr means known not
    // privatizable (pessimistic fixpoint).
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    // Avoid arguments with padding for now.
    if (!getIRPosition().hasAttr(Attribute::ByVal) &&
        !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
                                                A.getInfoCache().getDL())) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
      return indicatePessimisticFixpoint();
    }

    // Verify callee and caller agree on how the promoted argument would be
    // passed.
    // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
    // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
    // which doesn't require the arguments ArgumentPromotion wanted to pass.
    Function &Fn = *getIRPosition().getAnchorScope();
    SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
    ArgsToPromote.insert(getAssociatedArgument());
    const auto *TTI =
        A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
    // NOTE(review): the empty() check suggests areFunctionArgsABICompatible
    // may prune incompatible arguments from ArgsToPromote — confirm.
    if (!TTI ||
        !ArgumentPromotionPass::areFunctionArgsABICompatible(
            Fn, *TTI, ArgsToPromote, Dummy) ||
        ArgsToPromote.empty()) {
      LLVM_DEBUG(
          dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
                 << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Register a rewrite of the argument.
    Argument *Arg = getAssociatedArgument();
    if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
      return indicatePessimisticFixpoint();
    }

    unsigned ArgNo = Arg->getArgNo();

    // Helper to check if for the given call site the associated argument is
    // passed to a callback where the privatization would be different.
    auto IsCompatiblePrivArgOfCallback = [&](CallSite CS) {
      SmallVector<const Use *, 4> CBUses;
      AbstractCallSite::getCallbackUses(CS, CBUses);
      for (const Use *U : CBUses) {
        AbstractCallSite CBACS(U);
        assert(CBACS && CBACS.isCallbackCall());
        for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
          int CBArgNo = CBACS.getCallArgOperandNo(CBArg);

          LLVM_DEBUG({
            dbgs()
                << "[AAPrivatizablePtr] Argument " << *Arg
                << "check if can be privatized in the context of its parent ("
                << Arg->getParent()->getName()
                << ")\n[AAPrivatizablePtr] because it is an argument in a "
                   "callback ("
                << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                << ")\n[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperand(CBArg) << " vs "
                << CS.getArgOperand(ArgNo) << "\n"
                << "[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
          });

          // Only the callback argument fed by our argument position matters.
          if (CBArgNo != int(ArgNo))
            continue;
          // Compatible if the callback argument's privatizable type is still
          // undecided or agrees with ours.
          const auto &CBArgPrivAA =
              A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
          if (CBArgPrivAA.isValidState()) {
            auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
            if (!CBArgPrivTy.hasValue())
              continue;
            if (CBArgPrivTy.getValue() == PrivatizableType)
              continue;
          }

          LLVM_DEBUG({
            dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
                   << " cannot be privatized in the context of its parent ("
                   << Arg->getParent()->getName()
                   << ")\n[AAPrivatizablePtr] because it is an argument in a "
                      "callback ("
                   << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ").\n[AAPrivatizablePtr] for which the argument "
                      "privatization is not compatible.\n";
          });
          return false;
        }
      }
      return true;
    };

    // Helper to check if for the given call site the associated argument is
    // passed to a direct call where the privatization would be different.
    auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
      CallBase *DC = cast<CallBase>(ACS.getInstruction());
      int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
      assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
             "Expected a direct call operand for callback call operand");

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << DCArgNo << "@" << DC->getCalledFunction()->getName()
               << ").\n";
      });

      // Compatible if the direct callee's matching argument has an undecided
      // or agreeing privatizable type.
      Function *DCCallee = DC->getCalledFunction();
      if (unsigned(DCArgNo) < DCCallee->arg_size()) {
        const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
            *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
        if (DCArgPrivAA.isValidState()) {
          auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
          if (!DCArgPrivTy.hasValue())
            return true;
          if (DCArgPrivTy.getValue() == PrivatizableType)
            return true;
        }
      }

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " cannot be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << ACS.getCallSite().getCalledFunction()->getName()
               << ").\n[AAPrivatizablePtr] for which the argument "
                  "privatization is not compatible.\n";
      });
      return false;
    };

    // Helper to check if the associated argument is used at the given abstract
    // call site in a way that is incompatible with the privatization assumed
    // here.
    auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
      // Note the "crossed" dispatch: a direct call site is checked against
      // callback uses of the same call (broker), while a callback call site
      // is validated against the underlying direct call.
      if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(ACS.getCallSite());
      if (ACS.isCallbackCall())
        return IsCompatiblePrivArgOfDirectCS(ACS);
      return false;
    };

    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
5146 
5147   /// Given a type to private \p PrivType, collect the constituates (which are
5148   /// used) in \p ReplacementTypes.
5149   static void
5150   identifyReplacementTypes(Type *PrivType,
5151                            SmallVectorImpl<Type *> &ReplacementTypes) {
5152     // TODO: For now we expand the privatization type to the fullest which can
5153     //       lead to dead arguments that need to be removed later.
5154     assert(PrivType && "Expected privatizable type!");
5155 
5156     // Traverse the type, extract constituate types on the outermost level.
5157     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5158       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5159         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5160     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5161       ReplacementTypes.append(PrivArrayType->getNumElements(),
5162                               PrivArrayType->getElementType());
5163     } else {
5164       ReplacementTypes.push_back(PrivType);
5165     }
5166   }
5167 
5168   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5169   /// The values needed are taken from the arguments of \p F starting at
5170   /// position \p ArgNo.
5171   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5172                                    unsigned ArgNo, Instruction &IP) {
5173     assert(PrivType && "Expected privatizable type!");
5174 
5175     IRBuilder<NoFolder> IRB(&IP);
5176     const DataLayout &DL = F.getParent()->getDataLayout();
5177 
5178     // Traverse the type, build GEPs and stores.
5179     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5180       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5181       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5182         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5183         Value *Ptr = constructPointer(
5184             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5185         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5186       }
5187     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5188       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5189       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5190       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5191         Value *Ptr =
5192             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5193         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5194       }
5195     } else {
5196       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5197     }
5198   }
5199 
5200   /// Extract values from \p Base according to the type \p PrivType at the
5201   /// call position \p ACS. The values are appended to \p ReplacementValues.
5202   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5203                                Value *Base,
5204                                SmallVectorImpl<Value *> &ReplacementValues) {
5205     assert(Base && "Expected base value!");
5206     assert(PrivType && "Expected privatizable type!");
5207     Instruction *IP = ACS.getInstruction();
5208 
5209     IRBuilder<NoFolder> IRB(IP);
5210     const DataLayout &DL = IP->getModule()->getDataLayout();
5211 
5212     if (Base->getType()->getPointerElementType() != PrivType)
5213       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5214                                                  "", ACS.getInstruction());
5215 
5216     // TODO: Improve the alignment of the loads.
5217     // Traverse the type, build GEPs and loads.
5218     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5219       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5220       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5221         Type *PointeeTy = PrivStructType->getElementType(u);
5222         Value *Ptr =
5223             constructPointer(PointeeTy->getPointerTo(), Base,
5224                              PrivStructLayout->getElementOffset(u), IRB, DL);
5225         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5226         L->setAlignment(MaybeAlign(1));
5227         ReplacementValues.push_back(L);
5228       }
5229     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5230       Type *PointeeTy = PrivArrayType->getElementType();
5231       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5232       Type *PointeePtrTy = PointeeTy->getPointerTo();
5233       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5234         Value *Ptr =
5235             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5236         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5237         L->setAlignment(MaybeAlign(1));
5238         ReplacementValues.push_back(L);
5239       }
5240     } else {
5241       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5242       L->setAlignment(MaybeAlign(1));
5243       ReplacementValues.push_back(L);
5244     }
5245   }
5246 
  /// See AbstractAttribute::manifest(...)
  ///
  /// Privatize the argument: rewrite the function signature so the pointee's
  /// elements are passed by value, recreate the pointee in a fresh alloca in
  /// the callee, and load/pass the elements at every call site.
  ChangeStatus manifest(Attributor &A) override {
    // Nothing was deduced yet, nothing to manifest.
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    assert(PrivatizableType.getValue() && "Expected privatizable type!");

    // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail recursion.
    // TODO: Be smarter about new allocas escaping into tail calls.
    SmallVector<CallInst *, 16> TailCalls;
    if (!A.checkForAllInstructions(
            [&](Instruction &I) {
              CallInst &CI = cast<CallInst>(I);
              if (CI.isTailCall())
                TailCalls.push_back(&CI);
              return true;
            },
            *this, {Instruction::Call}))
      // If we cannot visit all calls we cannot guarantee the rewrite is safe.
      return ChangeStatus::UNCHANGED;

    Argument *Arg = getAssociatedArgument();

    // Callback to repair the associated function. A new alloca is placed at the
    // beginning and initialized with the values passed through arguments. The
    // new alloca replaces the use of the old pointer argument.
    // NOTE(review): the [=] capture is deliberate — the callback presumably
    // runs after this scope ends, so it must own copies of Arg/TailCalls and
    // the implicit `this` (for PrivatizableType); confirm callback lifetime.
    Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            Function &ReplacementFn, Function::arg_iterator ArgIt) {
          BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
          Instruction *IP = &*EntryBB.getFirstInsertionPt();
          auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
                                    Arg->getName() + ".priv", IP);
          createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
                               ArgIt->getArgNo(), *IP);
          Arg->replaceAllUsesWith(AI);

          // The new alloca must not escape into a tail call.
          for (CallInst *CI : TailCalls)
            CI->setTailCall(false);
        };

    // Callback to repair a call site of the associated function. The elements
    // of the privatizable type are loaded prior to the call and passed to the
    // new function version.
    Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
          createReplacementValues(
              PrivatizableType.getValue(), ACS,
              ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
                                  NewArgOperands);
        };

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Register a rewrite of the argument.
    if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
                                           std::move(FnRepairCB),
                                           std::move(ACSRepairCB)))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }
5311 
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    // Count privatized pointer arguments.
    STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
  }
5316 };
5317 
5318 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5319   AAPrivatizablePtrFloating(const IRPosition &IRP)
5320       : AAPrivatizablePtrImpl(IRP) {}
5321 
5322   /// See AbstractAttribute::initialize(...).
5323   virtual void initialize(Attributor &A) override {
5324     // TODO: We can privatize more than arguments.
5325     indicatePessimisticFixpoint();
5326   }
5327 
5328   ChangeStatus updateImpl(Attributor &A) override {
5329     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5330                      "updateImpl will not be called");
5331   }
5332 
5333   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5334   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5335     Value *Obj =
5336         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5337     if (!Obj) {
5338       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5339       return nullptr;
5340     }
5341 
5342     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5343       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5344         if (CI->isOne())
5345           return Obj->getType()->getPointerElementType();
5346     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5347       auto &PrivArgAA =
5348           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5349       if (PrivArgAA.isAssumedPrivatizablePtr())
5350         return Obj->getType()->getPointerElementType();
5351     }
5352 
5353     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5354                          "alloca nor privatizable argument: "
5355                       << *Obj << "!\n");
5356     return nullptr;
5357   }
5358 
5359   /// See AbstractAttribute::trackStatistics()
5360   void trackStatistics() const override {
5361     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5362   }
5363 };
5364 
5365 struct AAPrivatizablePtrCallSiteArgument final
5366     : public AAPrivatizablePtrFloating {
5367   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP)
5368       : AAPrivatizablePtrFloating(IRP) {}
5369 
5370   /// See AbstractAttribute::initialize(...).
5371   void initialize(Attributor &A) override {
5372     if (getIRPosition().hasAttr(Attribute::ByVal))
5373       indicateOptimisticFixpoint();
5374   }
5375 
5376   /// See AbstractAttribute::updateImpl(...).
5377   ChangeStatus updateImpl(Attributor &A) override {
5378     PrivatizableType = identifyPrivatizableType(A);
5379     if (!PrivatizableType.hasValue())
5380       return ChangeStatus::UNCHANGED;
5381     if (!PrivatizableType.getValue())
5382       return indicatePessimisticFixpoint();
5383 
5384     const IRPosition &IRP = getIRPosition();
5385     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5386     if (!NoCaptureAA.isAssumedNoCapture()) {
5387       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5388       return indicatePessimisticFixpoint();
5389     }
5390 
5391     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5392     if (!NoAliasAA.isAssumedNoAlias()) {
5393       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5394       return indicatePessimisticFixpoint();
5395     }
5396 
5397     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5398     if (!MemBehaviorAA.isAssumedReadOnly()) {
5399       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5400       return indicatePessimisticFixpoint();
5401     }
5402 
5403     return ChangeStatus::UNCHANGED;
5404   }
5405 
5406   /// See AbstractAttribute::trackStatistics()
5407   void trackStatistics() const override {
5408     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5409   }
5410 };
5411 
struct AAPrivatizablePtrCallSiteReturned final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP)
      : AAPrivatizablePtrFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    // Call site return positions are not privatized (yet); give up directly.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
  }
};
5428 
struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrReturned(const IRPosition &IRP)
      : AAPrivatizablePtrFloating(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    // Returned positions are not privatized (yet); give up directly.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
  }
};
5444 
5445 /// -------------------- Memory Behavior Attributes ----------------------------
5446 /// Includes read-none, read-only, and write-only.
5447 /// ----------------------------------------------------------------------------
5448 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5449   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
5450 
5451   /// See AbstractAttribute::initialize(...).
5452   void initialize(Attributor &A) override {
5453     intersectAssumedBits(BEST_STATE);
5454     getKnownStateFromValue(getIRPosition(), getState());
5455     IRAttribute::initialize(A);
5456   }
5457 
5458   /// Return the memory behavior information encoded in the IR for \p IRP.
5459   static void getKnownStateFromValue(const IRPosition &IRP,
5460                                      BitIntegerState &State,
5461                                      bool IgnoreSubsumingPositions = false) {
5462     SmallVector<Attribute, 2> Attrs;
5463     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5464     for (const Attribute &Attr : Attrs) {
5465       switch (Attr.getKindAsEnum()) {
5466       case Attribute::ReadNone:
5467         State.addKnownBits(NO_ACCESSES);
5468         break;
5469       case Attribute::ReadOnly:
5470         State.addKnownBits(NO_WRITES);
5471         break;
5472       case Attribute::WriteOnly:
5473         State.addKnownBits(NO_READS);
5474         break;
5475       default:
5476         llvm_unreachable("Unexpcted attribute!");
5477       }
5478     }
5479 
5480     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5481       if (!I->mayReadFromMemory())
5482         State.addKnownBits(NO_READS);
5483       if (!I->mayWriteToMemory())
5484         State.addKnownBits(NO_WRITES);
5485     }
5486   }
5487 
5488   /// See AbstractAttribute::getDeducedAttributes(...).
5489   void getDeducedAttributes(LLVMContext &Ctx,
5490                             SmallVectorImpl<Attribute> &Attrs) const override {
5491     assert(Attrs.size() == 0);
5492     if (isAssumedReadNone())
5493       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5494     else if (isAssumedReadOnly())
5495       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5496     else if (isAssumedWriteOnly())
5497       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5498     assert(Attrs.size() <= 1);
5499   }
5500 
5501   /// See AbstractAttribute::manifest(...).
5502   ChangeStatus manifest(Attributor &A) override {
5503     const IRPosition &IRP = getIRPosition();
5504 
5505     // Check if we would improve the existing attributes first.
5506     SmallVector<Attribute, 4> DeducedAttrs;
5507     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5508     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5509           return IRP.hasAttr(Attr.getKindAsEnum(),
5510                              /* IgnoreSubsumingPositions */ true);
5511         }))
5512       return ChangeStatus::UNCHANGED;
5513 
5514     // Clear existing attributes.
5515     IRP.removeAttrs(AttrKinds);
5516 
5517     // Use the generic manifest method.
5518     return IRAttribute::manifest(A);
5519   }
5520 
5521   /// See AbstractState::getAsStr().
5522   const std::string getAsStr() const override {
5523     if (isAssumedReadNone())
5524       return "readnone";
5525     if (isAssumedReadOnly())
5526       return "readonly";
5527     if (isAssumedWriteOnly())
5528       return "writeonly";
5529     return "may-read/write";
5530   }
5531 
5532   /// The set of IR attributes AAMemoryBehavior deals with.
5533   static const Attribute::AttrKind AttrKinds[3];
5534 };
5535 
// Out-of-line definition of the attribute kinds AAMemoryBehavior handles.
const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5538 
/// Memory behavior attribute for a floating value.
struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
  AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    // Initialize the use vector with all direct uses of the associated value.
    // Transitive uses are added on demand during updateImpl(...).
    for (const Use &U : getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// See AbstractAttribute::updateImpl(...). Defined out-of-line below.
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FLOATING_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FLOATING_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FLOATING_ATTR(writeonly)
  }

private:
  /// Return true if users of \p UserI might access the underlying
  /// variable/location described by \p U and should therefore be analyzed.
  bool followUsersOfUseIn(Attributor &A, const Use *U,
                          const Instruction *UserI);

  /// Update the state according to the effect of use \p U in \p UserI.
  void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);

protected:
  /// Container for (transitive) uses of the associated argument.
  /// A SetVector so index-based iteration stays valid while new uses are
  /// inserted and duplicates are filtered automatically.
  SetVector<const Use *> Uses;
};
5577 
5578 /// Memory behavior attribute for function argument.
5579 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5580   AAMemoryBehaviorArgument(const IRPosition &IRP)
5581       : AAMemoryBehaviorFloating(IRP) {}
5582 
5583   /// See AbstractAttribute::initialize(...).
5584   void initialize(Attributor &A) override {
5585     intersectAssumedBits(BEST_STATE);
5586     const IRPosition &IRP = getIRPosition();
5587     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5588     // can query it when we use has/getAttr. That would allow us to reuse the
5589     // initialize of the base class here.
5590     bool HasByVal =
5591         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5592     getKnownStateFromValue(IRP, getState(),
5593                            /* IgnoreSubsumingPositions */ HasByVal);
5594 
5595     // Initialize the use vector with all direct uses of the associated value.
5596     Argument *Arg = getAssociatedArgument();
5597     if (!Arg || !Arg->getParent()->hasExactDefinition()) {
5598       indicatePessimisticFixpoint();
5599     } else {
5600       // Initialize the use vector with all direct uses of the associated value.
5601       for (const Use &U : Arg->uses())
5602         Uses.insert(&U);
5603     }
5604   }
5605 
5606   ChangeStatus manifest(Attributor &A) override {
5607     // TODO: From readattrs.ll: "inalloca parameters are always
5608     //                           considered written"
5609     if (hasAttr({Attribute::InAlloca})) {
5610       removeKnownBits(NO_WRITES);
5611       removeAssumedBits(NO_WRITES);
5612     }
5613     return AAMemoryBehaviorFloating::manifest(A);
5614   }
5615 
5616   /// See AbstractAttribute::trackStatistics()
5617   void trackStatistics() const override {
5618     if (isAssumedReadNone())
5619       STATS_DECLTRACK_ARG_ATTR(readnone)
5620     else if (isAssumedReadOnly())
5621       STATS_DECLTRACK_ARG_ATTR(readonly)
5622     else if (isAssumedWriteOnly())
5623       STATS_DECLTRACK_ARG_ATTR(writeonly)
5624   }
5625 };
5626 
5627 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5628   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
5629       : AAMemoryBehaviorArgument(IRP) {}
5630 
5631   /// See AbstractAttribute::initialize(...).
5632   void initialize(Attributor &A) override {
5633     if (Argument *Arg = getAssociatedArgument()) {
5634       if (Arg->hasByValAttr()) {
5635         addKnownBits(NO_WRITES);
5636         removeKnownBits(NO_READS);
5637         removeAssumedBits(NO_READS);
5638       }
5639     } else {
5640     }
5641     AAMemoryBehaviorArgument::initialize(A);
5642   }
5643 
5644   /// See AbstractAttribute::updateImpl(...).
5645   ChangeStatus updateImpl(Attributor &A) override {
5646     // TODO: Once we have call site specific value information we can provide
5647     //       call site specific liveness liveness information and then it makes
5648     //       sense to specialize attributes for call sites arguments instead of
5649     //       redirecting requests to the callee argument.
5650     Argument *Arg = getAssociatedArgument();
5651     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5652     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5653     return clampStateAndIndicateChange(
5654         getState(),
5655         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5656   }
5657 
5658   /// See AbstractAttribute::trackStatistics()
5659   void trackStatistics() const override {
5660     if (isAssumedReadNone())
5661       STATS_DECLTRACK_CSARG_ATTR(readnone)
5662     else if (isAssumedReadOnly())
5663       STATS_DECLTRACK_CSARG_ATTR(readonly)
5664     else if (isAssumedWriteOnly())
5665       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5666   }
5667 };
5668 
/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
      : AAMemoryBehaviorFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  // Nothing is manifested, hence nothing to track.
  void trackStatistics() const override {}
};
5683 
/// An AA to represent the memory behavior function attributes.
struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
  AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(Attributor &A). Defined out-of-line.
  virtual ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Function &F = cast<Function>(getAnchorValue());
    // `readnone` makes the memory-location-restricting attributes redundant;
    // drop them so the manifested attribute set stays consistent.
    if (isAssumedReadNone()) {
      F.removeFnAttr(Attribute::ArgMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }
    return AAMemoryBehaviorImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FN_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FN_ATTR(writeonly)
  }
};
5712 
5713 /// AAMemoryBehavior attribute for call sites.
5714 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5715   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5716 
5717   /// See AbstractAttribute::initialize(...).
5718   void initialize(Attributor &A) override {
5719     AAMemoryBehaviorImpl::initialize(A);
5720     Function *F = getAssociatedFunction();
5721     if (!F || !F->hasExactDefinition())
5722       indicatePessimisticFixpoint();
5723   }
5724 
5725   /// See AbstractAttribute::updateImpl(...).
5726   ChangeStatus updateImpl(Attributor &A) override {
5727     // TODO: Once we have call site specific value information we can provide
5728     //       call site specific liveness liveness information and then it makes
5729     //       sense to specialize attributes for call sites arguments instead of
5730     //       redirecting requests to the callee argument.
5731     Function *F = getAssociatedFunction();
5732     const IRPosition &FnPos = IRPosition::function(*F);
5733     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5734     return clampStateAndIndicateChange(
5735         getState(),
5736         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5737   }
5738 
5739   /// See AbstractAttribute::trackStatistics()
5740   void trackStatistics() const override {
5741     if (isAssumedReadNone())
5742       STATS_DECLTRACK_CS_ATTR(readnone)
5743     else if (isAssumedReadOnly())
5744       STATS_DECLTRACK_CS_ATTR(readonly)
5745     else if (isAssumedWriteOnly())
5746       STATS_DECLTRACK_CS_ATTR(writeonly)
5747   }
5748 };
5749 } // namespace
5750 
5751 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5752 
5753   // The current assumed state used to determine a change.
5754   auto AssumedState = getAssumed();
5755 
5756   auto CheckRWInst = [&](Instruction &I) {
5757     // If the instruction has an own memory behavior state, use it to restrict
5758     // the local state. No further analysis is required as the other memory
5759     // state is as optimistic as it gets.
5760     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
5761       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5762           *this, IRPosition::callsite_function(ICS));
5763       intersectAssumedBits(MemBehaviorAA.getAssumed());
5764       return !isAtFixpoint();
5765     }
5766 
5767     // Remove access kind modifiers if necessary.
5768     if (I.mayReadFromMemory())
5769       removeAssumedBits(NO_READS);
5770     if (I.mayWriteToMemory())
5771       removeAssumedBits(NO_WRITES);
5772     return !isAtFixpoint();
5773   };
5774 
5775   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5776     return indicatePessimisticFixpoint();
5777 
5778   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5779                                         : ChangeStatus::UNCHANGED;
5780 }
5781 
/// Derive the memory behavior of a floating value from its (transitive) uses.
ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we avoid
  // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
        *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // Make sure the value is not captured (except through "return"), if
  // it is, any information derived would be irrelevant anyway as we cannot
  // check the potential aliases introduced by the capture. However, no need
  // to fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
      *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return ChangeStatus::CHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Liveness information to exclude dead users.
  // TODO: Take the FnPos once we have call site specific liveness information.
  const auto &LivenessAA = A.getAAFor<AAIsDead>(
      *this, IRPosition::function(*IRP.getAssociatedFunction()),
      /* TrackDependence */ false);

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  // Note: Uses may grow during the loop as followUsersOfUseIn(...) adds
  // transitive uses; index-based iteration over the SetVector keeps this
  // well-defined.
  for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
    const Use *U = Uses[i];
    Instruction *UserI = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
                      << " [Dead: " << (LivenessAA.isAssumedDead(UserI))
                      << "]\n");
    // Skip dead users; record an optional dependence so we are re-run should
    // the liveness information change.
    if (LivenessAA.isAssumedDead(UserI)) {
      A.recordDependence(LivenessAA, *this, DepClassTy::OPTIONAL);
      continue;
    }

    // Check if the users of UserI should also be visited.
    if (followUsersOfUseIn(A, U, UserI))
      for (const Use &UserIUse : UserI->uses())
        Uses.insert(&UserIUse);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);
  }

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}
5848 
5849 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5850                                                   const Instruction *UserI) {
5851   // The loaded value is unrelated to the pointer argument, no need to
5852   // follow the users of the load.
5853   if (isa<LoadInst>(UserI))
5854     return false;
5855 
5856   // By default we follow all uses assuming UserI might leak information on U,
5857   // we have special handling for call sites operands though.
5858   ImmutableCallSite ICS(UserI);
5859   if (!ICS || !ICS.isArgOperand(U))
5860     return true;
5861 
5862   // If the use is a call argument known not to be captured, the users of
5863   // the call do not need to be visited because they have to be unrelated to
5864   // the input. Note that this check is not trivial even though we disallow
5865   // general capturing of the underlying argument. The reason is that the
5866   // call might the argument "through return", which we allow and for which we
5867   // need to check call users.
5868   if (U->get()->getType()->isPointerTy()) {
5869     unsigned ArgNo = ICS.getArgumentNo(U);
5870     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5871         *this, IRPosition::callsite_argument(ICS, ArgNo),
5872         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5873     return !ArgNoCaptureAA.isAssumedNoCapture();
5874   }
5875 
5876   return true;
5877 }
5878 
5879 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5880                                             const Instruction *UserI) {
5881   assert(UserI->mayReadOrWriteMemory());
5882 
5883   switch (UserI->getOpcode()) {
5884   default:
5885     // TODO: Handle all atomics and other side-effect operations we know of.
5886     break;
5887   case Instruction::Load:
5888     // Loads cause the NO_READS property to disappear.
5889     removeAssumedBits(NO_READS);
5890     return;
5891 
5892   case Instruction::Store:
5893     // Stores cause the NO_WRITES property to disappear if the use is the
5894     // pointer operand. Note that we do assume that capturing was taken care of
5895     // somewhere else.
5896     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5897       removeAssumedBits(NO_WRITES);
5898     return;
5899 
5900   case Instruction::Call:
5901   case Instruction::CallBr:
5902   case Instruction::Invoke: {
5903     // For call sites we look at the argument memory behavior attribute (this
5904     // could be recursive!) in order to restrict our own state.
5905     ImmutableCallSite ICS(UserI);
5906 
5907     // Give up on operand bundles.
5908     if (ICS.isBundleOperand(U)) {
5909       indicatePessimisticFixpoint();
5910       return;
5911     }
5912 
5913     // Calling a function does read the function pointer, maybe write it if the
5914     // function is self-modifying.
5915     if (ICS.isCallee(U)) {
5916       removeAssumedBits(NO_READS);
5917       break;
5918     }
5919 
5920     // Adjust the possible access behavior based on the information on the
5921     // argument.
5922     IRPosition Pos;
5923     if (U->get()->getType()->isPointerTy())
5924       Pos = IRPosition::callsite_argument(ICS, ICS.getArgumentNo(U));
5925     else
5926       Pos = IRPosition::callsite_function(ICS);
5927     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5928         *this, Pos,
5929         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5930     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5931     // and at least "known".
5932     intersectAssumedBits(MemBehaviorAA.getAssumed());
5933     return;
5934   }
5935   };
5936 
5937   // Generally, look at the "may-properties" and adjust the assumed state if we
5938   // did not trigger special handling before.
5939   if (UserI->mayReadFromMemory())
5940     removeAssumedBits(NO_READS);
5941   if (UserI->mayWriteToMemory())
5942     removeAssumedBits(NO_WRITES);
5943 }
5944 /// ------------------ Value Constant Range Attribute -------------------------
5945 
5946 struct AAValueConstantRangeImpl : AAValueConstantRange {
5947   using StateType = IntegerRangeState;
5948   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
5949 
5950   /// See AbstractAttribute::getAsStr().
5951   const std::string getAsStr() const override {
5952     std::string Str;
5953     llvm::raw_string_ostream OS(Str);
5954     OS << "range(" << getBitWidth() << ")<";
5955     getKnown().print(OS);
5956     OS << " / ";
5957     getAssumed().print(OS);
5958     OS << ">";
5959     return OS.str();
5960   }
5961 
5962   /// Helper function to get a SCEV expr for the associated value at program
5963   /// point \p I.
5964   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
5965     if (!getAnchorScope())
5966       return nullptr;
5967 
5968     ScalarEvolution *SE =
5969         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
5970             *getAnchorScope());
5971 
5972     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
5973         *getAnchorScope());
5974 
5975     if (!SE || !LI)
5976       return nullptr;
5977 
5978     const SCEV *S = SE->getSCEV(&getAssociatedValue());
5979     if (!I)
5980       return S;
5981 
5982     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
5983   }
5984 
  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I. Falls back to the full (worst) range when SCEV
  /// information is unavailable.
  ConstantRange getConstantRangeFromSCEV(Attributor &A,
                                         const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }
6002 
  /// Helper function to get a range from LVI for the associated value at
  /// program point \p CtxI. Falls back to the full (worst) range when LVI is
  /// unavailable or no context instruction is given.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    // LVI's interface is non-const; the const_casts only adapt to that.
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<BasicBlock *>(CtxI->getParent()),
                                 const_cast<Instruction *>(CtxI));
  }
6021 
6022   /// See AAValueConstantRange::getKnownConstantRange(..).
6023   ConstantRange
6024   getKnownConstantRange(Attributor &A,
6025                         const Instruction *CtxI = nullptr) const override {
6026     if (!CtxI || CtxI == getCtxI())
6027       return getKnown();
6028 
6029     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6030     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6031     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6032   }
6033 
6034   /// See AAValueConstantRange::getAssumedConstantRange(..).
6035   ConstantRange
6036   getAssumedConstantRange(Attributor &A,
6037                           const Instruction *CtxI = nullptr) const override {
6038     // TODO: Make SCEV use Attributor assumption.
6039     //       We may be able to bound a variable range via assumptions in
6040     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6041     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6042 
6043     if (!CtxI || CtxI == getCtxI())
6044       return getAssumed();
6045 
6046     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6047     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6048     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6049   }
6050 
6051   /// See AbstractAttribute::initialize(..).
6052   void initialize(Attributor &A) override {
6053     // Intersect a range given by SCEV.
6054     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6055 
6056     // Intersect a range given by LVI.
6057     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6058   }
6059 
6060   /// Helper function to create MDNode for range metadata.
6061   static MDNode *
6062   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6063                             const ConstantRange &AssumedConstantRange) {
6064     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6065                                   Ty, AssumedConstantRange.getLower())),
6066                               ConstantAsMetadata::get(ConstantInt::get(
6067                                   Ty, AssumedConstantRange.getUpper()))};
6068     return MDNode::get(Ctx, LowAndHigh);
6069   }
6070 
6071   /// Return true if \p Assumed is included in \p KnownRanges.
6072   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6073 
6074     if (Assumed.isFullSet())
6075       return false;
6076 
6077     if (!KnownRanges)
6078       return true;
6079 
6080     // If multiple ranges are annotated in IR, we give up to annotate assumed
6081     // range for now.
6082 
6083     // TODO:  If there exists a known range which containts assumed range, we
6084     // can say assumed range is better.
6085     if (KnownRanges->getNumOperands() > 2)
6086       return false;
6087 
6088     ConstantInt *Lower =
6089         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6090     ConstantInt *Upper =
6091         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6092 
6093     ConstantRange Known(Lower->getValue(), Upper->getValue());
6094     return Known.contains(Assumed) && Known != Assumed;
6095   }
6096 
6097   /// Helper function to set range metadata.
6098   static bool
6099   setRangeMetadataIfisBetterRange(Instruction *I,
6100                                   const ConstantRange &AssumedConstantRange) {
6101     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6102     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6103       if (!AssumedConstantRange.isEmptySet()) {
6104         I->setMetadata(LLVMContext::MD_range,
6105                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6106                                                  AssumedConstantRange));
6107         return true;
6108       }
6109     }
6110     return false;
6111   }
6112 
6113   /// See AbstractAttribute::manifest()
6114   ChangeStatus manifest(Attributor &A) override {
6115     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6116     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6117     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6118 
6119     auto &V = getAssociatedValue();
6120     if (!AssumedConstantRange.isEmptySet() &&
6121         !AssumedConstantRange.isSingleElement()) {
6122       if (Instruction *I = dyn_cast<Instruction>(&V))
6123         if (isa<CallInst>(I) || isa<LoadInst>(I))
6124           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6125             Changed = ChangeStatus::CHANGED;
6126     }
6127 
6128     return Changed;
6129   }
6130 };
6131 
6132 struct AAValueConstantRangeArgument final
6133     : AAArgumentFromCallSiteArguments<
6134           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6135   AAValueConstantRangeArgument(const IRPosition &IRP)
6136       : AAArgumentFromCallSiteArguments<
6137             AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>(
6138             IRP) {}
6139 
6140   /// See AbstractAttribute::trackStatistics()
6141   void trackStatistics() const override {
6142     STATS_DECLTRACK_ARG_ATTR(value_range)
6143   }
6144 };
6145 
/// AAValueConstantRange for the returned position of a function, derived
/// from the ranges of the returned values.
struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl> {
  using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
                                            AAValueConstantRangeImpl>;
  AAValueConstantRangeReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::initialize(...).
  /// Deliberately empty: this skips AAValueConstantRangeImpl::initialize and
  /// hence the SCEV/LVI-based seeding of the known range.
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};
6161 
/// AAValueConstantRange for "floating" values, i.e. values queried at an
/// arbitrary program position rather than a fixed argument/return position.
struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP)
      : AAValueConstantRangeImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // NOTE(review): this calls AAValueConstantRange::initialize, not
    // AAValueConstantRangeImpl::initialize, so the SCEV/LVI seeding performed
    // by the Impl class is skipped here -- confirm this is intentional.
    AAValueConstantRange::initialize(A);
    Value &V = getAssociatedValue();

    // A constant integer has the best possible range: the singleton.
    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    // Undef imposes no constraint; fix the (optimistic) state as-is.
    if (isa<UndefValue>(&V)) {
      indicateOptimisticFixpoint();
      return;
    }

    // Integer binary operators and compares are refined in updateImpl; leave
    // the state open for the fixpoint iteration.
    if (auto *I = dyn_cast<Instruction>(&V))
      if (isa<BinaryOperator>(I) || isa<CmpInst>(I)) {
        Value *LHS = I->getOperand(0);
        Value *RHS = I->getOperand(1);

        if (LHS->getType()->isIntegerTy() && RHS->getType()->isIntegerTy())
          return;
      }

    // If it is a load instruction with range metadata, use it.
    if (LoadInst *LI = dyn_cast<LoadInst>(&V))
      if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));
        return;
      }

    // Otherwise we give up.
    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
                      << getAssociatedValue() << "\n");
  }

  /// Combine the assumed ranges of both operands of \p BinOp (queried at
  /// program point \p CtxI) through ConstantRange::binaryOp and union the
  /// result into \p T. Returns T.isValidState().
  bool calculateBinaryOperator(Attributor &A, BinaryOperator *BinOp,
                               IntegerRangeState &T, Instruction *CtxI) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);

    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);

    T.unionAssumed(AssumedRange);

    // TODO: Track a known state too.

    return T.isValidState();
  }

  /// Try to decide \p CmpI from the operand ranges: if the compare must be
  /// true (or must be false) for every pair of values in the ranges, record
  /// the corresponding 1-bit constant; otherwise record the full 1-bit range.
  bool calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
                        Instruction *CtxI) {
    Value *LHS = CmpI->getOperand(0);
    Value *RHS = CmpI->getOperand(1);

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));

    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // If one of them is empty set, we can't decide.
    if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
      return true;

    bool MustTrue = false, MustFalse = false;

    // Values of LHS for which the predicate MAY hold against some RHS value.
    auto AllowedRegion =
        ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);

    // Values of LHS for which the predicate holds against EVERY RHS value.
    auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
        CmpI->getPredicate(), RHSAARange);

    if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
      MustFalse = true;

    if (SatisfyingRegion.contains(LHSAARange))
      MustTrue = true;

    assert((!MustTrue || !MustFalse) &&
           "Either MustTrue or MustFalse should be false!");

    if (MustTrue)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
    else if (MustFalse)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
    else
      T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
                      << " " << RHSAA << "\n");

    // TODO: Track a known state too.
    return T.isValidState();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Instruction *CtxI = getCtxI();
    // Visitor for the generic value traversal; accumulates ranges into \p T.
    auto VisitValueCB = [&](Value &V, IntegerRangeState &T,
                            bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I) {

        // If the value is not instruction, we query AA to Attributor.
        const auto &AA =
            A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));

        // Clamp operator is not used to utilize a program point CtxI.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      if (auto *BinOp = dyn_cast<BinaryOperator>(I))
        return calculateBinaryOperator(A, BinOp, T, CtxI);
      else if (auto *CmpI = dyn_cast<CmpInst>(I))
        return calculateCmpInst(A, CmpI, T, CtxI);
      else {
        // Give up with other instructions.
        // TODO: Add other instructions

        T.indicatePessimisticFixpoint();
        return false;
      }
    };

    IntegerRangeState T(getBitWidth());

    if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }
};
6320 
6321 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6322   AAValueConstantRangeFunction(const IRPosition &IRP)
6323       : AAValueConstantRangeImpl(IRP) {}
6324 
6325   /// See AbstractAttribute::initialize(...).
6326   ChangeStatus updateImpl(Attributor &A) override {
6327     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6328                      "not be called");
6329   }
6330 
6331   /// See AbstractAttribute::trackStatistics()
6332   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6333 };
6334 
/// AAValueConstantRange for a call site position; reuses the function-level
/// logic and only changes the tracked statistic.
struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
  AAValueConstantRangeCallSite(const IRPosition &IRP)
      : AAValueConstantRangeFunction(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
};
6342 
6343 struct AAValueConstantRangeCallSiteReturned
6344     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6345                                      AAValueConstantRangeImpl> {
6346   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
6347       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6348                                        AAValueConstantRangeImpl>(IRP) {}
6349 
6350   /// See AbstractAttribute::initialize(...).
6351   void initialize(Attributor &A) override {
6352     // If it is a load instruction with range metadata, use the metadata.
6353     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6354       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6355         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6356 
6357     AAValueConstantRangeImpl::initialize(A);
6358   }
6359 
6360   /// See AbstractAttribute::trackStatistics()
6361   void trackStatistics() const override {
6362     STATS_DECLTRACK_CSRET_ATTR(value_range)
6363   }
6364 };
/// AAValueConstantRange for a call site argument; treated like a floating
/// value, only the tracked statistic differs.
struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
  AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
      : AAValueConstantRangeFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_range)
  }
};
6374 /// ----------------------------------------------------------------------------
6375 ///                               Attributor
6376 /// ----------------------------------------------------------------------------
6377 
6378 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
6379                                const AAIsDead *LivenessAA) {
6380   const Instruction *CtxI = AA.getIRPosition().getCtxI();
6381   if (!CtxI)
6382     return false;
6383 
6384   // TODO: Find a good way to utilize fine and coarse grained liveness
6385   // information.
6386   if (!LivenessAA)
6387     LivenessAA =
6388         &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
6389                             /* TrackDependence */ false);
6390 
6391   // Don't check liveness for AAIsDead.
6392   if (&AA == LivenessAA)
6393     return false;
6394 
6395   if (!LivenessAA->isAssumedDead(CtxI))
6396     return false;
6397 
6398   // We actually used liveness information so we have to record a dependence.
6399   recordDependence(*LivenessAA, AA, DepClassTy::OPTIONAL);
6400 
6401   return true;
6402 }
6403 
6404 bool Attributor::checkForAllUses(
6405     const function_ref<bool(const Use &, bool &)> &Pred,
6406     const AbstractAttribute &QueryingAA, const Value &V) {
6407   const IRPosition &IRP = QueryingAA.getIRPosition();
6408   SmallVector<const Use *, 16> Worklist;
6409   SmallPtrSet<const Use *, 16> Visited;
6410 
6411   for (const Use &U : V.uses())
6412     Worklist.push_back(&U);
6413 
6414   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
6415                     << " initial uses to check\n");
6416 
6417   if (Worklist.empty())
6418     return true;
6419 
6420   bool AnyDead = false;
6421   const Function *ScopeFn = IRP.getAnchorScope();
6422   const auto *LivenessAA =
6423       ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
6424                                     /* TrackDependence */ false)
6425               : nullptr;
6426 
6427   while (!Worklist.empty()) {
6428     const Use *U = Worklist.pop_back_val();
6429     if (!Visited.insert(U).second)
6430       continue;
6431     LLVM_DEBUG(dbgs() << "[Attributor] Check use: " << **U << "\n");
6432     if (Instruction *UserI = dyn_cast<Instruction>(U->getUser()))
6433       if (LivenessAA && LivenessAA->isAssumedDead(UserI)) {
6434         LLVM_DEBUG(dbgs() << "[Attributor] Dead user: " << *UserI << ": "
6435                           << *LivenessAA << "\n");
6436         AnyDead = true;
6437         continue;
6438       }
6439 
6440     bool Follow = false;
6441     if (!Pred(*U, Follow))
6442       return false;
6443     if (!Follow)
6444       continue;
6445     for (const Use &UU : U->getUser()->uses())
6446       Worklist.push_back(&UU);
6447   }
6448 
6449   if (AnyDead)
6450     recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
6451 
6452   return true;
6453 }
6454 
6455 bool Attributor::checkForAllCallSites(
6456     const function_ref<bool(AbstractCallSite)> &Pred,
6457     const AbstractAttribute &QueryingAA, bool RequireAllCallSites,
6458     bool &AllCallSitesKnown) {
6459   // We can try to determine information from
6460   // the call sites. However, this is only possible all call sites are known,
6461   // hence the function has internal linkage.
6462   const IRPosition &IRP = QueryingAA.getIRPosition();
6463   const Function *AssociatedFunction = IRP.getAssociatedFunction();
6464   if (!AssociatedFunction) {
6465     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
6466                       << "\n");
6467     AllCallSitesKnown = false;
6468     return false;
6469   }
6470 
6471   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
6472                               &QueryingAA, AllCallSitesKnown);
6473 }
6474 
/// Apply \p Pred to every call site of \p Fn. \p AllCallSitesKnown ends up
/// true only if all call sites were required, the function has local linkage,
/// and no call site had to be skipped as dead.
bool Attributor::checkForAllCallSites(
    const function_ref<bool(AbstractCallSite)> &Pred, const Function &Fn,
    bool RequireAllCallSites, const AbstractAttribute *QueryingAA,
    bool &AllCallSitesKnown) {
  // Without local linkage the function can be called from outside this
  // module, so we can never see all call sites.
  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << Fn.getName()
        << " has no internal linkage, hence not all call sites are known\n");
    AllCallSitesKnown = false;
    return false;
  }

  // If we do not require all call sites we might not see all.
  AllCallSitesKnown = RequireAllCallSites;

  for (const Use &U : Fn.uses()) {
    AbstractCallSite ACS(&U);
    if (!ACS) {
      LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
                        << " has non call site use " << *U.get() << " in "
                        << *U.getUser() << "\n");
      // BlockAddress users are allowed.
      if (isa<BlockAddress>(U.getUser()))
        continue;
      return false;
    }

    Instruction *I = ACS.getInstruction();
    Function *Caller = I->getFunction();

    // Look up (without forcing computation of) liveness for the caller so we
    // can skip calls that are assumed dead.
    const auto *LivenessAA =
        lookupAAFor<AAIsDead>(IRPosition::function(*Caller), QueryingAA,
                              /* TrackDependence */ false);

    // Skip dead calls.
    if (LivenessAA && LivenessAA->isAssumedDead(I)) {
      // We actually used liveness information so we have to record a
      // dependence.
      if (QueryingAA)
        recordDependence(*LivenessAA, *QueryingAA, DepClassTy::OPTIONAL);
      AllCallSitesKnown = false;
      continue;
    }

    // For callback calls the callee is referenced through the callback
    // metadata; judge the use through that callee-use instead of \p U.
    const Use *EffectiveUse =
        ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
    if (!ACS.isCallee(EffectiveUse)) {
      if (!RequireAllCallSites)
        continue;
      LLVM_DEBUG(dbgs() << "[Attributor] User " << EffectiveUse->getUser()
                        << " is an invalid use of " << Fn.getName() << "\n");
      return false;
    }

    if (Pred(ACS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *ACS.getInstruction() << "\n");
    return false;
  }

  return true;
}
6540 
6541 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
6542     const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
6543         &Pred,
6544     const AbstractAttribute &QueryingAA) {
6545 
6546   const IRPosition &IRP = QueryingAA.getIRPosition();
6547   // Since we need to provide return instructions we have to have an exact
6548   // definition.
6549   const Function *AssociatedFunction = IRP.getAssociatedFunction();
6550   if (!AssociatedFunction)
6551     return false;
6552 
6553   // If this is a call site query we use the call site specific return values
6554   // and liveness information.
6555   // TODO: use the function scope once we have call site AAReturnedValues.
6556   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
6557   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
6558   if (!AARetVal.getState().isValidState())
6559     return false;
6560 
6561   return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
6562 }
6563 
6564 bool Attributor::checkForAllReturnedValues(
6565     const function_ref<bool(Value &)> &Pred,
6566     const AbstractAttribute &QueryingAA) {
6567 
6568   const IRPosition &IRP = QueryingAA.getIRPosition();
6569   const Function *AssociatedFunction = IRP.getAssociatedFunction();
6570   if (!AssociatedFunction)
6571     return false;
6572 
6573   // TODO: use the function scope once we have call site AAReturnedValues.
6574   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
6575   const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
6576   if (!AARetVal.getState().isValidState())
6577     return false;
6578 
6579   return AARetVal.checkForAllReturnedValuesAndReturnInsts(
6580       [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
6581         return Pred(RV);
6582       });
6583 }
6584 
6585 static bool
6586 checkForAllInstructionsImpl(InformationCache::OpcodeInstMapTy &OpcodeInstMap,
6587                             const function_ref<bool(Instruction &)> &Pred,
6588                             const AAIsDead *LivenessAA, bool &AnyDead,
6589                             const ArrayRef<unsigned> &Opcodes) {
6590   for (unsigned Opcode : Opcodes) {
6591     for (Instruction *I : OpcodeInstMap[Opcode]) {
6592       // Skip dead instructions.
6593       if (LivenessAA && LivenessAA->isAssumedDead(I)) {
6594         AnyDead = true;
6595         continue;
6596       }
6597 
6598       if (!Pred(*I))
6599         return false;
6600     }
6601   }
6602   return true;
6603 }
6604 
6605 bool Attributor::checkForAllInstructions(
6606     const llvm::function_ref<bool(Instruction &)> &Pred,
6607     const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
6608 
6609   const IRPosition &IRP = QueryingAA.getIRPosition();
6610   // Since we need to provide instructions we have to have an exact definition.
6611   const Function *AssociatedFunction = IRP.getAssociatedFunction();
6612   if (!AssociatedFunction)
6613     return false;
6614 
6615   // TODO: use the function scope once we have call site AAReturnedValues.
6616   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
6617   const auto &LivenessAA =
6618       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
6619   bool AnyDead = false;
6620 
6621   auto &OpcodeInstMap =
6622       InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
6623   if (!checkForAllInstructionsImpl(OpcodeInstMap, Pred, &LivenessAA, AnyDead,
6624                                    Opcodes))
6625     return false;
6626 
6627   // If we actually used liveness information so we have to record a dependence.
6628   if (AnyDead)
6629     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
6630 
6631   return true;
6632 }
6633 
6634 bool Attributor::checkForAllReadWriteInstructions(
6635     const llvm::function_ref<bool(Instruction &)> &Pred,
6636     AbstractAttribute &QueryingAA) {
6637 
6638   const Function *AssociatedFunction =
6639       QueryingAA.getIRPosition().getAssociatedFunction();
6640   if (!AssociatedFunction)
6641     return false;
6642 
6643   // TODO: use the function scope once we have call site AAReturnedValues.
6644   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
6645   const auto &LivenessAA =
6646       getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
6647   bool AnyDead = false;
6648 
6649   for (Instruction *I :
6650        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
6651     // Skip dead instructions.
6652     if (LivenessAA.isAssumedDead(I)) {
6653       AnyDead = true;
6654       continue;
6655     }
6656 
6657     if (!Pred(*I))
6658       return false;
6659   }
6660 
6661   // If we actually used liveness information so we have to record a dependence.
6662   if (AnyDead)
6663     recordDependence(LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
6664 
6665   return true;
6666 }
6667 
6668 ChangeStatus Attributor::run(Module &M) {
6669   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
6670                     << AllAbstractAttributes.size()
6671                     << " abstract attributes.\n");
6672 
6673   // Now that all abstract attributes are collected and initialized we start
6674   // the abstract analysis.
6675 
6676   unsigned IterationCounter = 1;
6677 
6678   SmallVector<AbstractAttribute *, 64> ChangedAAs;
6679   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
6680   Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
6681 
6682   bool RecomputeDependences = false;
6683 
6684   do {
6685     // Remember the size to determine new attributes.
6686     size_t NumAAs = AllAbstractAttributes.size();
6687     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
6688                       << ", Worklist size: " << Worklist.size() << "\n");
6689 
6690     // For invalid AAs we can fix dependent AAs that have a required dependence,
6691     // thereby folding long dependence chains in a single step without the need
6692     // to run updates.
6693     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
6694       AbstractAttribute *InvalidAA = InvalidAAs[u];
6695       auto &QuerriedAAs = QueryMap[InvalidAA];
6696       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
6697                         << QuerriedAAs.RequiredAAs.size() << "/"
6698                         << QuerriedAAs.OptionalAAs.size()
6699                         << " required/optional dependences\n");
6700       for (AbstractAttribute *DepOnInvalidAA : QuerriedAAs.RequiredAAs) {
6701         AbstractState &DOIAAState = DepOnInvalidAA->getState();
6702         DOIAAState.indicatePessimisticFixpoint();
6703         ++NumAttributesFixedDueToRequiredDependences;
6704         assert(DOIAAState.isAtFixpoint() && "Expected fixpoint state!");
6705         if (!DOIAAState.isValidState())
6706           InvalidAAs.insert(DepOnInvalidAA);
6707         else
6708           ChangedAAs.push_back(DepOnInvalidAA);
6709       }
6710       if (!RecomputeDependences)
6711         Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
6712                         QuerriedAAs.OptionalAAs.end());
6713     }
6714 
6715     // If dependences (=QueryMap) are recomputed we have to look at all abstract
6716     // attributes again, regardless of what changed in the last iteration.
6717     if (RecomputeDependences) {
6718       LLVM_DEBUG(
6719           dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
6720       QueryMap.clear();
6721       ChangedAAs.clear();
6722       Worklist.insert(AllAbstractAttributes.begin(),
6723                       AllAbstractAttributes.end());
6724     }
6725 
6726     // Add all abstract attributes that are potentially dependent on one that
6727     // changed to the work list.
6728     for (AbstractAttribute *ChangedAA : ChangedAAs) {
6729       auto &QuerriedAAs = QueryMap[ChangedAA];
6730       Worklist.insert(QuerriedAAs.OptionalAAs.begin(),
6731                       QuerriedAAs.OptionalAAs.end());
6732       Worklist.insert(QuerriedAAs.RequiredAAs.begin(),
6733                       QuerriedAAs.RequiredAAs.end());
6734     }
6735 
6736     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
6737                       << ", Worklist+Dependent size: " << Worklist.size()
6738                       << "\n");
6739 
6740     // Reset the changed and invalid set.
6741     ChangedAAs.clear();
6742     InvalidAAs.clear();
6743 
6744     // Update all abstract attribute in the work list and record the ones that
6745     // changed.
6746     for (AbstractAttribute *AA : Worklist)
6747       if (!AA->getState().isAtFixpoint() && !isAssumedDead(*AA, nullptr)) {
6748         QueriedNonFixAA = false;
6749         if (AA->update(*this) == ChangeStatus::CHANGED) {
6750           ChangedAAs.push_back(AA);
6751           if (!AA->getState().isValidState())
6752             InvalidAAs.insert(AA);
6753         } else if (!QueriedNonFixAA) {
6754           // If the attribute did not query any non-fix information, the state
6755           // will not change and we can indicate that right away.
6756           AA->getState().indicateOptimisticFixpoint();
6757         }
6758       }
6759 
6760     // Check if we recompute the dependences in the next iteration.
6761     RecomputeDependences = (DepRecomputeInterval > 0 &&
6762                             IterationCounter % DepRecomputeInterval == 0);
6763 
6764     // Add attributes to the changed set if they have been created in the last
6765     // iteration.
6766     ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
6767                       AllAbstractAttributes.end());
6768 
6769     // Reset the work list and repopulate with the changed abstract attributes.
6770     // Note that dependent ones are added above.
6771     Worklist.clear();
6772     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
6773 
6774   } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
6775                                  VerifyMaxFixpointIterations));
6776 
6777   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
6778                     << IterationCounter << "/" << MaxFixpointIterations
6779                     << " iterations\n");
6780 
6781   size_t NumFinalAAs = AllAbstractAttributes.size();
6782 
6783   // Reset abstract arguments not settled in a sound fixpoint by now. This
6784   // happens when we stopped the fixpoint iteration early. Note that only the
6785   // ones marked as "changed" *and* the ones transitively depending on them
6786   // need to be reverted to a pessimistic state. Others might not be in a
6787   // fixpoint state but we can use the optimistic results for them anyway.
6788   SmallPtrSet<AbstractAttribute *, 32> Visited;
6789   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
6790     AbstractAttribute *ChangedAA = ChangedAAs[u];
6791     if (!Visited.insert(ChangedAA).second)
6792       continue;
6793 
6794     AbstractState &State = ChangedAA->getState();
6795     if (!State.isAtFixpoint()) {
6796       State.indicatePessimisticFixpoint();
6797 
6798       NumAttributesTimedOut++;
6799     }
6800 
6801     auto &QuerriedAAs = QueryMap[ChangedAA];
6802     ChangedAAs.append(QuerriedAAs.OptionalAAs.begin(),
6803                       QuerriedAAs.OptionalAAs.end());
6804     ChangedAAs.append(QuerriedAAs.RequiredAAs.begin(),
6805                       QuerriedAAs.RequiredAAs.end());
6806   }
6807 
6808   LLVM_DEBUG({
6809     if (!Visited.empty())
6810       dbgs() << "\n[Attributor] Finalized " << Visited.size()
6811              << " abstract attributes.\n";
6812   });
6813 
6814   unsigned NumManifested = 0;
6815   unsigned NumAtFixpoint = 0;
6816   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
6817   for (AbstractAttribute *AA : AllAbstractAttributes) {
6818     AbstractState &State = AA->getState();
6819 
6820     // If there is not already a fixpoint reached, we can now take the
6821     // optimistic state. This is correct because we enforced a pessimistic one
6822     // on abstract attributes that were transitively dependent on a changed one
6823     // already above.
6824     if (!State.isAtFixpoint())
6825       State.indicateOptimisticFixpoint();
6826 
6827     // If the state is invalid, we do not try to manifest it.
6828     if (!State.isValidState())
6829       continue;
6830 
6831     // Skip dead code.
6832     if (isAssumedDead(*AA, nullptr))
6833       continue;
6834     // Manifest the state and record if we changed the IR.
6835     ChangeStatus LocalChange = AA->manifest(*this);
6836     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
6837       AA->trackStatistics();
6838 
6839     ManifestChange = ManifestChange | LocalChange;
6840 
6841     NumAtFixpoint++;
6842     NumManifested += (LocalChange == ChangeStatus::CHANGED);
6843   }
6844 
6845   (void)NumManifested;
6846   (void)NumAtFixpoint;
6847   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
6848                     << " arguments while " << NumAtFixpoint
6849                     << " were in a valid fixpoint state\n");
6850 
6851   NumAttributesManifested += NumManifested;
6852   NumAttributesValidFixpoint += NumAtFixpoint;
6853 
6854   (void)NumFinalAAs;
6855   assert(
6856       NumFinalAAs == AllAbstractAttributes.size() &&
6857       "Expected the final number of abstract attributes to remain unchanged!");
6858 
6859   // Delete stuff at the end to avoid invalid references and a nice order.
6860   {
6861     LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
6862                       << ToBeDeletedFunctions.size() << " functions and "
6863                       << ToBeDeletedBlocks.size() << " blocks and "
6864                       << ToBeDeletedInsts.size() << " instructions and "
6865                       << ToBeChangedUses.size() << " uses\n");
6866 
6867     SmallVector<WeakTrackingVH, 32> DeadInsts;
6868     SmallVector<Instruction *, 32> TerminatorsToFold;
6869 
6870     for (auto &It : ToBeChangedUses) {
6871       Use *U = It.first;
6872       Value *NewV = It.second;
6873       Value *OldV = U->get();
6874       LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
6875                         << " instead of " << *OldV << "\n");
6876       U->set(NewV);
6877       if (Instruction *I = dyn_cast<Instruction>(OldV))
6878         if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
6879             isInstructionTriviallyDead(I)) {
6880           DeadInsts.push_back(I);
6881         }
6882       if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
6883         Instruction *UserI = cast<Instruction>(U->getUser());
6884         if (isa<UndefValue>(NewV)) {
6885           ToBeChangedToUnreachableInsts.insert(UserI);
6886         } else {
6887           TerminatorsToFold.push_back(UserI);
6888         }
6889       }
6890     }
6891     for (auto &V : InvokeWithDeadSuccessor)
6892       if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
6893         bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
6894         bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
6895         bool Invoke2CallAllowed =
6896             !AAIsDeadFunction::mayCatchAsynchronousExceptions(
6897                 *II->getFunction());
6898         assert((UnwindBBIsDead || NormalBBIsDead) &&
6899                "Invoke does not have dead successors!");
6900         BasicBlock *BB = II->getParent();
6901         BasicBlock *NormalDestBB = II->getNormalDest();
6902         if (UnwindBBIsDead) {
6903           Instruction *NormalNextIP = &NormalDestBB->front();
6904           if (Invoke2CallAllowed) {
6905             changeToCall(II);
6906             NormalNextIP = BB->getTerminator();
6907           }
6908           if (NormalBBIsDead)
6909             ToBeChangedToUnreachableInsts.insert(NormalNextIP);
6910         } else {
6911           assert(NormalBBIsDead && "Broken invariant!");
6912           if (!NormalDestBB->getUniquePredecessor())
6913             NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
6914           ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
6915         }
6916       }
6917     for (auto &V : ToBeChangedToUnreachableInsts)
6918       if (Instruction *I = dyn_cast_or_null<Instruction>(V))
6919         changeToUnreachable(I, /* UseLLVMTrap */ false);
6920     for (Instruction *I : TerminatorsToFold)
6921       ConstantFoldTerminator(I->getParent());
6922 
6923     for (auto &V : ToBeDeletedInsts) {
6924       if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
6925         I->replaceAllUsesWith(UndefValue::get(I->getType()));
6926         if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
6927           DeadInsts.push_back(I);
6928         else
6929           I->eraseFromParent();
6930       }
6931     }
6932 
6933     RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
6934 
6935     if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
6936       SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
6937       ToBeDeletedBBs.reserve(NumDeadBlocks);
6938       ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
6939       // Actually we do not delete the blocks but squash them into a single
6940       // unreachable but untangling branches that jump here is something we need
6941       // to do in a more generic way.
6942       DetatchDeadBlocks(ToBeDeletedBBs, nullptr);
6943       STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
6944       BUILD_STAT_NAME(AAIsDead, BasicBlock) += ToBeDeletedBlocks.size();
6945     }
6946 
6947     // Identify dead internal functions and delete them. This happens outside
6948     // the other fixpoint analysis as we might treat potentially dead functions
6949     // as live to lower the number of iterations. If they happen to be dead, the
6950     // below fixpoint loop will identify and eliminate them.
6951     SmallVector<Function *, 8> InternalFns;
6952     for (Function &F : M)
6953       if (F.hasLocalLinkage())
6954         InternalFns.push_back(&F);
6955 
6956     bool FoundDeadFn = true;
6957     while (FoundDeadFn) {
6958       FoundDeadFn = false;
6959       for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
6960         Function *F = InternalFns[u];
6961         if (!F)
6962           continue;
6963 
6964         bool AllCallSitesKnown;
6965         if (!checkForAllCallSites(
6966                 [this](AbstractCallSite ACS) {
6967                   return ToBeDeletedFunctions.count(
6968                       ACS.getInstruction()->getFunction());
6969                 },
6970                 *F, true, nullptr, AllCallSitesKnown))
6971           continue;
6972 
6973         ToBeDeletedFunctions.insert(F);
6974         InternalFns[u] = nullptr;
6975         FoundDeadFn = true;
6976       }
6977     }
6978   }
6979 
6980   STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
6981   BUILD_STAT_NAME(AAIsDead, Function) += ToBeDeletedFunctions.size();
6982 
6983   // Rewrite the functions as requested during manifest.
6984   ManifestChange = ManifestChange | rewriteFunctionSignatures();
6985 
6986   for (Function *Fn : ToBeDeletedFunctions) {
6987     Fn->deleteBody();
6988     Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
6989     Fn->eraseFromParent();
6990   }
6991 
6992   if (VerifyMaxFixpointIterations &&
6993       IterationCounter != MaxFixpointIterations) {
6994     errs() << "\n[Attributor] Fixpoint iteration done after: "
6995            << IterationCounter << "/" << MaxFixpointIterations
6996            << " iterations\n";
6997     llvm_unreachable("The fixpoint was not reached with exactly the number of "
6998                      "specified iterations!");
6999   }
7000 
7001   return ManifestChange;
7002 }
7003 
7004 bool Attributor::isValidFunctionSignatureRewrite(
7005     Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
7006 
7007   auto CallSiteCanBeChanged = [](AbstractCallSite ACS) {
7008     // Forbid must-tail calls for now.
7009     return !ACS.isCallbackCall() && !ACS.getCallSite().isMustTailCall();
7010   };
7011 
7012   Function *Fn = Arg.getParent();
7013   // Avoid var-arg functions for now.
7014   if (Fn->isVarArg()) {
7015     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
7016     return false;
7017   }
7018 
7019   // Avoid functions with complicated argument passing semantics.
7020   AttributeList FnAttributeList = Fn->getAttributes();
7021   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
7022       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
7023       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca)) {
7024     LLVM_DEBUG(
7025         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
7026     return false;
7027   }
7028 
7029   // Avoid callbacks for now.
7030   bool AllCallSitesKnown;
7031   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
7032                             AllCallSitesKnown)) {
7033     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
7034     return false;
7035   }
7036 
7037   auto InstPred = [](Instruction &I) {
7038     if (auto *CI = dyn_cast<CallInst>(&I))
7039       return !CI->isMustTailCall();
7040     return true;
7041   };
7042 
7043   // Forbid must-tail calls for now.
7044   // TODO:
7045   bool AnyDead;
7046   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
7047   if (!checkForAllInstructionsImpl(OpcodeInstMap, InstPred, nullptr, AnyDead,
7048                                    {Instruction::Call})) {
7049     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
7050     return false;
7051   }
7052 
7053   return true;
7054 }
7055 
7056 bool Attributor::registerFunctionSignatureRewrite(
7057     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
7058     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
7059     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
7060   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
7061                     << Arg.getParent()->getName() << " with "
7062                     << ReplacementTypes.size() << " replacements\n");
7063   assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
7064          "Cannot register an invalid rewrite");
7065 
7066   Function *Fn = Arg.getParent();
7067   SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = ArgumentReplacementMap[Fn];
7068   if (ARIs.empty())
7069     ARIs.resize(Fn->arg_size());
7070 
7071   // If we have a replacement already with less than or equal new arguments,
7072   // ignore this request.
7073   ArgumentReplacementInfo *&ARI = ARIs[Arg.getArgNo()];
7074   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
7075     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
7076     return false;
7077   }
7078 
7079   // If we have a replacement already but we like the new one better, delete
7080   // the old.
7081   if (ARI)
7082     delete ARI;
7083 
7084   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
7085                     << Arg.getParent()->getName() << " with "
7086                     << ReplacementTypes.size() << " replacements\n");
7087 
7088   // Remember the replacement.
7089   ARI = new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
7090                                     std::move(CalleeRepairCB),
7091                                     std::move(ACSRepairCB));
7092 
7093   return true;
7094 }
7095 
ChangeStatus Attributor::rewriteFunctionSignatures() {
  // Materialize every rewrite registered through
  // registerFunctionSignatureRewrite: for each affected function we build a
  // new function with the replacement signature, move the body over, create
  // replacement call sites, rewire the arguments, and finally schedule the
  // old function for deletion. Returns CHANGED if any function was rewritten.
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  for (auto &It : ArgumentReplacementMap) {
    Function *OldFn = It.getFirst();

    // Deleted functions do not require rewrites.
    if (ToBeDeletedFunctions.count(OldFn))
      continue;

    const SmallVectorImpl<ArgumentReplacementInfo *> &ARIs = It.getSecond();
    assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");

    SmallVector<Type *, 16> NewArgumentTypes;
    SmallVector<AttributeSet, 16> NewArgumentAttributes;

    // Collect replacement argument types and copy over existing attributes.
    // Arguments without a registered replacement keep their type and
    // attributes; replaced arguments expand to their replacement types with
    // empty attribute sets.
    AttributeList OldFnAttributeList = OldFn->getAttributes();
    for (Argument &Arg : OldFn->args()) {
      if (ArgumentReplacementInfo *ARI = ARIs[Arg.getArgNo()]) {
        NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
                                ARI->ReplacementTypes.end());
        NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
                                     AttributeSet());
      } else {
        NewArgumentTypes.push_back(Arg.getType());
        NewArgumentAttributes.push_back(
            OldFnAttributeList.getParamAttributes(Arg.getArgNo()));
      }
    }

    FunctionType *OldFnTy = OldFn->getFunctionType();
    Type *RetTy = OldFnTy->getReturnType();

    // Construct the new function type using the new arguments types.
    FunctionType *NewFnTy =
        FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());

    LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
                      << "' from " << *OldFn->getFunctionType() << " to "
                      << *NewFnTy << "\n");

    // Create the new function body and insert it into the module.
    Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
                                       OldFn->getAddressSpace(), "");
    OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
    NewFn->takeName(OldFn);
    NewFn->copyAttributesFrom(OldFn);

    // Patch the pointer to LLVM function in debug info descriptor.
    NewFn->setSubprogram(OldFn->getSubprogram());
    OldFn->setSubprogram(nullptr);

    // Recompute the parameter attributes list based on the new arguments for
    // the function.
    LLVMContext &Ctx = OldFn->getContext();
    NewFn->setAttributes(AttributeList::get(
        Ctx, OldFnAttributeList.getFnAttributes(),
        OldFnAttributeList.getRetAttributes(), NewArgumentAttributes));

    // Since we have now created the new function, splice the body of the old
    // function right into the new function, leaving the old rotting hulk of the
    // function empty.
    NewFn->getBasicBlockList().splice(NewFn->begin(),
                                      OldFn->getBasicBlockList());

    // Set of all "call-like" instructions that invoke the old function mapped
    // to their new replacements.
    SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;

    // Callback to create a new "call-like" instruction for a given one.
    auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
      CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
      const AttributeList &OldCallAttributeList = OldCB->getAttributes();

      // Collect the new argument operands for the replacement call site.
      SmallVector<Value *, 16> NewArgOperands;
      SmallVector<AttributeSet, 16> NewArgOperandAttributes;
      for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
        unsigned NewFirstArgNum = NewArgOperands.size();
        (void)NewFirstArgNum; // only used inside assert.
        if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
          // Let the registered callback produce the replacement operands.
          if (ARI->ACSRepairCB)
            ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
          assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
                     NewArgOperands.size() &&
                 "ACS repair callback did not provide as many operand as new "
                 "types were registered!");
          // TODO: Expose the attribute set to the ACS repair callback
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttributes(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB =
            InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
                               NewArgOperands, OperandBundleDefs, "", OldCB);
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
                                       "", OldCB);
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      uint64_t W;
      if (OldCB->extractProfTotalWeight(W))
        NewCB->setProfWeight(W);
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->setDebugLoc(OldCB->getDebugLoc());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttributes(),
          OldCallAttributeList.getRetAttributes(), NewArgOperandAttributes));

      // The old call site is erased later, after all of them were visited.
      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    bool AllCallSitesKnown;
    bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
                                        true, nullptr, AllCallSitesKnown);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments.
    auto OldFnArgIt = OldFn->arg_begin();
    auto NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (ArgumentReplacementInfo *ARI = ARIs[OldArgNum]) {
        // Let the registered callback wire up the replacement arguments in
        // the new function body; skip past all replacement positions.
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // The old function is now empty and dead; it is erased by the caller.
    ToBeDeletedFunctions.insert(OldFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}
7268 
7269 void Attributor::initializeInformationCache(Function &F) {
7270 
7271   // Walk all instructions to find interesting instructions that might be
7272   // queried by abstract attributes during their initialization or update.
7273   // This has to happen before we create attributes.
7274   auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
7275   auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
7276 
7277   for (Instruction &I : instructions(&F)) {
7278     bool IsInterestingOpcode = false;
7279 
7280     // To allow easy access to all instructions in a function with a given
7281     // opcode we store them in the InfoCache. As not all opcodes are interesting
7282     // to concrete attributes we only cache the ones that are as identified in
7283     // the following switch.
7284     // Note: There are no concrete attributes now so this is initially empty.
7285     switch (I.getOpcode()) {
7286     default:
7287       assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
7288              "New call site/base instruction type needs to be known int the "
7289              "Attributor.");
7290       break;
7291     case Instruction::Load:
7292       // The alignment of a pointer is interesting for loads.
7293     case Instruction::Store:
7294       // The alignment of a pointer is interesting for stores.
7295     case Instruction::Call:
7296     case Instruction::CallBr:
7297     case Instruction::Invoke:
7298     case Instruction::CleanupRet:
7299     case Instruction::CatchSwitch:
7300     case Instruction::AtomicRMW:
7301     case Instruction::AtomicCmpXchg:
7302     case Instruction::Br:
7303     case Instruction::Resume:
7304     case Instruction::Ret:
7305       IsInterestingOpcode = true;
7306     }
7307     if (IsInterestingOpcode)
7308       InstOpcodeMap[I.getOpcode()].push_back(&I);
7309     if (I.mayReadOrWriteMemory())
7310       ReadOrWriteInsts.push_back(&I);
7311   }
7312 }
7313 
7314 void Attributor::recordDependence(const AbstractAttribute &FromAA,
7315                                   const AbstractAttribute &ToAA,
7316                                   DepClassTy DepClass) {
7317   if (FromAA.getState().isAtFixpoint())
7318     return;
7319 
7320   if (DepClass == DepClassTy::REQUIRED)
7321     QueryMap[&FromAA].RequiredAAs.insert(
7322         const_cast<AbstractAttribute *>(&ToAA));
7323   else
7324     QueryMap[&FromAA].OptionalAAs.insert(
7325         const_cast<AbstractAttribute *>(&ToAA));
7326   QueriedNonFixAA = true;
7327 }
7328 
void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  // Seed the default set of abstract attributes for \p F: function-level
  // attributes, return-position attributes, argument attributes, call site
  // attributes, and alignment positions for load/store pointer operands.
  // Each function is only processed once; declarations are skipped.
  if (!VisitedFunctions.insert(&F).second)
    return;
  if (F.isDeclaration())
    return;

  IRPosition FPos = IRPosition::function(F);

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might be "will-return".
  getOrCreateAAFor<AAWillReturn>(FPos);

  // Every function might contain instructions that cause "undefined behavior".
  getOrCreateAAFor<AAUndefinedBehavior>(FPos);

  // Every function can be nounwind.
  getOrCreateAAFor<AANoUnwind>(FPos);

  // Every function might be marked "nosync"
  getOrCreateAAFor<AANoSync>(FPos);

  // Every function might be "no-free".
  getOrCreateAAFor<AANoFree>(FPos);

  // Every function might be "no-return".
  getOrCreateAAFor<AANoReturn>(FPos);

  // Every function might be "no-recurse".
  getOrCreateAAFor<AANoRecurse>(FPos);

  // Every function might be "readnone/readonly/writeonly/...".
  getOrCreateAAFor<AAMemoryBehavior>(FPos);

  // Every function might be applicable for Heap-To-Stack conversion.
  if (EnableHeapToStack)
    getOrCreateAAFor<AAHeapToStack>(FPos);

  // Return attributes are only appropriate if the return type is non void.
  Type *ReturnType = F.getReturnType();
  if (!ReturnType->isVoidTy()) {
    // Argument attribute "returned" --- Create only one per function even
    // though it is an argument attribute.
    getOrCreateAAFor<AAReturnedValues>(FPos);

    IRPosition RetPos = IRPosition::returned(F);

    // Every returned value might be dead.
    getOrCreateAAFor<AAIsDead>(RetPos);

    // Every function might be simplified.
    getOrCreateAAFor<AAValueSimplify>(RetPos);

    if (ReturnType->isPointerTy()) {

      // Every function with pointer return type might be marked align.
      getOrCreateAAFor<AAAlign>(RetPos);

      // Every function with pointer return type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(RetPos);

      // Every function with pointer return type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(RetPos);

      // Every function with pointer return type might be marked
      // dereferenceable.
      getOrCreateAAFor<AADereferenceable>(RetPos);
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);

    // Every argument might be simplified.
    getOrCreateAAFor<AAValueSimplify>(ArgPos);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(ArgPos);

      // Every argument with pointer type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(ArgPos);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      getOrCreateAAFor<AANoCapture>(ArgPos);

      // Every argument with pointer type might be marked
      // "readnone/readonly/writeonly/..."
      getOrCreateAAFor<AAMemoryBehavior>(ArgPos);

      // Every argument with pointer type might be marked nofree.
      getOrCreateAAFor<AANoFree>(ArgPos);

      // Every argument with pointer type might be privatizable (or promotable)
      getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
    }
  }

  auto CallSitePred = [&](Instruction &I) -> bool {
    CallSite CS(&I);
    if (Function *Callee = CS.getCalledFunction()) {
      // Skip declarations except if annotations on their call sites were
      // explicitly requested.
      if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
          !Callee->hasMetadata(LLVMContext::MD_callback))
        return true;

      if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {

        IRPosition CSRetPos = IRPosition::callsite_returned(CS);

        // Call site return values might be dead.
        getOrCreateAAFor<AAIsDead>(CSRetPos);

        // Call site return integer values might be limited by a constant range.
        if (Callee->getReturnType()->isIntegerTy()) {
          getOrCreateAAFor<AAValueConstantRange>(CSRetPos);
        }
      }

      for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {

        IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);

        // Every call site argument might be dead.
        getOrCreateAAFor<AAIsDead>(CSArgPos);

        // Call site argument might be simplified.
        getOrCreateAAFor<AAValueSimplify>(CSArgPos);

        if (!CS.getArgument(i)->getType()->isPointerTy())
          continue;

        // Call site argument attribute "non-null".
        getOrCreateAAFor<AANonNull>(CSArgPos);

        // Call site argument attribute "no-alias".
        getOrCreateAAFor<AANoAlias>(CSArgPos);

        // Call site argument attribute "dereferenceable".
        getOrCreateAAFor<AADereferenceable>(CSArgPos);

        // Call site argument attribute "align".
        getOrCreateAAFor<AAAlign>(CSArgPos);

        // Call site argument attribute
        // "readnone/readonly/writeonly/..."
        getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);

        // Call site argument attribute "nofree".
        getOrCreateAAFor<AANoFree>(CSArgPos);
      }
    }
    return true;
  };

  // Seed attributes for all call-like instructions in the function.
  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  bool Success, AnyDead = false;
  Success = checkForAllInstructionsImpl(
      OpcodeInstMap, CallSitePred, nullptr, AnyDead,
      {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
       (unsigned)Instruction::Call});
  (void)Success;
  assert(Success && !AnyDead && "Expected the check call to be successful!");

  // Seed alignment attributes for the pointer operands of loads and stores.
  auto LoadStorePred = [&](Instruction &I) -> bool {
    if (isa<LoadInst>(I))
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
    else
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
    return true;
  };
  Success = checkForAllInstructionsImpl(
      OpcodeInstMap, LoadStorePred, nullptr, AnyDead,
      {(unsigned)Instruction::Load, (unsigned)Instruction::Store});
  (void)Success;
  assert(Success && !AnyDead && "Expected the check call to be successful!");
}
7518 
7519 /// Helpers to ease debugging through output streams and print calls.
7520 ///
7521 ///{
7522 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
7523   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
7524 }
7525 
7526 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
7527   switch (AP) {
7528   case IRPosition::IRP_INVALID:
7529     return OS << "inv";
7530   case IRPosition::IRP_FLOAT:
7531     return OS << "flt";
7532   case IRPosition::IRP_RETURNED:
7533     return OS << "fn_ret";
7534   case IRPosition::IRP_CALL_SITE_RETURNED:
7535     return OS << "cs_ret";
7536   case IRPosition::IRP_FUNCTION:
7537     return OS << "fn";
7538   case IRPosition::IRP_CALL_SITE:
7539     return OS << "cs";
7540   case IRPosition::IRP_ARGUMENT:
7541     return OS << "arg";
7542   case IRPosition::IRP_CALL_SITE_ARGUMENT:
7543     return OS << "cs_arg";
7544   }
7545   llvm_unreachable("Unknown attribute position!");
7546 }
7547 
7548 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
7549   const Value &AV = Pos.getAssociatedValue();
7550   return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
7551             << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
7552 }
7553 
7554 template <typename base_ty, base_ty BestState, base_ty WorstState>
7555 raw_ostream &
7556 llvm::operator<<(raw_ostream &OS,
7557                  const IntegerStateBase<base_ty, BestState, WorstState> &S) {
7558   return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
7559             << static_cast<const AbstractState &>(S);
7560 }
7561 
7562 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
7563   OS << "range-state(" << S.getBitWidth() << ")<";
7564   S.getKnown().print(OS);
7565   OS << " / ";
7566   S.getAssumed().print(OS);
7567   OS << ">";
7568 
7569   return OS << static_cast<const AbstractState &>(S);
7570 }
7571 
7572 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
7573   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
7574 }
7575 
7576 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
7577   AA.print(OS);
7578   return OS;
7579 }
7580 
7581 void AbstractAttribute::print(raw_ostream &OS) const {
7582   OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
7583      << "]";
7584 }
7585 ///}
7586 
7587 /// ----------------------------------------------------------------------------
7588 ///                       Pass (Manager) Boilerplate
7589 /// ----------------------------------------------------------------------------
7590 
7591 static bool runAttributorOnModule(Module &M, AnalysisGetter &AG) {
7592   if (DisableAttributor)
7593     return false;
7594 
7595   LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
7596                     << " functions.\n");
7597 
7598   // Create an Attributor and initially empty information cache that is filled
7599   // while we identify default attribute opportunities.
7600   InformationCache InfoCache(M, AG);
7601   Attributor A(InfoCache, DepRecInterval);
7602 
7603   for (Function &F : M)
7604     A.initializeInformationCache(F);
7605 
7606   for (Function &F : M) {
7607     if (F.hasExactDefinition())
7608       NumFnWithExactDefinition++;
7609     else
7610       NumFnWithoutExactDefinition++;
7611 
7612     // We look at internal functions only on-demand but if any use is not a
7613     // direct call, we have to do it eagerly.
7614     if (F.hasLocalLinkage()) {
7615       if (llvm::all_of(F.uses(), [](const Use &U) {
7616             return ImmutableCallSite(U.getUser()) &&
7617                    ImmutableCallSite(U.getUser()).isCallee(&U);
7618           }))
7619         continue;
7620     }
7621 
7622     // Populate the Attributor with abstract attribute opportunities in the
7623     // function and the information cache with IR information.
7624     A.identifyDefaultAbstractAttributes(F);
7625   }
7626 
7627   bool Changed = A.run(M) == ChangeStatus::CHANGED;
7628   assert(!verifyModule(M, &errs()) && "Module verification failed!");
7629   return Changed;
7630 }
7631 
7632 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
7633   AnalysisGetter AG(AM);
7634   if (runAttributorOnModule(M, AG)) {
7635     // FIXME: Think about passes we will preserve and add them here.
7636     return PreservedAnalyses::none();
7637   }
7638   return PreservedAnalyses::all();
7639 }
7640 
7641 namespace {
7642 
7643 struct AttributorLegacyPass : public ModulePass {
7644   static char ID;
7645 
7646   AttributorLegacyPass() : ModulePass(ID) {
7647     initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
7648   }
7649 
7650   bool runOnModule(Module &M) override {
7651     if (skipModule(M))
7652       return false;
7653 
7654     AnalysisGetter AG;
7655     return runAttributorOnModule(M, AG);
7656   }
7657 
7658   void getAnalysisUsage(AnalysisUsage &AU) const override {
7659     // FIXME: Think about passes we will preserve and add them here.
7660     AU.addRequired<TargetLibraryInfoWrapperPass>();
7661   }
7662 };
7663 
7664 } // end anonymous namespace
7665 
7666 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
7667 
// Pass identification for the legacy pass manager.
char AttributorLegacyPass::ID = 0;

// Definitions of the per-class IDs declared by the abstract attribute
// interfaces. NOTE(review): the stored value appears irrelevant; presumably
// only the address of each ID identifies the class -- confirm in Attributor.h.
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAValueConstantRange::ID = 0;
7690 
7691 // Macro magic to create the static generator function for attributes that
7692 // follow the naming scheme.
7693 
// Expands to a switch case that aborts: CLASS cannot be created for the
// position kind PK (POS_NAME is the human readable name of the kind).
#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

// Expands to a switch case that allocates the CLASS##SUFFIX implementation
// for the position kind PK.
#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new CLASS##SUFFIX(IRP);                                               \
    break;

// createForPosition for attributes that are only created for function and
// call site positions; all other kinds are rejected.
#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes created for value positions (floating,
// argument, returned, call site returned/argument) but not for function or
// call site positions.
#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes that can be created for every valid
// position kind.
#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes that only make sense on a function
// position; every other kind is rejected.
#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

// createForPosition for attributes valid everywhere except returned
// positions.
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
7782 
// Attributes created for function and call site positions only.
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)

// Attributes created for value positions only.
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)

// Attributes created for every valid position kind.
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

// Attributes created for function positions only.
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

// Attributes created for every position kind except returned positions.
CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

// The macros are only needed for the instantiations above; drop them again.
#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV
7815 
// Register the legacy pass (command line argument "attributor") and its
// analysis dependency with the pass registry.
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)
7821