//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately,
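// e.g., for a hypothetical statistic (the name "MyAttr" below is
// illustrative only), declared once and incremented at several sites:
//  STATS_DECL(MyAttr, Function, "Number of functions marked 'myattr'")
//  ...
//  STATS_TRACK(MyAttr, Function)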
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)

#undef PIPE_OPERATOR
} // namespace llvm

namespace {

static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}

/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
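
// Illustrative example (not from the original source): for a value Ptr of
// type { i32, i32, i64 }* and Offset = 8, the loop above first steps through
// the pointer (index 0, since 8 is smaller than the 16-byte struct size) and
// then selects the i64 element at byte offset 8 (index 2). The constructed
// access is thus equivalent to "gep Ptr, 0, 2", followed by a cast to the
// requested result type \p ResTy.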

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
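///
/// A minimal usage sketch (the callback body is illustrative only):
/// \code
///   auto VisitValueCB = [](Value &V, const Instruction *CtxI, StateTy &S,
///                          bool Stripped) -> bool {
///     // Accumulate information about \p V in \p S here; returning false
///     // aborts the traversal early.
///     return true;
///   };
///   genericValueTraversal<AAType, StateTy>(A, IRP, QueryingAA, State,
///                                          VisitValueCB, CtxI);
/// \endcode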
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
      if (!C.hasValue())
        continue;
      if (Value *NewV = C.getValue()) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, the
/// update needs to be run again).
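///
/// Typical use (cf. the updateImpl implementations below):
/// \code
///   return clampStateAndIndicateChange<StateType>(this->getState(), S);
/// \endcode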
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all those we find.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all those we find.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const StateType &>(AA.getState()));
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
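///
/// A minimal sketch of such a method (the state update shown is hypothetical):
/// \code
///   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                        StateType &State) {
///     // Derive information from the use and fold it into \p State here,
///     // e.g., via a (hypothetical) State.takeKnownMaximum(...) call.
///     // Returning true also queues the uses of \p I for exploration.
///     return true;
///   }
/// \endcode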
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged into its own state. Let ParentState_i be the
  // state representing the known information for the i-th branch instruction
  // in the context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent is the conjunction of the children's
    // known states, so it is initialized to the best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// llvm::None.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.use_empty())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI,
        /* UseValueSimplify */ false);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible, thus
    // if all return values can be represented in the current scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the traversal
        // again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need to
        // do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept a record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction whose ordering is neither unordered
  /// nor monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is nosync (currently
  /// memcpy, memmove, and memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!isa<CallBase>(I) && "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
1382 struct AANoFreeCallSite final : AANoFreeImpl {
1383   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1384       : AANoFreeImpl(IRP, A) {}
1385 
1386   /// See AbstractAttribute::initialize(...).
1387   void initialize(Attributor &A) override {
1388     AANoFreeImpl::initialize(A);
1389     Function *F = getAssociatedFunction();
1390     if (!F)
1391       indicatePessimisticFixpoint();
1392   }
1393 
1394   /// See AbstractAttribute::updateImpl(...).
1395   ChangeStatus updateImpl(Attributor &A) override {
1396     // TODO: Once we have call site specific value information we can provide
1397     //       call site specific liveness information and then it makes
1398     //       sense to specialize attributes for call sites arguments instead of
1399     //       redirecting requests to the callee argument.
1400     Function *F = getAssociatedFunction();
1401     const IRPosition &FnPos = IRPosition::function(*F);
1402     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1403     return clampStateAndIndicateChange(
1404         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1405   }
1406 
1407   /// See AbstractAttribute::trackStatistics()
1408   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1409 };
1410 
1411 /// NoFree attribute for floating values.
1412 struct AANoFreeFloating : AANoFreeImpl {
1413   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1414       : AANoFreeImpl(IRP, A) {}
1415 
1416   /// See AbstractAttribute::trackStatistics()
1417   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1418 
  /// See AbstractAttribute::updateImpl(...).
1420   ChangeStatus updateImpl(Attributor &A) override {
1421     const IRPosition &IRP = getIRPosition();
1422 
1423     const auto &NoFreeAA =
1424         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1425     if (NoFreeAA.isAssumedNoFree())
1426       return ChangeStatus::UNCHANGED;
1427 
1428     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1429     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1430       Instruction *UserI = cast<Instruction>(U.getUser());
1431       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1432         if (CB->isBundleOperand(&U))
1433           return false;
1434         if (!CB->isArgOperand(&U))
1435           return true;
1436         unsigned ArgNo = CB->getArgOperandNo(&U);
1437 
1438         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1439             *this, IRPosition::callsite_argument(*CB, ArgNo));
1440         return NoFreeArg.isAssumedNoFree();
1441       }
1442 
1443       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1444           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1445         Follow = true;
1446         return true;
1447       }
1448       if (isa<ReturnInst>(UserI))
1449         return true;
1450 
1451       // Unknown user.
1452       return false;
1453     };
1454     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1455       return indicatePessimisticFixpoint();
1456 
1457     return ChangeStatus::UNCHANGED;
1458   }
1459 };
1460 
/// NoFree attribute for a function argument.
1462 struct AANoFreeArgument final : AANoFreeFloating {
1463   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1464       : AANoFreeFloating(IRP, A) {}
1465 
1466   /// See AbstractAttribute::trackStatistics()
1467   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1468 };
1469 
/// NoFree attribute for a call site argument.
1471 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1472   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1473       : AANoFreeFloating(IRP, A) {}
1474 
1475   /// See AbstractAttribute::updateImpl(...).
1476   ChangeStatus updateImpl(Attributor &A) override {
1477     // TODO: Once we have call site specific value information we can provide
1478     //       call site specific liveness information and then it makes
1479     //       sense to specialize attributes for call sites arguments instead of
1480     //       redirecting requests to the callee argument.
1481     Argument *Arg = getAssociatedArgument();
1482     if (!Arg)
1483       return indicatePessimisticFixpoint();
1484     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1485     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1486     return clampStateAndIndicateChange(
1487         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1488   }
1489 
1490   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}
1492 };
1493 
1494 /// NoFree attribute for function return value.
1495 struct AANoFreeReturned final : AANoFreeFloating {
1496   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1497       : AANoFreeFloating(IRP, A) {
1498     llvm_unreachable("NoFree is not applicable to function returns!");
1499   }
1500 
1501   /// See AbstractAttribute::initialize(...).
1502   void initialize(Attributor &A) override {
1503     llvm_unreachable("NoFree is not applicable to function returns!");
1504   }
1505 
1506   /// See AbstractAttribute::updateImpl(...).
1507   ChangeStatus updateImpl(Attributor &A) override {
1508     llvm_unreachable("NoFree is not applicable to function returns!");
1509   }
1510 
1511   /// See AbstractAttribute::trackStatistics()
1512   void trackStatistics() const override {}
1513 };
1514 
1515 /// NoFree attribute deduction for a call site return value.
1516 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1517   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1518       : AANoFreeFloating(IRP, A) {}
1519 
1520   ChangeStatus manifest(Attributor &A) override {
1521     return ChangeStatus::UNCHANGED;
1522   }
1523   /// See AbstractAttribute::trackStatistics()
1524   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1525 };
1526 
1527 /// ------------------------ NonNull Argument Attribute ------------------------
1528 static int64_t getKnownNonNullAndDerefBytesForUse(
1529     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1530     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1531   TrackUse = false;
1532 
1533   const Value *UseV = U->get();
1534   if (!UseV->getType()->isPointerTy())
1535     return 0;
1536 
1537   Type *PtrTy = UseV->getType();
1538   const Function *F = I->getFunction();
1539   bool NullPointerIsDefined =
1540       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1541   const DataLayout &DL = A.getInfoCache().getDL();
1542   if (const auto *CB = dyn_cast<CallBase>(I)) {
1543     if (CB->isBundleOperand(U)) {
1544       if (RetainedKnowledge RK = getKnowledgeFromUse(
1545               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1546         IsNonNull |=
1547             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1548         return RK.ArgValue;
1549       }
1550       return 0;
1551     }
1552 
1553     if (CB->isCallee(U)) {
1554       IsNonNull |= !NullPointerIsDefined;
1555       return 0;
1556     }
1557 
1558     unsigned ArgNo = CB->getArgOperandNo(U);
1559     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1560     // As long as we only use known information there is no need to track
1561     // dependences here.
1562     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1563                                                   /* TrackDependence */ false);
1564     IsNonNull |= DerefAA.isKnownNonNull();
1565     return DerefAA.getKnownDereferenceableBytes();
1566   }
1567 
1568   // We need to follow common pointer manipulation uses to the accesses they
1569   // feed into. We can try to be smart to avoid looking through things we do not
1570   // like for now, e.g., non-inbounds GEPs.
1571   if (isa<CastInst>(I)) {
1572     TrackUse = true;
1573     return 0;
1574   }
1575   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1576     if (GEP->hasAllConstantIndices()) {
1577       TrackUse = true;
1578       return 0;
1579     }
1580 
1581   int64_t Offset;
1582   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1583     if (Base == &AssociatedValue &&
1584         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1585       int64_t DerefBytes =
1586           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1587 
1588       IsNonNull |= !NullPointerIsDefined;
1589       return std::max(int64_t(0), DerefBytes);
1590     }
1591   }
1592 
  // Corner case when the offset is 0.
1594   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1595           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1596     if (Offset == 0 && Base == &AssociatedValue &&
1597         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1598       int64_t DerefBytes =
1599           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1600       IsNonNull |= !NullPointerIsDefined;
1601       return std::max(int64_t(0), DerefBytes);
1602     }
1603   }
1604 
1605   return 0;
1606 }
1607 
1608 struct AANonNullImpl : AANonNull {
1609   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1610       : AANonNull(IRP, A),
1611         NullIsDefined(NullPointerIsDefined(
1612             getAnchorScope(),
1613             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1614 
1615   /// See AbstractAttribute::initialize(...).
1616   void initialize(Attributor &A) override {
1617     Value &V = getAssociatedValue();
1618     if (!NullIsDefined &&
1619         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1620                 /* IgnoreSubsumingPositions */ false, &A))
1621       indicateOptimisticFixpoint();
1622     else if (isa<ConstantPointerNull>(V))
1623       indicatePessimisticFixpoint();
1624     else
1625       AANonNull::initialize(A);
1626 
1627     bool CanBeNull = true;
1628     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull))
1629       if (!CanBeNull)
1630         indicateOptimisticFixpoint();
1631 
1632     if (!getState().isAtFixpoint())
1633       if (Instruction *CtxI = getCtxI())
1634         followUsesInMBEC(*this, A, getState(), *CtxI);
1635   }
1636 
1637   /// See followUsesInMBEC
1638   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1639                        AANonNull::StateType &State) {
1640     bool IsNonNull = false;
1641     bool TrackUse = false;
1642     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1643                                        IsNonNull, TrackUse);
1644     State.setKnown(IsNonNull);
1645     return TrackUse;
1646   }
1647 
1648   /// See AbstractAttribute::getAsStr().
1649   const std::string getAsStr() const override {
1650     return getAssumed() ? "nonnull" : "may-null";
1651   }
1652 
1653   /// Flag to determine if the underlying value can be null and still allow
1654   /// valid accesses.
1655   const bool NullIsDefined;
1656 };
1657 
1658 /// NonNull attribute for a floating value.
1659 struct AANonNullFloating : public AANonNullImpl {
1660   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1661       : AANonNullImpl(IRP, A) {}
1662 
1663   /// See AbstractAttribute::updateImpl(...).
1664   ChangeStatus updateImpl(Attributor &A) override {
1665     if (!NullIsDefined) {
1666       const auto &DerefAA =
1667           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1668       if (DerefAA.getAssumedDereferenceableBytes())
1669         return ChangeStatus::UNCHANGED;
1670     }
1671 
1672     const DataLayout &DL = A.getDataLayout();
1673 
1674     DominatorTree *DT = nullptr;
1675     AssumptionCache *AC = nullptr;
1676     InformationCache &InfoCache = A.getInfoCache();
1677     if (const Function *Fn = getAnchorScope()) {
1678       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1679       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1680     }
1681 
1682     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1683                             AANonNull::StateType &T, bool Stripped) -> bool {
1684       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1685       if (!Stripped && this == &AA) {
1686         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1687           T.indicatePessimisticFixpoint();
1688       } else {
1689         // Use abstract attribute information.
1690         const AANonNull::StateType &NS =
1691             static_cast<const AANonNull::StateType &>(AA.getState());
1692         T ^= NS;
1693       }
1694       return T.isValidState();
1695     };
1696 
1697     StateType T;
1698     if (!genericValueTraversal<AANonNull, StateType>(
1699             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1700       return indicatePessimisticFixpoint();
1701 
1702     return clampStateAndIndicateChange(getState(), T);
1703   }
1704 
1705   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
1707 };
1708 
1709 /// NonNull attribute for function return value.
1710 struct AANonNullReturned final
1711     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1712   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1713       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1714 
1715   /// See AbstractAttribute::trackStatistics()
1716   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1717 };
1718 
1719 /// NonNull attribute for function argument.
1720 struct AANonNullArgument final
1721     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1722   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1723       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1724 
1725   /// See AbstractAttribute::trackStatistics()
1726   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1727 };
1728 
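/// NonNull attribute for a call site argument.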
1729 struct AANonNullCallSiteArgument final : AANonNullFloating {
1730   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1731       : AANonNullFloating(IRP, A) {}
1732 
1733   /// See AbstractAttribute::trackStatistics()
1734   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1735 };
1736 
1737 /// NonNull attribute for a call site return position.
1738 struct AANonNullCallSiteReturned final
1739     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1740   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1741       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1742 
1743   /// See AbstractAttribute::trackStatistics()
1744   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1745 };
1746 
1747 /// ------------------------ No-Recurse Attributes ----------------------------
1748 
1749 struct AANoRecurseImpl : public AANoRecurse {
1750   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1751 
1752   /// See AbstractAttribute::getAsStr()
1753   const std::string getAsStr() const override {
1754     return getAssumed() ? "norecurse" : "may-recurse";
1755   }
1756 };
1757 
1758 struct AANoRecurseFunction final : AANoRecurseImpl {
1759   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1760       : AANoRecurseImpl(IRP, A) {}
1761 
1762   /// See AbstractAttribute::initialize(...).
1763   void initialize(Attributor &A) override {
1764     AANoRecurseImpl::initialize(A);
1765     if (const Function *F = getAnchorScope())
1766       if (A.getInfoCache().getSccSize(*F) != 1)
1767         indicatePessimisticFixpoint();
1768   }
1769 
1770   /// See AbstractAttribute::updateImpl(...).
1771   ChangeStatus updateImpl(Attributor &A) override {
1772 
1773     // If all live call sites are known to be no-recurse, we are as well.
1774     auto CallSitePred = [&](AbstractCallSite ACS) {
1775       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1776           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1777           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1778       return NoRecurseAA.isKnownNoRecurse();
1779     };
1780     bool AllCallSitesKnown;
1781     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1782       // If we know all call sites and all are known no-recurse, we are done.
1783       // If all known call sites, which might not be all that exist, are known
1784       // to be no-recurse, we are not done but we can continue to assume
1785       // no-recurse. If one of the call sites we have not visited will become
1786       // live, another update is triggered.
1787       if (AllCallSitesKnown)
1788         indicateOptimisticFixpoint();
1789       return ChangeStatus::UNCHANGED;
1790     }
1791 
1792     // If the above check does not hold anymore we look at the calls.
1793     auto CheckForNoRecurse = [&](Instruction &I) {
1794       const auto &CB = cast<CallBase>(I);
1795       if (CB.hasFnAttr(Attribute::NoRecurse))
1796         return true;
1797 
1798       const auto &NoRecurseAA =
1799           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1800       if (!NoRecurseAA.isAssumedNoRecurse())
1801         return false;
1802 
      // A call back to this function itself is direct recursion.
1804       if (CB.getCalledFunction() == getAnchorScope())
1805         return false;
1806 
1807       return true;
1808     };
1809 
1810     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1811       return indicatePessimisticFixpoint();
1812     return ChangeStatus::UNCHANGED;
1813   }
1814 
1815   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1816 };
1817 
/// NoRecurse attribute deduction for a call site.
1819 struct AANoRecurseCallSite final : AANoRecurseImpl {
1820   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1821       : AANoRecurseImpl(IRP, A) {}
1822 
1823   /// See AbstractAttribute::initialize(...).
1824   void initialize(Attributor &A) override {
1825     AANoRecurseImpl::initialize(A);
1826     Function *F = getAssociatedFunction();
1827     if (!F)
1828       indicatePessimisticFixpoint();
1829   }
1830 
1831   /// See AbstractAttribute::updateImpl(...).
1832   ChangeStatus updateImpl(Attributor &A) override {
1833     // TODO: Once we have call site specific value information we can provide
1834     //       call site specific liveness information and then it makes
1835     //       sense to specialize attributes for call sites arguments instead of
1836     //       redirecting requests to the callee argument.
1837     Function *F = getAssociatedFunction();
1838     const IRPosition &FnPos = IRPosition::function(*F);
1839     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1840     return clampStateAndIndicateChange(
1841         getState(),
1842         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1843   }
1844 
1845   /// See AbstractAttribute::trackStatistics()
1846   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1847 };
1848 
1849 /// -------------------- Undefined-Behavior Attributes ------------------------
1850 
1851 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1852   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1853       : AAUndefinedBehavior(IRP, A) {}
1854 
  /// See AbstractAttribute::updateImpl(...).
1857   ChangeStatus updateImpl(Attributor &A) override {
1858     const size_t UBPrevSize = KnownUBInsts.size();
1859     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1860 
1861     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1862       // Skip instructions that are already saved.
1863       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1864         return true;
1865 
      // If we reach here, we know we have an instruction that accesses memory
      // through a pointer operand, which getPointerOperand() should return.
1869       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1870       assert(PtrOp &&
1871              "Expected pointer operand of memory accessing instruction");
1872 
1873       // Either we stopped and the appropriate action was taken,
1874       // or we got back a simplified value to continue.
1875       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1876       if (!SimplifiedPtrOp.hasValue())
1877         return true;
1878       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1879 
1880       // A memory access through a pointer is considered UB
1881       // only if the pointer has constant null value.
1882       // TODO: Expand it to not only check constant values.
1883       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1884         AssumedNoUBInsts.insert(&I);
1885         return true;
1886       }
1887       const Type *PtrTy = PtrOpVal->getType();
1888 
1889       // Because we only consider instructions inside functions,
1890       // assume that a parent function exists.
1891       const Function *F = I.getFunction();
1892 
1893       // A memory access using constant null pointer is only considered UB
1894       // if null pointer is _not_ defined for the target platform.
1895       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1896         AssumedNoUBInsts.insert(&I);
1897       else
1898         KnownUBInsts.insert(&I);
1899       return true;
1900     };
1901 
1902     auto InspectBrInstForUB = [&](Instruction &I) {
1903       // A conditional branch instruction is considered UB if it has `undef`
1904       // condition.
1905 
1906       // Skip instructions that are already saved.
1907       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1908         return true;
1909 
1910       // We know we have a branch instruction.
1911       auto BrInst = cast<BranchInst>(&I);
1912 
1913       // Unconditional branches are never considered UB.
1914       if (BrInst->isUnconditional())
1915         return true;
1916 
1917       // Either we stopped and the appropriate action was taken,
1918       // or we got back a simplified value to continue.
1919       Optional<Value *> SimplifiedCond =
1920           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1921       if (!SimplifiedCond.hasValue())
1922         return true;
1923       AssumedNoUBInsts.insert(&I);
1924       return true;
1925     };
1926 
1927     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1928                               {Instruction::Load, Instruction::Store,
1929                                Instruction::AtomicCmpXchg,
1930                                Instruction::AtomicRMW},
1931                               /* CheckBBLivenessOnly */ true);
1932     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1933                               /* CheckBBLivenessOnly */ true);
1934     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1935         UBPrevSize != KnownUBInsts.size())
1936       return ChangeStatus::CHANGED;
1937     return ChangeStatus::UNCHANGED;
1938   }
1939 
1940   bool isKnownToCauseUB(Instruction *I) const override {
1941     return KnownUBInsts.count(I);
1942   }
1943 
1944   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest of the boilerplate
    // ensures that it is one of the instructions we test for UB.
1950 
1951     switch (I->getOpcode()) {
1952     case Instruction::Load:
1953     case Instruction::Store:
1954     case Instruction::AtomicCmpXchg:
1955     case Instruction::AtomicRMW:
1956       return !AssumedNoUBInsts.count(I);
1957     case Instruction::Br: {
1958       auto BrInst = cast<BranchInst>(I);
1959       if (BrInst->isUnconditional())
1960         return false;
1961       return !AssumedNoUBInsts.count(I);
    }
1963     default:
1964       return false;
1965     }
1966     return false;
1967   }
1968 
1969   ChangeStatus manifest(Attributor &A) override {
1970     if (KnownUBInsts.empty())
1971       return ChangeStatus::UNCHANGED;
1972     for (Instruction *I : KnownUBInsts)
1973       A.changeToUnreachableAfterManifest(I);
1974     return ChangeStatus::CHANGED;
1975   }
1976 
1977   /// See AbstractAttribute::getAsStr()
1978   const std::string getAsStr() const override {
1979     return getAssumed() ? "undefined-behavior" : "no-ub";
1980   }
1981 
1982   /// Note: The correctness of this analysis depends on the fact that the
1983   /// following 2 sets will stop changing after some point.
1984   /// "Change" here means that their size changes.
1985   /// The size of each set is monotonically increasing
1986   /// (we only add items to them) and it is upper bounded by the number of
1987   /// instructions in the processed function (we can never save more
1988   /// elements in either set than this number). Hence, at some point,
1989   /// they will stop increasing.
1990   /// Consequently, at some point, both sets will have stopped
1991   /// changing, effectively making the analysis reach a fixpoint.
1992 
1993   /// Note: These 2 sets are disjoint and an instruction can be considered
1994   /// one of 3 things:
1995   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
1996   ///    the KnownUBInsts set.
1997   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
1998   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2000   ///    could not find a reason to assume or prove that it can cause UB,
2001   ///    hence it assumes it doesn't. We have a set for these instructions
2002   ///    so that we don't reprocess them in every update.
2003   ///    Note however that instructions in this set may cause UB.
2004 
2005 protected:
2006   /// A set of all live instructions _known_ to cause UB.
2007   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2008 
2009 private:
2010   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2011   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2012 
  // Should be called during an update when we process an instruction \p I
  // that depends on a value \p V; one of the following has to happen:
2015   // - If the value is assumed, then stop.
2016   // - If the value is known but undef, then consider it UB.
2017   // - Otherwise, do specific processing with the simplified value.
2018   // We return None in the first 2 cases to signify that an appropriate
2019   // action was taken and the caller should stop.
2020   // Otherwise, we return the simplified value that the caller should
2021   // use for specific processing.
2022   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2023                                          Instruction *I) {
2024     const auto &ValueSimplifyAA =
2025         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2026     Optional<Value *> SimplifiedV =
2027         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2028     if (!ValueSimplifyAA.isKnown()) {
2029       // Don't depend on assumed values.
2030       return llvm::None;
2031     }
2032     if (!SimplifiedV.hasValue()) {
2033       // If it is known (which we tested above) but it doesn't have a value,
2034       // then we can assume `undef` and hence the instruction is UB.
2035       KnownUBInsts.insert(I);
2036       return llvm::None;
2037     }
2038     Value *Val = SimplifiedV.getValue();
2039     if (isa<UndefValue>(Val)) {
2040       KnownUBInsts.insert(I);
2041       return llvm::None;
2042     }
2043     return Val;
2044   }
2045 };
2046 
2047 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2048   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2049       : AAUndefinedBehaviorImpl(IRP, A) {}
2050 
2051   /// See AbstractAttribute::trackStatistics()
2052   void trackStatistics() const override {
2053     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2054                "Number of instructions known to have UB");
2055     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2056         KnownUBInsts.size();
2057   }
2058 };
2059 
2060 /// ------------------------ Will-Return Attributes ----------------------------
2061 
// Helper function that checks whether a function contains any cycle that is
// not known to be bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
2065 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2066   ScalarEvolution *SE =
2067       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2068   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we
  // conservatively assume any cycle is unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm, to find all maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2073   if (!SE || !LI) {
2074     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2075       if (SCCI.hasCycle())
2076         return true;
2077     return false;
2078   }
2079 
2080   // If there's irreducible control, the function may contain non-loop cycles.
2081   if (mayContainIrreducibleControl(F, LI))
2082     return true;
2083 
  // Any loop that does not have a known maximum trip count is considered an
  // unbounded cycle.
2085   for (auto *L : LI->getLoopsInPreorder()) {
2086     if (!SE->getSmallConstantMaxTripCount(L))
2087       return true;
2088   }
2089   return false;
2090 }
2091 
2092 struct AAWillReturnImpl : public AAWillReturn {
2093   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2094       : AAWillReturn(IRP, A) {}
2095 
2096   /// See AbstractAttribute::initialize(...).
2097   void initialize(Attributor &A) override {
2098     AAWillReturn::initialize(A);
2099 
2100     Function *F = getAnchorScope();
2101     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2102       indicatePessimisticFixpoint();
2103   }
2104 
2105   /// See AbstractAttribute::updateImpl(...).
2106   ChangeStatus updateImpl(Attributor &A) override {
2107     auto CheckForWillReturn = [&](Instruction &I) {
2108       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2109       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2110       if (WillReturnAA.isKnownWillReturn())
2111         return true;
2112       if (!WillReturnAA.isAssumedWillReturn())
2113         return false;
2114       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2115       return NoRecurseAA.isAssumedNoRecurse();
2116     };
2117 
2118     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2119       return indicatePessimisticFixpoint();
2120 
2121     return ChangeStatus::UNCHANGED;
2122   }
2123 
2124   /// See AbstractAttribute::getAsStr()
2125   const std::string getAsStr() const override {
2126     return getAssumed() ? "willreturn" : "may-noreturn";
2127   }
2128 };
2129 
2130 struct AAWillReturnFunction final : AAWillReturnImpl {
2131   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2132       : AAWillReturnImpl(IRP, A) {}
2133 
2134   /// See AbstractAttribute::trackStatistics()
2135   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2136 };
2137 
/// WillReturn attribute deduction for a call site.
2139 struct AAWillReturnCallSite final : AAWillReturnImpl {
2140   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2141       : AAWillReturnImpl(IRP, A) {}
2142 
2143   /// See AbstractAttribute::initialize(...).
2144   void initialize(Attributor &A) override {
2145     AAWillReturnImpl::initialize(A);
2146     Function *F = getAssociatedFunction();
2147     if (!F)
2148       indicatePessimisticFixpoint();
2149   }
2150 
2151   /// See AbstractAttribute::updateImpl(...).
2152   ChangeStatus updateImpl(Attributor &A) override {
2153     // TODO: Once we have call site specific value information we can provide
2154     //       call site specific liveness information and then it makes
2155     //       sense to specialize attributes for call sites arguments instead of
2156     //       redirecting requests to the callee argument.
2157     Function *F = getAssociatedFunction();
2158     const IRPosition &FnPos = IRPosition::function(*F);
2159     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2160     return clampStateAndIndicateChange(
2161         getState(),
2162         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2163   }
2164 
2165   /// See AbstractAttribute::trackStatistics()
2166   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2167 };
2168 
2169 /// -------------------AAReachability Attribute--------------------------
2170 
2171 struct AAReachabilityImpl : AAReachability {
2172   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2173       : AAReachability(IRP, A) {}
2174 
2175   const std::string getAsStr() const override {
2176     // TODO: Return the number of reachable queries.
2177     return "reachable";
2178   }
2179 
2180   /// See AbstractAttribute::initialize(...).
2181   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2182 
2183   /// See AbstractAttribute::updateImpl(...).
2184   ChangeStatus updateImpl(Attributor &A) override {
2185     return indicatePessimisticFixpoint();
2186   }
2187 };
2188 
2189 struct AAReachabilityFunction final : public AAReachabilityImpl {
2190   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2191       : AAReachabilityImpl(IRP, A) {}
2192 
2193   /// See AbstractAttribute::trackStatistics()
2194   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2195 };
2196 
2197 /// ------------------------ NoAlias Argument Attribute ------------------------
2198 
2199 struct AANoAliasImpl : AANoAlias {
2200   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2201     assert(getAssociatedType()->isPointerTy() &&
2202            "Noalias is a pointer attribute");
2203   }
2204 
2205   const std::string getAsStr() const override {
2206     return getAssumed() ? "noalias" : "may-alias";
2207   }
2208 };
2209 
2210 /// NoAlias attribute for a floating value.
2211 struct AANoAliasFloating final : AANoAliasImpl {
2212   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2213       : AANoAliasImpl(IRP, A) {}
2214 
2215   /// See AbstractAttribute::initialize(...).
2216   void initialize(Attributor &A) override {
2217     AANoAliasImpl::initialize(A);
2218     Value *Val = &getAssociatedValue();
2219     do {
2220       CastInst *CI = dyn_cast<CastInst>(Val);
2221       if (!CI)
2222         break;
2223       Value *Base = CI->getOperand(0);
2224       if (!Base->hasOneUse())
2225         break;
2226       Val = Base;
2227     } while (true);
2228 
2229     if (!Val->getType()->isPointerTy()) {
2230       indicatePessimisticFixpoint();
2231       return;
2232     }
2233 
2234     if (isa<AllocaInst>(Val))
2235       indicateOptimisticFixpoint();
2236     else if (isa<ConstantPointerNull>(Val) &&
2237              !NullPointerIsDefined(getAnchorScope(),
2238                                    Val->getType()->getPointerAddressSpace()))
2239       indicateOptimisticFixpoint();
2240     else if (Val != &getAssociatedValue()) {
2241       const auto &ValNoAliasAA =
2242           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2243       if (ValNoAliasAA.isKnownNoAlias())
2244         indicateOptimisticFixpoint();
2245     }
2246   }
2247 
2248   /// See AbstractAttribute::updateImpl(...).
2249   ChangeStatus updateImpl(Attributor &A) override {
2250     // TODO: Implement this.
2251     return indicatePessimisticFixpoint();
2252   }
2253 
2254   /// See AbstractAttribute::trackStatistics()
2255   void trackStatistics() const override {
2256     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2257   }
2258 };
2259 
2260 /// NoAlias attribute for an argument.
2261 struct AANoAliasArgument final
2262     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2263   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2264   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2265 
2266   /// See AbstractAttribute::initialize(...).
2267   void initialize(Attributor &A) override {
2268     Base::initialize(A);
2269     // See callsite argument attribute and callee argument attribute.
2270     if (hasAttr({Attribute::ByVal}))
2271       indicateOptimisticFixpoint();
2272   }
2273 
2274   /// See AbstractAttribute::update(...).
2275   ChangeStatus updateImpl(Attributor &A) override {
2276     // We have to make sure no-alias on the argument does not break
2277     // synchronization when this is a callback argument, see also [1] below.
2278     // If synchronization cannot be affected, we delegate to the base updateImpl
2279     // function, otherwise we give up for now.
2280 
2281     // If the function is no-sync, no-alias cannot break synchronization.
2282     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2283         *this, IRPosition::function_scope(getIRPosition()));
2284     if (NoSyncAA.isAssumedNoSync())
2285       return Base::updateImpl(A);
2286 
2287     // If the argument is read-only, no-alias cannot break synchronization.
2288     const auto &MemBehaviorAA =
2289         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2290     if (MemBehaviorAA.isAssumedReadOnly())
2291       return Base::updateImpl(A);
2292 
2293     // If the argument is never passed through callbacks, no-alias cannot break
2294     // synchronization.
2295     bool AllCallSitesKnown;
2296     if (A.checkForAllCallSites(
2297             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2298             true, AllCallSitesKnown))
2299       return Base::updateImpl(A);
2300 
2301     // TODO: add no-alias but make sure it doesn't break synchronization by
2302     // introducing fake uses. See:
2303     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2304     //     International Workshop on OpenMP 2018,
2305     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2306 
2307     return indicatePessimisticFixpoint();
2308   }
2309 
2310   /// See AbstractAttribute::trackStatistics()
2311   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2312 };
2313 
2314 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2315   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2316       : AANoAliasImpl(IRP, A) {}
2317 
2318   /// See AbstractAttribute::initialize(...).
2319   void initialize(Attributor &A) override {
2320     // See callsite argument attribute and callee argument attribute.
2321     const auto &CB = cast<CallBase>(getAnchorValue());
2322     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2323       indicateOptimisticFixpoint();
2324     Value &Val = getAssociatedValue();
2325     if (isa<ConstantPointerNull>(Val) &&
2326         !NullPointerIsDefined(getAnchorScope(),
2327                               Val.getType()->getPointerAddressSpace()))
2328       indicateOptimisticFixpoint();
2329   }
2330 
  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
2333   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2334                             const AAMemoryBehavior &MemBehaviorAA,
2335                             const CallBase &CB, unsigned OtherArgNo) {
2336     // We do not need to worry about aliasing with the underlying IRP.
2337     if (this->getArgNo() == (int)OtherArgNo)
2338       return false;
2339 
2340     // If it is not a pointer or pointer vector we do not alias.
2341     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2342     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2343       return false;
2344 
2345     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2346         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2347         /* TrackDependence */ false);
2348 
2349     // If the argument is readnone, there is no read-write aliasing.
2350     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2351       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2352       return false;
2353     }
2354 
2355     // If the argument is readonly and the underlying value is readonly, there
2356     // is no read-write aliasing.
2357     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2358     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2359       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2360       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2361       return false;
2362     }
2363 
2364     // We have to utilize actual alias analysis queries so we need the object.
2365     if (!AAR)
2366       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2367 
2368     // Try to rule it out at the call site.
2369     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2370     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2371                          "callsite arguments: "
2372                       << getAssociatedValue() << " " << *ArgOp << " => "
2373                       << (IsAliasing ? "" : "no-") << "alias \n");
2374 
2375     return IsAliasing;
2376   }
2377 
2378   bool
2379   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2380                                          const AAMemoryBehavior &MemBehaviorAA,
2381                                          const AANoAlias &NoAliasAA) {
2382     // We can deduce "noalias" if the following conditions hold.
2383     // (i)   Associated value is assumed to be noalias in the definition.
2384     // (ii)  Associated value is assumed to be no-capture in all the uses
2385     //       possibly executed before this callsite.
2386     // (iii) There is no other pointer argument which could alias with the
2387     //       value.
2388 
2389     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2390     if (!AssociatedValueIsNoAliasAtDef) {
2391       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2392                         << " is not no-alias at the definition\n");
2393       return false;
2394     }
2395 
2396     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2397 
2398     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2399     auto &NoCaptureAA =
2400         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // call site.
2404     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2405       Instruction *UserI = cast<Instruction>(U.getUser());
2406 
      // If the user is the context instruction itself and has a single use.
2408       if (UserI == getCtxI() && UserI->hasOneUse())
2409         return true;
2410 
2411       const Function *ScopeFn = VIRP.getAnchorScope();
2412       if (ScopeFn) {
2413         const auto &ReachabilityAA =
2414             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2415 
2416         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2417           return true;
2418 
2419         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2420           if (CB->isArgOperand(&U)) {
2421 
2422             unsigned ArgNo = CB->getArgOperandNo(&U);
2423 
2424             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2425                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2426 
2427             if (NoCaptureAA.isAssumedNoCapture())
2428               return true;
2429           }
2430         }
2431       }
2432 
      // Follow uses that can potentially expose more users.
2434       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2435           isa<SelectInst>(U)) {
2436         Follow = true;
2437         return true;
2438       }
2439 
2440       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2441       return false;
2442     };
2443 
2444     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2445       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2446         LLVM_DEBUG(
2447             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2448                    << " cannot be noalias as it is potentially captured\n");
2449         return false;
2450       }
2451     }
2452     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2453 
2454     // Check there is no other pointer argument which could alias with the
2455     // value passed at this call site.
2456     // TODO: AbstractCallSite
2457     const auto &CB = cast<CallBase>(getAnchorValue());
2458     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2459          OtherArgNo++)
2460       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2461         return false;
2462 
2463     return true;
2464   }
2465 
2466   /// See AbstractAttribute::updateImpl(...).
2467   ChangeStatus updateImpl(Attributor &A) override {
2468     // If the argument is readnone we are done as there are no accesses via the
2469     // argument.
2470     auto &MemBehaviorAA =
2471         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2472                                      /* TrackDependence */ false);
2473     if (MemBehaviorAA.isAssumedReadNone()) {
2474       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2475       return ChangeStatus::UNCHANGED;
2476     }
2477 
2478     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2479     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2480                                                   /* TrackDependence */ false);
2481 
2482     AAResults *AAR = nullptr;
2483     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2484                                                NoAliasAA)) {
2485       LLVM_DEBUG(
2486           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2487       return ChangeStatus::UNCHANGED;
2488     }
2489 
2490     return indicatePessimisticFixpoint();
2491   }
2492 
2493   /// See AbstractAttribute::trackStatistics()
2494   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2495 };
2496 
2497 /// NoAlias attribute for function return value.
2498 struct AANoAliasReturned final : AANoAliasImpl {
2499   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2500       : AANoAliasImpl(IRP, A) {}
2501 
2502   /// See AbstractAttribute::updateImpl(...).
2503   virtual ChangeStatus updateImpl(Attributor &A) override {
2504 
2505     auto CheckReturnValue = [&](Value &RV) -> bool {
2506       if (Constant *C = dyn_cast<Constant>(&RV))
2507         if (C->isNullValue() || isa<UndefValue>(C))
2508           return true;
2509 
      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
2512       if (!isa<CallBase>(&RV))
2513         return false;
2514 
2515       const IRPosition &RVPos = IRPosition::value(RV);
2516       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2517       if (!NoAliasAA.isAssumedNoAlias())
2518         return false;
2519 
2520       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2521       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2522     };
2523 
2524     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2525       return indicatePessimisticFixpoint();
2526 
2527     return ChangeStatus::UNCHANGED;
2528   }
2529 
2530   /// See AbstractAttribute::trackStatistics()
2531   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2532 };
2533 
2534 /// NoAlias attribute deduction for a call site return value.
2535 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2536   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2537       : AANoAliasImpl(IRP, A) {}
2538 
2539   /// See AbstractAttribute::initialize(...).
2540   void initialize(Attributor &A) override {
2541     AANoAliasImpl::initialize(A);
2542     Function *F = getAssociatedFunction();
2543     if (!F)
2544       indicatePessimisticFixpoint();
2545   }
2546 
2547   /// See AbstractAttribute::updateImpl(...).
2548   ChangeStatus updateImpl(Attributor &A) override {
2549     // TODO: Once we have call site specific value information we can provide
2550     //       call site specific liveness information and then it makes
2551     //       sense to specialize attributes for call sites arguments instead of
2552     //       redirecting requests to the callee argument.
2553     Function *F = getAssociatedFunction();
2554     const IRPosition &FnPos = IRPosition::returned(*F);
2555     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2556     return clampStateAndIndicateChange(
2557         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2558   }
2559 
2560   /// See AbstractAttribute::trackStatistics()
2561   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2562 };
2563 
2564 /// -------------------AAIsDead Function Attribute-----------------------
2565 
2566 struct AAIsDeadValueImpl : public AAIsDead {
2567   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2568 
2569   /// See AAIsDead::isAssumedDead().
2570   bool isAssumedDead() const override { return getAssumed(); }
2571 
2572   /// See AAIsDead::isKnownDead().
2573   bool isKnownDead() const override { return getKnown(); }
2574 
2575   /// See AAIsDead::isAssumedDead(BasicBlock *).
2576   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2577 
2578   /// See AAIsDead::isKnownDead(BasicBlock *).
2579   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2580 
2581   /// See AAIsDead::isAssumedDead(Instruction *I).
2582   bool isAssumedDead(const Instruction *I) const override {
2583     return I == getCtxI() && isAssumedDead();
2584   }
2585 
2586   /// See AAIsDead::isKnownDead(Instruction *I).
2587   bool isKnownDead(const Instruction *I) const override {
2588     return isAssumedDead(I) && getKnown();
2589   }
2590 
2591   /// See AbstractAttribute::getAsStr().
2592   const std::string getAsStr() const override {
2593     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2594   }
2595 
2596   /// Check if all uses are assumed dead.
2597   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2598     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2599     // Explicitly set the dependence class to required because we want a long
2600     // chain of N dependent instructions to be considered live as soon as one is
2601     // without going through N update cycles. This is not required for
2602     // correctness.
2603     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2604   }
2605 
2606   /// Determine if \p I is assumed to be side-effect free.
2607   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2608     if (!I || wouldInstructionBeTriviallyDead(I))
2609       return true;
2610 
2611     auto *CB = dyn_cast<CallBase>(I);
2612     if (!CB || isa<IntrinsicInst>(CB))
2613       return false;
2614 
2615     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2616     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2617     if (!NoUnwindAA.isAssumedNoUnwind())
2618       return false;
2619 
2620     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2621         *this, CallIRP, /* TrackDependence */ false);
2622     if (MemBehaviorAA.isAssumedReadOnly()) {
2623       if (!MemBehaviorAA.isKnownReadOnly())
2624         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2625       return true;
2626     }
2627     return false;
2628   }
2629 };
2630 
2631 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2632   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2633       : AAIsDeadValueImpl(IRP, A) {}
2634 
2635   /// See AbstractAttribute::initialize(...).
2636   void initialize(Attributor &A) override {
2637     if (isa<UndefValue>(getAssociatedValue())) {
2638       indicatePessimisticFixpoint();
2639       return;
2640     }
2641 
2642     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2643     if (!isAssumedSideEffectFree(A, I))
2644       indicatePessimisticFixpoint();
2645   }
2646 
2647   /// See AbstractAttribute::updateImpl(...).
2648   ChangeStatus updateImpl(Attributor &A) override {
2649     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2650     if (!isAssumedSideEffectFree(A, I))
2651       return indicatePessimisticFixpoint();
2652 
2653     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2654       return indicatePessimisticFixpoint();
2655     return ChangeStatus::UNCHANGED;
2656   }
2657 
2658   /// See AbstractAttribute::manifest(...).
2659   ChangeStatus manifest(Attributor &A) override {
2660     Value &V = getAssociatedValue();
2661     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check
      // isAssumedSideEffectFree again because it might no longer hold; in
      // that case only the users are dead but the instruction (= the call)
      // is still needed.
2666       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2667         A.deleteAfterManifest(*I);
2668         return ChangeStatus::CHANGED;
2669       }
2670     }
2671     if (V.use_empty())
2672       return ChangeStatus::UNCHANGED;
2673 
2674     bool UsedAssumedInformation = false;
2675     Optional<Constant *> C =
2676         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2677     if (C.hasValue() && C.getValue())
2678       return ChangeStatus::UNCHANGED;
2679 
2680     // Replace the value with undef as it is dead but keep droppable uses around
2681     // as they provide information we don't want to give up on just yet.
2682     UndefValue &UV = *UndefValue::get(V.getType());
2683     bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2685     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2686   }
2687 
2688   /// See AbstractAttribute::trackStatistics()
2689   void trackStatistics() const override {
2690     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2691   }
2692 };
2693 
2694 struct AAIsDeadArgument : public AAIsDeadFloating {
2695   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2696       : AAIsDeadFloating(IRP, A) {}
2697 
2698   /// See AbstractAttribute::initialize(...).
2699   void initialize(Attributor &A) override {
2700     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2701       indicatePessimisticFixpoint();
2702   }
2703 
2704   /// See AbstractAttribute::manifest(...).
2705   ChangeStatus manifest(Attributor &A) override {
2706     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2707     Argument &Arg = *getAssociatedArgument();
2708     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2709       if (A.registerFunctionSignatureRewrite(
2710               Arg, /* ReplacementTypes */ {},
2711               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2712               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2713         Arg.dropDroppableUses();
2714         return ChangeStatus::CHANGED;
2715       }
2716     return Changed;
2717   }
2718 
2719   /// See AbstractAttribute::trackStatistics()
2720   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2721 };
2722 
2723 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2724   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2725       : AAIsDeadValueImpl(IRP, A) {}
2726 
2727   /// See AbstractAttribute::initialize(...).
2728   void initialize(Attributor &A) override {
2729     if (isa<UndefValue>(getAssociatedValue()))
2730       indicatePessimisticFixpoint();
2731   }
2732 
2733   /// See AbstractAttribute::updateImpl(...).
2734   ChangeStatus updateImpl(Attributor &A) override {
2735     // TODO: Once we have call site specific value information we can provide
2736     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2738     //       redirecting requests to the callee argument.
2739     Argument *Arg = getAssociatedArgument();
2740     if (!Arg)
2741       return indicatePessimisticFixpoint();
2742     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2743     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2744     return clampStateAndIndicateChange(
2745         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2746   }
2747 
2748   /// See AbstractAttribute::manifest(...).
2749   ChangeStatus manifest(Attributor &A) override {
2750     CallBase &CB = cast<CallBase>(getAnchorValue());
2751     Use &U = CB.getArgOperandUse(getArgNo());
2752     assert(!isa<UndefValue>(U.get()) &&
2753            "Expected undef values to be filtered out!");
2754     UndefValue &UV = *UndefValue::get(U->getType());
2755     if (A.changeUseAfterManifest(U, UV))
2756       return ChangeStatus::CHANGED;
2757     return ChangeStatus::UNCHANGED;
2758   }
2759 
2760   /// See AbstractAttribute::trackStatistics()
2761   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2762 };
2763 
2764 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2765   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2766       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2767 
2768   /// See AAIsDead::isAssumedDead().
2769   bool isAssumedDead() const override {
2770     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2771   }
2772 
2773   /// See AbstractAttribute::initialize(...).
2774   void initialize(Attributor &A) override {
2775     if (isa<UndefValue>(getAssociatedValue())) {
2776       indicatePessimisticFixpoint();
2777       return;
2778     }
2779 
2780     // We track this separately as a secondary state.
2781     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2782   }
2783 
2784   /// See AbstractAttribute::updateImpl(...).
2785   ChangeStatus updateImpl(Attributor &A) override {
2786     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2787     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2788       IsAssumedSideEffectFree = false;
2789       Changed = ChangeStatus::CHANGED;
2790     }
2791 
2792     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2793       return indicatePessimisticFixpoint();
2794     return Changed;
2795   }
2796 
2797   /// See AbstractAttribute::trackStatistics()
2798   void trackStatistics() const override {
2799     if (IsAssumedSideEffectFree)
2800       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2801     else
2802       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2803   }
2804 
2805   /// See AbstractAttribute::getAsStr().
2806   const std::string getAsStr() const override {
2807     return isAssumedDead()
2808                ? "assumed-dead"
2809                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2810   }
2811 
2812 private:
2813   bool IsAssumedSideEffectFree;
2814 };
2815 
2816 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2817   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2818       : AAIsDeadValueImpl(IRP, A) {}
2819 
2820   /// See AbstractAttribute::updateImpl(...).
2821   ChangeStatus updateImpl(Attributor &A) override {
2822 
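    // Visit all return instructions with an always-true predicate. The visit
    // queries their liveness, which records a dependence so this attribute is
    // updated again if a return instruction becomes dead.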
2823     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2824                               {Instruction::Ret});
2825 
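    // The returned value is dead if, at every call site we can analyze, all
    // uses of the call are assumed dead. Callback call sites are treated
    // conservatively.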
2826     auto PredForCallSite = [&](AbstractCallSite ACS) {
2827       if (ACS.isCallbackCall() || !ACS.getInstruction())
2828         return false;
2829       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2830     };
2831 
2832     bool AllCallSitesKnown;
2833     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2834                                 AllCallSitesKnown))
2835       return indicatePessimisticFixpoint();
2836 
2837     return ChangeStatus::UNCHANGED;
2838   }
2839 
2840   /// See AbstractAttribute::manifest(...).
2841   ChangeStatus manifest(Attributor &A) override {
2842     // TODO: Rewrite the signature to return void?
2843     bool AnyChange = false;
2844     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2845     auto RetInstPred = [&](Instruction &I) {
2846       ReturnInst &RI = cast<ReturnInst>(I);
2847       if (!isa<UndefValue>(RI.getReturnValue()))
2848         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2849       return true;
2850     };
2851     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2852     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2853   }
2854 
2855   /// See AbstractAttribute::trackStatistics()
2856   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2857 };
2858 
2859 struct AAIsDeadFunction : public AAIsDead {
2860   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2861 
2862   /// See AbstractAttribute::initialize(...).
2863   void initialize(Attributor &A) override {
2864     const Function *F = getAnchorScope();
2865     if (F && !F->isDeclaration()) {
2866       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2867       assumeLive(A, F->getEntryBlock());
2868     }
2869   }
2870 
2871   /// See AbstractAttribute::getAsStr().
2872   const std::string getAsStr() const override {
2873     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2874            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2875            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2876            std::to_string(KnownDeadEnds.size()) + "]";
2877   }
2878 
2879   /// See AbstractAttribute::manifest(...).
2880   ChangeStatus manifest(Attributor &A) override {
2881     assert(getState().isValidState() &&
2882            "Attempted to manifest an invalid state!");
2883 
2884     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2885     Function &F = *getAnchorScope();
2886 
2887     if (AssumedLiveBlocks.empty()) {
2888       A.deleteAfterManifest(F);
2889       return ChangeStatus::CHANGED;
2890     }
2891 
    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
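    // (Illustrative: Windows SEH personalities may catch asynchronous
    // exceptions such as access violations, so even a nounwind callee can
    // transfer control to the unwind destination under such a personality.)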
2895     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2896 
2897     KnownDeadEnds.set_union(ToBeExploredFrom);
2898     for (const Instruction *DeadEndI : KnownDeadEnds) {
2899       auto *CB = dyn_cast<CallBase>(DeadEndI);
2900       if (!CB)
2901         continue;
2902       const auto &NoReturnAA =
2903           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2904       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2905       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2906         continue;
2907 
2908       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2909         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2910       else
2911         A.changeToUnreachableAfterManifest(
2912             const_cast<Instruction *>(DeadEndI->getNextNode()));
2913       HasChanged = ChangeStatus::CHANGED;
2914     }
2915 
2916     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2917     for (BasicBlock &BB : F)
2918       if (!AssumedLiveBlocks.count(&BB)) {
2919         A.deleteAfterManifest(BB);
2920         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2921       }
2922 
2923     return HasChanged;
2924   }
2925 
2926   /// See AbstractAttribute::updateImpl(...).
2927   ChangeStatus updateImpl(Attributor &A) override;
2928 
2929   /// See AbstractAttribute::trackStatistics()
2930   void trackStatistics() const override {}
2931 
  /// See AAIsDead::isAssumedDead(). The function itself is never assumed dead
  /// as a whole; liveness is tracked per block and instruction (see below).
  bool isAssumedDead() const override { return false; }
2934 
2935   /// See AAIsDead::isKnownDead().
2936   bool isKnownDead() const override { return false; }
2937 
2938   /// See AAIsDead::isAssumedDead(BasicBlock *).
2939   bool isAssumedDead(const BasicBlock *BB) const override {
2940     assert(BB->getParent() == getAnchorScope() &&
2941            "BB must be in the same anchor scope function.");
2942 
2943     if (!getAssumed())
2944       return false;
2945     return !AssumedLiveBlocks.count(BB);
2946   }
2947 
2948   /// See AAIsDead::isKnownDead(BasicBlock *).
2949   bool isKnownDead(const BasicBlock *BB) const override {
2950     return getKnown() && isAssumedDead(BB);
2951   }
2952 
2953   /// See AAIsDead::isAssumed(Instruction *I).
2954   bool isAssumedDead(const Instruction *I) const override {
2955     assert(I->getParent()->getParent() == getAnchorScope() &&
2956            "Instruction must be in the same anchor scope function.");
2957 
2958     if (!getAssumed())
2959       return false;
2960 
    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be dead if it comes after a noreturn call in a
    // live block.
2963     if (!AssumedLiveBlocks.count(I->getParent()))
2964       return true;
2965 
2966     // If it is not after a liveness barrier it is live.
2967     const Instruction *PrevI = I->getPrevNode();
2968     while (PrevI) {
2969       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2970         return true;
2971       PrevI = PrevI->getPrevNode();
2972     }
2973     return false;
2974   }
2975 
2976   /// See AAIsDead::isKnownDead(Instruction *I).
2977   bool isKnownDead(const Instruction *I) const override {
2978     return getKnown() && isAssumedDead(I);
2979   }
2980 
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
2983   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2984     if (!AssumedLiveBlocks.insert(&BB).second)
2985       return false;
2986 
2987     // We assume that all of BB is (probably) live now and if there are calls to
2988     // internal functions we will assume that those are now live as well. This
2989     // is a performance optimization for blocks with calls to a lot of internal
2990     // functions. It can however cause dead functions to be treated as live.
2991     for (const Instruction &I : BB)
2992       if (const auto *CB = dyn_cast<CallBase>(&I))
2993         if (const Function *F = CB->getCalledFunction())
2994           if (F->hasLocalLinkage())
2995             A.markLiveInternalFunction(*F);
2996     return true;
2997   }
2998 
  /// Collection of instructions that need to be explored again, e.g., because
  /// we assumed they do not transfer control to (one of their) successors.
3001   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3002 
3003   /// Collection of instructions that are known to not transfer control.
3004   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3005 
3006   /// Collection of all assumed live BasicBlocks.
3007   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3008 };
3009 
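/// Determine which successors of the call \p CB are assumed live and append
/// them to \p AliveSuccessors. The return value indicates if assumed (not
/// known) information was used, i.e., the result may change in later updates.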
3010 static bool
3011 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3012                         AbstractAttribute &AA,
3013                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3014   const IRPosition &IPos = IRPosition::callsite_function(CB);
3015 
3016   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3017   if (NoReturnAA.isAssumedNoReturn())
3018     return !NoReturnAA.isKnownNoReturn();
3019   if (CB.isTerminator())
3020     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3021   else
3022     AliveSuccessors.push_back(CB.getNextNode());
3023   return false;
3024 }
3025 
3026 static bool
3027 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3028                         AbstractAttribute &AA,
3029                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3030   bool UsedAssumedInformation =
3031       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3032 
  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
3036   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3037     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3038   } else {
3039     const IRPosition &IPos = IRPosition::callsite_function(II);
3040     const auto &AANoUnw = A.getAAFor<AANoUnwind>(
3041         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3042     if (AANoUnw.isAssumedNoUnwind()) {
3043       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3044     } else {
3045       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3046     }
3047   }
3048   return UsedAssumedInformation;
3049 }
3050 
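/// As above, but for a branch \p BI. If the condition is an assumed constant,
/// only the taken edge is considered live.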
3051 static bool
3052 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3053                         AbstractAttribute &AA,
3054                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3055   bool UsedAssumedInformation = false;
3056   if (BI.getNumSuccessors() == 1) {
3057     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3058   } else {
3059     Optional<ConstantInt *> CI = getAssumedConstantInt(
3060         A, *BI.getCondition(), AA, UsedAssumedInformation);
3061     if (!CI.hasValue()) {
3062       // No value yet, assume both edges are dead.
3063     } else if (CI.getValue()) {
3064       const BasicBlock *SuccBB =
3065           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3066       AliveSuccessors.push_back(&SuccBB->front());
3067     } else {
3068       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3069       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3070       UsedAssumedInformation = false;
3071     }
3072   }
3073   return UsedAssumedInformation;
3074 }
3075 
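/// As above, but for a switch \p SI. With an assumed constant condition only
/// the matching case successor (or the default destination) is considered
/// live.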
3076 static bool
3077 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3078                         AbstractAttribute &AA,
3079                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3080   bool UsedAssumedInformation = false;
3081   Optional<ConstantInt *> CI =
3082       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3083   if (!CI.hasValue()) {
3084     // No value yet, assume all edges are dead.
3085   } else if (CI.getValue()) {
3086     for (auto &CaseIt : SI.cases()) {
3087       if (CaseIt.getCaseValue() == CI.getValue()) {
3088         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3089         return UsedAssumedInformation;
3090       }
3091     }
3092     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3093     return UsedAssumedInformation;
3094   } else {
3095     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3096       AliveSuccessors.push_back(&SuccBB->front());
3097   }
3098   return UsedAssumedInformation;
3099 }
3100 
3101 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3102   ChangeStatus Change = ChangeStatus::UNCHANGED;
3103 
3104   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3105                     << getAnchorScope()->size() << "] BBs and "
3106                     << ToBeExploredFrom.size() << " exploration points and "
3107                     << KnownDeadEnds.size() << " known dead ends\n");
3108 
3109   // Copy and clear the list of instructions we need to explore from. It is
3110   // refilled with instructions the next update has to look at.
3111   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3112                                                ToBeExploredFrom.end());
3113   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3114 
3115   SmallVector<const Instruction *, 8> AliveSuccessors;
3116   while (!Worklist.empty()) {
3117     const Instruction *I = Worklist.pop_back_val();
3118     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3119 
3120     AliveSuccessors.clear();
3121 
3122     bool UsedAssumedInformation = false;
3123     switch (I->getOpcode()) {
3124     // TODO: look for (assumed) UB to backwards propagate "deadness".
3125     default:
3126       if (I->isTerminator()) {
3127         for (const BasicBlock *SuccBB : successors(I->getParent()))
3128           AliveSuccessors.push_back(&SuccBB->front());
3129       } else {
3130         AliveSuccessors.push_back(I->getNextNode());
3131       }
3132       break;
3133     case Instruction::Call:
3134       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3135                                                        *this, AliveSuccessors);
3136       break;
3137     case Instruction::Invoke:
3138       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3139                                                        *this, AliveSuccessors);
3140       break;
3141     case Instruction::Br:
3142       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3143                                                        *this, AliveSuccessors);
3144       break;
3145     case Instruction::Switch:
3146       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3147                                                        *this, AliveSuccessors);
3148       break;
3149     }
3150 
3151     if (UsedAssumedInformation) {
3152       NewToBeExploredFrom.insert(I);
3153     } else {
3154       Change = ChangeStatus::CHANGED;
3155       if (AliveSuccessors.empty() ||
3156           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3157         KnownDeadEnds.insert(I);
3158     }
3159 
3160     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3161                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3162                       << UsedAssumedInformation << "\n");
3163 
3164     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3165       if (!I->isTerminator()) {
3166         assert(AliveSuccessors.size() == 1 &&
3167                "Non-terminator expected to have a single successor!");
3168         Worklist.push_back(AliveSuccessor);
3169       } else {
3170         if (assumeLive(A, *AliveSuccessor->getParent()))
3171           Worklist.push_back(AliveSuccessor);
3172       }
3173     }
3174   }
3175 
3176   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3177 
3178   // If we know everything is live there is no need to query for liveness.
3179   // Instead, indicating a pessimistic fixpoint will cause the state to be
3180   // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have discovered any non-trivial dead end, and (3) not have ruled any
  // unreachable code dead.
3184   if (ToBeExploredFrom.empty() &&
3185       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3186       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3187         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3188       }))
3189     return indicatePessimisticFixpoint();
3190   return Change;
3191 }
3192 
/// Liveness information for a call site.
3194 struct AAIsDeadCallSite final : AAIsDeadFunction {
3195   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3196       : AAIsDeadFunction(IRP, A) {}
3197 
3198   /// See AbstractAttribute::initialize(...).
3199   void initialize(Attributor &A) override {
3200     // TODO: Once we have call site specific value information we can provide
3201     //       call site specific liveness information and then it makes
3202     //       sense to specialize attributes for call sites instead of
3203     //       redirecting requests to the callee.
3204     llvm_unreachable("Abstract attributes for liveness are not "
3205                      "supported for call sites yet!");
3206   }
3207 
3208   /// See AbstractAttribute::updateImpl(...).
3209   ChangeStatus updateImpl(Attributor &A) override {
3210     return indicatePessimisticFixpoint();
3211   }
3212 
3213   /// See AbstractAttribute::trackStatistics()
3214   void trackStatistics() const override {}
3215 };
3216 
3217 /// -------------------- Dereferenceable Argument Attribute --------------------
3218 
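/// Clamp the dereferenceable-bytes and "globally dereferenceable" components
/// of \p S against those of \p R and report if either one changed.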
3219 template <>
3220 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3221                                                      const DerefState &R) {
3222   ChangeStatus CS0 =
3223       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3224   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3225   return CS0 | CS1;
3226 }
3227 
3228 struct AADereferenceableImpl : AADereferenceable {
3229   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3230       : AADereferenceable(IRP, A) {}
3231   using StateType = DerefState;
3232 
3233   /// See AbstractAttribute::initialize(...).
3234   void initialize(Attributor &A) override {
3235     SmallVector<Attribute, 4> Attrs;
3236     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3237              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3238     for (const Attribute &Attr : Attrs)
3239       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3240 
3241     const IRPosition &IRP = this->getIRPosition();
3242     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
3243                                        /* TrackDependence */ false);
3244 
3245     bool CanBeNull;
3246     takeKnownDerefBytesMaximum(
3247         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3248             A.getDataLayout(), CanBeNull));
3249 
3250     bool IsFnInterface = IRP.isFnInterfaceKind();
3251     Function *FnScope = IRP.getAnchorScope();
3252     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3253       indicatePessimisticFixpoint();
3254       return;
3255     }
3256 
3257     if (Instruction *CtxI = getCtxI())
3258       followUsesInMBEC(*this, A, getState(), *CtxI);
3259   }
3260 
3261   /// See AbstractAttribute::getState()
3262   /// {
3263   StateType &getState() override { return *this; }
3264   const StateType &getState() const override { return *this; }
3265   /// }
3266 
3267   /// Helper function for collecting accessed bytes in must-be-executed-context
3268   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3269                               DerefState &State) {
3270     const Value *UseV = U->get();
3271     if (!UseV->getType()->isPointerTy())
3272       return;
3273 
3274     Type *PtrTy = UseV->getType();
3275     const DataLayout &DL = A.getDataLayout();
3276     int64_t Offset;
3277     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3278             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3279       if (Base == &getAssociatedValue() &&
3280           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3281         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3282         State.addAccessedBytes(Offset, Size);
3283       }
3284     }
3285     return;
3286   }
3287 
3288   /// See followUsesInMBEC
3289   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3290                        AADereferenceable::StateType &State) {
3291     bool IsNonNull = false;
3292     bool TrackUse = false;
3293     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3294         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3295 
3296     addAccessedBytesForUse(A, U, I, State);
3297     State.takeKnownDerefBytesMaximum(DerefBytes);
3298     return TrackUse;
3299   }
3300 
3301   /// See AbstractAttribute::manifest(...).
3302   ChangeStatus manifest(Attributor &A) override {
3303     ChangeStatus Change = AADereferenceable::manifest(A);
3304     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3305       removeAttrs({Attribute::DereferenceableOrNull});
3306       return ChangeStatus::CHANGED;
3307     }
3308     return Change;
3309   }
3310 
3311   void getDeducedAttributes(LLVMContext &Ctx,
3312                             SmallVectorImpl<Attribute> &Attrs) const override {
3313     // TODO: Add *_globally support
3314     if (isAssumedNonNull())
3315       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3316           Ctx, getAssumedDereferenceableBytes()));
3317     else
3318       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3319           Ctx, getAssumedDereferenceableBytes()));
3320   }
3321 
3322   /// See AbstractAttribute::getAsStr().
3323   const std::string getAsStr() const override {
3324     if (!getAssumedDereferenceableBytes())
3325       return "unknown-dereferenceable";
3326     return std::string("dereferenceable") +
3327            (isAssumedNonNull() ? "" : "_or_null") +
3328            (isAssumedGlobal() ? "_globally" : "") + "<" +
3329            std::to_string(getKnownDereferenceableBytes()) + "-" +
3330            std::to_string(getAssumedDereferenceableBytes()) + ">";
3331   }
3332 };
3333 
3334 /// Dereferenceable attribute for a floating value.
3335 struct AADereferenceableFloating : AADereferenceableImpl {
3336   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3337       : AADereferenceableImpl(IRP, A) {}
3338 
3339   /// See AbstractAttribute::updateImpl(...).
3340   ChangeStatus updateImpl(Attributor &A) override {
3341     const DataLayout &DL = A.getDataLayout();
3342 
3343     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3344                             bool Stripped) -> bool {
3345       unsigned IdxWidth =
3346           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3347       APInt Offset(IdxWidth, 0);
3348       const Value *Base =
3349           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3350 
3351       const auto &AA =
3352           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3353       int64_t DerefBytes = 0;
3354       if (!Stripped && this == &AA) {
3355         // Use IR information if we did not strip anything.
3356         // TODO: track globally.
3357         bool CanBeNull;
3358         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3359         T.GlobalState.indicatePessimisticFixpoint();
3360       } else {
3361         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3362         DerefBytes = DS.DerefBytesState.getAssumed();
3363         T.GlobalState &= DS.GlobalState;
3364       }
3365 
3366       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3367 
3368       // For now we do not try to "increase" dereferenceability due to negative
3369       // indices as we first have to come up with code to deal with loops and
3370       // for overflows of the dereferenceable bytes.
3371       int64_t OffsetSExt = Offset.getSExtValue();
3372       if (OffsetSExt < 0)
3373         OffsetSExt = 0;
3374 
3375       T.takeAssumedDerefBytesMinimum(
3376           std::max(int64_t(0), DerefBytes - OffsetSExt));
3377 
3378       if (this == &AA) {
3379         if (!Stripped) {
3380           // If nothing was stripped IR information is all we got.
3381           T.takeKnownDerefBytesMaximum(
3382               std::max(int64_t(0), DerefBytes - OffsetSExt));
3383           T.indicatePessimisticFixpoint();
3384         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way that we
          // can accelerate.
3390           T.indicatePessimisticFixpoint();
3391         }
3392       }
3393 
3394       return T.isValidState();
3395     };
3396 
3397     DerefState T;
3398     if (!genericValueTraversal<AADereferenceable, DerefState>(
3399             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3400       return indicatePessimisticFixpoint();
3401 
3402     return clampStateAndIndicateChange(getState(), T);
3403   }
3404 
3405   /// See AbstractAttribute::trackStatistics()
3406   void trackStatistics() const override {
3407     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3408   }
3409 };
3410 
3411 /// Dereferenceable attribute for a return value.
3412 struct AADereferenceableReturned final
3413     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3414   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3415       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3416             IRP, A) {}
3417 
3418   /// See AbstractAttribute::trackStatistics()
3419   void trackStatistics() const override {
3420     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3421   }
3422 };
3423 
3424 /// Dereferenceable attribute for an argument
3425 struct AADereferenceableArgument final
3426     : AAArgumentFromCallSiteArguments<AADereferenceable,
3427                                       AADereferenceableImpl> {
3428   using Base =
3429       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3430   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3431       : Base(IRP, A) {}
3432 
3433   /// See AbstractAttribute::trackStatistics()
3434   void trackStatistics() const override {
3435     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3436   }
3437 };
3438 
3439 /// Dereferenceable attribute for a call site argument.
3440 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3441   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3442       : AADereferenceableFloating(IRP, A) {}
3443 
3444   /// See AbstractAttribute::trackStatistics()
3445   void trackStatistics() const override {
3446     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3447   }
3448 };
3449 
3450 /// Dereferenceable attribute deduction for a call site return value.
3451 struct AADereferenceableCallSiteReturned final
3452     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3453   using Base =
3454       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3455   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3456       : Base(IRP, A) {}
3457 
3458   /// See AbstractAttribute::trackStatistics()
3459   void trackStatistics() const override {
3460     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3461   }
3462 };
3463 
3464 // ------------------------ Align Argument Attribute ------------------------
3465 
/// \p Ptr is accessed, so we can derive alignment information if the ABI
/// requires the accessed element type to be aligned.
3468 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3469                                                    const DataLayout &DL) {
3470   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3471   Type *ElementTy = Ptr->getType()->getPointerElementType();
3472   if (ElementTy->isSized())
3473     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3474   return KnownAlignment;
3475 }
3476 
3477 static unsigned getKnownAlignForUse(Attributor &A,
3478                                     AbstractAttribute &QueryingAA,
3479                                     Value &AssociatedValue, const Use *U,
3480                                     const Instruction *I, bool &TrackUse) {
3481   // We need to follow common pointer manipulation uses to the accesses they
3482   // feed into.
3483   if (isa<CastInst>(I)) {
3484     // Follow all but ptr2int casts.
3485     TrackUse = !isa<PtrToIntInst>(I);
3486     return 0;
3487   }
3488   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3489     if (GEP->hasAllConstantIndices()) {
3490       TrackUse = true;
3491       return 0;
3492     }
3493   }
3494 
3495   MaybeAlign MA;
3496   if (const auto *CB = dyn_cast<CallBase>(I)) {
3497     if (CB->isBundleOperand(U) || CB->isCallee(U))
3498       return 0;
3499 
3500     unsigned ArgNo = CB->getArgOperandNo(U);
3501     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3502     // As long as we only use known information there is no need to track
3503     // dependences here.
3504     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3505                                         /* TrackDependence */ false);
3506     MA = MaybeAlign(AlignAA.getKnownAlign());
3507   }
3508 
3509   const DataLayout &DL = A.getDataLayout();
3510   const Value *UseV = U->get();
3511   if (auto *SI = dyn_cast<StoreInst>(I)) {
3512     if (SI->getPointerOperand() == UseV) {
3513       if (unsigned SIAlign = SI->getAlignment())
3514         MA = MaybeAlign(SIAlign);
3515       else
3516         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3517     }
3518   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3519     if (LI->getPointerOperand() == UseV) {
3520       if (unsigned LIAlign = LI->getAlignment())
3521         MA = MaybeAlign(LIAlign);
3522       else
3523         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3524     }
3525   }
3526 
3527   if (!MA.hasValue() || MA <= 1)
3528     return 0;
3529 
3530   unsigned Alignment = MA->value();
3531   int64_t Offset;
3532 
3533   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3534     if (Base == &AssociatedValue) {
3535       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3536       // So we can say that the maximum power of two which is a divisor of
3537       // gcd(Offset, Alignment) is an alignment.
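      // Example (illustrative): Offset = 12 and Alignment = 8 yield
      // gcd(12, 8) = 4, so an alignment of 4 holds for the base pointer.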
3538 
3539       uint32_t gcd =
3540           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3541       Alignment = llvm::PowerOf2Floor(gcd);
3542     }
3543   }
3544 
3545   return Alignment;
3546 }
3547 
3548 struct AAAlignImpl : AAAlign {
3549   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3550 
3551   /// See AbstractAttribute::initialize(...).
3552   void initialize(Attributor &A) override {
3553     SmallVector<Attribute, 4> Attrs;
3554     getAttrs({Attribute::Alignment}, Attrs);
3555     for (const Attribute &Attr : Attrs)
3556       takeKnownMaximum(Attr.getValueAsInt());
3557 
3558     Value &V = getAssociatedValue();
    // TODO: This is a HACK that avoids getPointerAlignment introducing a
    //       ptr2int use of the function pointer. This was caused by D73131. We
    //       want to avoid this for function pointers especially because we
    //       iterate their uses and int2ptr is not handled. It is not a
    //       correctness problem though!
3564     if (!V.getType()->getPointerElementType()->isFunctionTy())
3565       takeKnownMaximum(
3566           V.getPointerAlignment(A.getDataLayout()).valueOrOne().value());
3567 
3568     if (getIRPosition().isFnInterfaceKind() &&
3569         (!getAnchorScope() ||
3570          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3571       indicatePessimisticFixpoint();
3572       return;
3573     }
3574 
3575     if (Instruction *CtxI = getCtxI())
3576       followUsesInMBEC(*this, A, getState(), *CtxI);
3577   }
3578 
3579   /// See AbstractAttribute::manifest(...).
3580   ChangeStatus manifest(Attributor &A) override {
3581     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3582 
3583     // Check for users that allow alignment annotations.
3584     Value &AssociatedValue = getAssociatedValue();
3585     for (const Use &U : AssociatedValue.uses()) {
3586       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3587         if (SI->getPointerOperand() == &AssociatedValue)
3588           if (SI->getAlignment() < getAssumedAlign()) {
3589             STATS_DECLTRACK(AAAlign, Store,
3590                             "Number of times alignment added to a store");
3591             SI->setAlignment(Align(getAssumedAlign()));
3592             LoadStoreChanged = ChangeStatus::CHANGED;
3593           }
3594       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3595         if (LI->getPointerOperand() == &AssociatedValue)
3596           if (LI->getAlignment() < getAssumedAlign()) {
3597             LI->setAlignment(Align(getAssumedAlign()));
3598             STATS_DECLTRACK(AAAlign, Load,
3599                             "Number of times alignment added to a load");
3600             LoadStoreChanged = ChangeStatus::CHANGED;
3601           }
3602       }
3603     }
3604 
3605     ChangeStatus Changed = AAAlign::manifest(A);
3606 
3607     MaybeAlign InheritAlign =
3608         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3609     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3610       return LoadStoreChanged;
3611     return Changed | LoadStoreChanged;
3612   }
3613 
  // TODO: Provide a helper to determine the implied ABI alignment and check,
  //       in the existing manifest method and a new one for AAAlignImpl, that
  //       value to avoid making the alignment explicit if it did not improve.
3617 
3618   /// See AbstractAttribute::getDeducedAttributes
3619   virtual void
3620   getDeducedAttributes(LLVMContext &Ctx,
3621                        SmallVectorImpl<Attribute> &Attrs) const override {
3622     if (getAssumedAlign() > 1)
3623       Attrs.emplace_back(
3624           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3625   }
3626 
3627   /// See followUsesInMBEC
3628   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3629                        AAAlign::StateType &State) {
3630     bool TrackUse = false;
3631 
3632     unsigned int KnownAlign =
3633         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3634     State.takeKnownMaximum(KnownAlign);
3635 
3636     return TrackUse;
3637   }
3638 
3639   /// See AbstractAttribute::getAsStr().
3640   const std::string getAsStr() const override {
3641     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3642                                 "-" + std::to_string(getAssumedAlign()) + ">")
3643                              : "unknown-align";
3644   }
3645 };
3646 
3647 /// Align attribute for a floating value.
3648 struct AAAlignFloating : AAAlignImpl {
3649   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3650 
3651   /// See AbstractAttribute::updateImpl(...).
3652   ChangeStatus updateImpl(Attributor &A) override {
3653     const DataLayout &DL = A.getDataLayout();
3654 
3655     auto VisitValueCB = [&](Value &V, const Instruction *,
3656                             AAAlign::StateType &T, bool Stripped) -> bool {
3657       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3658       if (!Stripped && this == &AA) {
3659         // Use only IR information if we did not strip anything.
3660         const MaybeAlign PA = V.getPointerAlignment(DL);
3661         T.takeKnownMaximum(PA ? PA->value() : 0);
3662         T.indicatePessimisticFixpoint();
3663       } else {
3664         // Use abstract attribute information.
3665         const AAAlign::StateType &DS =
3666             static_cast<const AAAlign::StateType &>(AA.getState());
3667         T ^= DS;
3668       }
3669       return T.isValidState();
3670     };
3671 
3672     StateType T;
3673     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3674                                                    VisitValueCB, getCtxI()))
3675       return indicatePessimisticFixpoint();
3676 
    // TODO: If we know we visited all incoming values, i.e., none were assumed
    // dead, we can take the known information from the state T.
3679     return clampStateAndIndicateChange(getState(), T);
3680   }
3681 
3682   /// See AbstractAttribute::trackStatistics()
3683   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3684 };
3685 
3686 /// Align attribute for function return value.
3687 struct AAAlignReturned final
3688     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3689   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3690       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3691 
3692   /// See AbstractAttribute::trackStatistics()
3693   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3694 };
3695 
3696 /// Align attribute for function argument.
3697 struct AAAlignArgument final
3698     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3699   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3700   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3701 
3702   /// See AbstractAttribute::manifest(...).
3703   ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in-sync. It just does not seem worth the trouble right now.
3707     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3708       return ChangeStatus::UNCHANGED;
3709     return Base::manifest(A);
3710   }
3711 
3712   /// See AbstractAttribute::trackStatistics()
3713   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3714 };
3715 
3716 struct AAAlignCallSiteArgument final : AAAlignFloating {
3717   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3718       : AAAlignFloating(IRP, A) {}
3719 
3720   /// See AbstractAttribute::manifest(...).
3721   ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in-sync. It just does not seem worth the trouble right now.
3725     if (Argument *Arg = getAssociatedArgument())
3726       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3727         return ChangeStatus::UNCHANGED;
3728     ChangeStatus Changed = AAAlignImpl::manifest(A);
3729     MaybeAlign InheritAlign =
3730         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3731     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3732       Changed = ChangeStatus::UNCHANGED;
3733     return Changed;
3734   }
3735 
3736   /// See AbstractAttribute::updateImpl(Attributor &A).
3737   ChangeStatus updateImpl(Attributor &A) override {
3738     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3739     if (Argument *Arg = getAssociatedArgument()) {
3740       // We only take known information from the argument
3741       // so we do not need to track a dependence.
3742       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3743           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3744       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3745     }
3746     return Changed;
3747   }
3748 
3749   /// See AbstractAttribute::trackStatistics()
3750   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3751 };
3752 
3753 /// Align attribute deduction for a call site return value.
3754 struct AAAlignCallSiteReturned final
3755     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3756   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3757   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3758       : Base(IRP, A) {}
3759 
3760   /// See AbstractAttribute::initialize(...).
3761   void initialize(Attributor &A) override {
3762     Base::initialize(A);
3763     Function *F = getAssociatedFunction();
3764     if (!F)
3765       indicatePessimisticFixpoint();
3766   }
3767 
3768   /// See AbstractAttribute::trackStatistics()
3769   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3770 };
3771 
3772 /// ------------------ Function No-Return Attribute ----------------------------
3773 struct AANoReturnImpl : public AANoReturn {
3774   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3775 
3776   /// See AbstractAttribute::initialize(...).
3777   void initialize(Attributor &A) override {
3778     AANoReturn::initialize(A);
3779     Function *F = getAssociatedFunction();
3780     if (!F)
3781       indicatePessimisticFixpoint();
3782   }
3783 
3784   /// See AbstractAttribute::getAsStr().
3785   const std::string getAsStr() const override {
3786     return getAssumed() ? "noreturn" : "may-return";
3787   }
3788 
3789   /// See AbstractAttribute::updateImpl(Attributor &A).
3790   virtual ChangeStatus updateImpl(Attributor &A) override {
3791     auto CheckForNoReturn = [](Instruction &) { return false; };
3792     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3793                                    {(unsigned)Instruction::Ret}))
3794       return indicatePessimisticFixpoint();
3795     return ChangeStatus::UNCHANGED;
3796   }
3797 };
3798 
3799 struct AANoReturnFunction final : AANoReturnImpl {
3800   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3801       : AANoReturnImpl(IRP, A) {}
3802 
3803   /// See AbstractAttribute::trackStatistics()
3804   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3805 };
3806 
3807 /// NoReturn attribute deduction for a call sites.
3808 struct AANoReturnCallSite final : AANoReturnImpl {
3809   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3810       : AANoReturnImpl(IRP, A) {}
3811 
3812   /// See AbstractAttribute::updateImpl(...).
3813   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
3818     Function *F = getAssociatedFunction();
3819     const IRPosition &FnPos = IRPosition::function(*F);
3820     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3821     return clampStateAndIndicateChange(
3822         getState(),
3823         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3824   }
3825 
3826   /// See AbstractAttribute::trackStatistics()
3827   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3828 };
3829 
3830 /// ----------------------- Variable Capturing ---------------------------------
3831 
/// A class to hold the state for no-capture attributes.
3833 struct AANoCaptureImpl : public AANoCapture {
3834   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3835 
3836   /// See AbstractAttribute::initialize(...).
3837   void initialize(Attributor &A) override {
3838     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3839       indicateOptimisticFixpoint();
3840       return;
3841     }
3842     Function *AnchorScope = getAnchorScope();
3843     if (isFnInterfaceKind() &&
3844         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3845       indicatePessimisticFixpoint();
3846       return;
3847     }
3848 
3849     // You cannot "capture" null in the default address space.
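    // (In other address spaces null may be a valid, dereferenceable address,
    // so this shortcut is restricted to address space 0.)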
3850     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3851         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3852       indicateOptimisticFixpoint();
3853       return;
3854     }
3855 
3856     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3857 
3858     // Check what state the associated function can actually capture.
3859     if (F)
3860       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3861     else
3862       indicatePessimisticFixpoint();
3863   }
3864 
3865   /// See AbstractAttribute::updateImpl(...).
3866   ChangeStatus updateImpl(Attributor &A) override;
3867 
3868   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3869   virtual void
3870   getDeducedAttributes(LLVMContext &Ctx,
3871                        SmallVectorImpl<Attribute> &Attrs) const override {
3872     if (!isAssumedNoCaptureMaybeReturned())
3873       return;
3874 
3875     if (getArgNo() >= 0) {
3876       if (isAssumedNoCapture())
3877         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3878       else if (ManifestInternal)
3879         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3880     }
3881   }
3882 
  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
  /// depending on the ability of the function associated with \p IRP to
  /// capture state in memory and through "returning/throwing", respectively.
3886   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3887                                                    const Function &F,
3888                                                    BitIntegerState &State) {
3889     // TODO: Once we have memory behavior attributes we should use them here.
3890 
3891     // If we know we cannot communicate or write to memory, we do not care about
3892     // ptr2int anymore.
3893     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3894         F.getReturnType()->isVoidTy()) {
3895       State.addKnownBits(NO_CAPTURE);
3896       return;
3897     }
3898 
    // A function cannot capture state in memory if it only reads memory; it
    // can, however, return/throw state and the state might be influenced by
    // the pointer value, e.g., loading from a returned pointer might reveal a
    // bit.
3902     if (F.onlyReadsMemory())
3903       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3904 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
3907     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3908       State.addKnownBits(NOT_CAPTURED_IN_RET);
3909 
3910     // Check existing "returned" attributes.
3911     int ArgNo = IRP.getArgNo();
3912     if (F.doesNotThrow() && ArgNo >= 0) {
3913       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3914         if (F.hasParamAttribute(u, Attribute::Returned)) {
3915           if (u == unsigned(ArgNo))
3916             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3917           else if (F.onlyReadsMemory())
3918             State.addKnownBits(NO_CAPTURE);
3919           else
3920             State.addKnownBits(NOT_CAPTURED_IN_RET);
3921           break;
3922         }
3923     }
3924   }
3925 
3926   /// See AbstractState::getAsStr().
3927   const std::string getAsStr() const override {
3928     if (isKnownNoCapture())
3929       return "known not-captured";
3930     if (isAssumedNoCapture())
3931       return "assumed not-captured";
3932     if (isKnownNoCaptureMaybeReturned())
3933       return "known not-captured-maybe-returned";
3934     if (isAssumedNoCaptureMaybeReturned())
3935       return "assumed not-captured-maybe-returned";
3936     return "assumed-captured";
3937   }
3938 };
3939 
3940 /// Attributor-aware capture tracker.
3941 struct AACaptureUseTracker final : public CaptureTracker {
3942 
3943   /// Create a capture tracker that can lookup in-flight abstract attributes
3944   /// through the Attributor \p A.
3945   ///
3946   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3947   /// search is stopped. If a use leads to a return instruction,
3948   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3949   /// If a use leads to a ptr2int which may capture the value,
3950   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3951   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3952   /// set. All values in \p PotentialCopies are later tracked as well. For every
3953   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3954   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3955   /// conservatively set to true.
3956   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3957                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3958                       SmallVectorImpl<const Value *> &PotentialCopies,
3959                       unsigned &RemainingUsesToExplore)
3960       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3961         PotentialCopies(PotentialCopies),
3962         RemainingUsesToExplore(RemainingUsesToExplore) {}
3963 
  /// Determine if \p V may be captured. *Also updates the state!*
3965   bool valueMayBeCaptured(const Value *V) {
3966     if (V->getType()->isPointerTy()) {
3967       PointerMayBeCaptured(V, this);
3968     } else {
3969       State.indicatePessimisticFixpoint();
3970     }
3971     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3972   }
3973 
3974   /// See CaptureTracker::tooManyUses().
3975   void tooManyUses() override {
3976     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3977   }
3978 
3979   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3980     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3981       return true;
3982     const auto &DerefAA = A.getAAFor<AADereferenceable>(
3983         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
3984         DepClassTy::OPTIONAL);
3985     return DerefAA.getAssumedDereferenceableBytes();
3986   }
3987 
3988   /// See CaptureTracker::captured(...).
3989   bool captured(const Use *U) override {
3990     Instruction *UInst = cast<Instruction>(U->getUser());
3991     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3992                       << "\n");
3993 
3994     // Because we may reuse the tracker multiple times we keep track of the
3995     // number of explored uses ourselves as well.
3996     if (RemainingUsesToExplore-- == 0) {
3997       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3998       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3999                           /* Return */ true);
4000     }
4001 
4002     // Deal with ptr2int by following uses.
4003     if (isa<PtrToIntInst>(UInst)) {
4004       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4005       return valueMayBeCaptured(UInst);
4006     }
4007 
4008     // Explicitly catch return instructions.
4009     if (isa<ReturnInst>(UInst))
4010       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4011                           /* Return */ true);
4012 
4013     // For now we only use special logic for call sites. However, the tracker
4014     // itself knows about a lot of other non-capturing cases already.
4015     auto *CB = dyn_cast<CallBase>(UInst);
4016     if (!CB || !CB->isArgOperand(U))
4017       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4018                           /* Return */ true);
4019 
4020     unsigned ArgNo = CB->getArgOperandNo(U);
4021     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a no-capture attribute here. This allows recursion!
4024     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4025     if (ArgNoCaptureAA.isAssumedNoCapture())
4026       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4027                           /* Return */ false);
4028     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4029       addPotentialCopy(*CB);
4030       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4031                           /* Return */ false);
4032     }
4033 
    // Lastly, we could not find a reason to assume no-capture, so we do not.
4035     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4036                         /* Return */ true);
4037   }
4038 
  /// Register \p CB as a potential copy of the value we are checking.
4040   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4041 
4042   /// See CaptureTracker::shouldExplore(...).
4043   bool shouldExplore(const Use *U) override {
4044     // Check liveness and ignore droppable users.
4045     return !U->getUser()->isDroppable() &&
4046            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4047   }
4048 
4049   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4050   /// \p CapturedInRet, then return the appropriate value for use in the
4051   /// CaptureTracker::captured() interface.
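  /// For example, storing the pointer into a global captures it in memory,
  /// returning it captures it in the return value, and a pointer-to-integer
  /// cast may capture it in an integer.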
4052   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4053                     bool CapturedInRet) {
4054     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4055                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4056     if (CapturedInMem)
4057       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4058     if (CapturedInInt)
4059       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4060     if (CapturedInRet)
4061       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
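    // Returning true stops the use traversal; do so once we can no longer
    // assume the value is at most returned, i.e., it is truly captured.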
4062     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4063   }
4064 
4065 private:
4066   /// The attributor providing in-flight abstract attributes.
4067   Attributor &A;
4068 
4069   /// The abstract attribute currently updated.
4070   AANoCapture &NoCaptureAA;
4071 
4072   /// The abstract liveness state.
4073   const AAIsDead &IsDeadAA;
4074 
4075   /// The state currently updated.
4076   AANoCapture::StateType &State;
4077 
4078   /// Set of potential copies of the tracked value.
4079   SmallVectorImpl<const Value *> &PotentialCopies;
4080 
4081   /// Global counter to limit the number of explored uses.
4082   unsigned &RemainingUsesToExplore;
4083 };
4084 
4085 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4086   const IRPosition &IRP = getIRPosition();
4087   const Value *V =
4088       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4089   if (!V)
4090     return indicatePessimisticFixpoint();
4091 
4092   const Function *F =
4093       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4094   assert(F && "Expected a function!");
4095   const IRPosition &FnPos = IRPosition::function(*F);
4096   const auto &IsDeadAA =
4097       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4098 
4099   AANoCapture::StateType T;
4100 
4101   // Readonly means we cannot capture through memory.
4102   const auto &FnMemAA =
4103       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4104   if (FnMemAA.isAssumedReadOnly()) {
4105     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4106     if (FnMemAA.isKnownReadOnly())
4107       addKnownBits(NOT_CAPTURED_IN_MEM);
4108     else
4109       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4110   }
4111 
  // Make sure all returned values are different from the underlying value.
4113   // TODO: we could do this in a more sophisticated way inside
4114   //       AAReturnedValues, e.g., track all values that escape through returns
4115   //       directly somehow.
4116   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4117     bool SeenConstant = false;
4118     for (auto &It : RVAA.returned_values()) {
4119       if (isa<Constant>(It.first)) {
4120         if (SeenConstant)
4121           return false;
4122         SeenConstant = true;
4123       } else if (!isa<Argument>(It.first) ||
4124                  It.first == getAssociatedArgument())
4125         return false;
4126     }
4127     return true;
4128   };
4129 
4130   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4131       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4132   if (NoUnwindAA.isAssumedNoUnwind()) {
4133     bool IsVoidTy = F->getReturnType()->isVoidTy();
4134     const AAReturnedValues *RVAA =
4135         IsVoidTy ? nullptr
4136                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4137                                                  /* TrackDependence */ true,
4138                                                  DepClassTy::OPTIONAL);
4139     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4140       T.addKnownBits(NOT_CAPTURED_IN_RET);
4141       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4142         return ChangeStatus::UNCHANGED;
4143       if (NoUnwindAA.isKnownNoUnwind() &&
4144           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4145         addKnownBits(NOT_CAPTURED_IN_RET);
4146         if (isKnown(NOT_CAPTURED_IN_MEM))
4147           return indicateOptimisticFixpoint();
4148       }
4149     }
4150   }
4151 
  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly update the assumed state.
4155   SmallVector<const Value *, 4> PotentialCopies;
4156   unsigned RemainingUsesToExplore =
4157       getDefaultMaxUsesToExploreForCaptureTracking();
4158   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4159                               RemainingUsesToExplore);
4160 
4161   // Check all potential copies of the associated value until we can assume
4162   // none will be captured or we have to assume at least one might be.
4163   unsigned Idx = 0;
4164   PotentialCopies.push_back(V);
4165   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4166     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4167 
4168   AANoCapture::StateType &S = getState();
4169   auto Assumed = S.getAssumed();
4170   S.intersectAssumedBits(T.getAssumed());
4171   if (!isAssumedNoCaptureMaybeReturned())
4172     return indicatePessimisticFixpoint();
4173   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4174                                    : ChangeStatus::CHANGED;
4175 }
4176 
4177 /// NoCapture attribute for function arguments.
4178 struct AANoCaptureArgument final : AANoCaptureImpl {
4179   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4180       : AANoCaptureImpl(IRP, A) {}
4181 
4182   /// See AbstractAttribute::trackStatistics()
4183   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4184 };
4185 
4186 /// NoCapture attribute for call site arguments.
4187 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4188   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4189       : AANoCaptureImpl(IRP, A) {}
4190 
4191   /// See AbstractAttribute::initialize(...).
4192   void initialize(Attributor &A) override {
4193     if (Argument *Arg = getAssociatedArgument())
4194       if (Arg->hasByValAttr())
4195         indicateOptimisticFixpoint();
4196     AANoCaptureImpl::initialize(A);
4197   }
4198 
4199   /// See AbstractAttribute::updateImpl(...).
4200   ChangeStatus updateImpl(Attributor &A) override {
4201     // TODO: Once we have call site specific value information we can provide
4202     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
4204     //       redirecting requests to the callee argument.
4205     Argument *Arg = getAssociatedArgument();
4206     if (!Arg)
4207       return indicatePessimisticFixpoint();
4208     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4209     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4210     return clampStateAndIndicateChange(
4211         getState(),
4212         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4213   }
4214 
4215   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
4217 };
4218 
4219 /// NoCapture attribute for floating values.
4220 struct AANoCaptureFloating final : AANoCaptureImpl {
4221   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4222       : AANoCaptureImpl(IRP, A) {}
4223 
4224   /// See AbstractAttribute::trackStatistics()
4225   void trackStatistics() const override {
4226     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4227   }
4228 };
4229 
4230 /// NoCapture attribute for function return value.
4231 struct AANoCaptureReturned final : AANoCaptureImpl {
4232   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4233       : AANoCaptureImpl(IRP, A) {
4234     llvm_unreachable("NoCapture is not applicable to function returns!");
4235   }
4236 
4237   /// See AbstractAttribute::initialize(...).
4238   void initialize(Attributor &A) override {
4239     llvm_unreachable("NoCapture is not applicable to function returns!");
4240   }
4241 
4242   /// See AbstractAttribute::updateImpl(...).
4243   ChangeStatus updateImpl(Attributor &A) override {
4244     llvm_unreachable("NoCapture is not applicable to function returns!");
4245   }
4246 
4247   /// See AbstractAttribute::trackStatistics()
4248   void trackStatistics() const override {}
4249 };
4250 
4251 /// NoCapture attribute deduction for a call site return value.
4252 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4253   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4254       : AANoCaptureImpl(IRP, A) {}
4255 
4256   /// See AbstractAttribute::trackStatistics()
4257   void trackStatistics() const override {
4258     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4259   }
4260 };
4261 
4262 /// ------------------ Value Simplify Attribute ----------------------------
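///
/// Value simplification tries to find a single value, often a constant or
/// undef, that can soundly stand in for the associated value. For example
/// (a sketch): if every call site passes the constant 42 for an argument,
/// the argument is assumed to simplify to 42 and its uses can be replaced
/// during manifest.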
4263 struct AAValueSimplifyImpl : AAValueSimplify {
4264   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4265       : AAValueSimplify(IRP, A) {}
4266 
4267   /// See AbstractAttribute::initialize(...).
4268   void initialize(Attributor &A) override {
4269     if (getAssociatedValue().getType()->isVoidTy())
4270       indicatePessimisticFixpoint();
4271   }
4272 
4273   /// See AbstractAttribute::getAsStr().
4274   const std::string getAsStr() const override {
4275     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4276                         : "not-simple";
4277   }
4278 
4279   /// See AbstractAttribute::trackStatistics()
4280   void trackStatistics() const override {}
4281 
4282   /// See AAValueSimplify::getAssumedSimplifiedValue()
4283   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4284     if (!getAssumed())
4285       return const_cast<Value *>(&getAssociatedValue());
4286     return SimplifiedAssociatedValue;
4287   }
4288 
  /// Helper function for querying AAValueSimplify and updating candidate.
4290   /// \param QueryingValue Value trying to unify with SimplifiedValue
4291   /// \param AccumulatedSimplifiedValue Current simplification result.
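  /// The unification is permissive with respect to undef: an undef candidate
  /// is absorbed by any concrete value, while two distinct concrete values
  /// fail to unify.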
4292   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4293                              Value &QueryingValue,
4294                              Optional<Value *> &AccumulatedSimplifiedValue) {
    // FIXME: Add typecast support.
4296 
4297     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4298         QueryingAA, IRPosition::value(QueryingValue));
4299 
4300     Optional<Value *> QueryingValueSimplified =
4301         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4302 
4303     if (!QueryingValueSimplified.hasValue())
4304       return true;
4305 
4306     if (!QueryingValueSimplified.getValue())
4307       return false;
4308 
4309     Value &QueryingValueSimplifiedUnwrapped =
4310         *QueryingValueSimplified.getValue();
4311 
4312     if (AccumulatedSimplifiedValue.hasValue() &&
4313         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4314         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4315       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4316     if (AccumulatedSimplifiedValue.hasValue() &&
4317         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4318       return true;
4319 
4320     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4321                       << " is assumed to be "
4322                       << QueryingValueSimplifiedUnwrapped << "\n");
4323 
4324     AccumulatedSimplifiedValue = QueryingValueSimplified;
4325     return true;
4326   }
4327 
4328   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4329     if (!getAssociatedValue().getType()->isIntegerTy())
4330       return false;
4331 
4332     const auto &ValueConstantRangeAA =
4333         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4334 
4335     Optional<ConstantInt *> COpt =
4336         ValueConstantRangeAA.getAssumedConstantInt(A);
4337     if (COpt.hasValue()) {
4338       if (auto *C = COpt.getValue())
4339         SimplifiedAssociatedValue = C;
4340       else
4341         return false;
4342     } else {
4343       SimplifiedAssociatedValue = llvm::None;
4344     }
4345     return true;
4346   }
4347 
4348   /// See AbstractAttribute::manifest(...).
4349   ChangeStatus manifest(Attributor &A) override {
4350     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4351 
4352     if (SimplifiedAssociatedValue.hasValue() &&
4353         !SimplifiedAssociatedValue.getValue())
4354       return Changed;
4355 
4356     Value &V = getAssociatedValue();
4357     auto *C = SimplifiedAssociatedValue.hasValue()
4358                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4359                   : UndefValue::get(V.getType());
4360     if (C) {
4361       // We can replace the AssociatedValue with the constant.
4362       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4363         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4364                           << " :: " << *this << "\n");
4365         if (A.changeValueAfterManifest(V, *C))
4366           Changed = ChangeStatus::CHANGED;
4367       }
4368     }
4369 
4370     return Changed | AAValueSimplify::manifest(A);
4371   }
4372 
4373   /// See AbstractState::indicatePessimisticFixpoint(...).
4374   ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
4377     SimplifiedAssociatedValue = &getAssociatedValue();
4378     indicateOptimisticFixpoint();
4379     return ChangeStatus::CHANGED;
4380   }
4381 
4382 protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. In the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
4387   Optional<Value *> SimplifiedAssociatedValue;
4388 };
4389 
4390 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4391   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4392       : AAValueSimplifyImpl(IRP, A) {}
4393 
4394   void initialize(Attributor &A) override {
4395     AAValueSimplifyImpl::initialize(A);
4396     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4397       indicatePessimisticFixpoint();
4398     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4399                 /* IgnoreSubsumingPositions */ true))
4400       indicatePessimisticFixpoint();
4401 
    // FIXME: This is a hack to prevent us from propagating function pointers in
4403     // the new pass manager CGSCC pass as it creates call edges the
4404     // CallGraphUpdater cannot handle yet.
4405     Value &V = getAssociatedValue();
4406     if (V.getType()->isPointerTy() &&
4407         V.getType()->getPointerElementType()->isFunctionTy() &&
4408         !A.isModulePass())
4409       indicatePessimisticFixpoint();
4410   }
4411 
4412   /// See AbstractAttribute::updateImpl(...).
4413   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly, otherwise we would write into
4415     // the replaced value and not the copy that byval creates implicitly.
4416     Argument *Arg = getAssociatedArgument();
4417     if (Arg->hasByValAttr()) {
4418       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4419       //       there is no race by not copying a constant byval.
4420       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4421       if (!MemAA.isAssumedReadOnly())
4422         return indicatePessimisticFixpoint();
4423     }
4424 
4425     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4426 
4427     auto PredForCallSite = [&](AbstractCallSite ACS) {
4428       const IRPosition &ACSArgPos =
4429           IRPosition::callsite_argument(ACS, getArgNo());
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
4432       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4433         return false;
4434 
4435       // We can only propagate thread independent values through callbacks.
4436       // This is different to direct/indirect call sites because for them we
4437       // know the thread executing the caller and callee is the same. For
4438       // callbacks this is not guaranteed, thus a thread dependent value could
4439       // be different for the caller and callee, making it invalid to propagate.
4440       Value &ArgOp = ACSArgPos.getAssociatedValue();
4441       if (ACS.isCallbackCall())
4442         if (auto *C = dyn_cast<Constant>(&ArgOp))
4443           if (C->isThreadDependent())
4444             return false;
4445       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4446     };
4447 
4448     bool AllCallSitesKnown;
4449     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4450                                 AllCallSitesKnown))
4451       if (!askSimplifiedValueForAAValueConstantRange(A))
4452         return indicatePessimisticFixpoint();
4453 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4458   }
4459 
4460   /// See AbstractAttribute::trackStatistics()
4461   void trackStatistics() const override {
4462     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4463   }
4464 };
4465 
4466 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4467   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4468       : AAValueSimplifyImpl(IRP, A) {}
4469 
4470   /// See AbstractAttribute::updateImpl(...).
4471   ChangeStatus updateImpl(Attributor &A) override {
4472     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4473 
4474     auto PredForReturned = [&](Value &V) {
4475       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4476     };
4477 
4478     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4479       if (!askSimplifiedValueForAAValueConstantRange(A))
4480         return indicatePessimisticFixpoint();
4481 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4486   }
4487 
4488   ChangeStatus manifest(Attributor &A) override {
4489     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4490 
4491     if (SimplifiedAssociatedValue.hasValue() &&
4492         !SimplifiedAssociatedValue.getValue())
4493       return Changed;
4494 
4495     Value &V = getAssociatedValue();
4496     auto *C = SimplifiedAssociatedValue.hasValue()
4497                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4498                   : UndefValue::get(V.getType());
4499     if (C) {
4500       auto PredForReturned =
4501           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4502             // We can replace the AssociatedValue with the constant.
4503             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4504               return true;
4505 
4506             for (ReturnInst *RI : RetInsts) {
4507               if (RI->getFunction() != getAnchorScope())
4508                 continue;
4509               auto *RC = C;
4510               if (RC->getType() != RI->getReturnValue()->getType())
4511                 RC = ConstantExpr::getBitCast(RC,
4512                                               RI->getReturnValue()->getType());
4513               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4514                                 << " in " << *RI << " :: " << *this << "\n");
4515               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4516                 Changed = ChangeStatus::CHANGED;
4517             }
4518             return true;
4519           };
4520       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4521     }
4522 
4523     return Changed | AAValueSimplify::manifest(A);
4524   }
4525 
4526   /// See AbstractAttribute::trackStatistics()
4527   void trackStatistics() const override {
4528     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4529   }
4530 };
4531 
4532 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4533   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4534       : AAValueSimplifyImpl(IRP, A) {}
4535 
4536   /// See AbstractAttribute::initialize(...).
4537   void initialize(Attributor &A) override {
4538     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4539     //        Needs investigation.
4540     // AAValueSimplifyImpl::initialize(A);
4541     Value &V = getAnchorValue();
4542 
    // TODO: Handle other cases as well.
4544     if (isa<Constant>(V))
4545       indicatePessimisticFixpoint();
4546   }
4547 
4548   /// See AbstractAttribute::updateImpl(...).
4549   ChangeStatus updateImpl(Attributor &A) override {
4550     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4551 
4552     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4553                             bool Stripped) -> bool {
4554       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4555       if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.
4557 
4558         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4559                           << "\n");
4560         return false;
4561       }
4562       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4563     };
4564 
4565     bool Dummy = false;
4566     if (!genericValueTraversal<AAValueSimplify, bool>(
4567             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4568             /* UseValueSimplify */ false))
4569       if (!askSimplifiedValueForAAValueConstantRange(A))
4570         return indicatePessimisticFixpoint();
4571 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4577   }
4578 
4579   /// See AbstractAttribute::trackStatistics()
4580   void trackStatistics() const override {
4581     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4582   }
4583 };
4584 
4585 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4586   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4587       : AAValueSimplifyImpl(IRP, A) {}
4588 
4589   /// See AbstractAttribute::initialize(...).
4590   void initialize(Attributor &A) override {
4591     SimplifiedAssociatedValue = &getAnchorValue();
4592     indicateOptimisticFixpoint();
4593   }
  /// See AbstractAttribute::updateImpl(...).
4595   ChangeStatus updateImpl(Attributor &A) override {
4596     llvm_unreachable(
4597         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4598   }
4599   /// See AbstractAttribute::trackStatistics()
4600   void trackStatistics() const override {
4601     STATS_DECLTRACK_FN_ATTR(value_simplify)
4602   }
4603 };
4604 
4605 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4606   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4607       : AAValueSimplifyFunction(IRP, A) {}
4608   /// See AbstractAttribute::trackStatistics()
4609   void trackStatistics() const override {
4610     STATS_DECLTRACK_CS_ATTR(value_simplify)
4611   }
4612 };
4613 
4614 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4615   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4616       : AAValueSimplifyReturned(IRP, A) {}
4617 
4618   /// See AbstractAttribute::manifest(...).
4619   ChangeStatus manifest(Attributor &A) override {
4620     return AAValueSimplifyImpl::manifest(A);
4621   }
4622 
4623   void trackStatistics() const override {
4624     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4625   }
4626 };
4627 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4628   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4629       : AAValueSimplifyFloating(IRP, A) {}
4630 
4631   void trackStatistics() const override {
4632     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4633   }
4634 };
4635 
4636 /// ----------------------- Heap-To-Stack Conversion ---------------------------
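///
/// Convert malloc/calloc/aligned_alloc calls of known, bounded size to stack
/// allocations where this is deemed safe. An illustrative sketch in IR
/// (assuming the size is below -max-heap-to-stack-size):
///
///   %p = call i8* @malloc(i64 16)   ; becomes:  %p = alloca i8, i64 16
///   ...
///   call void @free(i8* %p)         ; removed during manifest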
4637 struct AAHeapToStackImpl : public AAHeapToStack {
4638   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4639       : AAHeapToStack(IRP, A) {}
4640 
4641   const std::string getAsStr() const override {
4642     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4643   }
4644 
4645   ChangeStatus manifest(Attributor &A) override {
4646     assert(getState().isValidState() &&
4647            "Attempted to manifest an invalid state!");
4648 
4649     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4650     Function *F = getAnchorScope();
4651     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4652 
4653     for (Instruction *MallocCall : MallocCalls) {
4654       // This malloc cannot be replaced.
4655       if (BadMallocCalls.count(MallocCall))
4656         continue;
4657 
4658       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4659         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4660         A.deleteAfterManifest(*FreeCall);
4661         HasChanged = ChangeStatus::CHANGED;
4662       }
4663 
4664       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4665                         << "\n");
4666 
4667       MaybeAlign Alignment;
4668       Constant *Size;
4669       if (isCallocLikeFn(MallocCall, TLI)) {
4670         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4671         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4672         APInt TotalSize = SizeT->getValue() * Num->getValue();
4673         Size =
4674             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4675       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4676         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4677         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4678                                    ->getValue()
4679                                    .getZExtValue());
4680       } else {
4681         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4682       }
4683 
4684       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4685       Instruction *AI =
4686           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4687                          "", MallocCall->getNextNode());
4688 
4689       if (AI->getType() != MallocCall->getType())
4690         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4691                              AI->getNextNode());
4692 
4693       A.changeValueAfterManifest(*MallocCall, *AI);
4694 
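      // An invoke cannot simply be erased; keep control flow intact by
      // branching to its normal destination before deleting the call.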
4695       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4696         auto *NBB = II->getNormalDest();
4697         BranchInst::Create(NBB, MallocCall->getParent());
4698         A.deleteAfterManifest(*MallocCall);
4699       } else {
4700         A.deleteAfterManifest(*MallocCall);
4701       }
4702 
4703       // Zero out the allocated memory if it was a calloc.
4704       if (isCallocLikeFn(MallocCall, TLI)) {
4705         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4706                                    AI->getNextNode());
4707         Value *Ops[] = {
4708             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4709             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4710 
4711         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4712         Module *M = F->getParent();
4713         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4714         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4715       }
4716       HasChanged = ChangeStatus::CHANGED;
4717     }
4718 
4719     return HasChanged;
4720   }
4721 
4722   /// Collection of all malloc calls in a function.
4723   SmallSetVector<Instruction *, 4> MallocCalls;
4724 
4725   /// Collection of malloc calls that cannot be converted.
4726   DenseSet<const Instruction *> BadMallocCalls;
4727 
4728   /// A map for each malloc call to the set of associated free calls.
4729   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4730 
4731   ChangeStatus updateImpl(Attributor &A) override;
4732 };
4733 
4734 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4735   const Function *F = getAnchorScope();
4736   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4737 
4738   MustBeExecutedContextExplorer &Explorer =
4739       A.getInfoCache().getMustBeExecutedContextExplorer();
4740 
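  // A malloc is convertible if it has exactly one associated free call and
  // that free is guaranteed to be reached whenever the malloc is executed,
  // i.e., the free is found in the must-be-executed context following the
  // allocation.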
4741   auto FreeCheck = [&](Instruction &I) {
4742     const auto &Frees = FreesForMalloc.lookup(&I);
4743     if (Frees.size() != 1)
4744       return false;
4745     Instruction *UniqueFree = *Frees.begin();
4746     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4747   };
4748 
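  // Alternatively, a malloc is convertible if all transitive uses are known
  // to be harmless: loads, stores *into* the memory, and call operands that
  // are both nocapture and nofree. MustUse tracks whether a use definitely
  // refers to this allocation; PHIs and selects may mix in other pointers,
  // so a free reached through them cannot be attributed to this malloc.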
4749   auto UsesCheck = [&](Instruction &I) {
4750     bool ValidUsesOnly = true;
4751     bool MustUse = true;
4752     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4753       Instruction *UserI = cast<Instruction>(U.getUser());
4754       if (isa<LoadInst>(UserI))
4755         return true;
4756       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4757         if (SI->getValueOperand() == U.get()) {
4758           LLVM_DEBUG(dbgs()
4759                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4760           ValidUsesOnly = false;
4761         } else {
4762           // A store into the malloc'ed memory is fine.
4763         }
4764         return true;
4765       }
4766       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4767         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4768           return true;
        // Record free calls associated with this malloc.
4770         if (isFreeCall(UserI, TLI)) {
4771           if (MustUse) {
4772             FreesForMalloc[&I].insert(UserI);
4773           } else {
4774             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4775                               << *UserI << "\n");
4776             ValidUsesOnly = false;
4777           }
4778           return true;
4779         }
4780 
4781         unsigned ArgNo = CB->getArgOperandNo(&U);
4782 
4783         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4784             *this, IRPosition::callsite_argument(*CB, ArgNo));
4785 
4786         // If a callsite argument use is nofree, we are fine.
4787         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4788             *this, IRPosition::callsite_argument(*CB, ArgNo));
4789 
4790         if (!NoCaptureAA.isAssumedNoCapture() ||
4791             !ArgNoFreeAA.isAssumedNoFree()) {
4792           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4793           ValidUsesOnly = false;
4794         }
4795         return true;
4796       }
4797 
4798       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4799           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4800         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4801         Follow = true;
4802         return true;
4803       }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
4806       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4807       ValidUsesOnly = false;
4808       return true;
4809     };
4810     A.checkForAllUses(Pred, *this, I);
4811     return ValidUsesOnly;
4812   };
4813 
4814   auto MallocCallocCheck = [&](Instruction &I) {
4815     if (BadMallocCalls.count(&I))
4816       return true;
4817 
4818     bool IsMalloc = isMallocLikeFn(&I, TLI);
4819     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4820     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4821     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4822       BadMallocCalls.insert(&I);
4823       return true;
4824     }
4825 
4826     if (IsMalloc) {
4827       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4828         if (Size->getValue().ule(MaxHeapToStackSize))
4829           if (UsesCheck(I) || FreeCheck(I)) {
4830             MallocCalls.insert(&I);
4831             return true;
4832           }
4833     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4834       // Only if the alignment and sizes are constant.
4835       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4836         if (Size->getValue().ule(MaxHeapToStackSize))
4837           if (UsesCheck(I) || FreeCheck(I)) {
4838             MallocCalls.insert(&I);
4839             return true;
4840           }
4841     } else if (IsCalloc) {
4842       bool Overflow = false;
4843       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4844         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4845           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4846                   .ule(MaxHeapToStackSize))
4847             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4848               MallocCalls.insert(&I);
4849               return true;
4850             }
4851     }
4852 
4853     BadMallocCalls.insert(&I);
4854     return true;
4855   };
4856 
4857   size_t NumBadMallocs = BadMallocCalls.size();
4858 
4859   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4860 
4861   if (NumBadMallocs != BadMallocCalls.size())
4862     return ChangeStatus::CHANGED;
4863 
4864   return ChangeStatus::UNCHANGED;
4865 }
4866 
4867 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4868   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
4869       : AAHeapToStackImpl(IRP, A) {}
4870 
4871   /// See AbstractAttribute::trackStatistics().
4872   void trackStatistics() const override {
4873     STATS_DECL(
4874         MallocCalls, Function,
4875         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4876     for (auto *C : MallocCalls)
4877       if (!BadMallocCalls.count(C))
4878         ++BUILD_STAT_NAME(MallocCalls, Function);
4879   }
4880 };
4881 
4882 /// ----------------------- Privatizable Pointers ------------------------------
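///
/// Privatization replaces a pointer argument by the value(s) it points to:
/// call sites load the constituents and pass them by value, while the callee
/// reconstructs a private copy in a fresh alloca. A sketch for the byval
/// case:
///
///   define void @f(i32* byval %p)   ; before
///   define void @f(i32 %p.val)      ; after, %p is rebuilt in an alloca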
4883 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4884   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
4885       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
4886 
4887   ChangeStatus indicatePessimisticFixpoint() override {
4888     AAPrivatizablePtr::indicatePessimisticFixpoint();
4889     PrivatizableType = nullptr;
4890     return ChangeStatus::CHANGED;
4891   }
4892 
  /// Identify the type we can choose for a private copy of the underlying
4894   /// argument. None means it is not clear yet, nullptr means there is none.
4895   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4896 
4897   /// Return a privatizable type that encloses both T0 and T1.
4898   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4899   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4900     if (!T0.hasValue())
4901       return T1;
4902     if (!T1.hasValue())
4903       return T0;
4904     if (T0 == T1)
4905       return T0;
4906     return nullptr;
4907   }
4908 
4909   Optional<Type *> getPrivatizableType() const override {
4910     return PrivatizableType;
4911   }
4912 
4913   const std::string getAsStr() const override {
4914     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4915   }
4916 
4917 protected:
4918   Optional<Type *> PrivatizableType;
4919 };
4920 
4921 // TODO: Do this for call site arguments (probably also other values) as well.
4922 
4923 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4924   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
4925       : AAPrivatizablePtrImpl(IRP, A) {}
4926 
4927   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4928   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4929     // If this is a byval argument and we know all the call sites (so we can
4930     // rewrite them), there is no need to check them explicitly.
4931     bool AllCallSitesKnown;
4932     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4933         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4934                                true, AllCallSitesKnown))
4935       return getAssociatedValue().getType()->getPointerElementType();
4936 
4937     Optional<Type *> Ty;
4938     unsigned ArgNo = getIRPosition().getArgNo();
4939 
4940     // Make sure the associated call site argument has the same type at all call
4941     // sites and it is an allocation we know is safe to privatize, for now that
4942     // means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
4944     //       the type from that information instead. That is a little more
4945     //       involved and will be done in a follow up patch.
4946     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4947       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
4950       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4951         return false;
4952 
4953       // Check that all call sites agree on a type.
4954       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4955       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4956 
4957       LLVM_DEBUG({
4958         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4959         if (CSTy.hasValue() && CSTy.getValue())
4960           CSTy.getValue()->print(dbgs());
4961         else if (CSTy.hasValue())
4962           dbgs() << "<nullptr>";
4963         else
4964           dbgs() << "<none>";
4965       });
4966 
4967       Ty = combineTypes(Ty, CSTy);
4968 
4969       LLVM_DEBUG({
4970         dbgs() << " : New Type: ";
4971         if (Ty.hasValue() && Ty.getValue())
4972           Ty.getValue()->print(dbgs());
4973         else if (Ty.hasValue())
4974           dbgs() << "<nullptr>";
4975         else
4976           dbgs() << "<none>";
4977         dbgs() << "\n";
4978       });
4979 
4980       return !Ty.hasValue() || Ty.getValue();
4981     };
4982 
4983     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4984       return nullptr;
4985     return Ty;
4986   }
4987 
4988   /// See AbstractAttribute::updateImpl(...).
4989   ChangeStatus updateImpl(Attributor &A) override {
4990     PrivatizableType = identifyPrivatizableType(A);
4991     if (!PrivatizableType.hasValue())
4992       return ChangeStatus::UNCHANGED;
4993     if (!PrivatizableType.getValue())
4994       return indicatePessimisticFixpoint();
4995 
4996     // Avoid arguments with padding for now.
4997     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
4998         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
4999                                                 A.getInfoCache().getDL())) {
5000       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5001       return indicatePessimisticFixpoint();
5002     }
5003 
5004     // Verify callee and caller agree on how the promoted argument would be
5005     // passed.
5006     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5007     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5008     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5009     Function &Fn = *getIRPosition().getAnchorScope();
5010     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5011     ArgsToPromote.insert(getAssociatedArgument());
5012     const auto *TTI =
5013         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5014     if (!TTI ||
5015         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5016             Fn, *TTI, ArgsToPromote, Dummy) ||
5017         ArgsToPromote.empty()) {
5018       LLVM_DEBUG(
5019           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5020                  << Fn.getName() << "\n");
5021       return indicatePessimisticFixpoint();
5022     }
5023 
5024     // Collect the types that will replace the privatizable type in the function
5025     // signature.
5026     SmallVector<Type *, 16> ReplacementTypes;
5027     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5028 
5029     // Register a rewrite of the argument.
5030     Argument *Arg = getAssociatedArgument();
5031     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5032       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5033       return indicatePessimisticFixpoint();
5034     }
5035 
5036     unsigned ArgNo = Arg->getArgNo();
5037 
5038     // Helper to check if for the given call site the associated argument is
5039     // passed to a callback where the privatization would be different.
5040     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5041       SmallVector<const Use *, 4> CallbackUses;
5042       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5043       for (const Use *U : CallbackUses) {
5044         AbstractCallSite CBACS(U);
5045         assert(CBACS && CBACS.isCallbackCall());
5046         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5047           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5048 
5049           LLVM_DEBUG({
5050             dbgs()
5051                 << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its parent ("
5053                 << Arg->getParent()->getName()
5054                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5055                    "callback ("
5056                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5057                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5058                 << CBACS.getCallArgOperand(CBArg) << " vs "
5059                 << CB.getArgOperand(ArgNo) << "\n"
5060                 << "[AAPrivatizablePtr] " << CBArg << " : "
5061                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5062           });
5063 
5064           if (CBArgNo != int(ArgNo))
5065             continue;
5066           const auto &CBArgPrivAA =
5067               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5068           if (CBArgPrivAA.isValidState()) {
5069             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5070             if (!CBArgPrivTy.hasValue())
5071               continue;
5072             if (CBArgPrivTy.getValue() == PrivatizableType)
5073               continue;
5074           }
5075 
5076           LLVM_DEBUG({
5077             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5078                    << " cannot be privatized in the context of its parent ("
5079                    << Arg->getParent()->getName()
5080                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5081                       "callback ("
5082                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5083                    << ").\n[AAPrivatizablePtr] for which the argument "
5084                       "privatization is not compatible.\n";
5085           });
5086           return false;
5087         }
5088       }
5089       return true;
5090     };
5091 
5092     // Helper to check if for the given call site the associated argument is
5093     // passed to a direct call where the privatization would be different.
5094     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5095       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5096       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5097       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5098              "Expected a direct call operand for callback call operand");
5099 
5100       LLVM_DEBUG({
5101         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its parent ("
5103                << Arg->getParent()->getName()
5104                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5105                   "direct call of ("
5106                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5107                << ").\n";
5108       });
5109 
5110       Function *DCCallee = DC->getCalledFunction();
5111       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5112         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5113             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5114         if (DCArgPrivAA.isValidState()) {
5115           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5116           if (!DCArgPrivTy.hasValue())
5117             return true;
5118           if (DCArgPrivTy.getValue() == PrivatizableType)
5119             return true;
5120         }
5121       }
5122 
5123       LLVM_DEBUG({
5124         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5125                << " cannot be privatized in the context of its parent ("
5126                << Arg->getParent()->getName()
5127                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5128                   "direct call of ("
5129                << ACS.getInstruction()->getCalledFunction()->getName()
5130                << ").\n[AAPrivatizablePtr] for which the argument "
5131                   "privatization is not compatible.\n";
5132       });
5133       return false;
5134     };
5135 
5136     // Helper to check if the associated argument is used at the given abstract
5137     // call site in a way that is incompatible with the privatization assumed
5138     // here.
5139     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5140       if (ACS.isDirectCall())
5141         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5142       if (ACS.isCallbackCall())
5143         return IsCompatiblePrivArgOfDirectCS(ACS);
5144       return false;
5145     };
5146 
5147     bool AllCallSitesKnown;
5148     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5149                                 AllCallSitesKnown))
5150       return indicatePessimisticFixpoint();
5151 
5152     return ChangeStatus::UNCHANGED;
5153   }
5154 
  /// Given a type to privatize, \p PrivType, collect the constituents (which
  /// are used) in \p ReplacementTypes.
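  /// For example, `{ i32, i64 }` yields `i32, i64`, and `[2 x float]` yields
  /// `float, float`; nested aggregates are not expanded further.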
5157   static void
5158   identifyReplacementTypes(Type *PrivType,
5159                            SmallVectorImpl<Type *> &ReplacementTypes) {
5160     // TODO: For now we expand the privatization type to the fullest which can
5161     //       lead to dead arguments that need to be removed later.
5162     assert(PrivType && "Expected privatizable type!");
5163 
    // Traverse the type, extract constituent types on the outermost level.
5165     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5166       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5167         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5168     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5169       ReplacementTypes.append(PrivArrayType->getNumElements(),
5170                               PrivArrayType->getElementType());
5171     } else {
5172       ReplacementTypes.push_back(PrivType);
5173     }
5174   }
5175 
5176   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5177   /// The values needed are taken from the arguments of \p F starting at
5178   /// position \p ArgNo.
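  /// For example, for a privatized `{ i32, i64 }` two stores are emitted that
  /// write the two fresh arguments into the corresponding fields of \p Base.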
5179   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5180                                    unsigned ArgNo, Instruction &IP) {
5181     assert(PrivType && "Expected privatizable type!");
5182 
5183     IRBuilder<NoFolder> IRB(&IP);
5184     const DataLayout &DL = F.getParent()->getDataLayout();
5185 
5186     // Traverse the type, build GEPs and stores.
5187     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5188       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5189       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5190         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5191         Value *Ptr = constructPointer(
5192             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5193         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5194       }
5195     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      Type *PointeePtrTy = PointeeTy->getPointerTo();
      // The stride between elements is the element type's store size, not the
      // store size of a pointer to it.
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5198       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5199         Value *Ptr =
5200             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5201         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5202       }
5203     } else {
5204       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5205     }
5206   }
5207 
5208   /// Extract values from \p Base according to the type \p PrivType at the
5209   /// call position \p ACS. The values are appended to \p ReplacementValues.
5210   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5211                                Value *Base,
5212                                SmallVectorImpl<Value *> &ReplacementValues) {
5213     assert(Base && "Expected base value!");
5214     assert(PrivType && "Expected privatizable type!");
5215     Instruction *IP = ACS.getInstruction();
5216 
5217     IRBuilder<NoFolder> IRB(IP);
5218     const DataLayout &DL = IP->getModule()->getDataLayout();
5219 
5220     if (Base->getType()->getPointerElementType() != PrivType)
5221       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5222                                                  "", ACS.getInstruction());
5223 
5224     // TODO: Improve the alignment of the loads.
5225     // Traverse the type, build GEPs and loads.
5226     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5227       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5228       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5229         Type *PointeeTy = PrivStructType->getElementType(u);
5230         Value *Ptr =
5231             constructPointer(PointeeTy->getPointerTo(), Base,
5232                              PrivStructLayout->getElementOffset(u), IRB, DL);
5233         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5234         L->setAlignment(Align(1));
5235         ReplacementValues.push_back(L);
5236       }
5237     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5238       Type *PointeeTy = PrivArrayType->getElementType();
5239       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5240       Type *PointeePtrTy = PointeeTy->getPointerTo();
5241       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5242         Value *Ptr =
5243             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5245         L->setAlignment(Align(1));
5246         ReplacementValues.push_back(L);
5247       }
5248     } else {
5249       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5250       L->setAlignment(Align(1));
5251       ReplacementValues.push_back(L);
5252     }
5253   }
5254 
5255   /// See AbstractAttribute::manifest(...)
5256   ChangeStatus manifest(Attributor &A) override {
5257     if (!PrivatizableType.hasValue())
5258       return ChangeStatus::UNCHANGED;
5259     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5260 
5261     // Collect all tail calls in the function as we cannot allow new allocas to
5262     // escape into tail recursion.
5263     // TODO: Be smarter about new allocas escaping into tail calls.
5264     SmallVector<CallInst *, 16> TailCalls;
5265     if (!A.checkForAllInstructions(
5266             [&](Instruction &I) {
5267               CallInst &CI = cast<CallInst>(I);
5268               if (CI.isTailCall())
5269                 TailCalls.push_back(&CI);
5270               return true;
5271             },
5272             *this, {Instruction::Call}))
5273       return ChangeStatus::UNCHANGED;
5274 
5275     Argument *Arg = getAssociatedArgument();
5276 
5277     // Callback to repair the associated function. A new alloca is placed at the
5278     // beginning and initialized with the values passed through arguments. The
5279     // new alloca replaces the use of the old pointer argument.
5280     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5281         [=](const Attributor::ArgumentReplacementInfo &ARI,
5282             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5283           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5284           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5285           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5286                                     Arg->getName() + ".priv", IP);
5287           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5288                                ArgIt->getArgNo(), *IP);
5289           Arg->replaceAllUsesWith(AI);
5290 
5291           for (CallInst *CI : TailCalls)
5292             CI->setTailCall(false);
5293         };
5294 
5295     // Callback to repair a call site of the associated function. The elements
5296     // of the privatizable type are loaded prior to the call and passed to the
5297     // new function version.
5298     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5299         [=](const Attributor::ArgumentReplacementInfo &ARI,
5300             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5301           createReplacementValues(
5302               PrivatizableType.getValue(), ACS,
5303               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5304               NewArgOperands);
5305         };
5306 
5307     // Collect the types that will replace the privatizable type in the function
5308     // signature.
5309     SmallVector<Type *, 16> ReplacementTypes;
5310     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5311 
5312     // Register a rewrite of the argument.
5313     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5314                                            std::move(FnRepairCB),
5315                                            std::move(ACSRepairCB)))
5316       return ChangeStatus::CHANGED;
5317     return ChangeStatus::UNCHANGED;
5318   }
5319 
5320   /// See AbstractAttribute::trackStatistics()
5321   void trackStatistics() const override {
5322     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5323   }
5324 };
5325 
5326 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5327   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5328       : AAPrivatizablePtrImpl(IRP, A) {}
5329 
5330   /// See AbstractAttribute::initialize(...).
5331   virtual void initialize(Attributor &A) override {
5332     // TODO: We can privatize more than arguments.
5333     indicatePessimisticFixpoint();
5334   }
5335 
5336   ChangeStatus updateImpl(Attributor &A) override {
5337     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5338                      "updateImpl will not be called");
5339   }
5340 
5341   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5342   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5343     Value *Obj =
5344         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5345     if (!Obj) {
5346       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5347       return nullptr;
5348     }
5349 
5350     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5351       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5352         if (CI->isOne())
5353           return Obj->getType()->getPointerElementType();
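    // (Only a static, single-element `alloca T` qualifies above; a dynamic
    // `alloca T, i32 %n` covers an unknown number of elements and is not
    // privatizable here.)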
5354     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5355       auto &PrivArgAA =
5356           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5357       if (PrivArgAA.isAssumedPrivatizablePtr())
5358         return Obj->getType()->getPointerElementType();
5359     }
5360 
5361     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5362                          "alloca nor privatizable argument: "
5363                       << *Obj << "!\n");
5364     return nullptr;
5365   }
5366 
5367   /// See AbstractAttribute::trackStatistics()
5368   void trackStatistics() const override {
5369     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5370   }
5371 };
5372 
5373 struct AAPrivatizablePtrCallSiteArgument final
5374     : public AAPrivatizablePtrFloating {
5375   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5376       : AAPrivatizablePtrFloating(IRP, A) {}
5377 
5378   /// See AbstractAttribute::initialize(...).
5379   void initialize(Attributor &A) override {
5380     if (getIRPosition().hasAttr(Attribute::ByVal))
5381       indicateOptimisticFixpoint();
5382   }
5383 
5384   /// See AbstractAttribute::updateImpl(...).
5385   ChangeStatus updateImpl(Attributor &A) override {
5386     PrivatizableType = identifyPrivatizableType(A);
5387     if (!PrivatizableType.hasValue())
5388       return ChangeStatus::UNCHANGED;
5389     if (!PrivatizableType.getValue())
5390       return indicatePessimisticFixpoint();
5391 
5392     const IRPosition &IRP = getIRPosition();
5393     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5394     if (!NoCaptureAA.isAssumedNoCapture()) {
5395       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5396       return indicatePessimisticFixpoint();
5397     }
5398 
5399     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5400     if (!NoAliasAA.isAssumedNoAlias()) {
5401       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5402       return indicatePessimisticFixpoint();
5403     }
5404 
5405     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5406     if (!MemBehaviorAA.isAssumedReadOnly()) {
5407       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5408       return indicatePessimisticFixpoint();
5409     }
5410 
5411     return ChangeStatus::UNCHANGED;
5412   }
5413 
5414   /// See AbstractAttribute::trackStatistics()
5415   void trackStatistics() const override {
5416     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5417   }
5418 };
5419 
5420 struct AAPrivatizablePtrCallSiteReturned final
5421     : public AAPrivatizablePtrFloating {
5422   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5423       : AAPrivatizablePtrFloating(IRP, A) {}
5424 
5425   /// See AbstractAttribute::initialize(...).
5426   void initialize(Attributor &A) override {
5427     // TODO: We can privatize more than arguments.
5428     indicatePessimisticFixpoint();
5429   }
5430 
5431   /// See AbstractAttribute::trackStatistics()
5432   void trackStatistics() const override {
5433     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5434   }
5435 };
5436 
5437 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5438   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5439       : AAPrivatizablePtrFloating(IRP, A) {}
5440 
5441   /// See AbstractAttribute::initialize(...).
5442   void initialize(Attributor &A) override {
5443     // TODO: We can privatize more than arguments.
5444     indicatePessimisticFixpoint();
5445   }
5446 
5447   /// See AbstractAttribute::trackStatistics()
5448   void trackStatistics() const override {
5449     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5450   }
5451 };
5452 
5453 /// -------------------- Memory Behavior Attributes ----------------------------
5454 /// Includes read-none, read-only, and write-only.
5455 /// ----------------------------------------------------------------------------
5456 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5457   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5458       : AAMemoryBehavior(IRP, A) {}
5459 
5460   /// See AbstractAttribute::initialize(...).
5461   void initialize(Attributor &A) override {
5462     intersectAssumedBits(BEST_STATE);
5463     getKnownStateFromValue(getIRPosition(), getState());
5464     IRAttribute::initialize(A);
5465   }
5466 
5467   /// Return the memory behavior information encoded in the IR for \p IRP.
5468   static void getKnownStateFromValue(const IRPosition &IRP,
5469                                      BitIntegerState &State,
5470                                      bool IgnoreSubsumingPositions = false) {
5471     SmallVector<Attribute, 2> Attrs;
5472     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5473     for (const Attribute &Attr : Attrs) {
5474       switch (Attr.getKindAsEnum()) {
5475       case Attribute::ReadNone:
5476         State.addKnownBits(NO_ACCESSES);
5477         break;
5478       case Attribute::ReadOnly:
5479         State.addKnownBits(NO_WRITES);
5480         break;
5481       case Attribute::WriteOnly:
5482         State.addKnownBits(NO_READS);
5483         break;
5484       default:
5485         llvm_unreachable("Unexpected attribute!");
5486       }
5487     }
5488 
5489     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5490       if (!I->mayReadFromMemory())
5491         State.addKnownBits(NO_READS);
5492       if (!I->mayWriteToMemory())
5493         State.addKnownBits(NO_WRITES);
5494     }
5495   }
5496 
5497   /// See AbstractAttribute::getDeducedAttributes(...).
5498   void getDeducedAttributes(LLVMContext &Ctx,
5499                             SmallVectorImpl<Attribute> &Attrs) const override {
5500     assert(Attrs.size() == 0);
5501     if (isAssumedReadNone())
5502       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5503     else if (isAssumedReadOnly())
5504       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5505     else if (isAssumedWriteOnly())
5506       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5507     assert(Attrs.size() <= 1);
5508   }
5509 
5510   /// See AbstractAttribute::manifest(...).
5511   ChangeStatus manifest(Attributor &A) override {
5512     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5513       return ChangeStatus::UNCHANGED;
5514 
5515     const IRPosition &IRP = getIRPosition();
5516 
5517     // Check if we would improve the existing attributes first.
5518     SmallVector<Attribute, 4> DeducedAttrs;
5519     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5520     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5521           return IRP.hasAttr(Attr.getKindAsEnum(),
5522                              /* IgnoreSubsumingPositions */ true);
5523         }))
5524       return ChangeStatus::UNCHANGED;
5525 
5526     // Clear existing attributes.
5527     IRP.removeAttrs(AttrKinds);
5528 
5529     // Use the generic manifest method.
5530     return IRAttribute::manifest(A);
5531   }
5532 
5533   /// See AbstractState::getAsStr().
5534   const std::string getAsStr() const override {
5535     if (isAssumedReadNone())
5536       return "readnone";
5537     if (isAssumedReadOnly())
5538       return "readonly";
5539     if (isAssumedWriteOnly())
5540       return "writeonly";
5541     return "may-read/write";
5542   }
5543 
5544   /// The set of IR attributes AAMemoryBehavior deals with.
5545   static const Attribute::AttrKind AttrKinds[3];
5546 };
5547 
5548 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5549     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
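
// A minimal sketch of the readnone/readonly/writeonly encoding, assuming the
// NO_READS/NO_WRITES bit definitions from Attributor.h (where NO_ACCESSES ==
// NO_READS | NO_WRITES):
//   readnone  <=> NO_ACCESSES assumed/known,
//   readonly  <=> NO_WRITES assumed/known,
//   writeonly <=> NO_READS assumed/known.
// Observing a store through the value thus removes NO_WRITES, and the
// strongest remaining deduction is writeonly (NO_READS).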
5550 
5551 /// Memory behavior attribute for a floating value.
5552 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5553   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5554       : AAMemoryBehaviorImpl(IRP, A) {}
5555 
5556   /// See AbstractAttribute::initialize(...).
5557   void initialize(Attributor &A) override {
5558     AAMemoryBehaviorImpl::initialize(A);
5559     // Initialize the use vector with all direct uses of the associated value.
5560     for (const Use &U : getAssociatedValue().uses())
5561       Uses.insert(&U);
5562   }
5563 
5564   /// See AbstractAttribute::updateImpl(...).
5565   ChangeStatus updateImpl(Attributor &A) override;
5566 
5567   /// See AbstractAttribute::trackStatistics()
5568   void trackStatistics() const override {
5569     if (isAssumedReadNone())
5570       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5571     else if (isAssumedReadOnly())
5572       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5573     else if (isAssumedWriteOnly())
5574       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5575   }
5576 
5577 private:
5578   /// Return true if users of \p UserI might access the underlying
5579   /// variable/location described by \p U and should therefore be analyzed.
5580   bool followUsersOfUseIn(Attributor &A, const Use *U,
5581                           const Instruction *UserI);
5582 
5583   /// Update the state according to the effect of use \p U in \p UserI.
5584   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5585 
5586 protected:
5587   /// Container for (transitive) uses of the associated argument.
5588   SetVector<const Use *> Uses;
5589 };
5590 
5591 /// Memory behavior attribute for function argument.
5592 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5593   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5594       : AAMemoryBehaviorFloating(IRP, A) {}
5595 
5596   /// See AbstractAttribute::initialize(...).
5597   void initialize(Attributor &A) override {
5598     intersectAssumedBits(BEST_STATE);
5599     const IRPosition &IRP = getIRPosition();
5600     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5601     // can query it when we use has/getAttr. That would allow us to reuse the
5602     // initialize of the base class here.
5603     bool HasByVal =
5604         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5605     getKnownStateFromValue(IRP, getState(),
5606                            /* IgnoreSubsumingPositions */ HasByVal);
5607 
5608     // Initialize the use vector with all direct uses of the associated value.
5609     Argument *Arg = getAssociatedArgument();
5610     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5611       indicatePessimisticFixpoint();
5612     } else {
5613       // Initialize the use vector with all direct uses of the associated value.
5614       for (const Use &U : Arg->uses())
5615         Uses.insert(&U);
5616     }
5617   }
5618 
5619   ChangeStatus manifest(Attributor &A) override {
5620     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5621     if (!getAssociatedValue().getType()->isPointerTy())
5622       return ChangeStatus::UNCHANGED;
5623 
5624     // TODO: From readattrs.ll: "inalloca parameters are always
5625     //                           considered written"
5626     if (hasAttr({Attribute::InAlloca})) {
5627       removeKnownBits(NO_WRITES);
5628       removeAssumedBits(NO_WRITES);
5629     }
5630     return AAMemoryBehaviorFloating::manifest(A);
5631   }
5632 
5633   /// See AbstractAttribute::trackStatistics()
5634   void trackStatistics() const override {
5635     if (isAssumedReadNone())
5636       STATS_DECLTRACK_ARG_ATTR(readnone)
5637     else if (isAssumedReadOnly())
5638       STATS_DECLTRACK_ARG_ATTR(readonly)
5639     else if (isAssumedWriteOnly())
5640       STATS_DECLTRACK_ARG_ATTR(writeonly)
5641   }
5642 };
5643 
5644 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5645   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5646       : AAMemoryBehaviorArgument(IRP, A) {}
5647 
5648   /// See AbstractAttribute::initialize(...).
5649   void initialize(Attributor &A) override {
5650     if (Argument *Arg = getAssociatedArgument()) {
5651       if (Arg->hasByValAttr()) {
5652         addKnownBits(NO_WRITES);
5653         removeKnownBits(NO_READS);
5654         removeAssumedBits(NO_READS);
5655       }
5656     }
5657     AAMemoryBehaviorArgument::initialize(A);
5658   }
5659 
5660   /// See AbstractAttribute::updateImpl(...).
5661   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
5666     Argument *Arg = getAssociatedArgument();
5667     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5668     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5669     return clampStateAndIndicateChange(
5670         getState(),
5671         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5672   }
5673 
5674   /// See AbstractAttribute::trackStatistics()
5675   void trackStatistics() const override {
5676     if (isAssumedReadNone())
5677       STATS_DECLTRACK_CSARG_ATTR(readnone)
5678     else if (isAssumedReadOnly())
5679       STATS_DECLTRACK_CSARG_ATTR(readonly)
5680     else if (isAssumedWriteOnly())
5681       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5682   }
5683 };
5684 
5685 /// Memory behavior attribute for a call site return position.
5686 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5687   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5688       : AAMemoryBehaviorFloating(IRP, A) {}
5689 
5690   /// See AbstractAttribute::manifest(...).
5691   ChangeStatus manifest(Attributor &A) override {
5692     // We do not annotate returned values.
5693     return ChangeStatus::UNCHANGED;
5694   }
5695 
5696   /// See AbstractAttribute::trackStatistics()
5697   void trackStatistics() const override {}
5698 };
5699 
5700 /// An AA to represent the memory behavior function attributes.
5701 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5702   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5703       : AAMemoryBehaviorImpl(IRP, A) {}
5704 
5705   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;
5707 
5708   /// See AbstractAttribute::manifest(...).
5709   ChangeStatus manifest(Attributor &A) override {
5710     Function &F = cast<Function>(getAnchorValue());
5711     if (isAssumedReadNone()) {
5712       F.removeFnAttr(Attribute::ArgMemOnly);
5713       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5714       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5715     }
5716     return AAMemoryBehaviorImpl::manifest(A);
5717   }
5718 
5719   /// See AbstractAttribute::trackStatistics()
5720   void trackStatistics() const override {
5721     if (isAssumedReadNone())
5722       STATS_DECLTRACK_FN_ATTR(readnone)
5723     else if (isAssumedReadOnly())
5724       STATS_DECLTRACK_FN_ATTR(readonly)
5725     else if (isAssumedWriteOnly())
5726       STATS_DECLTRACK_FN_ATTR(writeonly)
5727   }
5728 };
5729 
5730 /// AAMemoryBehavior attribute for call sites.
5731 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5732   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5733       : AAMemoryBehaviorImpl(IRP, A) {}
5734 
5735   /// See AbstractAttribute::initialize(...).
5736   void initialize(Attributor &A) override {
5737     AAMemoryBehaviorImpl::initialize(A);
5738     Function *F = getAssociatedFunction();
5739     if (!F || !A.isFunctionIPOAmendable(*F)) {
5740       indicatePessimisticFixpoint();
5741       return;
5742     }
5743   }
5744 
5745   /// See AbstractAttribute::updateImpl(...).
5746   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
5751     Function *F = getAssociatedFunction();
5752     const IRPosition &FnPos = IRPosition::function(*F);
5753     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5754     return clampStateAndIndicateChange(
5755         getState(),
5756         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5757   }
5758 
5759   /// See AbstractAttribute::trackStatistics()
5760   void trackStatistics() const override {
5761     if (isAssumedReadNone())
5762       STATS_DECLTRACK_CS_ATTR(readnone)
5763     else if (isAssumedReadOnly())
5764       STATS_DECLTRACK_CS_ATTR(readonly)
5765     else if (isAssumedWriteOnly())
5766       STATS_DECLTRACK_CS_ATTR(writeonly)
5767   }
5768 };
5769 
5770 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5771 
5772   // The current assumed state used to determine a change.
5773   auto AssumedState = getAssumed();
5774 
5775   auto CheckRWInst = [&](Instruction &I) {
5776     // If the instruction has an own memory behavior state, use it to restrict
5777     // the local state. No further analysis is required as the other memory
5778     // state is as optimistic as it gets.
5779     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5780       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5781           *this, IRPosition::callsite_function(*CB));
5782       intersectAssumedBits(MemBehaviorAA.getAssumed());
5783       return !isAtFixpoint();
5784     }
5785 
5786     // Remove access kind modifiers if necessary.
5787     if (I.mayReadFromMemory())
5788       removeAssumedBits(NO_READS);
5789     if (I.mayWriteToMemory())
5790       removeAssumedBits(NO_WRITES);
5791     return !isAtFixpoint();
5792   };
5793 
5794   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5795     return indicatePessimisticFixpoint();
5796 
5797   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5798                                         : ChangeStatus::UNCHANGED;
5799 }
5800 
5801 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5802 
5803   const IRPosition &IRP = getIRPosition();
5804   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5805   AAMemoryBehavior::StateType &S = getState();
5806 
  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval arguments.
5810   Argument *Arg = IRP.getAssociatedArgument();
5811   AAMemoryBehavior::base_t FnMemAssumedState =
5812       AAMemoryBehavior::StateType::getWorstState();
5813   if (!Arg || !Arg->hasByValAttr()) {
5814     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5815         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5816     FnMemAssumedState = FnMemAA.getAssumed();
5817     S.addKnownBits(FnMemAA.getKnown());
5818     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5819       return ChangeStatus::UNCHANGED;
5820   }
5821 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
5826   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5827       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5828   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5829     S.intersectAssumedBits(FnMemAssumedState);
5830     return ChangeStatus::CHANGED;
5831   }
5832 
5833   // The current assumed state used to determine a change.
5834   auto AssumedState = S.getAssumed();
5835 
5836   // Liveness information to exclude dead users.
5837   // TODO: Take the FnPos once we have call site specific liveness information.
5838   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5839       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5840       /* TrackDependence */ false);
5841 
5842   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5843   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5844     const Use *U = Uses[i];
5845     Instruction *UserI = cast<Instruction>(U->getUser());
5846     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5847                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5848                       << "]\n");
5849     if (A.isAssumedDead(*U, this, &LivenessAA))
5850       continue;
5851 
    // Skip droppable users, e.g., llvm::assume, as they do not actually
    // perform any action.
5853     if (UserI->isDroppable())
5854       continue;
5855 
5856     // Check if the users of UserI should also be visited.
5857     if (followUsersOfUseIn(A, U, UserI))
5858       for (const Use &UserIUse : UserI->uses())
5859         Uses.insert(&UserIUse);
5860 
5861     // If UserI might touch memory we analyze the use in detail.
5862     if (UserI->mayReadOrWriteMemory())
5863       analyzeUseIn(A, U, UserI);
5864   }
5865 
5866   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5867                                         : ChangeStatus::UNCHANGED;
5868 }
5869 
5870 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5871                                                   const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, so there is no
  // need to follow the users of the load.
5874   if (isa<LoadInst>(UserI))
5875     return false;
5876 
  // By default we follow all uses assuming UserI might leak information on U.
  // We have special handling for call site operands though.
5879   const auto *CB = dyn_cast<CallBase>(UserI);
5880   if (!CB || !CB->isArgOperand(U))
5881     return true;
5882 
  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
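  // For example (hypothetical IR):
  //   %q = call i8* @passthrough(i8* %p)
  // Here %p may escape only by being returned as %q, so the users of %q can
  // still reach the underlying memory and have to be visited.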
5889   if (U->get()->getType()->isPointerTy()) {
5890     unsigned ArgNo = CB->getArgOperandNo(U);
5891     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5892         *this, IRPosition::callsite_argument(*CB, ArgNo),
5893         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5894     return !ArgNoCaptureAA.isAssumedNoCapture();
5895   }
5896 
5897   return true;
5898 }
5899 
5900 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5901                                             const Instruction *UserI) {
5902   assert(UserI->mayReadOrWriteMemory());
5903 
5904   switch (UserI->getOpcode()) {
5905   default:
5906     // TODO: Handle all atomics and other side-effect operations we know of.
5907     break;
5908   case Instruction::Load:
5909     // Loads cause the NO_READS property to disappear.
5910     removeAssumedBits(NO_READS);
5911     return;
5912 
5913   case Instruction::Store:
5914     // Stores cause the NO_WRITES property to disappear if the use is the
5915     // pointer operand. Note that we do assume that capturing was taken care of
5916     // somewhere else.
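    // For example (hypothetical IR):
    //   store i32 0, i32* %p    ; U == %p is the pointer operand -> clears
    //                           ;   NO_WRITES
    //   store i32* %p, i32** %q ; U == %p is the value operand -> no change
    //                           ;   here; capturing is handled elsewhere.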
5917     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5918       removeAssumedBits(NO_WRITES);
5919     return;
5920 
5921   case Instruction::Call:
5922   case Instruction::CallBr:
5923   case Instruction::Invoke: {
5924     // For call sites we look at the argument memory behavior attribute (this
5925     // could be recursive!) in order to restrict our own state.
5926     const auto *CB = cast<CallBase>(UserI);
5927 
5928     // Give up on operand bundles.
5929     if (CB->isBundleOperand(U)) {
5930       indicatePessimisticFixpoint();
5931       return;
5932     }
5933 
    // Calling a function does read the function pointer; it may also write it
    // if the function is self-modifying.
5936     if (CB->isCallee(U)) {
5937       removeAssumedBits(NO_READS);
5938       break;
5939     }
5940 
5941     // Adjust the possible access behavior based on the information on the
5942     // argument.
5943     IRPosition Pos;
5944     if (U->get()->getType()->isPointerTy())
5945       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
5946     else
5947       Pos = IRPosition::callsite_function(*CB);
5948     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5949         *this, Pos,
5950         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5951     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5952     // and at least "known".
5953     intersectAssumedBits(MemBehaviorAA.getAssumed());
5954     return;
5955   }
5956   };
5957 
5958   // Generally, look at the "may-properties" and adjust the assumed state if we
5959   // did not trigger special handling before.
5960   if (UserI->mayReadFromMemory())
5961     removeAssumedBits(NO_READS);
5962   if (UserI->mayWriteToMemory())
5963     removeAssumedBits(NO_WRITES);
5964 }
5965 
5966 } // namespace
5967 
5968 /// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly, and
/// inaccessiblememorargmemonly.
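/// (A small worked example, assuming the NO_* bit encoding from Attributor.h:
/// a state in which only stack and argument memory may be accessed keeps all
/// NO_* bits set except NO_LOCAL_MEM and NO_ARGUMENT_MEM, and
/// getMemoryLocationsAsStr renders it as "memory:stack,argument".)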
5971 /// ----------------------------------------------------------------------------
5972 
5973 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5974     AAMemoryLocation::MemoryLocationsKind MLK) {
5975   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5976     return "all memory";
5977   if (MLK == AAMemoryLocation::NO_LOCATIONS)
5978     return "no memory";
5979   std::string S = "memory:";
5980   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
5981     S += "stack,";
5982   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
5983     S += "constant,";
5984   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
5985     S += "internal global,";
5986   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
5987     S += "external global,";
5988   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
5989     S += "argument,";
5990   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
5991     S += "inaccessible,";
5992   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
5993     S += "malloced,";
5994   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
5995     S += "unknown,";
5996   S.pop_back();
5997   return S;
5998 }
5999 
6000 namespace {
6001 struct AAMemoryLocationImpl : public AAMemoryLocation {
6002 
6003   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6004       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6005     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6006       AccessKind2Accesses[u] = nullptr;
6007   }
6008 
6009   ~AAMemoryLocationImpl() {
6010     // The AccessSets are allocated via a BumpPtrAllocator, we call
6011     // the destructor manually.
6012     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6013       if (AccessKind2Accesses[u])
6014         AccessKind2Accesses[u]->~AccessSet();
6015   }
6016 
6017   /// See AbstractAttribute::initialize(...).
6018   void initialize(Attributor &A) override {
6019     intersectAssumedBits(BEST_STATE);
6020     getKnownStateFromValue(A, getIRPosition(), getState());
6021     IRAttribute::initialize(A);
6022   }
6023 
  /// Return the memory location information encoded in the IR for \p IRP.
6025   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6026                                      BitIntegerState &State,
6027                                      bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblememorargmemonly` as we might break them via interprocedural
    // constant propagation. It is unclear if this is the best way, but it is
    // unlikely this will cause real performance problems. If we are deriving
    // attributes for the anchor function we even remove the attribute in
    // addition to ignoring it.
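    //
    // As a worked example (assuming inverseLocation(MLK, ...) yields the
    // complement location set, as its name suggests): `argmemonly` below adds
    // inverseLocation(NO_ARGUMENT_MEM, true, true) to the known bits, i.e., it
    // is known that no location other than argument memory is accessed.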
6034     bool UseArgMemOnly = true;
6035     Function *AnchorFn = IRP.getAnchorScope();
6036     if (AnchorFn && A.isRunOn(*AnchorFn))
6037       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6038 
6039     SmallVector<Attribute, 2> Attrs;
6040     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6041     for (const Attribute &Attr : Attrs) {
6042       switch (Attr.getKindAsEnum()) {
6043       case Attribute::ReadNone:
6044         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6045         break;
6046       case Attribute::InaccessibleMemOnly:
6047         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6048         break;
6049       case Attribute::ArgMemOnly:
6050         if (UseArgMemOnly)
6051           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6052         else
6053           IRP.removeAttrs({Attribute::ArgMemOnly});
6054         break;
6055       case Attribute::InaccessibleMemOrArgMemOnly:
6056         if (UseArgMemOnly)
6057           State.addKnownBits(inverseLocation(
6058               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6059         else
6060           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6061         break;
6062       default:
6063         llvm_unreachable("Unexpected attribute!");
6064       }
6065     }
6066   }
6067 
6068   /// See AbstractAttribute::getDeducedAttributes(...).
6069   void getDeducedAttributes(LLVMContext &Ctx,
6070                             SmallVectorImpl<Attribute> &Attrs) const override {
6071     assert(Attrs.size() == 0);
6072     if (isAssumedReadNone()) {
6073       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6074     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6075       if (isAssumedInaccessibleMemOnly())
6076         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6077       else if (isAssumedArgMemOnly())
6078         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6079       else if (isAssumedInaccessibleOrArgMemOnly())
6080         Attrs.push_back(
6081             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6082     }
6083     assert(Attrs.size() <= 1);
6084   }
6085 
6086   /// See AbstractAttribute::manifest(...).
6087   ChangeStatus manifest(Attributor &A) override {
6088     const IRPosition &IRP = getIRPosition();
6089 
6090     // Check if we would improve the existing attributes first.
6091     SmallVector<Attribute, 4> DeducedAttrs;
6092     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6093     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6094           return IRP.hasAttr(Attr.getKindAsEnum(),
6095                              /* IgnoreSubsumingPositions */ true);
6096         }))
6097       return ChangeStatus::UNCHANGED;
6098 
6099     // Clear existing attributes.
6100     IRP.removeAttrs(AttrKinds);
6101     if (isAssumedReadNone())
6102       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6103 
6104     // Use the generic manifest method.
6105     return IRAttribute::manifest(A);
6106   }
6107 
6108   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
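  /// Note that location kinds whose NO_* bit is set in \p RequestedMLK are
  /// skipped; callers typically pass inverseLocation(...) of the kinds they
  /// actually want visited (see the global memory handling in
  /// categorizeAccessedLocations).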
6109   bool checkForAllAccessesToMemoryKind(
6110       function_ref<bool(const Instruction *, const Value *, AccessKind,
6111                         MemoryLocationsKind)>
6112           Pred,
6113       MemoryLocationsKind RequestedMLK) const override {
6114     if (!isValidState())
6115       return false;
6116 
6117     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6118     if (AssumedMLK == NO_LOCATIONS)
6119       return true;
6120 
6121     unsigned Idx = 0;
6122     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6123          CurMLK *= 2, ++Idx) {
6124       if (CurMLK & RequestedMLK)
6125         continue;
6126 
6127       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6128         for (const AccessInfo &AI : *Accesses)
6129           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6130             return false;
6131     }
6132 
6133     return true;
6134   }
6135 
6136   ChangeStatus indicatePessimisticFixpoint() override {
6137     // If we give up and indicate a pessimistic fixpoint this instruction will
6138     // become an access for all potential access kinds:
6139     // TODO: Add pointers for argmemonly and globals to improve the results of
6140     //       checkForAllAccessesToMemoryKind.
6141     bool Changed = false;
6142     MemoryLocationsKind KnownMLK = getKnown();
6143     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6144     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6145       if (!(CurMLK & KnownMLK))
6146         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6147                                   getAccessKindFromInst(I));
6148     return AAMemoryLocation::indicatePessimisticFixpoint();
6149   }
6150 
6151 protected:
6152   /// Helper struct to tie together an instruction that has a read or write
6153   /// effect with the pointer it accesses (if any).
6154   struct AccessInfo {
6155 
6156     /// The instruction that caused the access.
6157     const Instruction *I;
6158 
6159     /// The base pointer that is accessed, or null if unknown.
6160     const Value *Ptr;
6161 
6162     /// The kind of access (read/write/read+write).
6163     AccessKind Kind;
6164 
6165     bool operator==(const AccessInfo &RHS) const {
6166       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6167     }
6168     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6169       if (LHS.I != RHS.I)
6170         return LHS.I < RHS.I;
6171       if (LHS.Ptr != RHS.Ptr)
6172         return LHS.Ptr < RHS.Ptr;
6173       if (LHS.Kind != RHS.Kind)
6174         return LHS.Kind < RHS.Kind;
6175       return false;
6176     }
6177   };
6178 
  /// Mapping from *single* memory location kinds, e.g., local memory with the
  /// bit value of NO_LOCAL_MEM, to the accesses encountered for that memory
  /// kind.
6181   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6182   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6183 
6184   /// Return the kind(s) of location that may be accessed by \p V.
6185   AAMemoryLocation::MemoryLocationsKind
6186   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6187 
6188   /// Return the access kind as determined by \p I.
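  /// For example, a load yields READ, an unordered store yields WRITE, an
  /// atomicrmw yields READ_WRITE, and a null \p I conservatively yields
  /// READ_WRITE.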
6189   AccessKind getAccessKindFromInst(const Instruction *I) {
6190     AccessKind AK = READ_WRITE;
6191     if (I) {
6192       AK = I->mayReadFromMemory() ? READ : NONE;
6193       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6194     }
6195     return AK;
6196   }
6197 
6198   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6199   /// an access of kind \p AK to a \p MLK memory location with the access
6200   /// pointer \p Ptr.
6201   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6202                                  MemoryLocationsKind MLK, const Instruction *I,
6203                                  const Value *Ptr, bool &Changed,
6204                                  AccessKind AK = READ_WRITE) {
6205 
6206     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6207     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6208     if (!Accesses)
6209       Accesses = new (Allocator) AccessSet();
6210     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6211     State.removeAssumedBits(MLK);
6212   }
6213 
6214   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6215   /// arguments, and update the state and access map accordingly.
6216   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6217                           AAMemoryLocation::StateType &State, bool &Changed);
6218 
6219   /// Used to allocate access sets.
6220   BumpPtrAllocator &Allocator;
6221 
6222   /// The set of IR attributes AAMemoryLocation deals with.
6223   static const Attribute::AttrKind AttrKinds[4];
6224 };
6225 
6226 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6227     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6228     Attribute::InaccessibleMemOrArgMemOnly};
6229 
6230 void AAMemoryLocationImpl::categorizePtrValue(
6231     Attributor &A, const Instruction &I, const Value &Ptr,
6232     AAMemoryLocation::StateType &State, bool &Changed) {
6233   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6234                     << Ptr << " ["
6235                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6236 
6237   auto StripGEPCB = [](Value *V) -> Value * {
6238     auto *GEP = dyn_cast<GEPOperator>(V);
6239     while (GEP) {
6240       V = GEP->getPointerOperand();
6241       GEP = dyn_cast<GEPOperator>(V);
6242     }
6243     return V;
6244   };
6245 
6246   auto VisitValueCB = [&](Value &V, const Instruction *,
6247                           AAMemoryLocation::StateType &T,
6248                           bool Stripped) -> bool {
6249     MemoryLocationsKind MLK = NO_LOCATIONS;
6250     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6251     if (isa<UndefValue>(V))
6252       return true;
6253     if (auto *Arg = dyn_cast<Argument>(&V)) {
6254       if (Arg->hasByValAttr())
6255         MLK = NO_LOCAL_MEM;
6256       else
6257         MLK = NO_ARGUMENT_MEM;
6258     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6259       if (GV->hasLocalLinkage())
6260         MLK = NO_GLOBAL_INTERNAL_MEM;
6261       else
6262         MLK = NO_GLOBAL_EXTERNAL_MEM;
6263     } else if (isa<ConstantPointerNull>(V) &&
6264                !NullPointerIsDefined(getAssociatedFunction(),
6265                                      V.getType()->getPointerAddressSpace())) {
6266       return true;
6267     } else if (isa<AllocaInst>(V)) {
6268       MLK = NO_LOCAL_MEM;
6269     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6270       const auto &NoAliasAA =
6271           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6272       if (NoAliasAA.isAssumedNoAlias())
6273         MLK = NO_MALLOCED_MEM;
6274       else
6275         MLK = NO_UNKOWN_MEM;
6276     } else {
6277       MLK = NO_UNKOWN_MEM;
6278     }
6279 
6280     assert(MLK != NO_LOCATIONS && "No location specified!");
6281     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6282                               getAccessKindFromInst(&I));
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value categorized: " << V
                      << " -> " << getMemoryLocationsAsStr(T.getAssumed())
                      << "\n");
6286     return true;
6287   };
6288 
6289   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6290           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6291           /* UseValueSimplify */ true,
6292           /* MaxValues */ 32, StripGEPCB)) {
6293     LLVM_DEBUG(
6294         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6295     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6296                               getAccessKindFromInst(&I));
6297   } else {
6298     LLVM_DEBUG(
6299         dbgs()
6300         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6301         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6302   }
6303 }
6304 
6305 AAMemoryLocation::MemoryLocationsKind
6306 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6307                                                   bool &Changed) {
6308   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6309                     << I << "\n");
6310 
6311   AAMemoryLocation::StateType AccessedLocs;
6312   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6313 
6314   if (auto *CB = dyn_cast<CallBase>(&I)) {
6315 
    // First check if we assume the call does not access any memory at all.
6317     const auto &CBMemLocationAA =
6318         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6319     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6320                       << " [" << CBMemLocationAA << "]\n");
6321 
6322     if (CBMemLocationAA.isAssumedReadNone())
6323       return NO_LOCATIONS;
6324 
6325     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6326       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6327                                 Changed, getAccessKindFromInst(&I));
6328       return AccessedLocs.getAssumed();
6329     }
6330 
6331     uint32_t CBAssumedNotAccessedLocs =
6332         CBMemLocationAA.getAssumedNotAccessedLocation();
6333 
    // Set the argmemonly and global bits as we handle them separately below.
6335     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6336         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6337 
6338     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6339       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6340         continue;
6341       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6342                                 getAccessKindFromInst(&I));
6343     }
6344 
6345     // Now handle global memory if it might be accessed. This is slightly tricky
6346     // as NO_GLOBAL_MEM has multiple bits set.
6347     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6348     if (HasGlobalAccesses) {
6349       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6350                             AccessKind Kind, MemoryLocationsKind MLK) {
6351         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6352                                   getAccessKindFromInst(&I));
6353         return true;
6354       };
6355       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6356               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6357         return AccessedLocs.getWorstState();
6358     }
6359 
6360     LLVM_DEBUG(
6361         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6362                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6363 
6364     // Now handle argument memory if it might be accessed.
6365     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6366     if (HasArgAccesses) {
6367       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6368            ++ArgNo) {
6369 
6370         // Skip non-pointer arguments.
6371         const Value *ArgOp = CB->getArgOperand(ArgNo);
6372         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6373           continue;
6374 
6375         // Skip readnone arguments.
6376         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6377         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6378             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6379 
6380         if (ArgOpMemLocationAA.isAssumedReadNone())
6381           continue;
6382 
6383         // Categorize potentially accessed pointer arguments as if there was an
6384         // access instruction with them as pointer.
6385         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6386       }
6387     }
6388 
6389     LLVM_DEBUG(
6390         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6391                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6392 
6393     return AccessedLocs.getAssumed();
6394   }
6395 
6396   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6397     LLVM_DEBUG(
6398         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6399                << I << " [" << *Ptr << "]\n");
6400     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6401     return AccessedLocs.getAssumed();
6402   }
6403 
6404   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6405                     << I << "\n");
6406   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6407                             getAccessKindFromInst(&I));
6408   return AccessedLocs.getAssumed();
6409 }
6410 
/// An AA to represent the memory location function attributes.
6412 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6413   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6414       : AAMemoryLocationImpl(IRP, A) {}
6415 
6416   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
6418 
6419     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6420         *this, getIRPosition(), /* TrackDependence */ false);
6421     if (MemBehaviorAA.isAssumedReadNone()) {
6422       if (MemBehaviorAA.isKnownReadNone())
6423         return indicateOptimisticFixpoint();
6424       assert(isAssumedReadNone() &&
6425              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6426       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6427       return ChangeStatus::UNCHANGED;
6428     }
6429 
6430     // The current assumed state used to determine a change.
6431     auto AssumedState = getAssumed();
6432     bool Changed = false;
6433 
6434     auto CheckRWInst = [&](Instruction &I) {
6435       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6436       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6437                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6438       removeAssumedBits(inverseLocation(MLK, false, false));
6439       return true;
6440     };
6441 
6442     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6443       return indicatePessimisticFixpoint();
6444 
6445     Changed |= AssumedState != getAssumed();
6446     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6447   }
6448 
6449   /// See AbstractAttribute::trackStatistics()
6450   void trackStatistics() const override {
6451     if (isAssumedReadNone())
6452       STATS_DECLTRACK_FN_ATTR(readnone)
6453     else if (isAssumedArgMemOnly())
6454       STATS_DECLTRACK_FN_ATTR(argmemonly)
6455     else if (isAssumedInaccessibleMemOnly())
6456       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6457     else if (isAssumedInaccessibleOrArgMemOnly())
6458       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6459   }
6460 };
6461 
6462 /// AAMemoryLocation attribute for call sites.
6463 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6464   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6465       : AAMemoryLocationImpl(IRP, A) {}
6466 
6467   /// See AbstractAttribute::initialize(...).
6468   void initialize(Attributor &A) override {
6469     AAMemoryLocationImpl::initialize(A);
6470     Function *F = getAssociatedFunction();
6471     if (!F || !A.isFunctionIPOAmendable(*F)) {
6472       indicatePessimisticFixpoint();
6473       return;
6474     }
6475   }
6476 
6477   /// See AbstractAttribute::updateImpl(...).
6478   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6483     Function *F = getAssociatedFunction();
6484     const IRPosition &FnPos = IRPosition::function(*F);
6485     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6486     bool Changed = false;
6487     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6488                           AccessKind Kind, MemoryLocationsKind MLK) {
6489       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6490                                 getAccessKindFromInst(I));
6491       return true;
6492     };
6493     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6494       return indicatePessimisticFixpoint();
6495     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6496   }
6497 
6498   /// See AbstractAttribute::trackStatistics()
6499   void trackStatistics() const override {
6500     if (isAssumedReadNone())
6501       STATS_DECLTRACK_CS_ATTR(readnone)
6502   }
6503 };
6504 
6505 /// ------------------ Value Constant Range Attribute -------------------------
6506 
6507 struct AAValueConstantRangeImpl : AAValueConstantRange {
6508   using StateType = IntegerRangeState;
6509   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6510       : AAValueConstantRange(IRP, A) {}
6511 
6512   /// See AbstractAttribute::getAsStr().
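  /// Example output: "range(32)<full-set / [0,10)>" for a 32-bit value whose
  /// known range is still unconstrained while the assumed range is [0, 10).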
6513   const std::string getAsStr() const override {
6514     std::string Str;
6515     llvm::raw_string_ostream OS(Str);
6516     OS << "range(" << getBitWidth() << ")<";
6517     getKnown().print(OS);
6518     OS << " / ";
6519     getAssumed().print(OS);
6520     OS << ">";
6521     return OS.str();
6522   }
6523 
6524   /// Helper function to get a SCEV expr for the associated value at program
6525   /// point \p I.
6526   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6527     if (!getAnchorScope())
6528       return nullptr;
6529 
6530     ScalarEvolution *SE =
6531         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6532             *getAnchorScope());
6533 
6534     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6535         *getAnchorScope());
6536 
6537     if (!SE || !LI)
6538       return nullptr;
6539 
6540     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6541     if (!I)
6542       return S;
6543 
6544     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6545   }
6546 
6547   /// Helper function to get a range from SCEV for the associated value at
6548   /// program point \p I.
6549   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6550                                          const Instruction *I = nullptr) const {
6551     if (!getAnchorScope())
6552       return getWorstState(getBitWidth());
6553 
6554     ScalarEvolution *SE =
6555         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6556             *getAnchorScope());
6557 
6558     const SCEV *S = getSCEV(A, I);
6559     if (!SE || !S)
6560       return getWorstState(getBitWidth());
6561 
6562     return SE->getUnsignedRange(S);
6563   }
6564 
6565   /// Helper function to get a range from LVI for the associated value at
6566   /// program point \p I.
6567   ConstantRange
6568   getConstantRangeFromLVI(Attributor &A,
6569                           const Instruction *CtxI = nullptr) const {
6570     if (!getAnchorScope())
6571       return getWorstState(getBitWidth());
6572 
6573     LazyValueInfo *LVI =
6574         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6575             *getAnchorScope());
6576 
6577     if (!LVI || !CtxI)
6578       return getWorstState(getBitWidth());
6579     return LVI->getConstantRange(&getAssociatedValue(),
6580                                  const_cast<BasicBlock *>(CtxI->getParent()),
6581                                  const_cast<Instruction *>(CtxI));
6582   }
6583 
6584   /// See AAValueConstantRange::getKnownConstantRange(..).
6585   ConstantRange
6586   getKnownConstantRange(Attributor &A,
6587                         const Instruction *CtxI = nullptr) const override {
6588     if (!CtxI || CtxI == getCtxI())
6589       return getKnown();
6590 
6591     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6592     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6593     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6594   }
6595 
6596   /// See AAValueConstantRange::getAssumedConstantRange(..).
6597   ConstantRange
6598   getAssumedConstantRange(Attributor &A,
6599                           const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumptions.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor, e.g., if x is assumed to be in [1, 3] and y is known
    //       to evolve to x^2 + x, then we can say that y is in [2, 12].
6604 
6605     if (!CtxI || CtxI == getCtxI())
6606       return getAssumed();
6607 
6608     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6609     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6610     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6611   }
6612 
6613   /// See AbstractAttribute::initialize(..).
6614   void initialize(Attributor &A) override {
6615     // Intersect a range given by SCEV.
6616     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6617 
6618     // Intersect a range given by LVI.
6619     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6620   }
6621 
6622   /// Helper function to create MDNode for range metadata.
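  /// For example, manifesting the assumed range [0, 10) on an i32 instruction
  /// produces metadata of the form (hypothetical IR):
  ///   %x = load i32, i32* %p, !range !0
  ///   !0 = !{i32 0, i32 10}
  /// where the pair encodes the half-open interval [Lower, Upper).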
6623   static MDNode *
6624   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6625                             const ConstantRange &AssumedConstantRange) {
6626     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6627                                   Ty, AssumedConstantRange.getLower())),
6628                               ConstantAsMetadata::get(ConstantInt::get(
6629                                   Ty, AssumedConstantRange.getUpper()))};
6630     return MDNode::get(Ctx, LowAndHigh);
6631   }
6632 
6633   /// Return true if \p Assumed is included in \p KnownRanges.
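  /// For example, an assumed range [0, 10) is better than a single annotated
  /// range [0, 20) since the former is strictly contained in the latter; with
  /// more than one annotated range we conservatively return false.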
6634   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6635 
6636     if (Assumed.isFullSet())
6637       return false;
6638 
6639     if (!KnownRanges)
6640       return true;
6641 
    // If multiple ranges are annotated in the IR, we give up on annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    // we can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;
6649 
6650     ConstantInt *Lower =
6651         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6652     ConstantInt *Upper =
6653         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6654 
6655     ConstantRange Known(Lower->getValue(), Upper->getValue());
6656     return Known.contains(Assumed) && Known != Assumed;
6657   }
6658 
6659   /// Helper function to set range metadata.
6660   static bool
6661   setRangeMetadataIfisBetterRange(Instruction *I,
6662                                   const ConstantRange &AssumedConstantRange) {
6663     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6664     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6665       if (!AssumedConstantRange.isEmptySet()) {
6666         I->setMetadata(LLVMContext::MD_range,
6667                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6668                                                  AssumedConstantRange));
6669         return true;
6670       }
6671     }
6672     return false;
6673   }
6674 
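       // A sketch of the effect in IR (hypothetical values): a load annotated
       // as
       //   %x = load i32, i32* %p, !range !0   ; with !0 = !{i32 0, i32 100}
       // is re-annotated with !{i32 10, i32 20} if that is the strictly
       // smaller assumed range.
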
6675   /// See AbstractAttribute::manifest()
6676   ChangeStatus manifest(Attributor &A) override {
6677     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6678     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6679     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6680 
6681     auto &V = getAssociatedValue();
6682     if (!AssumedConstantRange.isEmptySet() &&
6683         !AssumedConstantRange.isSingleElement()) {
6684       if (Instruction *I = dyn_cast<Instruction>(&V))
6685         if (isa<CallInst>(I) || isa<LoadInst>(I))
6686           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6687             Changed = ChangeStatus::CHANGED;
6688     }
6689 
6690     return Changed;
6691   }
6692 };
6693 
6694 struct AAValueConstantRangeArgument final
6695     : AAArgumentFromCallSiteArguments<
6696           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6697   using Base = AAArgumentFromCallSiteArguments<
6698       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6699   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6700       : Base(IRP, A) {}
6701 
6702   /// See AbstractAttribute::initialize(..).
6703   void initialize(Attributor &A) override {
6704     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6705       indicatePessimisticFixpoint();
6706     } else {
6707       Base::initialize(A);
6708     }
6709   }
6710 
6711   /// See AbstractAttribute::trackStatistics()
6712   void trackStatistics() const override {
6713     STATS_DECLTRACK_ARG_ATTR(value_range)
6714   }
6715 };
6716 
6717 struct AAValueConstantRangeReturned
6718     : AAReturnedFromReturnedValues<AAValueConstantRange,
6719                                    AAValueConstantRangeImpl> {
6720   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6721                                             AAValueConstantRangeImpl>;
6722   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6723       : Base(IRP, A) {}
6724 
6725   /// See AbstractAttribute::initialize(...).
6726   void initialize(Attributor &A) override {}
6727 
6728   /// See AbstractAttribute::trackStatistics()
6729   void trackStatistics() const override {
6730     STATS_DECLTRACK_FNRET_ATTR(value_range)
6731   }
6732 };
6733 
6734 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6735   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6736       : AAValueConstantRangeImpl(IRP, A) {}
6737 
6738   /// See AbstractAttribute::initialize(...).
6739   void initialize(Attributor &A) override {
6740     AAValueConstantRangeImpl::initialize(A);
6741     Value &V = getAssociatedValue();
6742 
6743     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6744       unionAssumed(ConstantRange(C->getValue()));
6745       indicateOptimisticFixpoint();
6746       return;
6747     }
6748 
6749     if (isa<UndefValue>(&V)) {
6750       // Collapse the undef state to 0.
6751       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6752       indicateOptimisticFixpoint();
6753       return;
6754     }
6755 
6756     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6757       return;
6758     // If it is a load instruction with range metadata, use it.
6759     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6760       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6761         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6762         return;
6763       }
6764 
6765     // We can work with PHI and select instructions as we traverse their
6766     // operands during the update.
6767     if (isa<SelectInst>(V) || isa<PHINode>(V))
6768       return;
6769 
6770     // Otherwise we give up.
6771     indicatePessimisticFixpoint();
6772 
6773     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6774                       << getAssociatedValue() << "\n");
6775   }
6776 
6777   bool calculateBinaryOperator(
6778       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6779       const Instruction *CtxI,
6780       SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
6781     Value *LHS = BinOp->getOperand(0);
6782     Value *RHS = BinOp->getOperand(1);
6783     // TODO: Allow non integers as well.
6784     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6785       return false;
6786 
6787     auto &LHSAA =
6788         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6789     QueriedAAs.push_back(&LHSAA);
6790     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6791 
6792     auto &RHSAA =
6793         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6794     QueriedAAs.push_back(&RHSAA);
6795     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6796 
6797     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6798 
6799     T.unionAssumed(AssumedRange);
6800 
6801     // TODO: Track a known state too.
6802 
6803     return T.isValidState();
6804   }
6805 
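       // Worked example for calculateBinaryOperator above (ranges assumed for
       // illustration): for an 'add' with LHS in [1, 5) and RHS in [2, 4),
       // ConstantRange::binaryOp yields [3, 8): the smallest result is 1 + 2
       // and the largest is 4 + 3.
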
6806   bool calculateCastInst(
6807       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6808       const Instruction *CtxI,
6809       SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
6810     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6811     // TODO: Allow non integers as well.
6812     Value &OpV = *CastI->getOperand(0);
6813     if (!OpV.getType()->isIntegerTy())
6814       return false;
6815 
6816     auto &OpAA =
6817         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6818     QueriedAAs.push_back(&OpAA);
6819     T.unionAssumed(
6820         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6821     return T.isValidState();
6822   }
6823 
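       // For example (assumed operand range): a 'zext' of an i8 value known
       // to be in [10, 20) to i32 keeps the range [10, 20), now at bit
       // width 32.
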
6824   bool
6825   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6826                    const Instruction *CtxI,
6827                    SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
6828     Value *LHS = CmpI->getOperand(0);
6829     Value *RHS = CmpI->getOperand(1);
6830     // TODO: Allow non integers as well.
6831     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6832       return false;
6833 
6834     auto &LHSAA =
6835         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6836     QueriedAAs.push_back(&LHSAA);
6837     auto &RHSAA =
6838         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6839     QueriedAAs.push_back(&RHSAA);
6840 
6841     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6842     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6843 
6844     // If one of them is the empty set, we can't decide.
6845     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6846       return true;
6847 
6848     bool MustTrue = false, MustFalse = false;
6849 
6850     auto AllowedRegion =
6851         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6852 
6853     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6854         CmpI->getPredicate(), RHSAARange);
6855 
6856     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6857       MustFalse = true;
6858 
6859     if (SatisfyingRegion.contains(LHSAARange))
6860       MustTrue = true;
6861 
6862     assert((!MustTrue || !MustFalse) &&
6863            "MustTrue and MustFalse cannot both be true!");
6864 
6865     if (MustTrue)
6866       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6867     else if (MustFalse)
6868       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6869     else
6870       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6871 
6872     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6873                       << " " << RHSAA << "\n");
6874 
6875     // TODO: Track a known state too.
6876     return T.isValidState();
6877   }
6878 
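       // Worked example for calculateCmpInst above (assumed ranges): for
       // 'icmp ult' with LHS in [0, 10) and RHS in [10, 20), every LHS value
       // is smaller than every RHS value. The satisfying region for 'ult' and
       // [10, 20) is [0, 10), which contains the LHS range, so MustTrue holds
       // and the assumed result collapses to the single i1 value 1.
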
6879   /// See AbstractAttribute::updateImpl(...).
6880   ChangeStatus updateImpl(Attributor &A) override {
6881     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6882                             IntegerRangeState &T, bool Stripped) -> bool {
6883       Instruction *I = dyn_cast<Instruction>(&V);
6884       if (!I || isa<CallBase>(I)) {
6885         // If the value is not an instruction, or is a call base, we query
6886         // the Attributor for an AA of the value itself.
6887         const auto &AA =
6888             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6889 
6890         // Do not clamp here so that the program point CtxI can be utilized.
6891         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6892 
6893         return T.isValidState();
6894       }
6895 
6896       SmallVector<const AAValueConstantRange *, 4> QueriedAAs;
6897       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6898         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QueriedAAs))
6899           return false;
6900       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6901         if (!calculateCmpInst(A, CmpI, T, CtxI, QueriedAAs))
6902           return false;
6903       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6904         if (!calculateCastInst(A, CastI, T, CtxI, QueriedAAs))
6905           return false;
6906       } else {
6907         // Give up on all other instructions.
6908         // TODO: Add handling for more instructions.
6909 
6910         T.indicatePessimisticFixpoint();
6911         return false;
6912       }
6913 
6914       // Catch circular reasoning in a pessimistic way for now.
6915       // TODO: Check how the range evolves and if we stripped anything, see also
6916       //       AADereferenceable or AAAlign for similar situations.
6917       for (const AAValueConstantRange *QueriedAA : QueriedAAs) {
6918         if (QueriedAA != this)
6919           continue;
6920         // If we are in a steady state we do not need to worry.
6921         if (T.getAssumed() == getState().getAssumed())
6922           continue;
6923         T.indicatePessimisticFixpoint();
6924       }
6925 
6926       return T.isValidState();
6927     };
6928 
6929     IntegerRangeState T(getBitWidth());
6930 
6931     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
6932             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
6933             /* UseValueSimplify */ false))
6934       return indicatePessimisticFixpoint();
6935 
6936     return clampStateAndIndicateChange(getState(), T);
6937   }
6938 
6939   /// See AbstractAttribute::trackStatistics()
6940   void trackStatistics() const override {
6941     STATS_DECLTRACK_FLOATING_ATTR(value_range)
6942   }
6943 };
6944 
6945 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6946   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
6947       : AAValueConstantRangeImpl(IRP, A) {}
6948 
6949   /// See AbstractAttribute::updateImpl(...).
6950   ChangeStatus updateImpl(Attributor &A) override {
6951     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6952                      "not be called");
6953   }
6954 
6955   /// See AbstractAttribute::trackStatistics()
6956   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6957 };
6958 
6959 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
6960   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
6961       : AAValueConstantRangeFunction(IRP, A) {}
6962 
6963   /// See AbstractAttribute::trackStatistics()
6964   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
6965 };
6966 
6967 struct AAValueConstantRangeCallSiteReturned
6968     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6969                                      AAValueConstantRangeImpl> {
6970   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
6971       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6972                                        AAValueConstantRangeImpl>(IRP, A) {}
6973 
6974   /// See AbstractAttribute::initialize(...).
6975   void initialize(Attributor &A) override {
6976     // If it is a call instruction with range metadata, use the metadata.
6977     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6978       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6979         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6980 
6981     AAValueConstantRangeImpl::initialize(A);
6982   }
6983 
6984   /// See AbstractAttribute::trackStatistics()
6985   void trackStatistics() const override {
6986     STATS_DECLTRACK_CSRET_ATTR(value_range)
6987   }
6988 };
6989 
6989 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
6990   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
6991       : AAValueConstantRangeFloating(IRP, A) {}
6992 
6993   /// See AbstractAttribute::trackStatistics()
6994   void trackStatistics() const override {
6995     STATS_DECLTRACK_CSARG_ATTR(value_range)
6996   }
6997 };
6998 } // namespace
6999 
7000 const char AAReturnedValues::ID = 0;
7001 const char AANoUnwind::ID = 0;
7002 const char AANoSync::ID = 0;
7003 const char AANoFree::ID = 0;
7004 const char AANonNull::ID = 0;
7005 const char AANoRecurse::ID = 0;
7006 const char AAWillReturn::ID = 0;
7007 const char AAUndefinedBehavior::ID = 0;
7008 const char AANoAlias::ID = 0;
7009 const char AAReachability::ID = 0;
7010 const char AANoReturn::ID = 0;
7011 const char AAIsDead::ID = 0;
7012 const char AADereferenceable::ID = 0;
7013 const char AAAlign::ID = 0;
7014 const char AANoCapture::ID = 0;
7015 const char AAValueSimplify::ID = 0;
7016 const char AAHeapToStack::ID = 0;
7017 const char AAPrivatizablePtr::ID = 0;
7018 const char AAMemoryBehavior::ID = 0;
7019 const char AAMemoryLocation::ID = 0;
7020 const char AAValueConstantRange::ID = 0;
7021 
7022 // Macro magic to create the static generator functions for attributes that
7023 // follow the naming scheme.
7024 
7025 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
7026   case IRPosition::PK:                                                         \
7027     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
7028 
7029 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
7030   case IRPosition::PK:                                                         \
7031     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
7032     ++NumAAs;                                                                  \
7033     break;
7034 
7035 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
7036   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7037     CLASS *AA = nullptr;                                                       \
7038     switch (IRP.getPositionKind()) {                                           \
7039       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7040       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7041       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7042       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7043       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7044       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7045       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7046       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7047     }                                                                          \
7048     return *AA;                                                                \
7049   }
7050 
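     // As a sketch, the invocation
     // CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) further
     // below expands to roughly:
     //
     //   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
     //                                             Attributor &A) {
     //     AANoUnwind *AA = nullptr;
     //     switch (IRP.getPositionKind()) {
     //     case IRPosition::IRP_INVALID:
     //       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
     //     /* ... the other invalid position kinds ... */
     //     case IRPosition::IRP_FUNCTION:
     //       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
     //       ++NumAAs;
     //       break;
     //     case IRPosition::IRP_CALL_SITE:
     //       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
     //       ++NumAAs;
     //       break;
     //     }
     //     return *AA;
     //   }
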
7051 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
7052   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7053     CLASS *AA = nullptr;                                                       \
7054     switch (IRP.getPositionKind()) {                                           \
7055       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7056       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
7057       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7058       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7059       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7060       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7061       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7062       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7063     }                                                                          \
7064     return *AA;                                                                \
7065   }
7066 
7067 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7068   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7069     CLASS *AA = nullptr;                                                       \
7070     switch (IRP.getPositionKind()) {                                           \
7071       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7072       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7073       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7074       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7075       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7076       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7077       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7078       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7079     }                                                                          \
7080     return *AA;                                                                \
7081   }
7082 
7083 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7084   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7085     CLASS *AA = nullptr;                                                       \
7086     switch (IRP.getPositionKind()) {                                           \
7087       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7088       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7089       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7090       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7091       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7092       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7093       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7094       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7095     }                                                                          \
7096     return *AA;                                                                \
7097   }
7098 
7099 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7100   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7101     CLASS *AA = nullptr;                                                       \
7102     switch (IRP.getPositionKind()) {                                           \
7103       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7104       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7105       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7106       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7107       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7108       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7109       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7110       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7111     }                                                                          \
7112     return *AA;                                                                \
7113   }
7114 
7115 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7116 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7117 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7118 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7119 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7120 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7121 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7122 
7123 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7124 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7125 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7126 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7127 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7128 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7129 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7130 
7131 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7132 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7133 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7134 
7135 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7136 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7137 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7138 
7139 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7140 
7141 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7142 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7143 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7144 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7145 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7146 #undef SWITCH_PK_CREATE
7147 #undef SWITCH_PK_INV
7148