1 //===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation -==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the generic AliasAnalysis interface which is used as the
11 // common interface used by all clients and implementations of alias analysis.
12 //
13 // This file also implements the default version of the AliasAnalysis interface
14 // that is to be used when no other implementation is specified.  This does some
15 // simple tests that detect obvious cases: two different global pointers cannot
16 // alias, a global cannot alias a malloc, two different mallocs cannot alias,
17 // etc.
18 //
19 // This alias analysis implementation really isn't very good for anything, but
20 // it is very fast, and makes a nice clean default implementation.  Because it
21 // handles lots of little corner cases, other, more complex, alias analysis
22 // implementations may choose to rely on this pass to resolve these simple and
23 // easy cases.
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CFG.h"
30 #include "llvm/Analysis/CFLAndersAliasAnalysis.h"
31 #include "llvm/Analysis/CFLSteensAliasAnalysis.h"
32 #include "llvm/Analysis/CaptureTracking.h"
33 #include "llvm/Analysis/GlobalsModRef.h"
34 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
35 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
36 #include "llvm/Analysis/ScopedNoAliasAA.h"
37 #include "llvm/Analysis/TargetLibraryInfo.h"
38 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
39 #include "llvm/Analysis/ValueTracking.h"
40 #include "llvm/IR/BasicBlock.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/Dominators.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/LLVMContext.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/Pass.h"
49 using namespace llvm;
50 
/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation. Defaults to false, so
/// BasicAA is normally part of the aggregation (see runOnFunction below and
/// createLegacyPMAAResults, which both consult this flag).
static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden,
                                    cl::init(false));
55 
56 AAResults::AAResults(AAResults &&Arg) : TLI(Arg.TLI), AAs(std::move(Arg.AAs)) {
57   for (auto &AA : AAs)
58     AA->setAAResults(this);
59 }
60 
AAResults::~AAResults() {
// FIXME: It would be nice to at least clear out the pointers back to this
// aggregation here, but we end up with non-nesting lifetimes in the legacy
// pass manager that prevent this from working. In the legacy pass manager
// we'll end up with dangling references here in some cases.
#if 0
  for (auto &AA : AAs)
    AA->setAAResults(nullptr);
#endif
}
71 
72 //===----------------------------------------------------------------------===//
73 // Default chaining methods
74 //===----------------------------------------------------------------------===//
75 
76 AliasResult AAResults::alias(const MemoryLocation &LocA,
77                              const MemoryLocation &LocB) {
78   for (const auto &AA : AAs) {
79     auto Result = AA->alias(LocA, LocB);
80     if (Result != MayAlias)
81       return Result;
82   }
83   return MayAlias;
84 }
85 
86 bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
87                                        bool OrLocal) {
88   for (const auto &AA : AAs)
89     if (AA->pointsToConstantMemory(Loc, OrLocal))
90       return true;
91 
92   return false;
93 }
94 
95 ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
96   ModRefInfo Result = MRI_ModRef;
97 
98   for (const auto &AA : AAs) {
99     Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));
100 
101     // Early-exit the moment we reach the bottom of the lattice.
102     if (Result == MRI_NoModRef)
103       return Result;
104   }
105 
106   return Result;
107 }
108 
109 ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
110   // We may have two calls
111   if (auto CS = ImmutableCallSite(I)) {
112     // Check if the two calls modify the same memory
113     return getModRefInfo(CS, Call);
114   } else if (I->isFenceLike()) {
115     // If this is a fence, just return MRI_ModRef.
116     return MRI_ModRef;
117   } else {
118     // Otherwise, check if the call modifies or references the
119     // location this memory access defines.  The best we can say
120     // is that if the call references what this instruction
121     // defines, it must be clobbered by this location.
122     const MemoryLocation DefLoc = MemoryLocation::get(I);
123     if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
124       return MRI_ModRef;
125   }
126   return MRI_NoModRef;
127 }
128 
/// Compute the mod/ref behavior of call site \p CS with respect to \p Loc by
/// intersecting the answers of every registered AA and then refining the
/// result with the call's summarized memory behavior.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                    const MemoryLocation &Loc) {
  // Start at the conservative top of the lattice and intersect downwards.
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.
  auto MRB = getModRefBehavior(CS);
  if (MRB == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  if (onlyReadsMemory(MRB))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(MRB))
    Result = ModRefInfo(Result & MRI_Mod);

  // If the call can only access the pointees of its arguments, the answer is
  // bounded by what it may do to the pointer arguments that can alias Loc.
  if (onlyAccessesArgPointees(MRB)) {
    bool DoesAlias = false;
    ModRefInfo AllArgsMask = MRI_NoModRef;
    if (doesAccessArgPointees(MRB)) {
      for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
        const Value *Arg = *AI;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
        MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
        AliasResult ArgAlias = alias(ArgLoc, Loc);
        if (ArgAlias != NoAlias) {
          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
          DoesAlias = true;
          AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
        }
      }
    }
    // If no argument location may alias Loc, the call cannot touch Loc at all.
    if (!DoesAlias)
      return MRI_NoModRef;
    Result = ModRefInfo(Result & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Result & MRI_Mod) &&
      pointsToConstantMemory(Loc, /*OrLocal*/ false))
    Result = ModRefInfo(Result & ~MRI_Mod);

  return Result;
}
183 
/// Compute the mod/ref interaction between two call sites by intersecting the
/// answers of every registered AA and then refining the result with each
/// call's summarized memory behavior.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  // Start at the conservative top of the lattice and intersect downwards.
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If CS1 or CS2 are readnone, they don't interact.
  auto CS1B = getModRefBehavior(CS1);
  if (CS1B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  auto CS2B = getModRefBehavior(CS2);
  if (CS2B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return MRI_NoModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Mod);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
        auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);

        // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
        // of CS1 on that location is the inverse.
        ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
        if (ArgMask == MRI_Mod)
          ArgMask = MRI_ModRef;
        else if (ArgMask == MRI_Ref)
          ArgMask = MRI_Mod;

        ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc));

        // Stop accumulating once we have reached the upper bound Result.
        R = ModRefInfo((R | ArgMask) & Result);
        if (R == Result)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
        auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);

        // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
        // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
        ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
        if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
             (ArgR & MRI_ModRef) != MRI_NoModRef) ||
            ((ArgMask & MRI_Ref) != MRI_NoModRef &&
             (ArgR & MRI_Mod) != MRI_NoModRef))
          R = ModRefInfo((R | ArgMask) & Result);

        // Stop accumulating once we have reached the upper bound Result.
        if (R == Result)
          break;
      }
    }
    return R;
  }

  return Result;
}
282 
283 FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
284   FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
285 
286   for (const auto &AA : AAs) {
287     Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));
288 
289     // Early-exit the moment we reach the bottom of the lattice.
290     if (Result == FMRB_DoesNotAccessMemory)
291       return Result;
292   }
293 
294   return Result;
295 }
296 
297 FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
298   FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
299 
300   for (const auto &AA : AAs) {
301     Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F));
302 
303     // Early-exit the moment we reach the bottom of the lattice.
304     if (Result == FMRB_DoesNotAccessMemory)
305       return Result;
306   }
307 
308   return Result;
309 }
310 
311 //===----------------------------------------------------------------------===//
312 // Helper method implementation
313 //===----------------------------------------------------------------------===//
314 
315 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
316                                     const MemoryLocation &Loc) {
317   // Be conservative in the face of volatile/atomic.
318   if (!L->isUnordered())
319     return MRI_ModRef;
320 
321   // If the load address doesn't alias the given address, it doesn't read
322   // or write the specified memory.
323   if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
324     return MRI_NoModRef;
325 
326   // Otherwise, a load just reads.
327   return MRI_Ref;
328 }
329 
330 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
331                                     const MemoryLocation &Loc) {
332   // Be conservative in the face of volatile/atomic.
333   if (!S->isUnordered())
334     return MRI_ModRef;
335 
336   if (Loc.Ptr) {
337     // If the store address cannot alias the pointer in question, then the
338     // specified memory cannot be modified by the store.
339     if (!alias(MemoryLocation::get(S), Loc))
340       return MRI_NoModRef;
341 
342     // If the pointer is a pointer to constant memory, then it could not have
343     // been modified by this store.
344     if (pointsToConstantMemory(Loc))
345       return MRI_NoModRef;
346   }
347 
348   // Otherwise, a store just writes.
349   return MRI_Mod;
350 }
351 
352 ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
353                                     const MemoryLocation &Loc) {
354 
355   if (Loc.Ptr) {
356     // If the va_arg address cannot alias the pointer in question, then the
357     // specified memory cannot be accessed by the va_arg.
358     if (!alias(MemoryLocation::get(V), Loc))
359       return MRI_NoModRef;
360 
361     // If the pointer is a pointer to constant memory, then it could not have
362     // been modified by this va_arg.
363     if (pointsToConstantMemory(Loc))
364       return MRI_NoModRef;
365   }
366 
367   // Otherwise, a va_arg reads and writes.
368   return MRI_ModRef;
369 }
370 
371 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
372                                     const MemoryLocation &Loc) {
373   if (Loc.Ptr) {
374     // If the pointer is a pointer to constant memory,
375     // then it could not have been modified by this catchpad.
376     if (pointsToConstantMemory(Loc))
377       return MRI_NoModRef;
378   }
379 
380   // Otherwise, a catchpad reads and writes.
381   return MRI_ModRef;
382 }
383 
384 ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
385                                     const MemoryLocation &Loc) {
386   if (Loc.Ptr) {
387     // If the pointer is a pointer to constant memory,
388     // then it could not have been modified by this catchpad.
389     if (pointsToConstantMemory(Loc))
390       return MRI_NoModRef;
391   }
392 
393   // Otherwise, a catchret reads and writes.
394   return MRI_ModRef;
395 }
396 
397 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
398                                     const MemoryLocation &Loc) {
399   // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
400   if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
401     return MRI_ModRef;
402 
403   // If the cmpxchg address does not alias the location, it does not access it.
404   if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
405     return MRI_NoModRef;
406 
407   return MRI_ModRef;
408 }
409 
410 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
411                                     const MemoryLocation &Loc) {
412   // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
413   if (isStrongerThanMonotonic(RMW->getOrdering()))
414     return MRI_ModRef;
415 
416   // If the atomicrmw address does not alias the location, it does not access it.
417   if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
418     return MRI_NoModRef;
419 
420   return MRI_ModRef;
421 }
422 
/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         OrderedBasicBlock *OBB) {
  // Without a dominator tree we cannot reason about ordering at all.
  if (!DT)
    return MRI_ModRef;

  // We can only reason about identified, function-local objects; globals and
  // constants are excluded.
  const Value *Object =
      GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
      isa<Constant>(Object))
    return MRI_ModRef;

  // I must be a call/invoke, and must not itself be the object in question.
  ImmutableCallSite CS(I);
  if (!CS.getInstruction() || CS.getInstruction() == Object)
    return MRI_ModRef;

  // If the pointer may already have been captured before this call, the
  // call's effect cannot be restricted to its arguments.
  if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                       /* StoreCaptures */ true, I, DT,
                                       /* include Object */ true,
                                       /* OrderedBasicBlock */ OBB))
    return MRI_ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = MRI_NoModRef;
  for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments.  If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking.  If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
      continue;
    if (CS.doesNotAccessMemory(ArgNo))
      continue;
    if (CS.onlyReadsMemory(ArgNo)) {
      R = MRI_Ref;
      continue;
    }
    // The argument may be written through: no better answer than ModRef.
    return MRI_ModRef;
  }
  return R;
}
481 
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
/// Implemented by scanning every instruction in the block — from BB.front()
/// to BB.back() inclusive — via canInstructionRangeModRef with MRI_Mod.
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
}
489 
490 /// canInstructionRangeModRef - Return true if it is possible for the
491 /// execution of the specified instructions to mod\ref (according to the
492 /// mode) the location Loc. The instructions to consider are all
493 /// of the instructions in the range of [I1,I2] INCLUSIVE.
494 /// I1 and I2 must be in the same basic block.
495 bool AAResults::canInstructionRangeModRef(const Instruction &I1,
496                                           const Instruction &I2,
497                                           const MemoryLocation &Loc,
498                                           const ModRefInfo Mode) {
499   assert(I1.getParent() == I2.getParent() &&
500          "Instructions not in same basic block!");
501   BasicBlock::const_iterator I = I1.getIterator();
502   BasicBlock::const_iterator E = I2.getIterator();
503   ++E;  // Convert from inclusive to exclusive range.
504 
505   for (; I != E; ++I) // Check every instruction in range
506     if (getModRefInfo(&*I, Loc) & Mode)
507       return true;
508   return false;
509 }
510 
// Provide a definition for the root virtual destructor; this out-of-line
// definition anchors AAResults::Concept's vtable to this translation unit.
AAResults::Concept::~Concept() {}

// Provide a definition for the static object used to identify passes.
char AAManager::PassID;
516 
517 namespace {
518 /// A wrapper pass for external alias analyses. This just squirrels away the
519 /// callback used to run any analyses and register their results.
520 struct ExternalAAWrapperPass : ImmutablePass {
521   typedef std::function<void(Pass &, Function &, AAResults &)> CallbackT;
522 
523   CallbackT CB;
524 
525   static char ID;
526 
527   ExternalAAWrapperPass() : ImmutablePass(ID) {
528     initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
529   }
530   explicit ExternalAAWrapperPass(CallbackT CB)
531       : ImmutablePass(ID), CB(std::move(CB)) {
532     initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
533   }
534 
535   void getAnalysisUsage(AnalysisUsage &AU) const override {
536     AU.setPreservesAll();
537   }
538 };
539 }
540 
// Pass identification token and legacy-PM registration for the wrapper.
char ExternalAAWrapperPass::ID = 0;
INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)
544 
/// Create a wrapper pass holding \p Callback; AAResultsWrapperPass invokes
/// the callback (if non-null) when rebuilding its aggregation so external
/// alias analyses can register their results.
ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}
549 
// Construction triggers legacy pass-manager initialization via the global
// PassRegistry.
AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}
553 
// Pass identification token used by the legacy pass manager.
char AAResultsWrapperPass::ID = 0;

// Legacy-PM registration; the listed dependencies must mirror the wrapper
// passes probed in runOnFunction/getAnalysisUsage below.
INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLAndersAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLSteensAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)
569 
/// Factory for legacy pass-manager clients to create the AA aggregation pass.
FunctionPass *llvm::createAAResultsWrapperPass() {
  return new AAResultsWrapperPass();
}
573 
/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI()));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs. Each is only added
  // when its wrapper pass was actually scheduled by the pass manager.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}
625 
void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  // BasicAA and TLI are hard requirements; everything below is optional.
  AU.addRequired<BasicAAWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}
643 
/// Build an AAResults aggregation for legacy pass-manager clients from an
/// explicitly-constructed BasicAA result \p BAR plus whichever other AA
/// wrapper passes are currently available to \p P.
/// NOTE: keep the set probed here in sync with llvm::getAAResultsAnalysisUsage.
AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs.
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());

  return AAR;
}
670 
671 bool llvm::isNoAliasCall(const Value *V) {
672   if (auto CS = ImmutableCallSite(V))
673     return CS.paramHasAttr(0, Attribute::NoAlias);
674   return false;
675 }
676 
677 bool llvm::isNoAliasArgument(const Value *V) {
678   if (const Argument *A = dyn_cast<Argument>(V))
679     return A->hasNoAliasAttr();
680   return false;
681 }
682 
683 bool llvm::isIdentifiedObject(const Value *V) {
684   if (isa<AllocaInst>(V))
685     return true;
686   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
687     return true;
688   if (isNoAliasCall(V))
689     return true;
690   if (const Argument *A = dyn_cast<Argument>(V))
691     return A->hasNoAliasAttr() || A->hasByValAttr();
692   return false;
693 }
694 
695 bool llvm::isIdentifiedFunctionLocal(const Value *V) {
696   return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
697 }
698 
/// Populate \p AU with the analysis dependencies needed by passes that build
/// an AAResults aggregation through llvm::createLegacyPMAAResults.
void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
  // This function needs to be in sync with llvm::createLegacyPMAAResults -- if
  // more alias analyses are added to llvm::createLegacyPMAAResults, they need
  // to be added here also.
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}
711