1 //===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation -==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// This file implements the generic AliasAnalysis interface, which is the
// common interface used by all clients and implementations of alias analysis.
12 //
13 // This file also implements the default version of the AliasAnalysis interface
14 // that is to be used when no other implementation is specified.  This does some
15 // simple tests that detect obvious cases: two different global pointers cannot
16 // alias, a global cannot alias a malloc, two different mallocs cannot alias,
17 // etc.
18 //
19 // This alias analysis implementation really isn't very good for anything, but
20 // it is very fast, and makes a nice clean default implementation.  Because it
21 // handles lots of little corner cases, other, more complex, alias analysis
22 // implementations may choose to rely on this pass to resolve these simple and
23 // easy cases.
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CFG.h"
30 #include "llvm/Analysis/CFLAliasAnalysis.h"
31 #include "llvm/Analysis/CaptureTracking.h"
32 #include "llvm/Analysis/GlobalsModRef.h"
33 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
34 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
35 #include "llvm/Analysis/ScopedNoAliasAA.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
38 #include "llvm/Analysis/ValueTracking.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/Dominators.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/LLVMContext.h"
46 #include "llvm/IR/Type.h"
47 #include "llvm/Pass.h"
48 using namespace llvm;
49 
/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
/// Hidden command-line flag; defaults to false (BasicAA enabled).
static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden,
                                    cl::init(false));
54 
55 AAResults::AAResults(AAResults &&Arg) : TLI(Arg.TLI), AAs(std::move(Arg.AAs)) {
56   for (auto &AA : AAs)
57     AA->setAAResults(this);
58 }
59 
AAResults::~AAResults() {
// FIXME: It would be nice to at least clear out the pointers back to this
// aggregation here, but we end up with non-nesting lifetimes in the legacy
// pass manager that prevent this from working. In the legacy pass manager
// we'll end up with dangling references here in some cases.
#if 0
  for (auto &AA : AAs)
    AA->setAAResults(nullptr);
#endif
}
70 
71 //===----------------------------------------------------------------------===//
72 // Default chaining methods
73 //===----------------------------------------------------------------------===//
74 
75 AliasResult AAResults::alias(const MemoryLocation &LocA,
76                              const MemoryLocation &LocB) {
77   for (const auto &AA : AAs) {
78     auto Result = AA->alias(LocA, LocB);
79     if (Result != MayAlias)
80       return Result;
81   }
82   return MayAlias;
83 }
84 
85 bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
86                                        bool OrLocal) {
87   for (const auto &AA : AAs)
88     if (AA->pointsToConstantMemory(Loc, OrLocal))
89       return true;
90 
91   return false;
92 }
93 
94 ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
95   ModRefInfo Result = MRI_ModRef;
96 
97   for (const auto &AA : AAs) {
98     Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));
99 
100     // Early-exit the moment we reach the bottom of the lattice.
101     if (Result == MRI_NoModRef)
102       return Result;
103   }
104 
105   return Result;
106 }
107 
108 ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
109   // We may have two calls
110   if (auto CS = ImmutableCallSite(I)) {
111     // Check if the two calls modify the same memory
112     return getModRefInfo(Call, CS);
113   } else {
114     // Otherwise, check if the call modifies or references the
115     // location this memory access defines.  The best we can say
116     // is that if the call references what this instruction
117     // defines, it must be clobbered by this location.
118     const MemoryLocation DefLoc = MemoryLocation::get(I);
119     if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
120       return MRI_ModRef;
121   }
122   return MRI_NoModRef;
123 }
124 
/// Summarize what call site \p CS may do to memory location \p Loc by
/// intersecting the answers from every registered AA implementation and then
/// refining with the aggregate call-site behavior.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                    const MemoryLocation &Loc) {
  // Start at the conservative top of the mod-ref lattice and intersect
  // downwards with each implementation's answer.
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.
  auto MRB = getModRefBehavior(CS);
  if (MRB == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  if (onlyReadsMemory(MRB))
    Result = ModRefInfo(Result & MRI_Ref);

  if (onlyAccessesArgPointees(MRB)) {
    // The call can only touch memory through its pointer arguments, so it can
    // only affect Loc if one of those arguments aliases it. Accumulate the
    // union of the per-argument mod-ref masks over the aliasing arguments.
    bool DoesAlias = false;
    ModRefInfo AllArgsMask = MRI_NoModRef;
    if (doesAccessArgPointees(MRB)) {
      for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
        const Value *Arg = *AI;
        // Non-pointer arguments cannot reference memory.
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
        MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
        AliasResult ArgAlias = alias(ArgLoc, Loc);
        if (ArgAlias != NoAlias) {
          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
          DoesAlias = true;
          AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
        }
      }
    }
    // No argument aliased Loc, so the call cannot touch it at all.
    if (!DoesAlias)
      return MRI_NoModRef;
    Result = ModRefInfo(Result & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Result & MRI_Mod) &&
      pointsToConstantMemory(Loc, /*OrLocal*/ false))
    Result = ModRefInfo(Result & ~MRI_Mod);

  return Result;
}
177 
/// Summarize what call site \p CS1 may do to the memory accessed by call site
/// \p CS2, by intersecting the answers of every registered AA and then
/// refining with the aggregate behaviors of both calls.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  // Start at the conservative top of the mod-ref lattice and intersect
  // downwards with each implementation's answer.
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If CS1 or CS2 are readnone, they don't interact.
  auto CS1B = getModRefBehavior(CS1);
  if (CS1B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  auto CS2B = getModRefBehavior(CS2);
  if (CS2B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return MRI_NoModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Ref);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        // Non-pointer arguments cannot reference memory.
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
        auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);

        // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
        // of CS1 on that location is the inverse: if CS2 may Mod the location,
        // CS1 can both read and write it; if CS2 may only Ref it, CS1 can at
        // most clobber it.
        ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
        if (ArgMask == MRI_Mod)
          ArgMask = MRI_ModRef;
        else if (ArgMask == MRI_Ref)
          ArgMask = MRI_Mod;

        ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc));

        // Union into the running answer, never exceeding the intersection
        // computed above; stop once that bound is saturated.
        R = ModRefInfo((R | ArgMask) & Result);
        if (R == Result)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        // Non-pointer arguments cannot reference memory.
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
        auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);

        // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
        // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
        ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
        if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
             (ArgR & MRI_ModRef) != MRI_NoModRef) ||
            ((ArgMask & MRI_Ref) != MRI_NoModRef &&
             (ArgR & MRI_Mod) != MRI_NoModRef))
          R = ModRefInfo((R | ArgMask) & Result);

        // Saturated the intersection bound; no further refinement possible.
        if (R == Result)
          break;
      }
    }
    return R;
  }

  return Result;
}
274 
275 FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
276   FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
277 
278   for (const auto &AA : AAs) {
279     Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));
280 
281     // Early-exit the moment we reach the bottom of the lattice.
282     if (Result == FMRB_DoesNotAccessMemory)
283       return Result;
284   }
285 
286   return Result;
287 }
288 
289 FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
290   FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
291 
292   for (const auto &AA : AAs) {
293     Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F));
294 
295     // Early-exit the moment we reach the bottom of the lattice.
296     if (Result == FMRB_DoesNotAccessMemory)
297       return Result;
298   }
299 
300   return Result;
301 }
302 
303 //===----------------------------------------------------------------------===//
304 // Helper method implementation
305 //===----------------------------------------------------------------------===//
306 
307 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
308                                     const MemoryLocation &Loc) {
309   // Be conservative in the face of volatile/atomic.
310   if (!L->isUnordered())
311     return MRI_ModRef;
312 
313   // If the load address doesn't alias the given address, it doesn't read
314   // or write the specified memory.
315   if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
316     return MRI_NoModRef;
317 
318   // Otherwise, a load just reads.
319   return MRI_Ref;
320 }
321 
322 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
323                                     const MemoryLocation &Loc) {
324   // Be conservative in the face of volatile/atomic.
325   if (!S->isUnordered())
326     return MRI_ModRef;
327 
328   if (Loc.Ptr) {
329     // If the store address cannot alias the pointer in question, then the
330     // specified memory cannot be modified by the store.
331     if (!alias(MemoryLocation::get(S), Loc))
332       return MRI_NoModRef;
333 
334     // If the pointer is a pointer to constant memory, then it could not have
335     // been modified by this store.
336     if (pointsToConstantMemory(Loc))
337       return MRI_NoModRef;
338   }
339 
340   // Otherwise, a store just writes.
341   return MRI_Mod;
342 }
343 
344 ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
345                                     const MemoryLocation &Loc) {
346 
347   if (Loc.Ptr) {
348     // If the va_arg address cannot alias the pointer in question, then the
349     // specified memory cannot be accessed by the va_arg.
350     if (!alias(MemoryLocation::get(V), Loc))
351       return MRI_NoModRef;
352 
353     // If the pointer is a pointer to constant memory, then it could not have
354     // been modified by this va_arg.
355     if (pointsToConstantMemory(Loc))
356       return MRI_NoModRef;
357   }
358 
359   // Otherwise, a va_arg reads and writes.
360   return MRI_ModRef;
361 }
362 
363 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
364                                     const MemoryLocation &Loc) {
365   if (Loc.Ptr) {
366     // If the pointer is a pointer to constant memory,
367     // then it could not have been modified by this catchpad.
368     if (pointsToConstantMemory(Loc))
369       return MRI_NoModRef;
370   }
371 
372   // Otherwise, a catchpad reads and writes.
373   return MRI_ModRef;
374 }
375 
376 ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
377                                     const MemoryLocation &Loc) {
378   if (Loc.Ptr) {
379     // If the pointer is a pointer to constant memory,
380     // then it could not have been modified by this catchpad.
381     if (pointsToConstantMemory(Loc))
382       return MRI_NoModRef;
383   }
384 
385   // Otherwise, a catchret reads and writes.
386   return MRI_ModRef;
387 }
388 
389 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
390                                     const MemoryLocation &Loc) {
391   // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
392   if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
393     return MRI_ModRef;
394 
395   // If the cmpxchg address does not alias the location, it does not access it.
396   if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
397     return MRI_NoModRef;
398 
399   return MRI_ModRef;
400 }
401 
402 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
403                                     const MemoryLocation &Loc) {
404   // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
405   if (isStrongerThanMonotonic(RMW->getOrdering()))
406     return MRI_ModRef;
407 
408   // If the atomicrmw address does not alias the location, it does not access it.
409   if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
410     return MRI_NoModRef;
411 
412   return MRI_ModRef;
413 }
414 
/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         OrderedBasicBlock *OBB) {
  // Without a dominator tree we cannot reason about capture ordering.
  if (!DT)
    return MRI_ModRef;

  // The reasoning below only works for identified function-local objects;
  // globals and constants are excluded explicitly.
  const Value *Object =
      GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
      isa<Constant>(Object))
    return MRI_ModRef;

  ImmutableCallSite CS(I);
  if (!CS.getInstruction() || CS.getInstruction() == Object)
    return MRI_ModRef;

  // If the pointer may already be captured before this call, the call could
  // reach it through the escaped copy, so be conservative.
  if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                       /* StoreCaptures */ true, I, DT,
                                       /* include Object */ true,
                                       /* OrderedBasicBlock */ OBB))
    return MRI_ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = MRI_NoModRef;
  for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments.  If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking.  If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
      continue;
    if (CS.doesNotAccessMemory(ArgNo))
      continue;
    if (CS.onlyReadsMemory(ArgNo)) {
      R = MRI_Ref;
      continue;
    }
    // This argument may be written; nothing stronger can be concluded.
    return MRI_ModRef;
  }
  return R;
}
473 
474 /// canBasicBlockModify - Return true if it is possible for execution of the
475 /// specified basic block to modify the location Loc.
476 ///
477 bool AAResults::canBasicBlockModify(const BasicBlock &BB,
478                                     const MemoryLocation &Loc) {
479   return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
480 }
481 
482 /// canInstructionRangeModRef - Return true if it is possible for the
483 /// execution of the specified instructions to mod\ref (according to the
484 /// mode) the location Loc. The instructions to consider are all
485 /// of the instructions in the range of [I1,I2] INCLUSIVE.
486 /// I1 and I2 must be in the same basic block.
487 bool AAResults::canInstructionRangeModRef(const Instruction &I1,
488                                           const Instruction &I2,
489                                           const MemoryLocation &Loc,
490                                           const ModRefInfo Mode) {
491   assert(I1.getParent() == I2.getParent() &&
492          "Instructions not in same basic block!");
493   BasicBlock::const_iterator I = I1.getIterator();
494   BasicBlock::const_iterator E = I2.getIterator();
495   ++E;  // Convert from inclusive to exclusive range.
496 
497   for (; I != E; ++I) // Check every instruction in range
498     if (getModRefInfo(&*I, Loc) & Mode)
499       return true;
500   return false;
501 }
502 
503 // Provide a definition for the root virtual destructor.
504 AAResults::Concept::~Concept() {}
505 
// Provide a definition for the static object used to identify passes; its
// address (not its value) serves as the identifier.
char AAManager::PassID;
508 
namespace {
/// A wrapper pass for external alias analyses. This just squirrels away the
/// callback used to run any analyses and register their results.
struct ExternalAAWrapperPass : ImmutablePass {
  /// Signature of the hook: given the running pass, the function being
  /// analyzed, and the AA aggregation, register any extra results.
  typedef std::function<void(Pass &, Function &, AAResults &)> CallbackT;

  /// The externally supplied callback; may be empty (checked by callers).
  CallbackT CB;

  /// Pass identification; the address of this member identifies the pass.
  static char ID;

  ExternalAAWrapperPass() : ImmutablePass(ID) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }
  explicit ExternalAAWrapperPass(CallbackT CB)
      : ImmutablePass(ID), CB(std::move(CB)) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // This pass computes nothing itself, so it preserves everything.
    AU.setPreservesAll();
  }
};
}
532 
// Define the pass-identification anchor and register the pass with the
// legacy pass registry under the "external-aa" name.
char ExternalAAWrapperPass::ID = 0;
INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)
536 
537 ImmutablePass *
538 llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
539   return new ExternalAAWrapperPass(std::move(Callback));
540 }
541 
/// Construct the wrapper pass and register it with the legacy PassRegistry.
AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}
545 
// Define the pass-identification anchor, then register the pass under "aa"
// along with every AA wrapper pass that runOnFunction may probe.
char AAResultsWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)
560 
561 FunctionPass *llvm::createAAResultsWrapperPass() {
562   return new AAResultsWrapperPass();
563 }
564 
/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI()));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs. These are all
  // optional: each is added only if its wrapper pass was scheduled.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}
614 
/// Declare the (required and optional) analyses consumed by runOnFunction;
/// the optional list must stay in sync with the probes made there.
void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  // BasicAA and TLI are unconditionally requested in runOnFunction.
  AU.addRequired<BasicAAWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAAWrapperPass>();
}
631 
/// Build a stand-alone AAResults aggregation under the legacy pass manager,
/// seeded with the explicitly constructed BasicAA result \p BAR plus any
/// other AA wrapper passes reachable from \p P. Note this list deliberately
/// mirrors llvm::getAAResultsAnalysisUsage (SCEV AA is not consulted here).
AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs; each is
  // optional and only added if its wrapper pass was scheduled.
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());

  return AAR;
}
656 
657 bool llvm::isNoAliasCall(const Value *V) {
658   if (auto CS = ImmutableCallSite(V))
659     return CS.paramHasAttr(0, Attribute::NoAlias);
660   return false;
661 }
662 
663 bool llvm::isNoAliasArgument(const Value *V) {
664   if (const Argument *A = dyn_cast<Argument>(V))
665     return A->hasNoAliasAttr();
666   return false;
667 }
668 
669 bool llvm::isIdentifiedObject(const Value *V) {
670   if (isa<AllocaInst>(V))
671     return true;
672   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
673     return true;
674   if (isNoAliasCall(V))
675     return true;
676   if (const Argument *A = dyn_cast<Argument>(V))
677     return A->hasNoAliasAttr() || A->hasByValAttr();
678   return false;
679 }
680 
681 bool llvm::isIdentifiedFunctionLocal(const Value *V) {
682   return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
683 }
684 
/// Declare the analyses a pass must mark used in order to later call
/// llvm::createLegacyPMAAResults.
void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
  // This function needs to be in sync with llvm::createLegacyPMAAResults -- if
  // more alias analyses are added to llvm::createLegacyPMAAResults, they need
  // to be added here also.
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAAWrapperPass>();
}
696