1 //==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the generic AliasAnalysis interface which is used as the
11 // common interface used by all clients and implementations of alias analysis.
12 //
13 // This file also implements the default version of the AliasAnalysis interface
14 // that is to be used when no other implementation is specified.  This does some
15 // simple tests that detect obvious cases: two different global pointers cannot
16 // alias, a global cannot alias a malloc, two different mallocs cannot alias,
17 // etc.
18 //
19 // This alias analysis implementation really isn't very good for anything, but
20 // it is very fast, and makes a nice clean default implementation.  Because it
21 // handles lots of little corner cases, other, more complex, alias analysis
22 // implementations may choose to rely on this pass to resolve these simple and
23 // easy cases.
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CFLAndersAliasAnalysis.h"
30 #include "llvm/Analysis/CFLSteensAliasAnalysis.h"
31 #include "llvm/Analysis/CaptureTracking.h"
32 #include "llvm/Analysis/GlobalsModRef.h"
33 #include "llvm/Analysis/MemoryLocation.h"
34 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
35 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
36 #include "llvm/Analysis/ScopedNoAliasAA.h"
37 #include "llvm/Analysis/TargetLibraryInfo.h"
38 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
39 #include "llvm/Analysis/ValueTracking.h"
40 #include "llvm/IR/Argument.h"
41 #include "llvm/IR/Attributes.h"
42 #include "llvm/IR/BasicBlock.h"
43 #include "llvm/IR/CallSite.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/Module.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/IR/Value.h"
49 #include "llvm/Pass.h"
50 #include "llvm/Support/AtomicOrdering.h"
51 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/CommandLine.h"
53 #include <algorithm>
54 #include <cassert>
55 #include <functional>
56 #include <iterator>
57 
58 using namespace llvm;
59 
/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
/// Checked both by AAResultsWrapperPass::runOnFunction and
/// createLegacyPMAAResults below.
static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden,
                                    cl::init(false));
64 
AAResults::AAResults(AAResults &&Arg)
    : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {
  // Each aggregated result holds a back-pointer to the aggregation that owns
  // it; after a move they must be re-pointed at the new (moved-to) object.
  for (auto &AA : AAs)
    AA->setAAResults(this);
}
70 
AAResults::~AAResults() {
// FIXME: It would be nice to at least clear out the pointers back to this
// aggregation here, but we end up with non-nesting lifetimes in the legacy
// pass manager that prevent this from working. In the legacy pass manager
// we'll end up with dangling references here in some cases.
#if 0
  for (auto &AA : AAs)
    AA->setAAResults(nullptr);
#endif
}
81 
82 bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
83                            FunctionAnalysisManager::Invalidator &Inv) {
84   // Check if the AA manager itself has been invalidated.
85   auto PAC = PA.getChecker<AAManager>();
86   if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
87     return true; // The manager needs to be blown away, clear everything.
88 
89   // Check all of the dependencies registered.
90   for (AnalysisKey *ID : AADeps)
91     if (Inv.invalidate(ID, F, PA))
92       return true;
93 
94   // Everything we depend on is still fine, so are we. Nothing to invalidate.
95   return false;
96 }
97 
98 //===----------------------------------------------------------------------===//
99 // Default chaining methods
100 //===----------------------------------------------------------------------===//
101 
102 AliasResult AAResults::alias(const MemoryLocation &LocA,
103                              const MemoryLocation &LocB) {
104   for (const auto &AA : AAs) {
105     auto Result = AA->alias(LocA, LocB);
106     if (Result != MayAlias)
107       return Result;
108   }
109   return MayAlias;
110 }
111 
112 bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
113                                        bool OrLocal) {
114   for (const auto &AA : AAs)
115     if (AA->pointsToConstantMemory(Loc, OrLocal))
116       return true;
117 
118   return false;
119 }
120 
121 ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
122   ModRefInfo Result = MRI_ModRef;
123 
124   for (const auto &AA : AAs) {
125     Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));
126 
127     // Early-exit the moment we reach the bottom of the lattice.
128     if (Result == MRI_NoModRef)
129       return Result;
130   }
131 
132   return Result;
133 }
134 
135 ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
136   // We may have two calls
137   if (auto CS = ImmutableCallSite(I)) {
138     // Check if the two calls modify the same memory
139     return getModRefInfo(CS, Call);
140   } else if (I->isFenceLike()) {
141     // If this is a fence, just return MRI_ModRef.
142     return MRI_ModRef;
143   } else {
144     // Otherwise, check if the call modifies or references the
145     // location this memory access defines.  The best we can say
146     // is that if the call references what this instruction
147     // defines, it must be clobbered by this location.
148     const MemoryLocation DefLoc = MemoryLocation::get(I);
149     if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
150       return MRI_ModRef;
151   }
152   return MRI_NoModRef;
153 }
154 
/// Collect the best mod/ref answer for a call site against a memory location
/// by intersecting every aggregated AA's answer and then refining with the
/// call's overall memory behavior and per-argument information.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                    const MemoryLocation &Loc) {
  // Start at the top of the lattice (ModRef) and narrow from there.
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.
  auto MRB = getModRefBehavior(CS);
  if (MRB == FMRB_DoesNotAccessMemory ||
      MRB == FMRB_OnlyAccessesInaccessibleMem)
    return MRI_NoModRef;

  if (onlyReadsMemory(MRB))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(MRB))
    Result = ModRefInfo(Result & MRI_Mod);

  // If the callee only touches memory through its pointer arguments, the only
  // way it can affect Loc is via an argument that aliases Loc; accumulate the
  // per-argument mod/ref masks of exactly those arguments.
  if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
    bool DoesAlias = false;
    ModRefInfo AllArgsMask = MRI_NoModRef;
    if (doesAccessArgPointees(MRB)) {
      for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
        const Value *Arg = *AI;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
        MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
        AliasResult ArgAlias = alias(ArgLoc, Loc);
        if (ArgAlias != NoAlias) {
          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
          DoesAlias = true;
          AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
        }
      }
    }
    // No argument may alias Loc, so the call can't touch it at all.
    if (!DoesAlias)
      return MRI_NoModRef;
    Result = ModRefInfo(Result & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Result & MRI_Mod) &&
      pointsToConstantMemory(Loc, /*OrLocal*/ false))
    Result = ModRefInfo(Result & ~MRI_Mod);

  return Result;
}
210 
/// Answer how CS1 may depend on memory accessed by CS2, intersecting the
/// aggregated AAs' answers and then refining with each call's overall memory
/// behavior and per-argument masks.
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  // Start at the top of the lattice (ModRef) and narrow from there.
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If CS1 or CS2 are readnone, they don't interact.
  auto CS1B = getModRefBehavior(CS1);
  if (CS1B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  auto CS2B = getModRefBehavior(CS2);
  if (CS2B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return MRI_NoModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Mod);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
        auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);

        // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
        // of CS1 on that location is the inverse: if CS2 writes the location,
        // CS1 depends on it whether it reads or writes; if CS2 only reads it,
        // CS1 only depends on it if CS1 writes it.
        ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
        if (ArgMask == MRI_Mod)
          ArgMask = MRI_ModRef;
        else if (ArgMask == MRI_Ref)
          ArgMask = MRI_Mod;

        ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc));

        // Stop once the union over arguments has saturated to Result; no
        // further argument can make the answer any more conservative.
        R = ModRefInfo((R | ArgMask) & Result);
        if (R == Result)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
        auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);

        // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
        // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
        ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
        if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
             (ArgR & MRI_ModRef) != MRI_NoModRef) ||
            ((ArgMask & MRI_Ref) != MRI_NoModRef &&
             (ArgR & MRI_Mod) != MRI_NoModRef))
          R = ModRefInfo((R | ArgMask) & Result);

        // As above: once we've saturated to Result, stop scanning arguments.
        if (R == Result)
          break;
      }
    }
    return R;
  }

  return Result;
}
309 
310 FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
311   FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
312 
313   for (const auto &AA : AAs) {
314     Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));
315 
316     // Early-exit the moment we reach the bottom of the lattice.
317     if (Result == FMRB_DoesNotAccessMemory)
318       return Result;
319   }
320 
321   return Result;
322 }
323 
324 FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
325   FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;
326 
327   for (const auto &AA : AAs) {
328     Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F));
329 
330     // Early-exit the moment we reach the bottom of the lattice.
331     if (Result == FMRB_DoesNotAccessMemory)
332       return Result;
333   }
334 
335   return Result;
336 }
337 
338 //===----------------------------------------------------------------------===//
339 // Helper method implementation
340 //===----------------------------------------------------------------------===//
341 
342 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
343                                     const MemoryLocation &Loc) {
344   // Be conservative in the face of atomic.
345   if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
346     return MRI_ModRef;
347 
348   // If the load address doesn't alias the given address, it doesn't read
349   // or write the specified memory.
350   if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
351     return MRI_NoModRef;
352 
353   // Otherwise, a load just reads.
354   return MRI_Ref;
355 }
356 
357 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
358                                     const MemoryLocation &Loc) {
359   // Be conservative in the face of atomic.
360   if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
361     return MRI_ModRef;
362 
363   if (Loc.Ptr) {
364     // If the store address cannot alias the pointer in question, then the
365     // specified memory cannot be modified by the store.
366     if (!alias(MemoryLocation::get(S), Loc))
367       return MRI_NoModRef;
368 
369     // If the pointer is a pointer to constant memory, then it could not have
370     // been modified by this store.
371     if (pointsToConstantMemory(Loc))
372       return MRI_NoModRef;
373   }
374 
375   // Otherwise, a store just writes.
376   return MRI_Mod;
377 }
378 
379 ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
380   // If we know that the location is a constant memory location, the fence
381   // cannot modify this location.
382   if (Loc.Ptr && pointsToConstantMemory(Loc))
383     return MRI_Ref;
384   return MRI_ModRef;
385 }
386 
387 ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
388                                     const MemoryLocation &Loc) {
389   if (Loc.Ptr) {
390     // If the va_arg address cannot alias the pointer in question, then the
391     // specified memory cannot be accessed by the va_arg.
392     if (!alias(MemoryLocation::get(V), Loc))
393       return MRI_NoModRef;
394 
395     // If the pointer is a pointer to constant memory, then it could not have
396     // been modified by this va_arg.
397     if (pointsToConstantMemory(Loc))
398       return MRI_NoModRef;
399   }
400 
401   // Otherwise, a va_arg reads and writes.
402   return MRI_ModRef;
403 }
404 
405 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
406                                     const MemoryLocation &Loc) {
407   if (Loc.Ptr) {
408     // If the pointer is a pointer to constant memory,
409     // then it could not have been modified by this catchpad.
410     if (pointsToConstantMemory(Loc))
411       return MRI_NoModRef;
412   }
413 
414   // Otherwise, a catchpad reads and writes.
415   return MRI_ModRef;
416 }
417 
418 ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
419                                     const MemoryLocation &Loc) {
420   if (Loc.Ptr) {
421     // If the pointer is a pointer to constant memory,
422     // then it could not have been modified by this catchpad.
423     if (pointsToConstantMemory(Loc))
424       return MRI_NoModRef;
425   }
426 
427   // Otherwise, a catchret reads and writes.
428   return MRI_ModRef;
429 }
430 
431 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
432                                     const MemoryLocation &Loc) {
433   // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
434   if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
435     return MRI_ModRef;
436 
437   // If the cmpxchg address does not alias the location, it does not access it.
438   if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
439     return MRI_NoModRef;
440 
441   return MRI_ModRef;
442 }
443 
444 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
445                                     const MemoryLocation &Loc) {
446   // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
447   if (isStrongerThanMonotonic(RMW->getOrdering()))
448     return MRI_ModRef;
449 
450   // If the atomicrmw address does not alias the location, it does not access it.
451   if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
452     return MRI_NoModRef;
453 
454   return MRI_ModRef;
455 }
456 
/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         OrderedBasicBlock *OBB) {
  // Without a dominator tree we can't run the capture test below.
  if (!DT)
    return MRI_ModRef;

  // Only identified, non-global, non-constant objects (e.g. allocas) can be
  // reasoned about by tracking when they are captured.
  const Value *Object =
      GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
      isa<Constant>(Object))
    return MRI_ModRef;

  ImmutableCallSite CS(I);
  if (!CS.getInstruction() || CS.getInstruction() == Object)
    return MRI_ModRef;

  // If the pointer may already be captured before this call, the call could
  // reach it through some other alias; give up.
  if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                 /* StoreCaptures */ true, I, DT,
                                 /* include Object */ true,
                                 /* OrderedBasicBlock */ OBB))
    return MRI_ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = MRI_NoModRef;
  // The object is uncaptured at this call, so the only way the call can touch
  // it is through a pointer argument; inspect each one.
  for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments.  If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.doesNotCapture(ArgNo) &&
         ArgNo < CS.getNumArgOperands() && !CS.isByValArgument(ArgNo)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking.  If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
      continue;
    if (CS.doesNotAccessMemory(ArgNo))
      continue;
    if (CS.onlyReadsMemory(ArgNo)) {
      R = MRI_Ref;
      continue;
    }
    // An aliasing argument that may be written: nothing better than ModRef.
    return MRI_ModRef;
  }
  return R;
}
516 
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
/// Simply delegates to canInstructionRangeModRef over the whole block with
/// MRI_Mod as the query mode.
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
}
524 
525 /// canInstructionRangeModRef - Return true if it is possible for the
526 /// execution of the specified instructions to mod\ref (according to the
527 /// mode) the location Loc. The instructions to consider are all
528 /// of the instructions in the range of [I1,I2] INCLUSIVE.
529 /// I1 and I2 must be in the same basic block.
530 bool AAResults::canInstructionRangeModRef(const Instruction &I1,
531                                           const Instruction &I2,
532                                           const MemoryLocation &Loc,
533                                           const ModRefInfo Mode) {
534   assert(I1.getParent() == I2.getParent() &&
535          "Instructions not in same basic block!");
536   BasicBlock::const_iterator I = I1.getIterator();
537   BasicBlock::const_iterator E = I2.getIterator();
538   ++E;  // Convert from inclusive to exclusive range.
539 
540   for (; I != E; ++I) // Check every instruction in range
541     if (getModRefInfo(&*I, Loc) & Mode)
542       return true;
543   return false;
544 }
545 
// Provide a definition for the root virtual destructor (the Concept class is
// the type-erased interface each AA result is wrapped in).
AAResults::Concept::~Concept() = default;

// Provide a definition for the static object used to identify passes.
AnalysisKey AAManager::Key;
551 
namespace {

/// A wrapper pass for external alias analyses. This just squirrels away the
/// callback used to run any analyses and register their results.
struct ExternalAAWrapperPass : ImmutablePass {
  /// Signature of the hook invoked by AAResultsWrapperPass::runOnFunction so
  /// an out-of-tree AA can add its results to the aggregation.
  using CallbackT = std::function<void(Pass &, Function &, AAResults &)>;

  // The registered callback; may be empty, in which case this pass is inert.
  CallbackT CB;

  // Pass identification, replacement for typeid.
  static char ID;

  ExternalAAWrapperPass() : ImmutablePass(ID) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }

  explicit ExternalAAWrapperPass(CallbackT CB)
      : ImmutablePass(ID), CB(std::move(CB)) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // This pass only stores a callback; it neither reads nor mutates the IR.
    AU.setPreservesAll();
  }
};

} // end anonymous namespace
578 
char ExternalAAWrapperPass::ID = 0;

INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

/// Create a wrapper pass carrying \p Callback, which runOnFunction will
/// invoke to let an external AA register its results with the aggregation.
ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}
588 
AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}

char AAResultsWrapperPass::ID = 0;

// Register this pass and the AA wrappers it may aggregate; the dependency
// list must mirror the getAnalysisIfAvailable calls in runOnFunction.
INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLAndersAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLSteensAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)

FunctionPass *llvm::createAAResultsWrapperPass() {
  return new AAResultsWrapperPass();
}
612 
613 /// Run the wrapper pass to rebuild an aggregation over known AA passes.
614 ///
615 /// This is the legacy pass manager's interface to the new-style AA results
616 /// aggregation object. Because this is somewhat shoe-horned into the legacy
617 /// pass manager, we hard code all the specific alias analyses available into
618 /// it. While the particular set enabled is configured via commandline flags,
619 /// adding a new alias analysis to LLVM will require adding support for it to
620 /// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI()));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs. Each is optional:
  // it is only aggregated if the corresponding wrapper pass was scheduled.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}
664 
void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  // TLI and BasicAA are hard requirements; everything else below is optional.
  AU.addRequired<BasicAAWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}
682 
/// Build an AAResults aggregation for legacy-PM clients that already hold an
/// explicitly constructed BasicAAResult, probing the same optional wrapper
/// passes as AAResultsWrapperPass::runOnFunction.
/// NOTE(review): unlike runOnFunction, SCEVAAWrapperPass is not probed here —
/// confirm whether that omission is intentional before relying on it.
AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs.
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());

  return AAR;
}
709 
710 bool llvm::isNoAliasCall(const Value *V) {
711   if (auto CS = ImmutableCallSite(V))
712     return CS.hasRetAttr(Attribute::NoAlias);
713   return false;
714 }
715 
716 bool llvm::isNoAliasArgument(const Value *V) {
717   if (const Argument *A = dyn_cast<Argument>(V))
718     return A->hasNoAliasAttr();
719   return false;
720 }
721 
722 bool llvm::isIdentifiedObject(const Value *V) {
723   if (isa<AllocaInst>(V))
724     return true;
725   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
726     return true;
727   if (isNoAliasCall(V))
728     return true;
729   if (const Argument *A = dyn_cast<Argument>(V))
730     return A->hasNoAliasAttr() || A->hasByValAttr();
731   return false;
732 }
733 
734 bool llvm::isIdentifiedFunctionLocal(const Value *V) {
735   return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
736 }
737 
void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
  // This function needs to be in sync with llvm::createLegacyPMAAResults -- if
  // more alias analyses are added to llvm::createLegacyPMAAResults, they need
  // to be added here also.
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  // Mark each optional AA as used so the legacy pass manager preserves it for
  // createLegacyPMAAResults to probe.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}
750