1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CFG.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/KnownBits.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <utility>
62 
63 #define DEBUG_TYPE "basicaa"
64 
65 using namespace llvm;
66 
67 /// Enable analysis of recursive PHI nodes.
68 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69                                           cl::init(true));
70 
71 /// By default, even on 32-bit architectures we use 64-bit integers for
72 /// calculations. This will allow us to more-aggressively decompose indexing
73 /// expressions calculated using i64 values (e.g., long long in C) which is
74 /// common enough to worry about.
75 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
76                                         cl::Hidden, cl::init(true));
77 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
78                                     cl::Hidden, cl::init(false));
79 
80 /// SearchLimitReached / SearchTimes shows how often the limit of
81 /// to decompose GEPs is reached. It will affect the precision
82 /// of basic alias analysis.
83 STATISTIC(SearchLimitReached, "Number of times the limit to "
84                               "decompose GEPs is reached");
85 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
86 
87 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
88 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
89 /// careful with value equivalence. We use reachability to make sure a value
90 /// cannot be involved in a cycle.
91 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
92 
93 // The max limit of the search depth in DecomposeGEPExpression() and
94 // getUnderlyingObject(), both functions need to use the same search
95 // depth otherwise the algorithm in aliasGEP will assert.
96 static const unsigned MaxLookupSearchDepth = 6;
97 
98 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
99                                FunctionAnalysisManager::Invalidator &Inv) {
100   // We don't care if this analysis itself is preserved, it has no state. But
101   // we need to check that the analyses it depends on have been. Note that we
102   // may be created without handles to some analyses and in that case don't
103   // depend on them.
104   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
105       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
106       (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
107       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
108     return true;
109 
110   // Otherwise this analysis result remains valid.
111   return false;
112 }
113 
114 //===----------------------------------------------------------------------===//
115 // Useful predicates
116 //===----------------------------------------------------------------------===//
117 
118 /// Returns true if the pointer is to a function-local object that never
119 /// escapes from the function.
120 static bool isNonEscapingLocalObject(
121     const Value *V,
122     SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr) {
123   SmallDenseMap<const Value *, bool, 8>::iterator CacheIt;
124   if (IsCapturedCache) {
125     bool Inserted;
126     std::tie(CacheIt, Inserted) = IsCapturedCache->insert({V, false});
127     if (!Inserted)
128       // Found cached result, return it!
129       return CacheIt->second;
130   }
131 
132   // If this is a local allocation, check to see if it escapes.
133   if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
134     // Set StoreCaptures to True so that we can assume in our callers that the
135     // pointer is not the result of a load instruction. Currently
136     // PointerMayBeCaptured doesn't have any special analysis for the
137     // StoreCaptures=false case; if it did, our callers could be refined to be
138     // more precise.
139     auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
140     if (IsCapturedCache)
141       CacheIt->second = Ret;
142     return Ret;
143   }
144 
145   // If this is an argument that corresponds to a byval or noalias argument,
146   // then it has not escaped before entering the function.  Check if it escapes
147   // inside the function.
148   if (const Argument *A = dyn_cast<Argument>(V))
149     if (A->hasByValAttr() || A->hasNoAliasAttr()) {
150       // Note even if the argument is marked nocapture, we still need to check
151       // for copies made inside the function. The nocapture attribute only
152       // specifies that there are no copies made that outlive the function.
153       auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
154       if (IsCapturedCache)
155         CacheIt->second = Ret;
156       return Ret;
157     }
158 
159   return false;
160 }
161 
162 /// Returns true if the pointer is one which would have been considered an
163 /// escape by isNonEscapingLocalObject.
164 static bool isEscapeSource(const Value *V) {
165   if (isa<CallBase>(V))
166     return true;
167 
168   if (isa<Argument>(V))
169     return true;
170 
171   // The load case works because isNonEscapingLocalObject considers all
172   // stores to be escapes (it passes true for the StoreCaptures argument
173   // to PointerMayBeCaptured).
174   if (isa<LoadInst>(V))
175     return true;
176 
177   return false;
178 }
179 
180 /// Returns the size of the object specified by V or UnknownSize if unknown.
181 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
182                               const TargetLibraryInfo &TLI,
183                               bool NullIsValidLoc,
184                               bool RoundToAlign = false) {
185   uint64_t Size;
186   ObjectSizeOpts Opts;
187   Opts.RoundToAlign = RoundToAlign;
188   Opts.NullIsUnknownSize = NullIsValidLoc;
189   if (getObjectSize(V, Size, DL, &TLI, Opts))
190     return Size;
191   return MemoryLocation::UnknownSize;
192 }
193 
194 /// Returns true if we can prove that the object specified by V is smaller than
195 /// Size.
196 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
197                                 const DataLayout &DL,
198                                 const TargetLibraryInfo &TLI,
199                                 bool NullIsValidLoc) {
200   // Note that the meanings of the "object" are slightly different in the
201   // following contexts:
202   //    c1: llvm::getObjectSize()
203   //    c2: llvm.objectsize() intrinsic
204   //    c3: isObjectSmallerThan()
205   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
206   // refers to the "entire object".
207   //
208   //  Consider this example:
209   //     char *p = (char*)malloc(100)
210   //     char *q = p+80;
211   //
212   //  In the context of c1 and c2, the "object" pointed by q refers to the
213   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
214   //
215   //  However, in the context of c3, the "object" refers to the chunk of memory
216   // being allocated. So, the "object" has 100 bytes, and q points to the middle
217   // the "object". In case q is passed to isObjectSmallerThan() as the 1st
218   // parameter, before the llvm::getObjectSize() is called to get the size of
219   // entire object, we should:
220   //    - either rewind the pointer q to the base-address of the object in
221   //      question (in this case rewind to p), or
222   //    - just give up. It is up to caller to make sure the pointer is pointing
223   //      to the base address the object.
224   //
225   // We go for 2nd option for simplicity.
226   if (!isIdentifiedObject(V))
227     return false;
228 
229   // This function needs to use the aligned object size because we allow
230   // reads a bit past the end given sufficient alignment.
231   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
232                                       /*RoundToAlign*/ true);
233 
234   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
235 }
236 
237 /// Return the minimal extent from \p V to the end of the underlying object,
238 /// assuming the result is used in an aliasing query. E.g., we do use the query
239 /// location size and the fact that null pointers cannot alias here.
240 static uint64_t getMinimalExtentFrom(const Value &V,
241                                      const LocationSize &LocSize,
242                                      const DataLayout &DL,
243                                      bool NullIsValidLoc) {
244   // If we have dereferenceability information we know a lower bound for the
245   // extent as accesses for a lower offset would be valid. We need to exclude
246   // the "or null" part if null is a valid pointer.
247   bool CanBeNull;
248   uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
249   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
250   // If queried with a precise location size, we assume that location size to be
251   // accessed, thus valid.
252   if (LocSize.isPrecise())
253     DerefBytes = std::max(DerefBytes, LocSize.getValue());
254   return DerefBytes;
255 }
256 
257 /// Returns true if we can prove that the object specified by V has size Size.
258 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
259                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
260   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
261   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
262 }
263 
264 //===----------------------------------------------------------------------===//
265 // GetElementPtr Instruction Decomposition and Analysis
266 //===----------------------------------------------------------------------===//
267 
268 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
269 /// B are constant integers.
270 ///
271 /// Returns the scale and offset values as APInts and return V as a Value*, and
272 /// return whether we looked through any sign or zero extends.  The incoming
273 /// Value is known to have IntegerType, and it may already be sign or zero
274 /// extended.
275 ///
276 /// Note that this looks through extends, so the high bits may not be
277 /// represented in the result.
278 /*static*/ const Value *BasicAAResult::GetLinearExpression(
279     const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
280     unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
281     AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
282   assert(V->getType()->isIntegerTy() && "Not an integer value");
283 
284   // Limit our recursion depth.
285   if (Depth == 6) {
286     Scale = 1;
287     Offset = 0;
288     return V;
289   }
290 
291   if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
292     // If it's a constant, just convert it to an offset and remove the variable.
293     // If we've been called recursively, the Offset bit width will be greater
294     // than the constant's (the Offset's always as wide as the outermost call),
295     // so we'll zext here and process any extension in the isa<SExtInst> &
296     // isa<ZExtInst> cases below.
297     Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
298     assert(Scale == 0 && "Constant values don't have a scale");
299     return V;
300   }
301 
302   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
303     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
304       // If we've been called recursively, then Offset and Scale will be wider
305       // than the BOp operands. We'll always zext it here as we'll process sign
306       // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
307       APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());
308 
309       switch (BOp->getOpcode()) {
310       default:
311         // We don't understand this instruction, so we can't decompose it any
312         // further.
313         Scale = 1;
314         Offset = 0;
315         return V;
316       case Instruction::Or:
317         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
318         // analyze it.
319         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
320                                BOp, DT)) {
321           Scale = 1;
322           Offset = 0;
323           return V;
324         }
325         LLVM_FALLTHROUGH;
326       case Instruction::Add:
327         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
328                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
329         Offset += RHS;
330         break;
331       case Instruction::Sub:
332         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
333                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
334         Offset -= RHS;
335         break;
336       case Instruction::Mul:
337         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
338                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
339         Offset *= RHS;
340         Scale *= RHS;
341         break;
342       case Instruction::Shl:
343         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
344                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
345 
346         // We're trying to linearize an expression of the kind:
347         //   shl i8 -128, 36
348         // where the shift count exceeds the bitwidth of the type.
349         // We can't decompose this further (the expression would return
350         // a poison value).
351         if (Offset.getBitWidth() < RHS.getLimitedValue() ||
352             Scale.getBitWidth() < RHS.getLimitedValue()) {
353           Scale = 1;
354           Offset = 0;
355           return V;
356         }
357 
358         Offset <<= RHS.getLimitedValue();
359         Scale <<= RHS.getLimitedValue();
360         // the semantics of nsw and nuw for left shifts don't match those of
361         // multiplications, so we won't propagate them.
362         NSW = NUW = false;
363         return V;
364       }
365 
366       if (isa<OverflowingBinaryOperator>(BOp)) {
367         NUW &= BOp->hasNoUnsignedWrap();
368         NSW &= BOp->hasNoSignedWrap();
369       }
370       return V;
371     }
372   }
373 
374   // Since GEP indices are sign extended anyway, we don't care about the high
375   // bits of a sign or zero extended value - just scales and offsets.  The
376   // extensions have to be consistent though.
377   if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
378     Value *CastOp = cast<CastInst>(V)->getOperand(0);
379     unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
380     unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
381     unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
382     const Value *Result =
383         GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
384                             Depth + 1, AC, DT, NSW, NUW);
385 
386     // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
387     // by just incrementing the number of bits we've extended by.
388     unsigned ExtendedBy = NewWidth - SmallWidth;
389 
390     if (isa<SExtInst>(V) && ZExtBits == 0) {
391       // sext(sext(%x, a), b) == sext(%x, a + b)
392 
393       if (NSW) {
394         // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
395         // into sext(%x) + sext(c). We'll sext the Offset ourselves:
396         unsigned OldWidth = Offset.getBitWidth();
397         Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
398       } else {
399         // We may have signed-wrapped, so don't decompose sext(%x + c) into
400         // sext(%x) + sext(c)
401         Scale = 1;
402         Offset = 0;
403         Result = CastOp;
404         ZExtBits = OldZExtBits;
405         SExtBits = OldSExtBits;
406       }
407       SExtBits += ExtendedBy;
408     } else {
409       // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)
410 
411       if (!NUW) {
412         // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
413         // zext(%x) + zext(c)
414         Scale = 1;
415         Offset = 0;
416         Result = CastOp;
417         ZExtBits = OldZExtBits;
418         SExtBits = OldSExtBits;
419       }
420       ZExtBits += ExtendedBy;
421     }
422 
423     return Result;
424   }
425 
426   Scale = 1;
427   Offset = 0;
428   return V;
429 }
430 
431 /// To ensure a pointer offset fits in an integer of size PointerSize
432 /// (in bits) when that size is smaller than the maximum pointer size. This is
433 /// an issue, for example, in particular for 32b pointers with negative indices
434 /// that rely on two's complement wrap-arounds for precise alias information
435 /// where the maximum pointer size is 64b.
436 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
437   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
438   unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
439   return (Offset << ShiftBits).ashr(ShiftBits);
440 }
441 
442 static unsigned getMaxPointerSize(const DataLayout &DL) {
443   unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
444   if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
445   if (DoubleCalcBits) MaxPointerSize *= 2;
446 
447   return MaxPointerSize;
448 }
449 
450 /// If V is a symbolic pointer expression, decompose it into a base pointer
451 /// with a constant offset and a number of scaled symbolic offsets.
452 ///
453 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
454 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
455 /// specified amount, but which may have other unrepresented high bits. As
456 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
457 ///
458 /// When DataLayout is around, this function is capable of analyzing everything
459 /// that getUnderlyingObject can look through. To be able to do that
460 /// getUnderlyingObject and DecomposeGEPExpression must use the same search
461 /// depth (MaxLookupSearchDepth). When DataLayout not is around, it just looks
462 /// through pointer casts.
463 bool BasicAAResult::DecomposeGEPExpression(const Value *V,
464        DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
465        DominatorTree *DT) {
466   // Limit recursion depth to limit compile time in crazy cases.
467   unsigned MaxLookup = MaxLookupSearchDepth;
468   SearchTimes++;
469 
470   unsigned MaxPointerSize = getMaxPointerSize(DL);
471   Decomposed.VarIndices.clear();
472   do {
473     // See if this is a bitcast or GEP.
474     const Operator *Op = dyn_cast<Operator>(V);
475     if (!Op) {
476       // The only non-operator case we can handle are GlobalAliases.
477       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
478         if (!GA->isInterposable()) {
479           V = GA->getAliasee();
480           continue;
481         }
482       }
483       Decomposed.Base = V;
484       return false;
485     }
486 
487     if (Op->getOpcode() == Instruction::BitCast ||
488         Op->getOpcode() == Instruction::AddrSpaceCast) {
489       V = Op->getOperand(0);
490       continue;
491     }
492 
493     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
494     if (!GEPOp) {
495       if (const auto *PHI = dyn_cast<PHINode>(V)) {
496         // Look through single-arg phi nodes created by LCSSA.
497         if (PHI->getNumIncomingValues() == 1) {
498           V = PHI->getIncomingValue(0);
499           continue;
500         }
501       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
502         // CaptureTracking can know about special capturing properties of some
503         // intrinsics like launder.invariant.group, that can't be expressed with
504         // the attributes, but have properties like returning aliasing pointer.
505         // Because some analysis may assume that nocaptured pointer is not
506         // returned from some special intrinsic (because function would have to
507         // be marked with returns attribute), it is crucial to use this function
508         // because it should be in sync with CaptureTracking. Not using it may
509         // cause weird miscompilations where 2 aliasing pointers are assumed to
510         // noalias.
511         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
512           V = RP;
513           continue;
514         }
515       }
516 
517       Decomposed.Base = V;
518       return false;
519     }
520 
521     // Don't attempt to analyze GEPs over unsized objects.
522     if (!GEPOp->getSourceElementType()->isSized()) {
523       Decomposed.Base = V;
524       return false;
525     }
526 
527     // Don't attempt to analyze GEPs if index scale is not a compile-time
528     // constant.
529     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
530       Decomposed.Base = V;
531       Decomposed.HasCompileTimeConstantScale = false;
532       return false;
533     }
534 
535     unsigned AS = GEPOp->getPointerAddressSpace();
536     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
537     gep_type_iterator GTI = gep_type_begin(GEPOp);
538     unsigned PointerSize = DL.getPointerSizeInBits(AS);
539     // Assume all GEP operands are constants until proven otherwise.
540     bool GepHasConstantOffset = true;
541     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
542          I != E; ++I, ++GTI) {
543       const Value *Index = *I;
544       // Compute the (potentially symbolic) offset in bytes for this index.
545       if (StructType *STy = GTI.getStructTypeOrNull()) {
546         // For a struct, add the member offset.
547         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
548         if (FieldNo == 0)
549           continue;
550 
551         Decomposed.StructOffset +=
552           DL.getStructLayout(STy)->getElementOffset(FieldNo);
553         continue;
554       }
555 
556       // For an array/pointer, add the element offset, explicitly scaled.
557       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
558         if (CIdx->isZero())
559           continue;
560         Decomposed.OtherOffset +=
561             (DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
562              CIdx->getValue().sextOrSelf(MaxPointerSize))
563                 .sextOrTrunc(MaxPointerSize);
564         continue;
565       }
566 
567       GepHasConstantOffset = false;
568 
569       APInt Scale(MaxPointerSize,
570                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
571       unsigned ZExtBits = 0, SExtBits = 0;
572 
573       // If the integer type is smaller than the pointer size, it is implicitly
574       // sign extended to pointer size.
575       unsigned Width = Index->getType()->getIntegerBitWidth();
576       if (PointerSize > Width)
577         SExtBits += PointerSize - Width;
578 
579       // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
580       APInt IndexScale(Width, 0), IndexOffset(Width, 0);
581       bool NSW = true, NUW = true;
582       const Value *OrigIndex = Index;
583       Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
584                                   SExtBits, DL, 0, AC, DT, NSW, NUW);
585 
586       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
587       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
588 
589       // It can be the case that, even through C1*V+C2 does not overflow for
590       // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
591       // decompose the expression in this way.
592       //
593       // FIXME: C1*Scale and the other operations in the decomposed
594       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
595       // possibility.
596       APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
597                                  Scale.sext(MaxPointerSize*2);
598       if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
599         Index = OrigIndex;
600         IndexScale = 1;
601         IndexOffset = 0;
602 
603         ZExtBits = SExtBits = 0;
604         if (PointerSize > Width)
605           SExtBits += PointerSize - Width;
606       } else {
607         Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
608         Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
609       }
610 
611       // If we already had an occurrence of this index variable, merge this
612       // scale into it.  For example, we want to handle:
613       //   A[x][x] -> x*16 + x*4 -> x*20
614       // This also ensures that 'x' only appears in the index list once.
615       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
616         if (Decomposed.VarIndices[i].V == Index &&
617             Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
618             Decomposed.VarIndices[i].SExtBits == SExtBits) {
619           Scale += Decomposed.VarIndices[i].Scale;
620           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
621           break;
622         }
623       }
624 
625       // Make sure that we have a scale that makes sense for this target's
626       // pointer size.
627       Scale = adjustToPointerSize(Scale, PointerSize);
628 
629       if (!!Scale) {
630         VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
631         Decomposed.VarIndices.push_back(Entry);
632       }
633     }
634 
635     // Take care of wrap-arounds
636     if (GepHasConstantOffset) {
637       Decomposed.StructOffset =
638           adjustToPointerSize(Decomposed.StructOffset, PointerSize);
639       Decomposed.OtherOffset =
640           adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
641     }
642 
643     // Analyze the base pointer next.
644     V = GEPOp->getOperand(0);
645   } while (--MaxLookup);
646 
647   // If the chain of expressions is too deep, just return early.
648   Decomposed.Base = V;
649   SearchLimitReached++;
650   return true;
651 }
652 
653 /// Returns whether the given pointer value points to memory that is local to
654 /// the function, with global constants being considered local to all
655 /// functions.
656 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
657                                            AAQueryInfo &AAQI, bool OrLocal) {
658   assert(Visited.empty() && "Visited must be cleared after use!");
659 
660   unsigned MaxLookup = 8;
661   SmallVector<const Value *, 16> Worklist;
662   Worklist.push_back(Loc.Ptr);
663   do {
664     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
665     if (!Visited.insert(V).second) {
666       Visited.clear();
667       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
668     }
669 
670     // An alloca instruction defines local memory.
671     if (OrLocal && isa<AllocaInst>(V))
672       continue;
673 
674     // A global constant counts as local memory for our purposes.
675     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
676       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
677       // global to be marked constant in some modules and non-constant in
678       // others.  GV may even be a declaration, not a definition.
679       if (!GV->isConstant()) {
680         Visited.clear();
681         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
682       }
683       continue;
684     }
685 
686     // If both select values point to local memory, then so does the select.
687     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
688       Worklist.push_back(SI->getTrueValue());
689       Worklist.push_back(SI->getFalseValue());
690       continue;
691     }
692 
693     // If all values incoming to a phi node point to local memory, then so does
694     // the phi.
695     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
696       // Don't bother inspecting phi nodes with many operands.
697       if (PN->getNumIncomingValues() > MaxLookup) {
698         Visited.clear();
699         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
700       }
701       for (Value *IncValue : PN->incoming_values())
702         Worklist.push_back(IncValue);
703       continue;
704     }
705 
706     // Otherwise be conservative.
707     Visited.clear();
708     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
709   } while (!Worklist.empty() && --MaxLookup);
710 
711   Visited.clear();
712   return Worklist.empty();
713 }
714 
715 /// Returns the behavior when calling the given call site.
716 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
717   if (Call->doesNotAccessMemory())
718     // Can't do better than this.
719     return FMRB_DoesNotAccessMemory;
720 
721   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
722 
723   // If the callsite knows it only reads memory, don't return worse
724   // than that.
725   if (Call->onlyReadsMemory())
726     Min = FMRB_OnlyReadsMemory;
727   else if (Call->doesNotReadMemory())
728     Min = FMRB_OnlyWritesMemory;
729 
730   if (Call->onlyAccessesArgMemory())
731     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
732   else if (Call->onlyAccessesInaccessibleMemory())
733     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
734   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
735     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
736 
737   // If the call has operand bundles then aliasing attributes from the function
738   // it calls do not directly apply to the call.  This can be made more precise
739   // in the future.
740   if (!Call->hasOperandBundles())
741     if (const Function *F = Call->getCalledFunction())
742       Min =
743           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
744 
745   return Min;
746 }
747 
748 /// Returns the behavior when calling the given function. For use when the call
749 /// site is not known.
750 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
751   // If the function declares it doesn't access memory, we can't do better.
752   if (F->doesNotAccessMemory())
753     return FMRB_DoesNotAccessMemory;
754 
755   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
756 
757   // If the function declares it only reads memory, go with that.
758   if (F->onlyReadsMemory())
759     Min = FMRB_OnlyReadsMemory;
760   else if (F->doesNotReadMemory())
761     Min = FMRB_OnlyWritesMemory;
762 
763   if (F->onlyAccessesArgMemory())
764     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
765   else if (F->onlyAccessesInaccessibleMemory())
766     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
767   else if (F->onlyAccessesInaccessibleMemOrArgMem())
768     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
769 
770   return Min;
771 }
772 
773 /// Returns true if this is a writeonly (i.e Mod only) parameter.
774 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
775                              const TargetLibraryInfo &TLI) {
776   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
777     return true;
778 
779   // We can bound the aliasing properties of memset_pattern16 just as we can
780   // for memcpy/memset.  This is particularly important because the
781   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
782   // whenever possible.
783   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
784   // attributes.
785   LibFunc F;
786   if (Call->getCalledFunction() &&
787       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
788       F == LibFunc_memset_pattern16 && TLI.has(F))
789     if (ArgIdx == 0)
790       return true;
791 
792   // TODO: memset_pattern4, memset_pattern8
793   // TODO: _chk variants
794   // TODO: strcmp, strcpy
795 
796   return false;
797 }
798 
799 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
800                                            unsigned ArgIdx) {
801   // Checking for known builtin intrinsics and target library functions.
802   if (isWriteOnlyParam(Call, ArgIdx, TLI))
803     return ModRefInfo::Mod;
804 
805   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
806     return ModRefInfo::Ref;
807 
808   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
809     return ModRefInfo::NoModRef;
810 
811   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
812 }
813 
814 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
815   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
816   return II && II->getIntrinsicID() == IID;
817 }
818 
819 #ifndef NDEBUG
820 static const Function *getParent(const Value *V) {
821   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
822     if (!inst->getParent())
823       return nullptr;
824     return inst->getParent()->getParent();
825   }
826 
827   if (const Argument *arg = dyn_cast<Argument>(V))
828     return arg->getParent();
829 
830   return nullptr;
831 }
832 
833 static bool notDifferentParent(const Value *O1, const Value *O2) {
834 
835   const Function *F1 = getParent(O1);
836   const Function *F2 = getParent(O2);
837 
838   return !F1 || !F2 || F1 == F2;
839 }
840 #endif
841 
842 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
843                                  const MemoryLocation &LocB,
844                                  AAQueryInfo &AAQI) {
845   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
846          "BasicAliasAnalysis doesn't support interprocedural queries.");
847 
848   // If we have a directly cached entry for these locations, we have recursed
849   // through this once, so just return the cached results. Notably, when this
850   // happens, we don't clear the cache.
851   auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
852   if (CacheIt != AAQI.AliasCache.end())
853     return CacheIt->second;
854 
855   CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
856   if (CacheIt != AAQI.AliasCache.end())
857     return CacheIt->second;
858 
859   AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
860                                  LocB.Size, LocB.AATags, AAQI);
861 
862   VisitedPhiBBs.clear();
863   return Alias;
864 }
865 
866 /// Checks to see if the specified callsite can clobber the specified memory
867 /// object.
868 ///
869 /// Since we only look at local properties of this function, we really can't
870 /// say much about this query.  We do, however, use simple "address taken"
871 /// analysis on local objects.
872 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
873                                         const MemoryLocation &Loc,
874                                         AAQueryInfo &AAQI) {
875   assert(notDifferentParent(Call, Loc.Ptr) &&
876          "AliasAnalysis query involving multiple functions!");
877 
878   const Value *Object = getUnderlyingObject(Loc.Ptr);
879 
880   // Calls marked 'tail' cannot read or write allocas from the current frame
881   // because the current frame might be destroyed by the time they run. However,
882   // a tail call may use an alloca with byval. Calling with byval copies the
883   // contents of the alloca into argument registers or stack slots, so there is
884   // no lifetime issue.
885   if (isa<AllocaInst>(Object))
886     if (const CallInst *CI = dyn_cast<CallInst>(Call))
887       if (CI->isTailCall() &&
888           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
889         return ModRefInfo::NoModRef;
890 
891   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
892   // modify them even though the alloca is not escaped.
893   if (auto *AI = dyn_cast<AllocaInst>(Object))
894     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
895       return ModRefInfo::Mod;
896 
897   // If the pointer is to a locally allocated object that does not escape,
898   // then the call can not mod/ref the pointer unless the call takes the pointer
899   // as an argument, and itself doesn't capture it.
900   if (!isa<Constant>(Object) && Call != Object &&
901       isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
902 
903     // Optimistically assume that call doesn't touch Object and check this
904     // assumption in the following loop.
905     ModRefInfo Result = ModRefInfo::NoModRef;
906     bool IsMustAlias = true;
907 
908     unsigned OperandNo = 0;
909     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
910          CI != CE; ++CI, ++OperandNo) {
911       // Only look at the no-capture or byval pointer arguments.  If this
912       // pointer were passed to arguments that were neither of these, then it
913       // couldn't be no-capture.
914       if (!(*CI)->getType()->isPointerTy() ||
915           (!Call->doesNotCapture(OperandNo) &&
916            OperandNo < Call->getNumArgOperands() &&
917            !Call->isByValArgument(OperandNo)))
918         continue;
919 
920       // Call doesn't access memory through this operand, so we don't care
921       // if it aliases with Object.
922       if (Call->doesNotAccessMemory(OperandNo))
923         continue;
924 
925       // If this is a no-capture pointer argument, see if we can tell that it
926       // is impossible to alias the pointer we're checking.
927       AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
928                                                 MemoryLocation(Object), AAQI);
929       if (AR != MustAlias)
930         IsMustAlias = false;
931       // Operand doesn't alias 'Object', continue looking for other aliases
932       if (AR == NoAlias)
933         continue;
934       // Operand aliases 'Object', but call doesn't modify it. Strengthen
935       // initial assumption and keep looking in case if there are more aliases.
936       if (Call->onlyReadsMemory(OperandNo)) {
937         Result = setRef(Result);
938         continue;
939       }
940       // Operand aliases 'Object' but call only writes into it.
941       if (Call->doesNotReadMemory(OperandNo)) {
942         Result = setMod(Result);
943         continue;
944       }
945       // This operand aliases 'Object' and call reads and writes into it.
946       // Setting ModRef will not yield an early return below, MustAlias is not
947       // used further.
948       Result = ModRefInfo::ModRef;
949       break;
950     }
951 
952     // No operand aliases, reset Must bit. Add below if at least one aliases
953     // and all aliases found are MustAlias.
954     if (isNoModRef(Result))
955       IsMustAlias = false;
956 
957     // Early return if we improved mod ref information
958     if (!isModAndRefSet(Result)) {
959       if (isNoModRef(Result))
960         return ModRefInfo::NoModRef;
961       return IsMustAlias ? setMust(Result) : clearMust(Result);
962     }
963   }
964 
965   // If the call is malloc/calloc like, we can assume that it doesn't
966   // modify any IR visible value.  This is only valid because we assume these
967   // routines do not read values visible in the IR.  TODO: Consider special
968   // casing realloc and strdup routines which access only their arguments as
969   // well.  Or alternatively, replace all of this with inaccessiblememonly once
970   // that's implemented fully.
971   if (isMallocOrCallocLikeFn(Call, &TLI)) {
972     // Be conservative if the accessed pointer may alias the allocation -
973     // fallback to the generic handling below.
974     if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
975       return ModRefInfo::NoModRef;
976   }
977 
978   // The semantics of memcpy intrinsics either exactly overlap or do not
979   // overlap, i.e., source and destination of any given memcpy are either
980   // no-alias or must-alias.
981   if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
982     AliasResult SrcAA =
983         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
984     AliasResult DestAA =
985         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
986     // It's also possible for Loc to alias both src and dest, or neither.
987     ModRefInfo rv = ModRefInfo::NoModRef;
988     if (SrcAA != NoAlias)
989       rv = setRef(rv);
990     if (DestAA != NoAlias)
991       rv = setMod(rv);
992     return rv;
993   }
994 
995   // While the assume intrinsic is marked as arbitrarily writing so that
996   // proper control dependencies will be maintained, it never aliases any
997   // particular memory location.
998   if (isIntrinsicCall(Call, Intrinsic::assume))
999     return ModRefInfo::NoModRef;
1000 
1001   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
1002   // that proper control dependencies are maintained but they never mods any
1003   // particular memory location.
1004   //
1005   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1006   // heap state at the point the guard is issued needs to be consistent in case
1007   // the guard invokes the "deopt" continuation.
1008   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
1009     return ModRefInfo::Ref;
1010 
1011   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1012   // writing so that proper control dependencies are maintained but they never
1013   // mod any particular memory location visible to the IR.
1014   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1015   // intrinsic is now modeled as reading memory. This prevents hoisting the
1016   // invariant.start intrinsic over stores. Consider:
1017   // *ptr = 40;
1018   // *ptr = 50;
1019   // invariant_start(ptr)
1020   // int val = *ptr;
1021   // print(val);
1022   //
1023   // This cannot be transformed to:
1024   //
1025   // *ptr = 40;
1026   // invariant_start(ptr)
1027   // *ptr = 50;
1028   // int val = *ptr;
1029   // print(val);
1030   //
1031   // The transformation will cause the second store to be ignored (based on
1032   // rules of invariant.start)  and print 40, while the first program always
1033   // prints 50.
1034   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1035     return ModRefInfo::Ref;
1036 
1037   // The AAResultBase base class has some smarts, lets use them.
1038   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
1039 }
1040 
1041 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1042                                         const CallBase *Call2,
1043                                         AAQueryInfo &AAQI) {
1044   // While the assume intrinsic is marked as arbitrarily writing so that
1045   // proper control dependencies will be maintained, it never aliases any
1046   // particular memory location.
1047   if (isIntrinsicCall(Call1, Intrinsic::assume) ||
1048       isIntrinsicCall(Call2, Intrinsic::assume))
1049     return ModRefInfo::NoModRef;
1050 
1051   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
1052   // that proper control dependencies are maintained but they never mod any
1053   // particular memory location.
1054   //
1055   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1056   // heap state at the point the guard is issued needs to be consistent in case
1057   // the guard invokes the "deopt" continuation.
1058 
1059   // NB! This function is *not* commutative, so we special case two
1060   // possibilities for guard intrinsics.
1061 
1062   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1063     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1064                ? ModRefInfo::Ref
1065                : ModRefInfo::NoModRef;
1066 
1067   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1068     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1069                ? ModRefInfo::Mod
1070                : ModRefInfo::NoModRef;
1071 
1072   // The AAResultBase base class has some smarts, lets use them.
1073   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1074 }
1075 
1076 /// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
1077 /// both having the exact same pointer operand.
1078 static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
1079                                             LocationSize MaybeV1Size,
1080                                             const GEPOperator *GEP2,
1081                                             LocationSize MaybeV2Size,
1082                                             const DataLayout &DL) {
1083   assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1084              GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1085          GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
1086          "Expected GEPs with the same pointer operand");
1087 
1088   // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
1089   // such that the struct field accesses provably cannot alias.
1090   // We also need at least two indices (the pointer, and the struct field).
1091   if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
1092       GEP1->getNumIndices() < 2)
1093     return MayAlias;
1094 
1095   // If we don't know the size of the accesses through both GEPs, we can't
1096   // determine whether the struct fields accessed can't alias.
1097   if (MaybeV1Size == LocationSize::unknown() ||
1098       MaybeV2Size == LocationSize::unknown())
1099     return MayAlias;
1100 
1101   const uint64_t V1Size = MaybeV1Size.getValue();
1102   const uint64_t V2Size = MaybeV2Size.getValue();
1103 
1104   ConstantInt *C1 =
1105       dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
1106   ConstantInt *C2 =
1107       dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));
1108 
1109   // If the last (struct) indices are constants and are equal, the other indices
1110   // might be also be dynamically equal, so the GEPs can alias.
1111   if (C1 && C2) {
1112     unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
1113     if (C1->getValue().sextOrSelf(BitWidth) ==
1114         C2->getValue().sextOrSelf(BitWidth))
1115       return MayAlias;
1116   }
1117 
1118   // Find the last-indexed type of the GEP, i.e., the type you'd get if
1119   // you stripped the last index.
1120   // On the way, look at each indexed type.  If there's something other
1121   // than an array, different indices can lead to different final types.
1122   SmallVector<Value *, 8> IntermediateIndices;
1123 
1124   // Insert the first index; we don't need to check the type indexed
1125   // through it as it only drops the pointer indirection.
1126   assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
1127   IntermediateIndices.push_back(GEP1->getOperand(1));
1128 
1129   // Insert all the remaining indices but the last one.
1130   // Also, check that they all index through arrays.
1131   for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
1132     if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
1133             GEP1->getSourceElementType(), IntermediateIndices)))
1134       return MayAlias;
1135     IntermediateIndices.push_back(GEP1->getOperand(i + 1));
1136   }
1137 
1138   auto *Ty = GetElementPtrInst::getIndexedType(
1139     GEP1->getSourceElementType(), IntermediateIndices);
1140   StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);
1141 
1142   if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
1143     // We know that:
1144     // - both GEPs begin indexing from the exact same pointer;
1145     // - the last indices in both GEPs are constants, indexing into a sequential
1146     //   type (array or vector);
1147     // - both GEPs only index through arrays prior to that.
1148     //
1149     // Because array indices greater than the number of elements are valid in
1150     // GEPs, unless we know the intermediate indices are identical between
1151     // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
1152     // partially overlap. We also need to check that the loaded size matches
1153     // the element size, otherwise we could still have overlap.
1154     Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
1155     const uint64_t ElementSize =
1156         DL.getTypeStoreSize(LastElementTy).getFixedSize();
1157     if (V1Size != ElementSize || V2Size != ElementSize)
1158       return MayAlias;
1159 
1160     for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
1161       if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
1162         return MayAlias;
1163 
1164     // Now we know that the array/pointer that GEP1 indexes into and that
1165     // that GEP2 indexes into must either precisely overlap or be disjoint.
1166     // Because they cannot partially overlap and because fields in an array
1167     // cannot overlap, if we can prove the final indices are different between
1168     // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
1169 
1170     // If the last indices are constants, we've already checked they don't
1171     // equal each other so we can exit early.
1172     if (C1 && C2)
1173       return NoAlias;
1174     {
1175       Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
1176       Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
1177       if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
1178         // If one of the indices is a PHI node, be safe and only use
1179         // computeKnownBits so we don't make any assumptions about the
1180         // relationships between the two indices. This is important if we're
1181         // asking about values from different loop iterations. See PR32314.
1182         // TODO: We may be able to change the check so we only do this when
1183         // we definitely looked through a PHINode.
1184         if (GEP1LastIdx != GEP2LastIdx &&
1185             GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
1186           KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
1187           KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
1188           if (Known1.Zero.intersects(Known2.One) ||
1189               Known1.One.intersects(Known2.Zero))
1190             return NoAlias;
1191         }
1192       } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
1193         return NoAlias;
1194     }
1195     return MayAlias;
1196   } else if (!LastIndexedStruct || !C1 || !C2) {
1197     return MayAlias;
1198   }
1199 
1200   if (C1->getValue().getActiveBits() > 64 ||
1201       C2->getValue().getActiveBits() > 64)
1202     return MayAlias;
1203 
1204   // We know that:
1205   // - both GEPs begin indexing from the exact same pointer;
1206   // - the last indices in both GEPs are constants, indexing into a struct;
1207   // - said indices are different, hence, the pointed-to fields are different;
1208   // - both GEPs only index through arrays prior to that.
1209   //
1210   // This lets us determine that the struct that GEP1 indexes into and the
1211   // struct that GEP2 indexes into must either precisely overlap or be
1212   // completely disjoint.  Because they cannot partially overlap, indexing into
1213   // different non-overlapping fields of the struct will never alias.
1214 
1215   // Therefore, the only remaining thing needed to show that both GEPs can't
1216   // alias is that the fields are not overlapping.
1217   const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
1218   const uint64_t StructSize = SL->getSizeInBytes();
1219   const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
1220   const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());
1221 
1222   auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
1223                                       uint64_t V2Off, uint64_t V2Size) {
1224     return V1Off < V2Off && V1Off + V1Size <= V2Off &&
1225            ((V2Off + V2Size <= StructSize) ||
1226             (V2Off + V2Size - StructSize <= V1Off));
1227   };
1228 
1229   if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
1230       EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
1231     return NoAlias;
1232 
1233   return MayAlias;
1234 }
1235 
1236 // If a we have (a) a GEP and (b) a pointer based on an alloca, and the
1237 // beginning of the object the GEP points would have a negative offset with
1238 // repsect to the alloca, that means the GEP can not alias pointer (b).
1239 // Note that the pointer based on the alloca may not be a GEP. For
1240 // example, it may be the alloca itself.
1241 // The same applies if (b) is based on a GlobalVariable. Note that just being
1242 // based on isIdentifiedObject() is not enough - we need an identified object
1243 // that does not permit access to negative offsets. For example, a negative
1244 // offset from a noalias argument or call can be inbounds w.r.t the actual
1245 // underlying object.
1246 //
1247 // For example, consider:
1248 //
1249 //   struct { int f0, int f1, ...} foo;
1250 //   foo alloca;
1251 //   foo* random = bar(alloca);
1252 //   int *f0 = &alloca.f0
1253 //   int *f1 = &random->f1;
1254 //
1255 // Which is lowered, approximately, to:
1256 //
1257 //  %alloca = alloca %struct.foo
1258 //  %random = call %struct.foo* @random(%struct.foo* %alloca)
1259 //  %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
1260 //  %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
1261 //
1262 // Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
1263 // by %alloca. Since the %f1 GEP is inbounds, that means %random must also
1264 // point into the same object. But since %f0 points to the beginning of %alloca,
1265 // the highest %f1 can be is (%alloca + 3). This means %random can not be higher
1266 // than (%alloca - 1), and so is not inbounds, a contradiction.
1267 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
1268       const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
1269       LocationSize MaybeObjectAccessSize) {
1270   // If the object access size is unknown, or the GEP isn't inbounds, bail.
1271   if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
1272     return false;
1273 
1274   const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();
1275 
1276   // We need the object to be an alloca or a globalvariable, and want to know
1277   // the offset of the pointer from the object precisely, so no variable
1278   // indices are allowed.
1279   if (!(isa<AllocaInst>(DecompObject.Base) ||
1280         isa<GlobalVariable>(DecompObject.Base)) ||
1281       !DecompObject.VarIndices.empty())
1282     return false;
1283 
1284   APInt ObjectBaseOffset = DecompObject.StructOffset +
1285                            DecompObject.OtherOffset;
1286 
1287   // If the GEP has no variable indices, we know the precise offset
1288   // from the base, then use it. If the GEP has variable indices,
1289   // we can't get exact GEP offset to identify pointer alias. So return
1290   // false in that case.
1291   if (!DecompGEP.VarIndices.empty())
1292     return false;
1293 
1294   APInt GEPBaseOffset = DecompGEP.StructOffset;
1295   GEPBaseOffset += DecompGEP.OtherOffset;
1296 
1297   return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
1298 }
1299 
1300 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1301 /// another pointer.
1302 ///
1303 /// We know that V1 is a GEP, but we don't know anything about V2.
1304 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1305 /// V2.
1306 AliasResult BasicAAResult::aliasGEP(
1307     const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
1308     const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
1309     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1310   DecomposedGEP DecompGEP1, DecompGEP2;
1311   unsigned MaxPointerSize = getMaxPointerSize(DL);
1312   DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
1313   DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
1314   DecompGEP1.HasCompileTimeConstantScale =
1315       DecompGEP2.HasCompileTimeConstantScale = true;
1316 
1317   bool GEP1MaxLookupReached =
1318     DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1319   bool GEP2MaxLookupReached =
1320     DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1321 
1322   // Don't attempt to analyze the decomposed GEP if index scale is not a
1323   // compile-time constant.
1324   if (!DecompGEP1.HasCompileTimeConstantScale ||
1325       !DecompGEP2.HasCompileTimeConstantScale)
1326     return MayAlias;
1327 
1328   APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
1329   APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
1330 
1331   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1332          "DecomposeGEPExpression returned a result different from "
1333          "getUnderlyingObject");
1334 
1335   // If the GEP's offset relative to its base is such that the base would
1336   // fall below the start of the object underlying V2, then the GEP and V2
1337   // cannot alias.
1338   if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1339       isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1340     return NoAlias;
1341   // If we have two gep instructions with must-alias or not-alias'ing base
1342   // pointers, figure out if the indexes to the GEP tell us anything about the
1343   // derived pointer.
1344   if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1345     // Check for the GEP base being at a negative offset, this time in the other
1346     // direction.
1347     if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1348         isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1349       return NoAlias;
1350     // Do the base pointers alias?
1351     AliasResult BaseAlias =
1352         aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
1353                    UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);
1354 
1355     // Check for geps of non-aliasing underlying pointers where the offsets are
1356     // identical.
1357     if ((BaseAlias == MayAlias) && V1Size == V2Size) {
1358       // Do the base pointers alias assuming type and size.
1359       AliasResult PreciseBaseAlias = aliasCheck(
1360           UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
1361       if (PreciseBaseAlias == NoAlias) {
1362         // See if the computed offset from the common pointer tells us about the
1363         // relation of the resulting pointer.
1364         // If the max search depth is reached the result is undefined
1365         if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1366           return MayAlias;
1367 
1368         // Same offsets.
1369         if (GEP1BaseOffset == GEP2BaseOffset &&
1370             DecompGEP1.VarIndices == DecompGEP2.VarIndices)
1371           return NoAlias;
1372       }
1373     }
1374 
1375     // If we get a No or May, then return it immediately, no amount of analysis
1376     // will improve this situation.
1377     if (BaseAlias != MustAlias) {
1378       assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1379       return BaseAlias;
1380     }
1381 
1382     // Otherwise, we have a MustAlias.  Since the base pointers alias each other
1383     // exactly, see if the computed offset from the common pointer tells us
1384     // about the relation of the resulting pointer.
1385     // If we know the two GEPs are based off of the exact same pointer (and not
1386     // just the same underlying object), see if that tells us anything about
1387     // the resulting pointers.
1388     if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1389             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1390         GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1391       AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1392       // If we couldn't find anything interesting, don't abandon just yet.
1393       if (R != MayAlias)
1394         return R;
1395     }
1396 
1397     // If the max search depth is reached, the result is undefined
1398     if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1399       return MayAlias;
1400 
1401     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1402     // symbolic difference.
1403     GEP1BaseOffset -= GEP2BaseOffset;
1404     GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1405 
1406   } else {
1407     // Check to see if these two pointers are related by the getelementptr
1408     // instruction.  If one pointer is a GEP with a non-zero index of the other
1409     // pointer, we know they cannot alias.
1410 
1411     // If both accesses are unknown size, we can't do anything useful here.
1412     if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
1413       return MayAlias;
1414 
1415     AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
1416                                AAMDNodes(), V2, LocationSize::unknown(),
1417                                V2AAInfo, AAQI, nullptr, UnderlyingV2);
1418     if (R != MustAlias) {
1419       // If V2 may alias GEP base pointer, conservatively returns MayAlias.
1420       // If V2 is known not to alias GEP base pointer, then the two values
1421       // cannot alias per GEP semantics: "Any memory access must be done through
1422       // a pointer value associated with an address range of the memory access,
1423       // otherwise the behavior is undefined.".
1424       assert(R == NoAlias || R == MayAlias);
1425       return R;
1426     }
1427 
1428     // If the max search depth is reached the result is undefined
1429     if (GEP1MaxLookupReached)
1430       return MayAlias;
1431   }
1432 
1433   // In the two GEP Case, if there is no difference in the offsets of the
1434   // computed pointers, the resultant pointers are a must alias.  This
1435   // happens when we have two lexically identical GEP's (for example).
1436   //
1437   // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
1438   // must aliases the GEP, the end result is a must alias also.
1439   if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
1440     return MustAlias;
1441 
1442   // If there is a constant difference between the pointers, but the difference
1443   // is less than the size of the associated memory object, then we know
1444   // that the objects are partially overlapping.  If the difference is
1445   // greater, we know they do not overlap.
1446   if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
1447     if (GEP1BaseOffset.sge(0)) {
1448       if (V2Size != LocationSize::unknown()) {
1449         if (GEP1BaseOffset.ult(V2Size.getValue()))
1450           return PartialAlias;
1451         return NoAlias;
1452       }
1453     } else {
1454       // We have the situation where:
1455       // +                +
1456       // | BaseOffset     |
1457       // ---------------->|
1458       // |-->V1Size       |-------> V2Size
1459       // GEP1             V2
1460       // We need to know that V2Size is not unknown, otherwise we might have
1461       // stripped a gep with negative index ('gep <ptr>, -1, ...).
1462       if (V1Size != LocationSize::unknown() &&
1463           V2Size != LocationSize::unknown()) {
1464         if ((-GEP1BaseOffset).ult(V1Size.getValue()))
1465           return PartialAlias;
1466         return NoAlias;
1467       }
1468     }
1469   }
1470 
1471   if (!DecompGEP1.VarIndices.empty()) {
1472     APInt Modulo(MaxPointerSize, 0);
1473     bool AllPositive = true;
1474     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1475 
1476       // Try to distinguish something like &A[i][1] against &A[42][0].
1477       // Grab the least significant bit set in any of the scales. We
1478       // don't need std::abs here (even if the scale's negative) as we'll
1479       // be ^'ing Modulo with itself later.
1480       Modulo |= DecompGEP1.VarIndices[i].Scale;
1481 
1482       if (AllPositive) {
1483         // If the Value could change between cycles, then any reasoning about
1484         // the Value this cycle may not hold in the next cycle. We'll just
1485         // give up if we can't determine conditions that hold for every cycle:
1486         const Value *V = DecompGEP1.VarIndices[i].V;
1487 
1488         KnownBits Known =
1489             computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
1490         bool SignKnownZero = Known.isNonNegative();
1491         bool SignKnownOne = Known.isNegative();
1492 
1493         // Zero-extension widens the variable, and so forces the sign
1494         // bit to zero.
1495         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1496         SignKnownZero |= IsZExt;
1497         SignKnownOne &= !IsZExt;
1498 
1499         // If the variable begins with a zero then we know it's
1500         // positive, regardless of whether the value is signed or
1501         // unsigned.
1502         APInt Scale = DecompGEP1.VarIndices[i].Scale;
1503         AllPositive =
1504             (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
1505       }
1506     }
1507 
1508     Modulo = Modulo ^ (Modulo & (Modulo - 1));
1509 
1510     // We can compute the difference between the two addresses
1511     // mod Modulo. Check whether that difference guarantees that the
1512     // two locations do not alias.
1513     APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
1514     if (V1Size != LocationSize::unknown() &&
1515         V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
1516         (Modulo - ModOffset).uge(V1Size.getValue()))
1517       return NoAlias;
1518 
1519     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
1520     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
1521     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
1522     if (AllPositive && GEP1BaseOffset.sgt(0) &&
1523         V2Size != LocationSize::unknown() &&
1524         GEP1BaseOffset.uge(V2Size.getValue()))
1525       return NoAlias;
1526 
1527     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1528                                 GEP1BaseOffset, &AC, DT))
1529       return NoAlias;
1530   }
1531 
1532   // Statically, we can see that the base objects are the same, but the
1533   // pointers have dynamic offsets which we can't resolve. And none of our
1534   // little tricks above worked.
1535   return MayAlias;
1536 }
1537 
1538 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1539   // If the results agree, take it.
1540   if (A == B)
1541     return A;
1542   // A mix of PartialAlias and MustAlias is PartialAlias.
1543   if ((A == PartialAlias && B == MustAlias) ||
1544       (B == PartialAlias && A == MustAlias))
1545     return PartialAlias;
1546   // Otherwise, we don't know anything.
1547   return MayAlias;
1548 }
1549 
1550 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1551 /// against another.
1552 AliasResult
1553 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1554                            const AAMDNodes &SIAAInfo, const Value *V2,
1555                            LocationSize V2Size, const AAMDNodes &V2AAInfo,
1556                            const Value *UnderV2, AAQueryInfo &AAQI) {
1557   // If the values are Selects with the same condition, we can do a more precise
1558   // check: just check for aliases between the values on corresponding arms.
1559   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1560     if (SI->getCondition() == SI2->getCondition()) {
1561       AliasResult Alias =
1562           aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
1563                      V2Size, V2AAInfo, AAQI);
1564       if (Alias == MayAlias)
1565         return MayAlias;
1566       AliasResult ThisAlias =
1567           aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1568                      SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
1569       return MergeAliasResults(ThisAlias, Alias);
1570     }
1571 
1572   // If both arms of the Select node NoAlias or MustAlias V2, then returns
1573   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1574   AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1575                                  SISize, SIAAInfo, AAQI, UnderV2);
1576   if (Alias == MayAlias)
1577     return MayAlias;
1578 
1579   AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
1580                                      SISize, SIAAInfo, AAQI, UnderV2);
1581   return MergeAliasResults(ThisAlias, Alias);
1582 }
1583 
1584 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1585 /// another.
1586 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1587                                     const AAMDNodes &PNAAInfo, const Value *V2,
1588                                     LocationSize V2Size,
1589                                     const AAMDNodes &V2AAInfo,
1590                                     const Value *UnderV2, AAQueryInfo &AAQI) {
1591   // Track phi nodes we have visited. We use this information when we determine
1592   // value equivalence.
1593   VisitedPhiBBs.insert(PN->getParent());
1594 
1595   // If the values are PHIs in the same block, we can do a more precise
1596   // as well as efficient check: just check for aliases between the values
1597   // on corresponding edges.
1598   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1599     if (PN2->getParent() == PN->getParent()) {
1600       AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1601                                 MemoryLocation(V2, V2Size, V2AAInfo));
1602       if (PN > V2)
1603         std::swap(Locs.first, Locs.second);
1604       // Analyse the PHIs' inputs under the assumption that the PHIs are
1605       // NoAlias.
1606       // If the PHIs are May/MustAlias there must be (recursively) an input
1607       // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1608       // there must be an operation on the PHIs within the PHIs' value cycle
1609       // that causes a MayAlias.
1610       // Pretend the phis do not alias.
1611       AliasResult Alias = NoAlias;
1612       AliasResult OrigAliasResult;
1613       {
1614         // Limited lifetime iterator invalidated by the aliasCheck call below.
1615         auto CacheIt = AAQI.AliasCache.find(Locs);
1616         assert((CacheIt != AAQI.AliasCache.end()) &&
1617                "There must exist an entry for the phi node");
1618         OrigAliasResult = CacheIt->second;
1619         CacheIt->second = NoAlias;
1620       }
1621 
1622       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1623         AliasResult ThisAlias =
1624             aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1625                        PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1626                        V2Size, V2AAInfo, AAQI);
1627         Alias = MergeAliasResults(ThisAlias, Alias);
1628         if (Alias == MayAlias)
1629           break;
1630       }
1631 
1632       // Reset if speculation failed.
1633       if (Alias != NoAlias) {
1634         auto Pair =
1635             AAQI.AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
1636         assert(!Pair.second && "Entry must have existed");
1637         Pair.first->second = OrigAliasResult;
1638       }
1639       return Alias;
1640     }
1641 
1642   SmallVector<Value *, 4> V1Srcs;
1643   // For a recursive phi, that recurses through a contant gep, we can perform
1644   // aliasing calculations using the other phi operands with an unknown size to
1645   // specify that an unknown number of elements after the initial value are
1646   // potentially accessed.
1647   bool isRecursive = false;
1648   auto CheckForRecPhi = [&](Value *PV) {
1649     if (!EnableRecPhiAnalysis)
1650       return false;
1651     if (GEPOperator *PVGEP = dyn_cast<GEPOperator>(PV)) {
1652       // Check whether the incoming value is a GEP that advances the pointer
1653       // result of this PHI node (e.g. in a loop). If this is the case, we
1654       // would recurse and always get a MayAlias. Handle this case specially
1655       // below. We need to ensure that the phi is inbounds and has a constant
1656       // positive operand so that we can check for alias with the initial value
1657       // and an unknown but positive size.
1658       if (PVGEP->getPointerOperand() == PN && PVGEP->isInBounds() &&
1659           PVGEP->getNumIndices() == 1 && isa<ConstantInt>(PVGEP->idx_begin()) &&
1660           !cast<ConstantInt>(PVGEP->idx_begin())->isNegative()) {
1661         isRecursive = true;
1662         return true;
1663       }
1664     }
1665     return false;
1666   };
1667 
1668   if (PV) {
1669     // If we have PhiValues then use it to get the underlying phi values.
1670     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1671     // If we have more phi values than the search depth then return MayAlias
1672     // conservatively to avoid compile time explosion. The worst possible case
1673     // is if both sides are PHI nodes. In which case, this is O(m x n) time
1674     // where 'm' and 'n' are the number of PHI sources.
1675     if (PhiValueSet.size() > MaxLookupSearchDepth)
1676       return MayAlias;
1677     // Add the values to V1Srcs
1678     for (Value *PV1 : PhiValueSet) {
1679       if (CheckForRecPhi(PV1))
1680         continue;
1681       V1Srcs.push_back(PV1);
1682     }
1683   } else {
1684     // If we don't have PhiInfo then just look at the operands of the phi itself
1685     // FIXME: Remove this once we can guarantee that we have PhiInfo always
1686     SmallPtrSet<Value *, 4> UniqueSrc;
1687     for (Value *PV1 : PN->incoming_values()) {
1688       if (isa<PHINode>(PV1))
1689         // If any of the source itself is a PHI, return MayAlias conservatively
1690         // to avoid compile time explosion. The worst possible case is if both
1691         // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
1692         // and 'n' are the number of PHI sources.
1693         return MayAlias;
1694 
1695       if (CheckForRecPhi(PV1))
1696         continue;
1697 
1698       if (UniqueSrc.insert(PV1).second)
1699         V1Srcs.push_back(PV1);
1700     }
1701   }
1702 
1703   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1704   // value. This should only be possible in blocks unreachable from the entry
1705   // block, but return MayAlias just in case.
1706   if (V1Srcs.empty())
1707     return MayAlias;
1708 
1709   // If this PHI node is recursive, set the size of the accessed memory to
1710   // unknown to represent all the possible values the GEP could advance the
1711   // pointer to.
1712   if (isRecursive)
1713     PNSize = LocationSize::unknown();
1714 
1715   AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
1716                                  PNAAInfo, AAQI, UnderV2);
1717 
1718   // Early exit if the check of the first PHI source against V2 is MayAlias.
1719   // Other results are not possible.
1720   if (Alias == MayAlias)
1721     return MayAlias;
1722   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1723   // remain valid to all elements and needs to conservatively return MayAlias.
1724   if (isRecursive && Alias != NoAlias)
1725     return MayAlias;
1726 
1727   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1728   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1729   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1730     Value *V = V1Srcs[i];
1731 
1732     AliasResult ThisAlias =
1733         aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
1734     Alias = MergeAliasResults(ThisAlias, Alias);
1735     if (Alias == MayAlias)
1736       break;
1737   }
1738 
1739   return Alias;
1740 }
1741 
1742 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1743 /// array references.
1744 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1745                                       AAMDNodes V1AAInfo, const Value *V2,
1746                                       LocationSize V2Size, AAMDNodes V2AAInfo,
1747                                       AAQueryInfo &AAQI, const Value *O1,
1748                                       const Value *O2) {
1749   // If either of the memory references is empty, it doesn't matter what the
1750   // pointer values are.
1751   if (V1Size.isZero() || V2Size.isZero())
1752     return NoAlias;
1753 
1754   // Strip off any casts if they exist.
1755   V1 = V1->stripPointerCastsAndInvariantGroups();
1756   V2 = V2->stripPointerCastsAndInvariantGroups();
1757 
1758   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1759   // value for undef that aliases nothing in the program.
1760   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1761     return NoAlias;
1762 
1763   // Are we checking for alias of the same value?
1764   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1765   // different iterations. We must therefore make sure that this is not the
1766   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1767   // happen by looking at the visited phi nodes and making sure they cannot
1768   // reach the value.
1769   if (isValueEqualInPotentialCycles(V1, V2))
1770     return MustAlias;
1771 
1772   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1773     return NoAlias; // Scalars cannot alias each other
1774 
1775   // Figure out what objects these things are pointing to if we can.
1776   if (O1 == nullptr)
1777     O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1778 
1779   if (O2 == nullptr)
1780     O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1781 
1782   // Null values in the default address space don't point to any object, so they
1783   // don't alias any other pointer.
1784   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1785     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1786       return NoAlias;
1787   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1788     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1789       return NoAlias;
1790 
1791   if (O1 != O2) {
1792     // If V1/V2 point to two different objects, we know that we have no alias.
1793     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1794       return NoAlias;
1795 
1796     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1797     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1798         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1799       return NoAlias;
1800 
1801     // Function arguments can't alias with things that are known to be
1802     // unambigously identified at the function level.
1803     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1804         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1805       return NoAlias;
1806 
1807     // If one pointer is the result of a call/invoke or load and the other is a
1808     // non-escaping local object within the same function, then we know the
1809     // object couldn't escape to a point where the call could return it.
1810     //
1811     // Note that if the pointers are in different functions, there are a
1812     // variety of complications. A call with a nocapture argument may still
1813     // temporary store the nocapture argument's value in a temporary memory
1814     // location if that memory location doesn't escape. Or it may pass a
1815     // nocapture value to other functions as long as they don't capture it.
1816     if (isEscapeSource(O1) &&
1817         isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
1818       return NoAlias;
1819     if (isEscapeSource(O2) &&
1820         isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
1821       return NoAlias;
1822   }
1823 
1824   // If the size of one access is larger than the entire object on the other
1825   // side, then we know such behavior is undefined and can assume no alias.
1826   bool NullIsValidLocation = NullPointerIsDefined(&F);
1827   if ((isObjectSmallerThan(
1828           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1829           TLI, NullIsValidLocation)) ||
1830       (isObjectSmallerThan(
1831           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1832           TLI, NullIsValidLocation)))
1833     return NoAlias;
1834 
1835   // Check the cache before climbing up use-def chains. This also terminates
1836   // otherwise infinitely recursive queries.
1837   AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1838                             MemoryLocation(V2, V2Size, V2AAInfo));
1839   if (V1 > V2)
1840     std::swap(Locs.first, Locs.second);
1841   std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
1842       AAQI.AliasCache.try_emplace(Locs, MayAlias);
1843   if (!Pair.second)
1844     return Pair.first->second;
1845 
1846   // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1847   // GEP can't simplify, we don't even look at the PHI cases.
1848   if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1849     std::swap(V1, V2);
1850     std::swap(V1Size, V2Size);
1851     std::swap(O1, O2);
1852     std::swap(V1AAInfo, V2AAInfo);
1853   }
1854   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1855     AliasResult Result =
1856         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
1857     if (Result != MayAlias) {
1858       auto ItInsPair = AAQI.AliasCache.insert(std::make_pair(Locs, Result));
1859       assert(!ItInsPair.second && "Entry must have existed");
1860       ItInsPair.first->second = Result;
1861       return Result;
1862     }
1863   }
1864 
1865   if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1866     std::swap(V1, V2);
1867     std::swap(O1, O2);
1868     std::swap(V1Size, V2Size);
1869     std::swap(V1AAInfo, V2AAInfo);
1870   }
1871   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1872     AliasResult Result =
1873         aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
1874     if (Result != MayAlias) {
1875       Pair = AAQI.AliasCache.try_emplace(Locs, Result);
1876       assert(!Pair.second && "Entry must have existed");
1877       return Pair.first->second = Result;
1878     }
1879   }
1880 
1881   if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1882     std::swap(V1, V2);
1883     std::swap(O1, O2);
1884     std::swap(V1Size, V2Size);
1885     std::swap(V1AAInfo, V2AAInfo);
1886   }
1887   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1888     AliasResult Result =
1889         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
1890     if (Result != MayAlias) {
1891       Pair = AAQI.AliasCache.try_emplace(Locs, Result);
1892       assert(!Pair.second && "Entry must have existed");
1893       return Pair.first->second = Result;
1894     }
1895   }
1896 
1897   // If both pointers are pointing into the same object and one of them
1898   // accesses the entire object, then the accesses must overlap in some way.
1899   if (O1 == O2)
1900     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1901         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1902          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) {
1903       Pair = AAQI.AliasCache.try_emplace(Locs, PartialAlias);
1904       assert(!Pair.second && "Entry must have existed");
1905       return Pair.first->second = PartialAlias;
1906     }
1907 
1908   // Recurse back into the best AA results we have, potentially with refined
1909   // memory locations. We have already ensured that BasicAA has a MayAlias
1910   // cache result for these, so any recursion back into BasicAA won't loop.
1911   AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
1912   Pair = AAQI.AliasCache.try_emplace(Locs, Result);
1913   assert(!Pair.second && "Entry must have existed");
1914   return Pair.first->second = Result;
1915 }
1916 
1917 /// Check whether two Values can be considered equivalent.
1918 ///
1919 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1920 /// they can not be part of a cycle in the value graph by looking at all
1921 /// visited phi nodes an making sure that the phis cannot reach the value. We
1922 /// have to do this because we are looking through phi nodes (That is we say
1923 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1924 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1925                                                   const Value *V2) {
1926   if (V != V2)
1927     return false;
1928 
1929   const Instruction *Inst = dyn_cast<Instruction>(V);
1930   if (!Inst)
1931     return true;
1932 
1933   if (VisitedPhiBBs.empty())
1934     return true;
1935 
1936   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1937     return false;
1938 
1939   // Make sure that the visited phis cannot reach the Value. This ensures that
1940   // the Values cannot come from different iterations of a potential cycle the
1941   // phi nodes could be involved in.
1942   for (auto *P : VisitedPhiBBs)
1943     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
1944       return false;
1945 
1946   return true;
1947 }
1948 
1949 /// Computes the symbolic difference between two de-composed GEPs.
1950 ///
1951 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1952 /// instructions GEP1 and GEP2 which have common base pointers.
1953 void BasicAAResult::GetIndexDifference(
1954     SmallVectorImpl<VariableGEPIndex> &Dest,
1955     const SmallVectorImpl<VariableGEPIndex> &Src) {
1956   if (Src.empty())
1957     return;
1958 
1959   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1960     const Value *V = Src[i].V;
1961     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1962     APInt Scale = Src[i].Scale;
1963 
1964     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1965     // than a few variable indexes.
1966     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1967       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1968           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1969         continue;
1970 
1971       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1972       // goes to zero, remove the entry.
1973       if (Dest[j].Scale != Scale)
1974         Dest[j].Scale -= Scale;
1975       else
1976         Dest.erase(Dest.begin() + j);
1977       Scale = 0;
1978       break;
1979     }
1980 
1981     // If we didn't consume this entry, add it to the end of the Dest list.
1982     if (!!Scale) {
1983       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1984       Dest.push_back(Entry);
1985     }
1986   }
1987 }
1988 
1989 bool BasicAAResult::constantOffsetHeuristic(
1990     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1991     LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
1992     AssumptionCache *AC, DominatorTree *DT) {
1993   if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
1994       MaybeV2Size == LocationSize::unknown())
1995     return false;
1996 
1997   const uint64_t V1Size = MaybeV1Size.getValue();
1998   const uint64_t V2Size = MaybeV2Size.getValue();
1999 
2000   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
2001 
2002   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
2003       Var0.Scale != -Var1.Scale)
2004     return false;
2005 
2006   unsigned Width = Var1.V->getType()->getIntegerBitWidth();
2007 
2008   // We'll strip off the Extensions of Var0 and Var1 and do another round
2009   // of GetLinearExpression decomposition. In the example above, if Var0
2010   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
2011 
2012   APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
2013       V1Offset(Width, 0);
2014   bool NSW = true, NUW = true;
2015   unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
2016   const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
2017                                         V0SExtBits, DL, 0, AC, DT, NSW, NUW);
2018   NSW = true;
2019   NUW = true;
2020   const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
2021                                         V1SExtBits, DL, 0, AC, DT, NSW, NUW);
2022 
2023   if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
2024       V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
2025     return false;
2026 
2027   // We have a hit - Var0 and Var1 only differ by a constant offset!
2028 
2029   // If we've been sext'ed then zext'd the maximum difference between Var0 and
2030   // Var1 is possible to calculate, but we're just interested in the absolute
2031   // minimum difference between the two. The minimum distance may occur due to
2032   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
2033   // the minimum distance between %i and %i + 5 is 3.
2034   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
2035   MinDiff = APIntOps::umin(MinDiff, Wrapped);
2036   APInt MinDiffBytes =
2037     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
2038 
2039   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
2040   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
2041   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
2042   // V2Size can fit in the MinDiffBytes gap.
2043   return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
2044          MinDiffBytes.uge(V2Size + BaseOffset.abs());
2045 }
2046 
2047 //===----------------------------------------------------------------------===//
2048 // BasicAliasAnalysis Pass
2049 //===----------------------------------------------------------------------===//
2050 
2051 AnalysisKey BasicAA::Key;
2052 
2053 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
2054   return BasicAAResult(F.getParent()->getDataLayout(),
2055                        F,
2056                        AM.getResult<TargetLibraryAnalysis>(F),
2057                        AM.getResult<AssumptionAnalysis>(F),
2058                        &AM.getResult<DominatorTreeAnalysis>(F),
2059                        AM.getCachedResult<LoopAnalysis>(F),
2060                        AM.getCachedResult<PhiValuesAnalysis>(F));
2061 }
2062 
2063 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
2064   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
2065 }
2066 
2067 char BasicAAWrapperPass::ID = 0;
2068 
2069 void BasicAAWrapperPass::anchor() {}
2070 
2071 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
2072                       "Basic Alias Analysis (stateless AA impl)", true, true)
2073 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2074 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2075 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2076 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
2077 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
2078                     "Basic Alias Analysis (stateless AA impl)", true, true)
2079 
2080 FunctionPass *llvm::createBasicAAWrapperPass() {
2081   return new BasicAAWrapperPass();
2082 }
2083 
2084 bool BasicAAWrapperPass::runOnFunction(Function &F) {
2085   auto &ACT = getAnalysis<AssumptionCacheTracker>();
2086   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
2087   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
2088   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
2089   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
2090 
2091   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
2092                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
2093                                  &DTWP.getDomTree(),
2094                                  LIWP ? &LIWP->getLoopInfo() : nullptr,
2095                                  PVWP ? &PVWP->getResult() : nullptr));
2096 
2097   return false;
2098 }
2099 
2100 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2101   AU.setPreservesAll();
2102   AU.addRequired<AssumptionCacheTracker>();
2103   AU.addRequired<DominatorTreeWrapperPass>();
2104   AU.addRequired<TargetLibraryInfoWrapperPass>();
2105   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
2106 }
2107 
2108 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
2109   return BasicAAResult(
2110       F.getParent()->getDataLayout(), F,
2111       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
2112       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
2113 }
2114