1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/KnownBits.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <utility>
62 
63 #define DEBUG_TYPE "basicaa"
64 
65 using namespace llvm;
66 
67 /// Enable analysis of recursive PHI nodes.
68 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69                                           cl::init(true));
70 
71 /// By default, even on 32-bit architectures we use 64-bit integers for
72 /// calculations. This will allow us to more-aggressively decompose indexing
73 /// expressions calculated using i64 values (e.g., long long in C) which is
74 /// common enough to worry about.
75 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
76                                         cl::Hidden, cl::init(true));
77 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
78                                     cl::Hidden, cl::init(false));
79 
80 /// SearchLimitReached / SearchTimes shows how often the limit of
81 /// to decompose GEPs is reached. It will affect the precision
82 /// of basic alias analysis.
83 STATISTIC(SearchLimitReached, "Number of times the limit to "
84                               "decompose GEPs is reached");
85 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
86 
87 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
88 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
89 /// careful with value equivalence. We use reachability to make sure a value
90 /// cannot be involved in a cycle.
91 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
92 
93 // The max limit of the search depth in DecomposeGEPExpression() and
94 // getUnderlyingObject(), both functions need to use the same search
95 // depth otherwise the algorithm in aliasGEP will assert.
96 static const unsigned MaxLookupSearchDepth = 6;
97 
98 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
99                                FunctionAnalysisManager::Invalidator &Inv) {
100   // We don't care if this analysis itself is preserved, it has no state. But
101   // we need to check that the analyses it depends on have been. Note that we
102   // may be created without handles to some analyses and in that case don't
103   // depend on them.
104   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
105       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
106       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
107     return true;
108 
109   // Otherwise this analysis result remains valid.
110   return false;
111 }
112 
113 //===----------------------------------------------------------------------===//
114 // Useful predicates
115 //===----------------------------------------------------------------------===//
116 
117 /// Returns true if the pointer is one which would have been considered an
118 /// escape by isNonEscapingLocalObject.
119 static bool isEscapeSource(const Value *V) {
120   if (isa<CallBase>(V))
121     return true;
122 
123   if (isa<Argument>(V))
124     return true;
125 
126   // The load case works because isNonEscapingLocalObject considers all
127   // stores to be escapes (it passes true for the StoreCaptures argument
128   // to PointerMayBeCaptured).
129   if (isa<LoadInst>(V))
130     return true;
131 
132   return false;
133 }
134 
135 /// Returns the size of the object specified by V or UnknownSize if unknown.
136 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
137                               const TargetLibraryInfo &TLI,
138                               bool NullIsValidLoc,
139                               bool RoundToAlign = false) {
140   uint64_t Size;
141   ObjectSizeOpts Opts;
142   Opts.RoundToAlign = RoundToAlign;
143   Opts.NullIsUnknownSize = NullIsValidLoc;
144   if (getObjectSize(V, Size, DL, &TLI, Opts))
145     return Size;
146   return MemoryLocation::UnknownSize;
147 }
148 
149 /// Returns true if we can prove that the object specified by V is smaller than
150 /// Size.
151 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
152                                 const DataLayout &DL,
153                                 const TargetLibraryInfo &TLI,
154                                 bool NullIsValidLoc) {
155   // Note that the meanings of the "object" are slightly different in the
156   // following contexts:
157   //    c1: llvm::getObjectSize()
158   //    c2: llvm.objectsize() intrinsic
159   //    c3: isObjectSmallerThan()
160   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
161   // refers to the "entire object".
162   //
163   //  Consider this example:
164   //     char *p = (char*)malloc(100)
165   //     char *q = p+80;
166   //
167   //  In the context of c1 and c2, the "object" pointed by q refers to the
168   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
169   //
170   //  However, in the context of c3, the "object" refers to the chunk of memory
171   // being allocated. So, the "object" has 100 bytes, and q points to the middle
172   // the "object". In case q is passed to isObjectSmallerThan() as the 1st
173   // parameter, before the llvm::getObjectSize() is called to get the size of
174   // entire object, we should:
175   //    - either rewind the pointer q to the base-address of the object in
176   //      question (in this case rewind to p), or
177   //    - just give up. It is up to caller to make sure the pointer is pointing
178   //      to the base address the object.
179   //
180   // We go for 2nd option for simplicity.
181   if (!isIdentifiedObject(V))
182     return false;
183 
184   // This function needs to use the aligned object size because we allow
185   // reads a bit past the end given sufficient alignment.
186   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
187                                       /*RoundToAlign*/ true);
188 
189   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
190 }
191 
192 /// Return the minimal extent from \p V to the end of the underlying object,
193 /// assuming the result is used in an aliasing query. E.g., we do use the query
194 /// location size and the fact that null pointers cannot alias here.
195 static uint64_t getMinimalExtentFrom(const Value &V,
196                                      const LocationSize &LocSize,
197                                      const DataLayout &DL,
198                                      bool NullIsValidLoc) {
199   // If we have dereferenceability information we know a lower bound for the
200   // extent as accesses for a lower offset would be valid. We need to exclude
201   // the "or null" part if null is a valid pointer.
202   bool CanBeNull, CanBeFreed;
203   uint64_t DerefBytes =
204     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
205   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
206   DerefBytes = CanBeFreed ? 0 : DerefBytes;
207   // If queried with a precise location size, we assume that location size to be
208   // accessed, thus valid.
209   if (LocSize.isPrecise())
210     DerefBytes = std::max(DerefBytes, LocSize.getValue());
211   return DerefBytes;
212 }
213 
214 /// Returns true if we can prove that the object specified by V has size Size.
215 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
216                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
217   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
218   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
219 }
220 
221 //===----------------------------------------------------------------------===//
222 // GetElementPtr Instruction Decomposition and Analysis
223 //===----------------------------------------------------------------------===//
224 
225 namespace {
226 /// Represents zext(sext(V)).
227 struct ExtendedValue {
228   const Value *V;
229   unsigned ZExtBits;
230   unsigned SExtBits;
231 
232   explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
233                          unsigned SExtBits = 0)
234       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
235 
236   unsigned getBitWidth() const {
237     return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
238   }
239 
240   ExtendedValue withValue(const Value *NewV) const {
241     return ExtendedValue(NewV, ZExtBits, SExtBits);
242   }
243 
244   ExtendedValue withZExtOfValue(const Value *NewV) const {
245     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
246                         NewV->getType()->getPrimitiveSizeInBits();
247     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
248     return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
249   }
250 
251   ExtendedValue withSExtOfValue(const Value *NewV) const {
252     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
253                         NewV->getType()->getPrimitiveSizeInBits();
254     // zext(sext(sext(NewV)))
255     return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
256   }
257 
258   APInt evaluateWith(APInt N) const {
259     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
260            "Incompatible bit width");
261     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
262     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
263     return N;
264   }
265 
266   bool canDistributeOver(bool NUW, bool NSW) const {
267     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
268     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
269     return (!ZExtBits || NUW) && (!SExtBits || NSW);
270   }
271 };
272 
273 /// Represents zext(sext(V)) * Scale + Offset.
274 struct LinearExpression {
275   ExtendedValue Val;
276   APInt Scale;
277   APInt Offset;
278 
279   LinearExpression(const ExtendedValue &Val, const APInt &Scale,
280                    const APInt &Offset)
281       : Val(Val), Scale(Scale), Offset(Offset) {}
282 
283   LinearExpression(const ExtendedValue &Val) : Val(Val) {
284     unsigned BitWidth = Val.getBitWidth();
285     Scale = APInt(BitWidth, 1);
286     Offset = APInt(BitWidth, 0);
287   }
288 };
289 }
290 
291 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
292 /// B are constant integers.
293 static LinearExpression GetLinearExpression(
294     const ExtendedValue &Val,  const DataLayout &DL, unsigned Depth,
295     AssumptionCache *AC, DominatorTree *DT) {
296   // Limit our recursion depth.
297   if (Depth == 6)
298     return Val;
299 
300   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
301     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
302                             Val.evaluateWith(Const->getValue()));
303 
304   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
305     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
306       APInt RHS = Val.evaluateWith(RHSC->getValue());
307       // The only non-OBO case we deal with is or, and only limited to the
308       // case where it is both nuw and nsw.
309       bool NUW = true, NSW = true;
310       if (isa<OverflowingBinaryOperator>(BOp)) {
311         NUW &= BOp->hasNoUnsignedWrap();
312         NSW &= BOp->hasNoSignedWrap();
313       }
314       if (!Val.canDistributeOver(NUW, NSW))
315         return Val;
316 
317       switch (BOp->getOpcode()) {
318       default:
319         // We don't understand this instruction, so we can't decompose it any
320         // further.
321         return Val;
322       case Instruction::Or:
323         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
324         // analyze it.
325         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
326                                BOp, DT))
327           return Val;
328 
329         LLVM_FALLTHROUGH;
330       case Instruction::Add: {
331         LinearExpression E = GetLinearExpression(
332             Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
333         E.Offset += RHS;
334         return E;
335       }
336       case Instruction::Sub: {
337         LinearExpression E = GetLinearExpression(
338             Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
339         E.Offset -= RHS;
340         return E;
341       }
342       case Instruction::Mul: {
343         LinearExpression E = GetLinearExpression(
344             Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
345         E.Offset *= RHS;
346         E.Scale *= RHS;
347         return E;
348       }
349       case Instruction::Shl:
350         // We're trying to linearize an expression of the kind:
351         //   shl i8 -128, 36
352         // where the shift count exceeds the bitwidth of the type.
353         // We can't decompose this further (the expression would return
354         // a poison value).
355         if (RHS.getLimitedValue() > Val.getBitWidth())
356           return Val;
357 
358         LinearExpression E = GetLinearExpression(
359             Val.withValue(BOp->getOperand(0)), DL, Depth + 1, AC, DT);
360         E.Offset <<= RHS.getLimitedValue();
361         E.Scale <<= RHS.getLimitedValue();
362         return E;
363       }
364     }
365   }
366 
367   if (isa<ZExtInst>(Val.V))
368     return GetLinearExpression(
369         Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
370         DL, Depth + 1, AC, DT);
371 
372   if (isa<SExtInst>(Val.V))
373     return GetLinearExpression(
374         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
375         DL, Depth + 1, AC, DT);
376 
377   return Val;
378 }
379 
380 /// To ensure a pointer offset fits in an integer of size PointerSize
381 /// (in bits) when that size is smaller than the maximum pointer size. This is
382 /// an issue, for example, in particular for 32b pointers with negative indices
383 /// that rely on two's complement wrap-arounds for precise alias information
384 /// where the maximum pointer size is 64b.
385 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
386   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
387   unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
388   return (Offset << ShiftBits).ashr(ShiftBits);
389 }
390 
391 static unsigned getMaxPointerSize(const DataLayout &DL) {
392   unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
393   if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
394   if (DoubleCalcBits) MaxPointerSize *= 2;
395 
396   return MaxPointerSize;
397 }
398 
399 /// If V is a symbolic pointer expression, decompose it into a base pointer
400 /// with a constant offset and a number of scaled symbolic offsets.
401 ///
402 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
403 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
404 /// specified amount, but which may have other unrepresented high bits. As
405 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
406 ///
407 /// This function is capable of analyzing everything that getUnderlyingObject
408 /// can look through. To be able to do that getUnderlyingObject and
409 /// DecomposeGEPExpression must use the same search depth
410 /// (MaxLookupSearchDepth).
411 BasicAAResult::DecomposedGEP
412 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
413                                       AssumptionCache *AC, DominatorTree *DT) {
414   // Limit recursion depth to limit compile time in crazy cases.
415   unsigned MaxLookup = MaxLookupSearchDepth;
416   SearchTimes++;
417   const Instruction *CxtI = dyn_cast<Instruction>(V);
418 
419   unsigned MaxPointerSize = getMaxPointerSize(DL);
420   DecomposedGEP Decomposed;
421   Decomposed.Offset = APInt(MaxPointerSize, 0);
422   Decomposed.HasCompileTimeConstantScale = true;
423   do {
424     // See if this is a bitcast or GEP.
425     const Operator *Op = dyn_cast<Operator>(V);
426     if (!Op) {
427       // The only non-operator case we can handle are GlobalAliases.
428       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
429         if (!GA->isInterposable()) {
430           V = GA->getAliasee();
431           continue;
432         }
433       }
434       Decomposed.Base = V;
435       return Decomposed;
436     }
437 
438     if (Op->getOpcode() == Instruction::BitCast ||
439         Op->getOpcode() == Instruction::AddrSpaceCast) {
440       V = Op->getOperand(0);
441       continue;
442     }
443 
444     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
445     if (!GEPOp) {
446       if (const auto *PHI = dyn_cast<PHINode>(V)) {
447         // Look through single-arg phi nodes created by LCSSA.
448         if (PHI->getNumIncomingValues() == 1) {
449           V = PHI->getIncomingValue(0);
450           continue;
451         }
452       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
453         // CaptureTracking can know about special capturing properties of some
454         // intrinsics like launder.invariant.group, that can't be expressed with
455         // the attributes, but have properties like returning aliasing pointer.
456         // Because some analysis may assume that nocaptured pointer is not
457         // returned from some special intrinsic (because function would have to
458         // be marked with returns attribute), it is crucial to use this function
459         // because it should be in sync with CaptureTracking. Not using it may
460         // cause weird miscompilations where 2 aliasing pointers are assumed to
461         // noalias.
462         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
463           V = RP;
464           continue;
465         }
466       }
467 
468       Decomposed.Base = V;
469       return Decomposed;
470     }
471 
472     // Track whether we've seen at least one in bounds gep, and if so, whether
473     // all geps parsed were in bounds.
474     if (Decomposed.InBounds == None)
475       Decomposed.InBounds = GEPOp->isInBounds();
476     else if (!GEPOp->isInBounds())
477       Decomposed.InBounds = false;
478 
479     // Don't attempt to analyze GEPs over unsized objects.
480     if (!GEPOp->getSourceElementType()->isSized()) {
481       Decomposed.Base = V;
482       return Decomposed;
483     }
484 
485     // Don't attempt to analyze GEPs if index scale is not a compile-time
486     // constant.
487     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
488       Decomposed.Base = V;
489       Decomposed.HasCompileTimeConstantScale = false;
490       return Decomposed;
491     }
492 
493     unsigned AS = GEPOp->getPointerAddressSpace();
494     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
495     gep_type_iterator GTI = gep_type_begin(GEPOp);
496     unsigned PointerSize = DL.getPointerSizeInBits(AS);
497     // Assume all GEP operands are constants until proven otherwise.
498     bool GepHasConstantOffset = true;
499     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
500          I != E; ++I, ++GTI) {
501       const Value *Index = *I;
502       // Compute the (potentially symbolic) offset in bytes for this index.
503       if (StructType *STy = GTI.getStructTypeOrNull()) {
504         // For a struct, add the member offset.
505         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
506         if (FieldNo == 0)
507           continue;
508 
509         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
510         continue;
511       }
512 
513       // For an array/pointer, add the element offset, explicitly scaled.
514       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
515         if (CIdx->isZero())
516           continue;
517         Decomposed.Offset +=
518             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
519             CIdx->getValue().sextOrTrunc(MaxPointerSize);
520         continue;
521       }
522 
523       GepHasConstantOffset = false;
524 
525       APInt Scale(MaxPointerSize,
526                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
527       // If the integer type is smaller than the pointer size, it is implicitly
528       // sign extended to pointer size.
529       unsigned Width = Index->getType()->getIntegerBitWidth();
530       unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
531       LinearExpression LE = GetLinearExpression(
532           ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
533 
534       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
535       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
536 
537       // It can be the case that, even through C1*V+C2 does not overflow for
538       // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
539       // decompose the expression in this way.
540       //
541       // FIXME: C1*Scale and the other operations in the decomposed
542       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
543       // possibility.
544       bool Overflow;
545       APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
546                            .smul_ov(Scale, Overflow);
547       if (Overflow) {
548         LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
549       } else {
550         Decomposed.Offset += ScaledOffset;
551         Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
552       }
553 
554       // If we already had an occurrence of this index variable, merge this
555       // scale into it.  For example, we want to handle:
556       //   A[x][x] -> x*16 + x*4 -> x*20
557       // This also ensures that 'x' only appears in the index list once.
558       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
559         if (Decomposed.VarIndices[i].V == LE.Val.V &&
560             Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
561             Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
562           Scale += Decomposed.VarIndices[i].Scale;
563           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
564           break;
565         }
566       }
567 
568       // Make sure that we have a scale that makes sense for this target's
569       // pointer size.
570       Scale = adjustToPointerSize(Scale, PointerSize);
571 
572       if (!!Scale) {
573         VariableGEPIndex Entry = {LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits,
574                                   Scale, CxtI};
575         Decomposed.VarIndices.push_back(Entry);
576       }
577     }
578 
579     // Take care of wrap-arounds
580     if (GepHasConstantOffset)
581       Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
582 
583     // Analyze the base pointer next.
584     V = GEPOp->getOperand(0);
585   } while (--MaxLookup);
586 
587   // If the chain of expressions is too deep, just return early.
588   Decomposed.Base = V;
589   SearchLimitReached++;
590   return Decomposed;
591 }
592 
593 /// Returns whether the given pointer value points to memory that is local to
594 /// the function, with global constants being considered local to all
595 /// functions.
596 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
597                                            AAQueryInfo &AAQI, bool OrLocal) {
598   assert(Visited.empty() && "Visited must be cleared after use!");
599 
600   unsigned MaxLookup = 8;
601   SmallVector<const Value *, 16> Worklist;
602   Worklist.push_back(Loc.Ptr);
603   do {
604     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
605     if (!Visited.insert(V).second) {
606       Visited.clear();
607       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
608     }
609 
610     // An alloca instruction defines local memory.
611     if (OrLocal && isa<AllocaInst>(V))
612       continue;
613 
614     // A global constant counts as local memory for our purposes.
615     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
616       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
617       // global to be marked constant in some modules and non-constant in
618       // others.  GV may even be a declaration, not a definition.
619       if (!GV->isConstant()) {
620         Visited.clear();
621         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
622       }
623       continue;
624     }
625 
626     // If both select values point to local memory, then so does the select.
627     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
628       Worklist.push_back(SI->getTrueValue());
629       Worklist.push_back(SI->getFalseValue());
630       continue;
631     }
632 
633     // If all values incoming to a phi node point to local memory, then so does
634     // the phi.
635     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
636       // Don't bother inspecting phi nodes with many operands.
637       if (PN->getNumIncomingValues() > MaxLookup) {
638         Visited.clear();
639         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
640       }
641       append_range(Worklist, PN->incoming_values());
642       continue;
643     }
644 
645     // Otherwise be conservative.
646     Visited.clear();
647     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
648   } while (!Worklist.empty() && --MaxLookup);
649 
650   Visited.clear();
651   return Worklist.empty();
652 }
653 
654 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
655   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
656   return II && II->getIntrinsicID() == IID;
657 }
658 
659 /// Returns the behavior when calling the given call site.
660 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
661   if (Call->doesNotAccessMemory())
662     // Can't do better than this.
663     return FMRB_DoesNotAccessMemory;
664 
665   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
666 
667   // If the callsite knows it only reads memory, don't return worse
668   // than that.
669   if (Call->onlyReadsMemory())
670     Min = FMRB_OnlyReadsMemory;
671   else if (Call->doesNotReadMemory())
672     Min = FMRB_OnlyWritesMemory;
673 
674   if (Call->onlyAccessesArgMemory())
675     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
676   else if (Call->onlyAccessesInaccessibleMemory())
677     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
678   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
679     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
680 
681   // If the call has operand bundles then aliasing attributes from the function
682   // it calls do not directly apply to the call.  This can be made more precise
683   // in the future.
684   if (!Call->hasOperandBundles())
685     if (const Function *F = Call->getCalledFunction())
686       Min =
687           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
688 
689   return Min;
690 }
691 
692 /// Returns the behavior when calling the given function. For use when the call
693 /// site is not known.
694 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
695   // If the function declares it doesn't access memory, we can't do better.
696   if (F->doesNotAccessMemory())
697     return FMRB_DoesNotAccessMemory;
698 
699   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
700 
701   // If the function declares it only reads memory, go with that.
702   if (F->onlyReadsMemory())
703     Min = FMRB_OnlyReadsMemory;
704   else if (F->doesNotReadMemory())
705     Min = FMRB_OnlyWritesMemory;
706 
707   if (F->onlyAccessesArgMemory())
708     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
709   else if (F->onlyAccessesInaccessibleMemory())
710     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
711   else if (F->onlyAccessesInaccessibleMemOrArgMem())
712     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
713 
714   return Min;
715 }
716 
717 /// Returns true if this is a writeonly (i.e Mod only) parameter.
718 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
719                              const TargetLibraryInfo &TLI) {
720   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
721     return true;
722 
723   // We can bound the aliasing properties of memset_pattern16 just as we can
724   // for memcpy/memset.  This is particularly important because the
725   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
726   // whenever possible.
727   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
728   // attributes.
729   LibFunc F;
730   if (Call->getCalledFunction() &&
731       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
732       F == LibFunc_memset_pattern16 && TLI.has(F))
733     if (ArgIdx == 0)
734       return true;
735 
736   // TODO: memset_pattern4, memset_pattern8
737   // TODO: _chk variants
738   // TODO: strcmp, strcpy
739 
740   return false;
741 }
742 
743 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
744                                            unsigned ArgIdx) {
745   // Checking for known builtin intrinsics and target library functions.
746   if (isWriteOnlyParam(Call, ArgIdx, TLI))
747     return ModRefInfo::Mod;
748 
749   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
750     return ModRefInfo::Ref;
751 
752   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
753     return ModRefInfo::NoModRef;
754 
755   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
756 }
757 
758 #ifndef NDEBUG
759 static const Function *getParent(const Value *V) {
760   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
761     if (!inst->getParent())
762       return nullptr;
763     return inst->getParent()->getParent();
764   }
765 
766   if (const Argument *arg = dyn_cast<Argument>(V))
767     return arg->getParent();
768 
769   return nullptr;
770 }
771 
772 static bool notDifferentParent(const Value *O1, const Value *O2) {
773 
774   const Function *F1 = getParent(O1);
775   const Function *F2 = getParent(O2);
776 
777   return !F1 || !F2 || F1 == F2;
778 }
779 #endif
780 
781 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
782                                  const MemoryLocation &LocB,
783                                  AAQueryInfo &AAQI) {
784   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
785          "BasicAliasAnalysis doesn't support interprocedural queries.");
786   return aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr, LocB.Size,
787                     LocB.AATags, AAQI);
788 }
789 
790 /// Checks to see if the specified callsite can clobber the specified memory
791 /// object.
792 ///
793 /// Since we only look at local properties of this function, we really can't
794 /// say much about this query.  We do, however, use simple "address taken"
795 /// analysis on local objects.
796 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
797                                         const MemoryLocation &Loc,
798                                         AAQueryInfo &AAQI) {
799   assert(notDifferentParent(Call, Loc.Ptr) &&
800          "AliasAnalysis query involving multiple functions!");
801 
802   const Value *Object = getUnderlyingObject(Loc.Ptr);
803 
804   // Calls marked 'tail' cannot read or write allocas from the current frame
805   // because the current frame might be destroyed by the time they run. However,
806   // a tail call may use an alloca with byval. Calling with byval copies the
807   // contents of the alloca into argument registers or stack slots, so there is
808   // no lifetime issue.
809   if (isa<AllocaInst>(Object))
810     if (const CallInst *CI = dyn_cast<CallInst>(Call))
811       if (CI->isTailCall() &&
812           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
813         return ModRefInfo::NoModRef;
814 
815   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
816   // modify them even though the alloca is not escaped.
817   if (auto *AI = dyn_cast<AllocaInst>(Object))
818     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
819       return ModRefInfo::Mod;
820 
821   // If the pointer is to a locally allocated object that does not escape,
822   // then the call can not mod/ref the pointer unless the call takes the pointer
823   // as an argument, and itself doesn't capture it.
824   if (!isa<Constant>(Object) && Call != Object &&
825       isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
826 
827     // Optimistically assume that call doesn't touch Object and check this
828     // assumption in the following loop.
829     ModRefInfo Result = ModRefInfo::NoModRef;
830     bool IsMustAlias = true;
831 
832     unsigned OperandNo = 0;
833     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
834          CI != CE; ++CI, ++OperandNo) {
835       // Only look at the no-capture or byval pointer arguments.  If this
836       // pointer were passed to arguments that were neither of these, then it
837       // couldn't be no-capture.
838       if (!(*CI)->getType()->isPointerTy() ||
839           (!Call->doesNotCapture(OperandNo) &&
840            OperandNo < Call->getNumArgOperands() &&
841            !Call->isByValArgument(OperandNo)))
842         continue;
843 
844       // Call doesn't access memory through this operand, so we don't care
845       // if it aliases with Object.
846       if (Call->doesNotAccessMemory(OperandNo))
847         continue;
848 
849       // If this is a no-capture pointer argument, see if we can tell that it
850       // is impossible to alias the pointer we're checking.
851       AliasResult AR = getBestAAResults().alias(
852           MemoryLocation::getBeforeOrAfter(*CI),
853           MemoryLocation::getBeforeOrAfter(Object), AAQI);
854       if (AR != MustAlias)
855         IsMustAlias = false;
856       // Operand doesn't alias 'Object', continue looking for other aliases
857       if (AR == NoAlias)
858         continue;
859       // Operand aliases 'Object', but call doesn't modify it. Strengthen
860       // initial assumption and keep looking in case if there are more aliases.
861       if (Call->onlyReadsMemory(OperandNo)) {
862         Result = setRef(Result);
863         continue;
864       }
865       // Operand aliases 'Object' but call only writes into it.
866       if (Call->doesNotReadMemory(OperandNo)) {
867         Result = setMod(Result);
868         continue;
869       }
870       // This operand aliases 'Object' and call reads and writes into it.
871       // Setting ModRef will not yield an early return below, MustAlias is not
872       // used further.
873       Result = ModRefInfo::ModRef;
874       break;
875     }
876 
877     // No operand aliases, reset Must bit. Add below if at least one aliases
878     // and all aliases found are MustAlias.
879     if (isNoModRef(Result))
880       IsMustAlias = false;
881 
882     // Early return if we improved mod ref information
883     if (!isModAndRefSet(Result)) {
884       if (isNoModRef(Result))
885         return ModRefInfo::NoModRef;
886       return IsMustAlias ? setMust(Result) : clearMust(Result);
887     }
888   }
889 
890   // If the call is malloc/calloc like, we can assume that it doesn't
891   // modify any IR visible value.  This is only valid because we assume these
892   // routines do not read values visible in the IR.  TODO: Consider special
893   // casing realloc and strdup routines which access only their arguments as
894   // well.  Or alternatively, replace all of this with inaccessiblememonly once
895   // that's implemented fully.
896   if (isMallocOrCallocLikeFn(Call, &TLI)) {
897     // Be conservative if the accessed pointer may alias the allocation -
898     // fallback to the generic handling below.
899     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call),
900                                  Loc, AAQI) == NoAlias)
901       return ModRefInfo::NoModRef;
902   }
903 
904   // The semantics of memcpy intrinsics either exactly overlap or do not
905   // overlap, i.e., source and destination of any given memcpy are either
906   // no-alias or must-alias.
907   if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
908     AliasResult SrcAA =
909         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
910     AliasResult DestAA =
911         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
912     // It's also possible for Loc to alias both src and dest, or neither.
913     ModRefInfo rv = ModRefInfo::NoModRef;
914     if (SrcAA != NoAlias)
915       rv = setRef(rv);
916     if (DestAA != NoAlias)
917       rv = setMod(rv);
918     return rv;
919   }
920 
921   // Guard intrinsics are marked as arbitrarily writing so that proper control
922   // dependencies are maintained but they never mods any particular memory
923   // location.
924   //
925   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
926   // heap state at the point the guard is issued needs to be consistent in case
927   // the guard invokes the "deopt" continuation.
928   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
929     return ModRefInfo::Ref;
930   // The same applies to deoptimize which is essentially a guard(false).
931   if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
932     return ModRefInfo::Ref;
933 
934   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
935   // writing so that proper control dependencies are maintained but they never
936   // mod any particular memory location visible to the IR.
937   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
938   // intrinsic is now modeled as reading memory. This prevents hoisting the
939   // invariant.start intrinsic over stores. Consider:
940   // *ptr = 40;
941   // *ptr = 50;
942   // invariant_start(ptr)
943   // int val = *ptr;
944   // print(val);
945   //
946   // This cannot be transformed to:
947   //
948   // *ptr = 40;
949   // invariant_start(ptr)
950   // *ptr = 50;
951   // int val = *ptr;
952   // print(val);
953   //
954   // The transformation will cause the second store to be ignored (based on
955   // rules of invariant.start)  and print 40, while the first program always
956   // prints 50.
957   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
958     return ModRefInfo::Ref;
959 
960   // The AAResultBase base class has some smarts, lets use them.
961   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
962 }
963 
964 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
965                                         const CallBase *Call2,
966                                         AAQueryInfo &AAQI) {
967   // Guard intrinsics are marked as arbitrarily writing so that proper control
968   // dependencies are maintained but they never mods any particular memory
969   // location.
970   //
971   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
972   // heap state at the point the guard is issued needs to be consistent in case
973   // the guard invokes the "deopt" continuation.
974 
975   // NB! This function is *not* commutative, so we special case two
976   // possibilities for guard intrinsics.
977 
978   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
979     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
980                ? ModRefInfo::Ref
981                : ModRefInfo::NoModRef;
982 
983   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
984     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
985                ? ModRefInfo::Mod
986                : ModRefInfo::NoModRef;
987 
988   // The AAResultBase base class has some smarts, lets use them.
989   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
990 }
991 
992 /// Return true if we know V to the base address of the corresponding memory
993 /// object.  This implies that any address less than V must be out of bounds
994 /// for the underlying object.  Note that just being isIdentifiedObject() is
995 /// not enough - For example, a negative offset from a noalias argument or call
996 /// can be inbounds w.r.t the actual underlying object.
997 static bool isBaseOfObject(const Value *V) {
998   // TODO: We can handle other cases here
999   // 1) For GC languages, arguments to functions are often required to be
1000   //    base pointers.
1001   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1002   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1003 }
1004 
1005 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1006 /// another pointer.
1007 ///
1008 /// We know that V1 is a GEP, but we don't know anything about V2.
1009 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1010 /// V2.
1011 AliasResult BasicAAResult::aliasGEP(
1012     const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
1013     const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
1014     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1015   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1016     // TODO: This limitation exists for compile-time reasons. Relax it if we
1017     // can avoid exponential pathological cases.
1018     if (!isa<GEPOperator>(V2))
1019       return MayAlias;
1020 
1021     // If both accesses have unknown size, we can only check whether the base
1022     // objects don't alias.
1023     AliasResult BaseAlias = getBestAAResults().alias(
1024         MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1025         MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1026     return BaseAlias == NoAlias ? NoAlias : MayAlias;
1027   }
1028 
1029   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1030   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1031 
1032   // Don't attempt to analyze the decomposed GEP if index scale is not a
1033   // compile-time constant.
1034   if (!DecompGEP1.HasCompileTimeConstantScale ||
1035       !DecompGEP2.HasCompileTimeConstantScale)
1036     return MayAlias;
1037 
1038   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1039          "DecomposeGEPExpression returned a result different from "
1040          "getUnderlyingObject");
1041 
1042   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1043   // symbolic difference.
1044   DecompGEP1.Offset -= DecompGEP2.Offset;
1045   GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1046 
1047   // If an inbounds GEP would have to start from an out of bounds address
1048   // for the two to alias, then we can assume noalias.
1049   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1050       V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1051       isBaseOfObject(DecompGEP2.Base))
1052     return NoAlias;
1053 
1054   if (isa<GEPOperator>(V2)) {
1055     // Symmetric case to above.
1056     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1057         V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1058         isBaseOfObject(DecompGEP1.Base))
1059     return NoAlias;
1060   }
1061 
1062   // For GEPs with identical offsets, we can preserve the size and AAInfo
1063   // when performing the alias check on the underlying objects.
1064   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1065     return getBestAAResults().alias(
1066         MemoryLocation(UnderlyingV1, V1Size, V1AAInfo),
1067         MemoryLocation(UnderlyingV2, V2Size, V2AAInfo), AAQI);
1068 
1069   // Do the base pointers alias?
1070   AliasResult BaseAlias = getBestAAResults().alias(
1071       MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1072       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1073 
1074   // If we get a No or May, then return it immediately, no amount of analysis
1075   // will improve this situation.
1076   if (BaseAlias != MustAlias) {
1077     assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1078     return BaseAlias;
1079   }
1080 
1081   // If there is a constant difference between the pointers, but the difference
1082   // is less than the size of the associated memory object, then we know
1083   // that the objects are partially overlapping.  If the difference is
1084   // greater, we know they do not overlap.
1085   if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1086     APInt &Off = DecompGEP1.Offset;
1087 
1088     // Initialize for Off >= 0 (V2 <= GEP1) case.
1089     const Value *LeftPtr = V2;
1090     const Value *RightPtr = GEP1;
1091     LocationSize VLeftSize = V2Size;
1092     LocationSize VRightSize = V1Size;
1093 
1094     if (Off.isNegative()) {
1095       // Swap if we have the situation where:
1096       // +                +
1097       // | BaseOffset     |
1098       // ---------------->|
1099       // |-->V1Size       |-------> V2Size
1100       // GEP1             V2
1101       std::swap(LeftPtr, RightPtr);
1102       std::swap(VLeftSize, VRightSize);
1103       Off = -Off;
1104     }
1105 
1106     if (VLeftSize.hasValue()) {
1107       const uint64_t LSize = VLeftSize.getValue();
1108       if (Off.ult(LSize)) {
1109         // Conservatively drop processing if a phi was visited and/or offset is
1110         // too big.
1111         if (VisitedPhiBBs.empty() && VRightSize.hasValue() &&
1112             Off.ule(INT64_MAX)) {
1113           // Memory referenced by right pointer is nested. Save the offset in
1114           // cache.
1115           const uint64_t RSize = VRightSize.getValue();
1116           if ((Off + RSize).ule(LSize))
1117             AAQI.setClobberOffset(LeftPtr, RightPtr, LSize, RSize,
1118                                   Off.getSExtValue());
1119         }
1120         return PartialAlias;
1121       }
1122       return NoAlias;
1123     }
1124   }
1125 
1126   if (!DecompGEP1.VarIndices.empty()) {
1127     APInt GCD;
1128     bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
1129     bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
1130     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1131       const APInt &Scale = DecompGEP1.VarIndices[i].Scale;
1132       if (i == 0)
1133         GCD = Scale.abs();
1134       else
1135         GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());
1136 
1137       if (AllNonNegative || AllNonPositive) {
1138         // If the Value could change between cycles, then any reasoning about
1139         // the Value this cycle may not hold in the next cycle. We'll just
1140         // give up if we can't determine conditions that hold for every cycle:
1141         const Value *V = DecompGEP1.VarIndices[i].V;
1142         const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;
1143 
1144         KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
1145         bool SignKnownZero = Known.isNonNegative();
1146         bool SignKnownOne = Known.isNegative();
1147 
1148         // Zero-extension widens the variable, and so forces the sign
1149         // bit to zero.
1150         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1151         SignKnownZero |= IsZExt;
1152         SignKnownOne &= !IsZExt;
1153 
1154         AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1155                           (SignKnownOne && Scale.isNonPositive());
1156         AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1157                           (SignKnownOne && Scale.isNonNegative());
1158       }
1159     }
1160 
1161     // We now have accesses at two offsets from the same base:
1162     //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1163     //  2. 0 with size V2Size
1164     // Using arithmetic modulo GCD, the accesses are at
1165     // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1166     // into the range [V2Size..GCD), then we know they cannot overlap.
1167     APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1168     if (ModOffset.isNegative())
1169       ModOffset += GCD; // We want mod, not rem.
1170     if (V1Size.hasValue() && V2Size.hasValue() &&
1171         ModOffset.uge(V2Size.getValue()) &&
1172         (GCD - ModOffset).uge(V1Size.getValue()))
1173       return NoAlias;
1174 
1175     // If we know all the variables are non-negative, then the total offset is
1176     // also non-negative and >= DecompGEP1.Offset. We have the following layout:
1177     // [0, V2Size) ... [TotalOffset, TotalOffer+V1Size]
1178     // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
1179     if (AllNonNegative && V2Size.hasValue() &&
1180         DecompGEP1.Offset.uge(V2Size.getValue()))
1181       return NoAlias;
1182     // Similarly, if the variables are non-positive, then the total offset is
1183     // also non-positive and <= DecompGEP1.Offset. We have the following layout:
1184     // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1185     // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
1186     if (AllNonPositive && V1Size.hasValue() &&
1187         (-DecompGEP1.Offset).uge(V1Size.getValue()))
1188       return NoAlias;
1189 
1190     if (V1Size.hasValue() && V2Size.hasValue()) {
1191       // Try to determine whether abs(VarIndex) > 0.
1192       Optional<APInt> MinAbsVarIndex;
1193       if (DecompGEP1.VarIndices.size() == 1) {
1194         // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
1195         const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1196         if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
1197           MinAbsVarIndex = Var.Scale.abs();
1198       } else if (DecompGEP1.VarIndices.size() == 2) {
1199         // VarIndex = Scale*V0 + (-Scale)*V1.
1200         // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1201         // Check that VisitedPhiBBs is empty, to avoid reasoning about
1202         // inequality of values across loop iterations.
1203         const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1204         const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1205         if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
1206             Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
1207             isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
1208           MinAbsVarIndex = Var0.Scale.abs();
1209       }
1210 
1211       if (MinAbsVarIndex) {
1212         // The constant offset will have added at least +/-MinAbsVarIndex to it.
1213         APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1214         APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1215         // Check that an access at OffsetLo or lower, and an access at OffsetHi
1216         // or higher both do not alias.
1217         if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1218             OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1219           return NoAlias;
1220       }
1221     }
1222 
1223     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1224                                 DecompGEP1.Offset, &AC, DT))
1225       return NoAlias;
1226   }
1227 
1228   // Statically, we can see that the base objects are the same, but the
1229   // pointers have dynamic offsets which we can't resolve. And none of our
1230   // little tricks above worked.
1231   return MayAlias;
1232 }
1233 
1234 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1235   // If the results agree, take it.
1236   if (A == B)
1237     return A;
1238   // A mix of PartialAlias and MustAlias is PartialAlias.
1239   if ((A == PartialAlias && B == MustAlias) ||
1240       (B == PartialAlias && A == MustAlias))
1241     return PartialAlias;
1242   // Otherwise, we don't know anything.
1243   return MayAlias;
1244 }
1245 
1246 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1247 /// against another.
1248 AliasResult
1249 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1250                            const AAMDNodes &SIAAInfo, const Value *V2,
1251                            LocationSize V2Size, const AAMDNodes &V2AAInfo,
1252                            AAQueryInfo &AAQI) {
1253   // If the values are Selects with the same condition, we can do a more precise
1254   // check: just check for aliases between the values on corresponding arms.
1255   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1256     if (SI->getCondition() == SI2->getCondition()) {
1257       AliasResult Alias = getBestAAResults().alias(
1258           MemoryLocation(SI->getTrueValue(), SISize, SIAAInfo),
1259           MemoryLocation(SI2->getTrueValue(), V2Size, V2AAInfo), AAQI);
1260       if (Alias == MayAlias)
1261         return MayAlias;
1262       AliasResult ThisAlias = getBestAAResults().alias(
1263           MemoryLocation(SI->getFalseValue(), SISize, SIAAInfo),
1264           MemoryLocation(SI2->getFalseValue(), V2Size, V2AAInfo), AAQI);
1265       return MergeAliasResults(ThisAlias, Alias);
1266     }
1267 
1268   // If both arms of the Select node NoAlias or MustAlias V2, then returns
1269   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1270   AliasResult Alias = getBestAAResults().alias(
1271       MemoryLocation(V2, V2Size, V2AAInfo),
1272       MemoryLocation(SI->getTrueValue(), SISize, SIAAInfo), AAQI);
1273   if (Alias == MayAlias)
1274     return MayAlias;
1275 
1276   AliasResult ThisAlias = getBestAAResults().alias(
1277       MemoryLocation(V2, V2Size, V2AAInfo),
1278       MemoryLocation(SI->getFalseValue(), SISize, SIAAInfo), AAQI);
1279   return MergeAliasResults(ThisAlias, Alias);
1280 }
1281 
1282 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1283 /// another.
1284 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1285                                     const AAMDNodes &PNAAInfo, const Value *V2,
1286                                     LocationSize V2Size,
1287                                     const AAMDNodes &V2AAInfo,
1288                                     AAQueryInfo &AAQI) {
1289   // If the values are PHIs in the same block, we can do a more precise
1290   // as well as efficient check: just check for aliases between the values
1291   // on corresponding edges.
1292   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1293     if (PN2->getParent() == PN->getParent()) {
1294       Optional<AliasResult> Alias;
1295       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1296         AliasResult ThisAlias = getBestAAResults().alias(
1297             MemoryLocation(PN->getIncomingValue(i), PNSize, PNAAInfo),
1298             MemoryLocation(
1299                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size,
1300                 V2AAInfo),
1301             AAQI);
1302         if (Alias)
1303           *Alias = MergeAliasResults(*Alias, ThisAlias);
1304         else
1305           Alias = ThisAlias;
1306         if (*Alias == MayAlias)
1307           break;
1308       }
1309       return *Alias;
1310     }
1311 
1312   SmallVector<Value *, 4> V1Srcs;
1313   // If a phi operand recurses back to the phi, we can still determine NoAlias
1314   // if we don't alias the underlying objects of the other phi operands, as we
1315   // know that the recursive phi needs to be based on them in some way.
1316   bool isRecursive = false;
1317   auto CheckForRecPhi = [&](Value *PV) {
1318     if (!EnableRecPhiAnalysis)
1319       return false;
1320     if (getUnderlyingObject(PV) == PN) {
1321       isRecursive = true;
1322       return true;
1323     }
1324     return false;
1325   };
1326 
1327   if (PV) {
1328     // If we have PhiValues then use it to get the underlying phi values.
1329     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1330     // If we have more phi values than the search depth then return MayAlias
1331     // conservatively to avoid compile time explosion. The worst possible case
1332     // is if both sides are PHI nodes. In which case, this is O(m x n) time
1333     // where 'm' and 'n' are the number of PHI sources.
1334     if (PhiValueSet.size() > MaxLookupSearchDepth)
1335       return MayAlias;
1336     // Add the values to V1Srcs
1337     for (Value *PV1 : PhiValueSet) {
1338       if (CheckForRecPhi(PV1))
1339         continue;
1340       V1Srcs.push_back(PV1);
1341     }
1342   } else {
1343     // If we don't have PhiInfo then just look at the operands of the phi itself
1344     // FIXME: Remove this once we can guarantee that we have PhiInfo always
1345     SmallPtrSet<Value *, 4> UniqueSrc;
1346     Value *OnePhi = nullptr;
1347     for (Value *PV1 : PN->incoming_values()) {
1348       if (isa<PHINode>(PV1)) {
1349         if (OnePhi && OnePhi != PV1) {
1350           // To control potential compile time explosion, we choose to be
1351           // conserviate when we have more than one Phi input.  It is important
1352           // that we handle the single phi case as that lets us handle LCSSA
1353           // phi nodes and (combined with the recursive phi handling) simple
1354           // pointer induction variable patterns.
1355           return MayAlias;
1356         }
1357         OnePhi = PV1;
1358       }
1359 
1360       if (CheckForRecPhi(PV1))
1361         continue;
1362 
1363       if (UniqueSrc.insert(PV1).second)
1364         V1Srcs.push_back(PV1);
1365     }
1366 
1367     if (OnePhi && UniqueSrc.size() > 1)
1368       // Out of an abundance of caution, allow only the trivial lcssa and
1369       // recursive phi cases.
1370       return MayAlias;
1371   }
1372 
1373   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1374   // value. This should only be possible in blocks unreachable from the entry
1375   // block, but return MayAlias just in case.
1376   if (V1Srcs.empty())
1377     return MayAlias;
1378 
1379   // If this PHI node is recursive, indicate that the pointer may be moved
1380   // across iterations. We can only prove NoAlias if different underlying
1381   // objects are involved.
1382   if (isRecursive)
1383     PNSize = LocationSize::beforeOrAfterPointer();
1384 
1385   // In the recursive alias queries below, we may compare values from two
1386   // different loop iterations. Keep track of visited phi blocks, which will
1387   // be used when determining value equivalence.
1388   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1389   auto _ = make_scope_exit([&]() {
1390     if (BlockInserted)
1391       VisitedPhiBBs.erase(PN->getParent());
1392   });
1393 
1394   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1395   // have been cached earlier may no longer be valid. Perform recursive queries
1396   // with a new AAQueryInfo.
1397   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1398   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1399 
1400   AliasResult Alias = getBestAAResults().alias(
1401       MemoryLocation(V2, V2Size, V2AAInfo),
1402       MemoryLocation(V1Srcs[0], PNSize, PNAAInfo), *UseAAQI);
1403 
1404   // Early exit if the check of the first PHI source against V2 is MayAlias.
1405   // Other results are not possible.
1406   if (Alias == MayAlias)
1407     return MayAlias;
1408   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1409   // remain valid to all elements and needs to conservatively return MayAlias.
1410   if (isRecursive && Alias != NoAlias)
1411     return MayAlias;
1412 
1413   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1414   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1415   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1416     Value *V = V1Srcs[i];
1417 
1418     AliasResult ThisAlias = getBestAAResults().alias(
1419         MemoryLocation(V2, V2Size, V2AAInfo),
1420         MemoryLocation(V, PNSize, PNAAInfo), *UseAAQI);
1421     Alias = MergeAliasResults(ThisAlias, Alias);
1422     if (Alias == MayAlias)
1423       break;
1424   }
1425 
1426   return Alias;
1427 }
1428 
1429 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1430 /// array references.
1431 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1432                                       const AAMDNodes &V1AAInfo,
1433                                       const Value *V2, LocationSize V2Size,
1434                                       const AAMDNodes &V2AAInfo,
1435                                       AAQueryInfo &AAQI) {
1436   // If either of the memory references is empty, it doesn't matter what the
1437   // pointer values are.
1438   if (V1Size.isZero() || V2Size.isZero())
1439     return NoAlias;
1440 
1441   // Strip off any casts if they exist.
1442   V1 = V1->stripPointerCastsForAliasAnalysis();
1443   V2 = V2->stripPointerCastsForAliasAnalysis();
1444 
1445   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1446   // value for undef that aliases nothing in the program.
1447   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1448     return NoAlias;
1449 
1450   // Are we checking for alias of the same value?
1451   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1452   // different iterations. We must therefore make sure that this is not the
1453   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1454   // happen by looking at the visited phi nodes and making sure they cannot
1455   // reach the value.
1456   if (isValueEqualInPotentialCycles(V1, V2))
1457     return MustAlias;
1458 
1459   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1460     return NoAlias; // Scalars cannot alias each other
1461 
1462   // Figure out what objects these things are pointing to if we can.
1463   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1464   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1465 
1466   // Null values in the default address space don't point to any object, so they
1467   // don't alias any other pointer.
1468   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1469     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1470       return NoAlias;
1471   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1472     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1473       return NoAlias;
1474 
1475   if (O1 != O2) {
1476     // If V1/V2 point to two different objects, we know that we have no alias.
1477     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1478       return NoAlias;
1479 
1480     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1481     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1482         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1483       return NoAlias;
1484 
1485     // Function arguments can't alias with things that are known to be
1486     // unambigously identified at the function level.
1487     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1488         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1489       return NoAlias;
1490 
1491     // If one pointer is the result of a call/invoke or load and the other is a
1492     // non-escaping local object within the same function, then we know the
1493     // object couldn't escape to a point where the call could return it.
1494     //
1495     // Note that if the pointers are in different functions, there are a
1496     // variety of complications. A call with a nocapture argument may still
1497     // temporary store the nocapture argument's value in a temporary memory
1498     // location if that memory location doesn't escape. Or it may pass a
1499     // nocapture value to other functions as long as they don't capture it.
1500     if (isEscapeSource(O1) &&
1501         isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
1502       return NoAlias;
1503     if (isEscapeSource(O2) &&
1504         isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
1505       return NoAlias;
1506   }
1507 
1508   // If the size of one access is larger than the entire object on the other
1509   // side, then we know such behavior is undefined and can assume no alias.
1510   bool NullIsValidLocation = NullPointerIsDefined(&F);
1511   if ((isObjectSmallerThan(
1512           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1513           TLI, NullIsValidLocation)) ||
1514       (isObjectSmallerThan(
1515           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1516           TLI, NullIsValidLocation)))
1517     return NoAlias;
1518 
1519   // If one the accesses may be before the accessed pointer, canonicalize this
1520   // by using unknown after-pointer sizes for both accesses. This is
1521   // equivalent, because regardless of which pointer is lower, one of them
1522   // will always came after the other, as long as the underlying objects aren't
1523   // disjoint. We do this so that the rest of BasicAA does not have to deal
1524   // with accesses before the base pointer, and to improve cache utilization by
1525   // merging equivalent states.
1526   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1527     V1Size = LocationSize::afterPointer();
1528     V2Size = LocationSize::afterPointer();
1529   }
1530 
1531   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1532   // for recursive queries. For this reason, this limit is chosen to be large
1533   // enough to be very rarely hit, while still being small enough to avoid
1534   // stack overflows.
1535   if (AAQI.Depth >= 512)
1536     return MayAlias;
1537 
1538   // Check the cache before climbing up use-def chains. This also terminates
1539   // otherwise infinitely recursive queries.
1540   AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1541                             MemoryLocation(V2, V2Size, V2AAInfo));
1542   if (V1 > V2)
1543     std::swap(Locs.first, Locs.second);
1544   const auto &Pair = AAQI.AliasCache.try_emplace(
1545       Locs, AAQueryInfo::CacheEntry{NoAlias, 0});
1546   if (!Pair.second) {
1547     auto &Entry = Pair.first->second;
1548     if (!Entry.isDefinitive()) {
1549       // Remember that we used an assumption.
1550       ++Entry.NumAssumptionUses;
1551       ++AAQI.NumAssumptionUses;
1552     }
1553     return Entry.Result;
1554   }
1555 
1556   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1557   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1558   AliasResult Result = aliasCheckRecursive(V1, V1Size, V1AAInfo, V2, V2Size,
1559                                            V2AAInfo, AAQI, O1, O2);
1560 
1561   auto It = AAQI.AliasCache.find(Locs);
1562   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1563   auto &Entry = It->second;
1564 
1565   // Check whether a NoAlias assumption has been used, but disproven.
1566   bool AssumptionDisproven = Entry.NumAssumptionUses > 0 && Result != NoAlias;
1567   if (AssumptionDisproven)
1568     Result = MayAlias;
1569 
1570   // This is a definitive result now, when considered as a root query.
1571   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1572   Entry.Result = Result;
1573   Entry.NumAssumptionUses = -1;
1574 
1575   // If the assumption has been disproven, remove any results that may have
1576   // been based on this assumption. Do this after the Entry updates above to
1577   // avoid iterator invalidation.
1578   if (AssumptionDisproven)
1579     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1580       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1581 
1582   // The result may still be based on assumptions higher up in the chain.
1583   // Remember it, so it can be purged from the cache later.
1584   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses && Result != MayAlias)
1585     AAQI.AssumptionBasedResults.push_back(Locs);
1586   return Result;
1587 }
1588 
1589 AliasResult BasicAAResult::aliasCheckRecursive(
1590     const Value *V1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
1591     const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
1592     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1593   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1594     AliasResult Result =
1595         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
1596     if (Result != MayAlias)
1597       return Result;
1598   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1599     AliasResult Result =
1600         aliasGEP(GV2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O2, O1, AAQI);
1601     if (Result != MayAlias)
1602       return Result;
1603   }
1604 
1605   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1606     AliasResult Result =
1607         aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, AAQI);
1608     if (Result != MayAlias)
1609       return Result;
1610   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1611     AliasResult Result =
1612         aliasPHI(PN, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, AAQI);
1613     if (Result != MayAlias)
1614       return Result;
1615   }
1616 
1617   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1618     AliasResult Result =
1619         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, AAQI);
1620     if (Result != MayAlias)
1621       return Result;
1622   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1623     AliasResult Result =
1624         aliasSelect(S2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, AAQI);
1625     if (Result != MayAlias)
1626       return Result;
1627   }
1628 
1629   // If both pointers are pointing into the same object and one of them
1630   // accesses the entire object, then the accesses must overlap in some way.
1631   if (O1 == O2) {
1632     bool NullIsValidLocation = NullPointerIsDefined(&F);
1633     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1634         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1635          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1636       return PartialAlias;
1637   }
1638 
1639   return MayAlias;
1640 }
1641 
1642 /// Check whether two Values can be considered equivalent.
1643 ///
1644 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1645 /// they can not be part of a cycle in the value graph by looking at all
1646 /// visited phi nodes an making sure that the phis cannot reach the value. We
1647 /// have to do this because we are looking through phi nodes (That is we say
1648 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1649 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1650                                                   const Value *V2) {
1651   if (V != V2)
1652     return false;
1653 
1654   const Instruction *Inst = dyn_cast<Instruction>(V);
1655   if (!Inst)
1656     return true;
1657 
1658   if (VisitedPhiBBs.empty())
1659     return true;
1660 
1661   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1662     return false;
1663 
1664   // Make sure that the visited phis cannot reach the Value. This ensures that
1665   // the Values cannot come from different iterations of a potential cycle the
1666   // phi nodes could be involved in.
1667   for (auto *P : VisitedPhiBBs)
1668     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1669       return false;
1670 
1671   return true;
1672 }
1673 
1674 /// Computes the symbolic difference between two de-composed GEPs.
1675 ///
1676 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1677 /// instructions GEP1 and GEP2 which have common base pointers.
1678 void BasicAAResult::GetIndexDifference(
1679     SmallVectorImpl<VariableGEPIndex> &Dest,
1680     const SmallVectorImpl<VariableGEPIndex> &Src) {
1681   if (Src.empty())
1682     return;
1683 
1684   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1685     const Value *V = Src[i].V;
1686     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1687     APInt Scale = Src[i].Scale;
1688 
1689     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1690     // than a few variable indexes.
1691     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1692       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1693           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1694         continue;
1695 
1696       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1697       // goes to zero, remove the entry.
1698       if (Dest[j].Scale != Scale)
1699         Dest[j].Scale -= Scale;
1700       else
1701         Dest.erase(Dest.begin() + j);
1702       Scale = 0;
1703       break;
1704     }
1705 
1706     // If we didn't consume this entry, add it to the end of the Dest list.
1707     if (!!Scale) {
1708       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale, Src[i].CxtI};
1709       Dest.push_back(Entry);
1710     }
1711   }
1712 }
1713 
1714 bool BasicAAResult::constantOffsetHeuristic(
1715     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1716     LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
1717     AssumptionCache *AC, DominatorTree *DT) {
1718   if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1719       !MaybeV2Size.hasValue())
1720     return false;
1721 
1722   const uint64_t V1Size = MaybeV1Size.getValue();
1723   const uint64_t V2Size = MaybeV2Size.getValue();
1724 
1725   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1726 
1727   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1728       Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
1729     return false;
1730 
1731   // We'll strip off the Extensions of Var0 and Var1 and do another round
1732   // of GetLinearExpression decomposition. In the example above, if Var0
1733   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1734 
1735   LinearExpression E0 =
1736       GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
1737   LinearExpression E1 =
1738       GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
1739   if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
1740       E0.Val.SExtBits != E1.Val.SExtBits ||
1741       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1742     return false;
1743 
1744   // We have a hit - Var0 and Var1 only differ by a constant offset!
1745 
1746   // If we've been sext'ed then zext'd the maximum difference between Var0 and
1747   // Var1 is possible to calculate, but we're just interested in the absolute
1748   // minimum difference between the two. The minimum distance may occur due to
1749   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1750   // the minimum distance between %i and %i + 5 is 3.
1751   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1752   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1753   APInt MinDiffBytes =
1754     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1755 
1756   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1757   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1758   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1759   // V2Size can fit in the MinDiffBytes gap.
1760   return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
1761          MinDiffBytes.uge(V2Size + BaseOffset.abs());
1762 }
1763 
1764 //===----------------------------------------------------------------------===//
1765 // BasicAliasAnalysis Pass
1766 //===----------------------------------------------------------------------===//
1767 
1768 AnalysisKey BasicAA::Key;
1769 
1770 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1771   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1772   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1773   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1774   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1775   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1776 }
1777 
1778 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1779   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1780 }
1781 
1782 char BasicAAWrapperPass::ID = 0;
1783 
1784 void BasicAAWrapperPass::anchor() {}
1785 
1786 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1787                       "Basic Alias Analysis (stateless AA impl)", true, true)
1788 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1789 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1790 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1791 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1792 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1793                     "Basic Alias Analysis (stateless AA impl)", true, true)
1794 
1795 FunctionPass *llvm::createBasicAAWrapperPass() {
1796   return new BasicAAWrapperPass();
1797 }
1798 
1799 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1800   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1801   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1802   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1803   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1804 
1805   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1806                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1807                                  &DTWP.getDomTree(),
1808                                  PVWP ? &PVWP->getResult() : nullptr));
1809 
1810   return false;
1811 }
1812 
1813 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1814   AU.setPreservesAll();
1815   AU.addRequiredTransitive<AssumptionCacheTracker>();
1816   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1817   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1818   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1819 }
1820 
1821 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1822   return BasicAAResult(
1823       F.getParent()->getDataLayout(), F,
1824       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1825       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1826 }
1827