1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/KnownBits.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <utility>
62 
63 #define DEBUG_TYPE "basicaa"
64 
65 using namespace llvm;
66 
67 /// Enable analysis of recursive PHI nodes.
68 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69                                           cl::init(true));
70 
71 /// By default, even on 32-bit architectures we use 64-bit integers for
72 /// calculations. This will allow us to more-aggressively decompose indexing
73 /// expressions calculated using i64 values (e.g., long long in C) which is
74 /// common enough to worry about.
75 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
76                                         cl::Hidden, cl::init(true));
77 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
78                                     cl::Hidden, cl::init(false));
79 
80 /// SearchLimitReached / SearchTimes shows how often the limit of
81 /// to decompose GEPs is reached. It will affect the precision
82 /// of basic alias analysis.
83 STATISTIC(SearchLimitReached, "Number of times the limit to "
84                               "decompose GEPs is reached");
85 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
86 
87 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
88 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
89 /// careful with value equivalence. We use reachability to make sure a value
90 /// cannot be involved in a cycle.
91 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
92 
93 // The max limit of the search depth in DecomposeGEPExpression() and
94 // getUnderlyingObject(), both functions need to use the same search
95 // depth otherwise the algorithm in aliasGEP will assert.
96 static const unsigned MaxLookupSearchDepth = 6;
97 
98 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
99                                FunctionAnalysisManager::Invalidator &Inv) {
100   // We don't care if this analysis itself is preserved, it has no state. But
101   // we need to check that the analyses it depends on have been. Note that we
102   // may be created without handles to some analyses and in that case don't
103   // depend on them.
104   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
105       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
106       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
107     return true;
108 
109   // Otherwise this analysis result remains valid.
110   return false;
111 }
112 
113 //===----------------------------------------------------------------------===//
114 // Useful predicates
115 //===----------------------------------------------------------------------===//
116 
117 /// Returns true if the pointer is one which would have been considered an
118 /// escape by isNonEscapingLocalObject.
119 static bool isEscapeSource(const Value *V) {
120   if (isa<CallBase>(V))
121     return true;
122 
123   // The load case works because isNonEscapingLocalObject considers all
124   // stores to be escapes (it passes true for the StoreCaptures argument
125   // to PointerMayBeCaptured).
126   if (isa<LoadInst>(V))
127     return true;
128 
129   // The inttoptr case works because isNonEscapingLocalObject considers all
130   // means of converting or equating a pointer to an int (ptrtoint, ptr store
131   // which could be followed by an integer load, ptr<->int compare) as
132   // escaping, and objects located at well-known addresses via platform-specific
133   // means cannot be considered non-escaping local objects.
134   if (isa<IntToPtrInst>(V))
135     return true;
136 
137   return false;
138 }
139 
140 /// Returns the size of the object specified by V or UnknownSize if unknown.
141 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
142                               const TargetLibraryInfo &TLI,
143                               bool NullIsValidLoc,
144                               bool RoundToAlign = false) {
145   uint64_t Size;
146   ObjectSizeOpts Opts;
147   Opts.RoundToAlign = RoundToAlign;
148   Opts.NullIsUnknownSize = NullIsValidLoc;
149   if (getObjectSize(V, Size, DL, &TLI, Opts))
150     return Size;
151   return MemoryLocation::UnknownSize;
152 }
153 
154 /// Returns true if we can prove that the object specified by V is smaller than
155 /// Size.
156 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
157                                 const DataLayout &DL,
158                                 const TargetLibraryInfo &TLI,
159                                 bool NullIsValidLoc) {
160   // Note that the meanings of the "object" are slightly different in the
161   // following contexts:
162   //    c1: llvm::getObjectSize()
163   //    c2: llvm.objectsize() intrinsic
164   //    c3: isObjectSmallerThan()
165   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
166   // refers to the "entire object".
167   //
168   //  Consider this example:
169   //     char *p = (char*)malloc(100)
170   //     char *q = p+80;
171   //
172   //  In the context of c1 and c2, the "object" pointed by q refers to the
173   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
174   //
175   //  However, in the context of c3, the "object" refers to the chunk of memory
176   // being allocated. So, the "object" has 100 bytes, and q points to the middle
177   // the "object". In case q is passed to isObjectSmallerThan() as the 1st
178   // parameter, before the llvm::getObjectSize() is called to get the size of
179   // entire object, we should:
180   //    - either rewind the pointer q to the base-address of the object in
181   //      question (in this case rewind to p), or
182   //    - just give up. It is up to caller to make sure the pointer is pointing
183   //      to the base address the object.
184   //
185   // We go for 2nd option for simplicity.
186   if (!isIdentifiedObject(V))
187     return false;
188 
189   // This function needs to use the aligned object size because we allow
190   // reads a bit past the end given sufficient alignment.
191   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
192                                       /*RoundToAlign*/ true);
193 
194   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
195 }
196 
197 /// Return the minimal extent from \p V to the end of the underlying object,
198 /// assuming the result is used in an aliasing query. E.g., we do use the query
199 /// location size and the fact that null pointers cannot alias here.
200 static uint64_t getMinimalExtentFrom(const Value &V,
201                                      const LocationSize &LocSize,
202                                      const DataLayout &DL,
203                                      bool NullIsValidLoc) {
204   // If we have dereferenceability information we know a lower bound for the
205   // extent as accesses for a lower offset would be valid. We need to exclude
206   // the "or null" part if null is a valid pointer.
207   bool CanBeNull, CanBeFreed;
208   uint64_t DerefBytes =
209     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
210   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
211   DerefBytes = CanBeFreed ? 0 : DerefBytes;
212   // If queried with a precise location size, we assume that location size to be
213   // accessed, thus valid.
214   if (LocSize.isPrecise())
215     DerefBytes = std::max(DerefBytes, LocSize.getValue());
216   return DerefBytes;
217 }
218 
219 /// Returns true if we can prove that the object specified by V has size Size.
220 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
221                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
222   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
223   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
224 }
225 
226 //===----------------------------------------------------------------------===//
227 // CaptureInfo implementations
228 //===----------------------------------------------------------------------===//
229 
230 CaptureInfo::~CaptureInfo() = default;
231 
232 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
233                                                 const Instruction *I) {
234   return isNonEscapingLocalObject(Object, &IsCapturedCache);
235 }
236 
237 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
238                                                  const Instruction *I) {
239   if (!isIdentifiedFunctionLocal(Object))
240     return false;
241 
242   auto Iter = EarliestEscapes.insert({Object, nullptr});
243   if (Iter.second) {
244     Instruction *EarliestCapture = FindEarliestCapture(
245         Object, *const_cast<Function *>(I->getFunction()),
246         /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
247     if (EarliestCapture) {
248       auto Ins = Inst2Obj.insert({EarliestCapture, {}});
249       Ins.first->second.push_back(Object);
250     }
251     Iter.first->second = EarliestCapture;
252   }
253 
254   // No capturing instruction.
255   if (!Iter.first->second)
256     return true;
257 
258   return I != Iter.first->second &&
259          !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
260 }
261 
262 void EarliestEscapeInfo::removeInstruction(Instruction *I) {
263   auto Iter = Inst2Obj.find(I);
264   if (Iter != Inst2Obj.end()) {
265     for (const Value *Obj : Iter->second)
266       EarliestEscapes.erase(Obj);
267     Inst2Obj.erase(I);
268   }
269 }
270 
271 //===----------------------------------------------------------------------===//
272 // GetElementPtr Instruction Decomposition and Analysis
273 //===----------------------------------------------------------------------===//
274 
275 namespace {
276 /// Represents zext(sext(V)).
277 struct ExtendedValue {
278   const Value *V;
279   unsigned ZExtBits;
280   unsigned SExtBits;
281 
282   explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
283                          unsigned SExtBits = 0)
284       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
285 
286   unsigned getBitWidth() const {
287     return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
288   }
289 
290   ExtendedValue withValue(const Value *NewV) const {
291     return ExtendedValue(NewV, ZExtBits, SExtBits);
292   }
293 
294   ExtendedValue withZExtOfValue(const Value *NewV) const {
295     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
296                         NewV->getType()->getPrimitiveSizeInBits();
297     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
298     return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
299   }
300 
301   ExtendedValue withSExtOfValue(const Value *NewV) const {
302     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
303                         NewV->getType()->getPrimitiveSizeInBits();
304     // zext(sext(sext(NewV)))
305     return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
306   }
307 
308   APInt evaluateWith(APInt N) const {
309     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
310            "Incompatible bit width");
311     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
312     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
313     return N;
314   }
315 
316   bool canDistributeOver(bool NUW, bool NSW) const {
317     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
318     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
319     return (!ZExtBits || NUW) && (!SExtBits || NSW);
320   }
321 };
322 
323 /// Represents zext(sext(V)) * Scale + Offset.
324 struct LinearExpression {
325   ExtendedValue Val;
326   APInt Scale;
327   APInt Offset;
328 
329   /// True if all operations in this expression are NSW.
330   bool IsNSW;
331 
332   LinearExpression(const ExtendedValue &Val, const APInt &Scale,
333                    const APInt &Offset, bool IsNSW)
334       : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
335 
336   LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
337     unsigned BitWidth = Val.getBitWidth();
338     Scale = APInt(BitWidth, 1);
339     Offset = APInt(BitWidth, 0);
340   }
341 };
342 }
343 
344 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
345 /// B are constant integers.
346 static LinearExpression GetLinearExpression(
347     const ExtendedValue &Val,  const DataLayout &DL, unsigned Depth,
348     AssumptionCache *AC, DominatorTree *DT) {
349   // Limit our recursion depth.
350   if (Depth == 6)
351     return Val;
352 
353   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
354     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
355                             Val.evaluateWith(Const->getValue()), true);
356 
357   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
358     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
359       APInt RHS = Val.evaluateWith(RHSC->getValue());
360       // The only non-OBO case we deal with is or, and only limited to the
361       // case where it is both nuw and nsw.
362       bool NUW = true, NSW = true;
363       if (isa<OverflowingBinaryOperator>(BOp)) {
364         NUW &= BOp->hasNoUnsignedWrap();
365         NSW &= BOp->hasNoSignedWrap();
366       }
367       if (!Val.canDistributeOver(NUW, NSW))
368         return Val;
369 
370       LinearExpression E(Val);
371       switch (BOp->getOpcode()) {
372       default:
373         // We don't understand this instruction, so we can't decompose it any
374         // further.
375         return Val;
376       case Instruction::Or:
377         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
378         // analyze it.
379         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
380                                BOp, DT))
381           return Val;
382 
383         LLVM_FALLTHROUGH;
384       case Instruction::Add: {
385         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
386                                 Depth + 1, AC, DT);
387         E.Offset += RHS;
388         E.IsNSW &= NSW;
389         break;
390       }
391       case Instruction::Sub: {
392         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
393                                 Depth + 1, AC, DT);
394         E.Offset -= RHS;
395         E.IsNSW &= NSW;
396         break;
397       }
398       case Instruction::Mul: {
399         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
400                                 Depth + 1, AC, DT);
401         E.Offset *= RHS;
402         E.Scale *= RHS;
403         E.IsNSW &= NSW;
404         break;
405       }
406       case Instruction::Shl:
407         // We're trying to linearize an expression of the kind:
408         //   shl i8 -128, 36
409         // where the shift count exceeds the bitwidth of the type.
410         // We can't decompose this further (the expression would return
411         // a poison value).
412         if (RHS.getLimitedValue() > Val.getBitWidth())
413           return Val;
414 
415         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
416                                 Depth + 1, AC, DT);
417         E.Offset <<= RHS.getLimitedValue();
418         E.Scale <<= RHS.getLimitedValue();
419         E.IsNSW &= NSW;
420         break;
421       }
422       return E;
423     }
424   }
425 
426   if (isa<ZExtInst>(Val.V))
427     return GetLinearExpression(
428         Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
429         DL, Depth + 1, AC, DT);
430 
431   if (isa<SExtInst>(Val.V))
432     return GetLinearExpression(
433         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
434         DL, Depth + 1, AC, DT);
435 
436   return Val;
437 }
438 
439 /// To ensure a pointer offset fits in an integer of size PointerSize
440 /// (in bits) when that size is smaller than the maximum pointer size. This is
441 /// an issue, for example, in particular for 32b pointers with negative indices
442 /// that rely on two's complement wrap-arounds for precise alias information
443 /// where the maximum pointer size is 64b.
444 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
445   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
446   unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
447   return (Offset << ShiftBits).ashr(ShiftBits);
448 }
449 
450 static unsigned getMaxPointerSize(const DataLayout &DL) {
451   unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
452   if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
453   if (DoubleCalcBits) MaxPointerSize *= 2;
454 
455   return MaxPointerSize;
456 }
457 
458 namespace {
459 // A linear transformation of a Value; this class represents ZExt(SExt(V,
460 // SExtBits), ZExtBits) * Scale + Offset.
461 struct VariableGEPIndex {
462   // An opaque Value - we can't decompose this further.
463   const Value *V;
464 
465   // We need to track what extensions we've done as we consider the same Value
466   // with different extensions as different variables in a GEP's linear
467   // expression;
468   // e.g.: if V == -1, then sext(x) != zext(x).
469   unsigned ZExtBits;
470   unsigned SExtBits;
471 
472   APInt Scale;
473 
474   // Context instruction to use when querying information about this index.
475   const Instruction *CxtI;
476 
477   /// True if all operations in this expression are NSW.
478   bool IsNSW;
479 
480   void dump() const {
481     print(dbgs());
482     dbgs() << "\n";
483   }
484   void print(raw_ostream &OS) const {
485     OS << "(V=" << V->getName()
486        << ", zextbits=" << ZExtBits
487        << ", sextbits=" << SExtBits
488        << ", scale=" << Scale << ")";
489   }
490 };
491 }
492 
493 // Represents the internal structure of a GEP, decomposed into a base pointer,
494 // constant offsets, and variable scaled indices.
495 struct BasicAAResult::DecomposedGEP {
496   // Base pointer of the GEP
497   const Value *Base;
498   // Total constant offset from base.
499   APInt Offset;
500   // Scaled variable (non-constant) indices.
501   SmallVector<VariableGEPIndex, 4> VarIndices;
502   // Is GEP index scale compile-time constant.
503   bool HasCompileTimeConstantScale;
504   // Are all operations inbounds GEPs or non-indexing operations?
505   // (None iff expression doesn't involve any geps)
506   Optional<bool> InBounds;
507 
508   void dump() const {
509     print(dbgs());
510     dbgs() << "\n";
511   }
512   void print(raw_ostream &OS) const {
513     OS << "(DecomposedGEP Base=" << Base->getName()
514        << ", Offset=" << Offset
515        << ", VarIndices=[";
516     for (size_t i = 0; i < VarIndices.size(); i++) {
517       if (i != 0)
518         OS << ", ";
519       VarIndices[i].print(OS);
520     }
521     OS << "], HasCompileTimeConstantScale=" << HasCompileTimeConstantScale
522        << ")";
523   }
524 };
525 
526 
527 /// If V is a symbolic pointer expression, decompose it into a base pointer
528 /// with a constant offset and a number of scaled symbolic offsets.
529 ///
530 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
531 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
532 /// specified amount, but which may have other unrepresented high bits. As
533 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
534 ///
535 /// This function is capable of analyzing everything that getUnderlyingObject
536 /// can look through. To be able to do that getUnderlyingObject and
537 /// DecomposeGEPExpression must use the same search depth
538 /// (MaxLookupSearchDepth).
539 BasicAAResult::DecomposedGEP
540 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
541                                       AssumptionCache *AC, DominatorTree *DT) {
542   // Limit recursion depth to limit compile time in crazy cases.
543   unsigned MaxLookup = MaxLookupSearchDepth;
544   SearchTimes++;
545   const Instruction *CxtI = dyn_cast<Instruction>(V);
546 
547   unsigned MaxPointerSize = getMaxPointerSize(DL);
548   DecomposedGEP Decomposed;
549   Decomposed.Offset = APInt(MaxPointerSize, 0);
550   Decomposed.HasCompileTimeConstantScale = true;
551   do {
552     // See if this is a bitcast or GEP.
553     const Operator *Op = dyn_cast<Operator>(V);
554     if (!Op) {
555       // The only non-operator case we can handle are GlobalAliases.
556       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
557         if (!GA->isInterposable()) {
558           V = GA->getAliasee();
559           continue;
560         }
561       }
562       Decomposed.Base = V;
563       return Decomposed;
564     }
565 
566     if (Op->getOpcode() == Instruction::BitCast ||
567         Op->getOpcode() == Instruction::AddrSpaceCast) {
568       V = Op->getOperand(0);
569       continue;
570     }
571 
572     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
573     if (!GEPOp) {
574       if (const auto *PHI = dyn_cast<PHINode>(V)) {
575         // Look through single-arg phi nodes created by LCSSA.
576         if (PHI->getNumIncomingValues() == 1) {
577           V = PHI->getIncomingValue(0);
578           continue;
579         }
580       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
581         // CaptureTracking can know about special capturing properties of some
582         // intrinsics like launder.invariant.group, that can't be expressed with
583         // the attributes, but have properties like returning aliasing pointer.
584         // Because some analysis may assume that nocaptured pointer is not
585         // returned from some special intrinsic (because function would have to
586         // be marked with returns attribute), it is crucial to use this function
587         // because it should be in sync with CaptureTracking. Not using it may
588         // cause weird miscompilations where 2 aliasing pointers are assumed to
589         // noalias.
590         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
591           V = RP;
592           continue;
593         }
594       }
595 
596       Decomposed.Base = V;
597       return Decomposed;
598     }
599 
600     // Track whether we've seen at least one in bounds gep, and if so, whether
601     // all geps parsed were in bounds.
602     if (Decomposed.InBounds == None)
603       Decomposed.InBounds = GEPOp->isInBounds();
604     else if (!GEPOp->isInBounds())
605       Decomposed.InBounds = false;
606 
607     assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
608 
609     // Don't attempt to analyze GEPs if index scale is not a compile-time
610     // constant.
611     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
612       Decomposed.Base = V;
613       Decomposed.HasCompileTimeConstantScale = false;
614       return Decomposed;
615     }
616 
617     unsigned AS = GEPOp->getPointerAddressSpace();
618     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
619     gep_type_iterator GTI = gep_type_begin(GEPOp);
620     unsigned PointerSize = DL.getPointerSizeInBits(AS);
621     // Assume all GEP operands are constants until proven otherwise.
622     bool GepHasConstantOffset = true;
623     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
624          I != E; ++I, ++GTI) {
625       const Value *Index = *I;
626       // Compute the (potentially symbolic) offset in bytes for this index.
627       if (StructType *STy = GTI.getStructTypeOrNull()) {
628         // For a struct, add the member offset.
629         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
630         if (FieldNo == 0)
631           continue;
632 
633         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
634         continue;
635       }
636 
637       // For an array/pointer, add the element offset, explicitly scaled.
638       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
639         if (CIdx->isZero())
640           continue;
641         Decomposed.Offset +=
642             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
643             CIdx->getValue().sextOrTrunc(MaxPointerSize);
644         continue;
645       }
646 
647       GepHasConstantOffset = false;
648 
649       APInt Scale(MaxPointerSize,
650                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
651       // If the integer type is smaller than the pointer size, it is implicitly
652       // sign extended to pointer size.
653       unsigned Width = Index->getType()->getIntegerBitWidth();
654       unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
655       LinearExpression LE = GetLinearExpression(
656           ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
657 
658       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
659       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
660 
661       // It can be the case that, even through C1*V+C2 does not overflow for
662       // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
663       // decompose the expression in this way.
664       //
665       // FIXME: C1*Scale and the other operations in the decomposed
666       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
667       // possibility.
668       bool Overflow;
669       APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
670                            .smul_ov(Scale, Overflow);
671       if (Overflow) {
672         LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
673       } else {
674         Decomposed.Offset += ScaledOffset;
675         Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
676       }
677 
678       // If we already had an occurrence of this index variable, merge this
679       // scale into it.  For example, we want to handle:
680       //   A[x][x] -> x*16 + x*4 -> x*20
681       // This also ensures that 'x' only appears in the index list once.
682       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
683         if (Decomposed.VarIndices[i].V == LE.Val.V &&
684             Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
685             Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
686           Scale += Decomposed.VarIndices[i].Scale;
687           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
688           break;
689         }
690       }
691 
692       // Make sure that we have a scale that makes sense for this target's
693       // pointer size.
694       Scale = adjustToPointerSize(Scale, PointerSize);
695 
696       if (!!Scale) {
697         VariableGEPIndex Entry = {
698             LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits, Scale, CxtI, LE.IsNSW};
699         Decomposed.VarIndices.push_back(Entry);
700       }
701     }
702 
703     // Take care of wrap-arounds
704     if (GepHasConstantOffset)
705       Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
706 
707     // Analyze the base pointer next.
708     V = GEPOp->getOperand(0);
709   } while (--MaxLookup);
710 
711   // If the chain of expressions is too deep, just return early.
712   Decomposed.Base = V;
713   SearchLimitReached++;
714   return Decomposed;
715 }
716 
717 /// Returns whether the given pointer value points to memory that is local to
718 /// the function, with global constants being considered local to all
719 /// functions.
720 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
721                                            AAQueryInfo &AAQI, bool OrLocal) {
722   assert(Visited.empty() && "Visited must be cleared after use!");
723 
724   unsigned MaxLookup = 8;
725   SmallVector<const Value *, 16> Worklist;
726   Worklist.push_back(Loc.Ptr);
727   do {
728     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
729     if (!Visited.insert(V).second) {
730       Visited.clear();
731       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
732     }
733 
734     // An alloca instruction defines local memory.
735     if (OrLocal && isa<AllocaInst>(V))
736       continue;
737 
738     // A global constant counts as local memory for our purposes.
739     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
740       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
741       // global to be marked constant in some modules and non-constant in
742       // others.  GV may even be a declaration, not a definition.
743       if (!GV->isConstant()) {
744         Visited.clear();
745         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
746       }
747       continue;
748     }
749 
750     // If both select values point to local memory, then so does the select.
751     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
752       Worklist.push_back(SI->getTrueValue());
753       Worklist.push_back(SI->getFalseValue());
754       continue;
755     }
756 
757     // If all values incoming to a phi node point to local memory, then so does
758     // the phi.
759     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
760       // Don't bother inspecting phi nodes with many operands.
761       if (PN->getNumIncomingValues() > MaxLookup) {
762         Visited.clear();
763         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
764       }
765       append_range(Worklist, PN->incoming_values());
766       continue;
767     }
768 
769     // Otherwise be conservative.
770     Visited.clear();
771     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
772   } while (!Worklist.empty() && --MaxLookup);
773 
774   Visited.clear();
775   return Worklist.empty();
776 }
777 
778 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
779   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
780   return II && II->getIntrinsicID() == IID;
781 }
782 
783 /// Returns the behavior when calling the given call site.
784 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
785   if (Call->doesNotAccessMemory())
786     // Can't do better than this.
787     return FMRB_DoesNotAccessMemory;
788 
789   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
790 
791   // If the callsite knows it only reads memory, don't return worse
792   // than that.
793   if (Call->onlyReadsMemory())
794     Min = FMRB_OnlyReadsMemory;
795   else if (Call->doesNotReadMemory())
796     Min = FMRB_OnlyWritesMemory;
797 
798   if (Call->onlyAccessesArgMemory())
799     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
800   else if (Call->onlyAccessesInaccessibleMemory())
801     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
802   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
803     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
804 
805   // If the call has operand bundles then aliasing attributes from the function
806   // it calls do not directly apply to the call.  This can be made more precise
807   // in the future.
808   if (!Call->hasOperandBundles())
809     if (const Function *F = Call->getCalledFunction())
810       Min =
811           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
812 
813   return Min;
814 }
815 
816 /// Returns the behavior when calling the given function. For use when the call
817 /// site is not known.
818 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
819   // If the function declares it doesn't access memory, we can't do better.
820   if (F->doesNotAccessMemory())
821     return FMRB_DoesNotAccessMemory;
822 
823   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
824 
825   // If the function declares it only reads memory, go with that.
826   if (F->onlyReadsMemory())
827     Min = FMRB_OnlyReadsMemory;
828   else if (F->doesNotReadMemory())
829     Min = FMRB_OnlyWritesMemory;
830 
831   if (F->onlyAccessesArgMemory())
832     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
833   else if (F->onlyAccessesInaccessibleMemory())
834     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
835   else if (F->onlyAccessesInaccessibleMemOrArgMem())
836     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
837 
838   return Min;
839 }
840 
841 /// Returns true if this is a writeonly (i.e Mod only) parameter.
842 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
843                              const TargetLibraryInfo &TLI) {
844   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
845     return true;
846 
847   // We can bound the aliasing properties of memset_pattern16 just as we can
848   // for memcpy/memset.  This is particularly important because the
849   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
850   // whenever possible.
851   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
852   // attributes.
853   LibFunc F;
854   if (Call->getCalledFunction() &&
855       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
856       F == LibFunc_memset_pattern16 && TLI.has(F))
857     if (ArgIdx == 0)
858       return true;
859 
860   // TODO: memset_pattern4, memset_pattern8
861   // TODO: _chk variants
862   // TODO: strcmp, strcpy
863 
864   return false;
865 }
866 
867 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
868                                            unsigned ArgIdx) {
869   // Checking for known builtin intrinsics and target library functions.
870   if (isWriteOnlyParam(Call, ArgIdx, TLI))
871     return ModRefInfo::Mod;
872 
873   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
874     return ModRefInfo::Ref;
875 
876   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
877     return ModRefInfo::NoModRef;
878 
879   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
880 }
881 
882 #ifndef NDEBUG
883 static const Function *getParent(const Value *V) {
884   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
885     if (!inst->getParent())
886       return nullptr;
887     return inst->getParent()->getParent();
888   }
889 
890   if (const Argument *arg = dyn_cast<Argument>(V))
891     return arg->getParent();
892 
893   return nullptr;
894 }
895 
896 static bool notDifferentParent(const Value *O1, const Value *O2) {
897 
898   const Function *F1 = getParent(O1);
899   const Function *F2 = getParent(O2);
900 
901   return !F1 || !F2 || F1 == F2;
902 }
903 #endif
904 
905 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
906                                  const MemoryLocation &LocB,
907                                  AAQueryInfo &AAQI) {
908   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
909          "BasicAliasAnalysis doesn't support interprocedural queries.");
910   return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
911 }
912 
913 /// Checks to see if the specified callsite can clobber the specified memory
914 /// object.
915 ///
916 /// Since we only look at local properties of this function, we really can't
917 /// say much about this query.  We do, however, use simple "address taken"
918 /// analysis on local objects.
919 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
920                                         const MemoryLocation &Loc,
921                                         AAQueryInfo &AAQI) {
922   assert(notDifferentParent(Call, Loc.Ptr) &&
923          "AliasAnalysis query involving multiple functions!");
924 
925   const Value *Object = getUnderlyingObject(Loc.Ptr);
926 
927   // Calls marked 'tail' cannot read or write allocas from the current frame
928   // because the current frame might be destroyed by the time they run. However,
929   // a tail call may use an alloca with byval. Calling with byval copies the
930   // contents of the alloca into argument registers or stack slots, so there is
931   // no lifetime issue.
932   if (isa<AllocaInst>(Object))
933     if (const CallInst *CI = dyn_cast<CallInst>(Call))
934       if (CI->isTailCall() &&
935           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
936         return ModRefInfo::NoModRef;
937 
938   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
939   // modify them even though the alloca is not escaped.
940   if (auto *AI = dyn_cast<AllocaInst>(Object))
941     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
942       return ModRefInfo::Mod;
943 
944   // If the pointer is to a locally allocated object that does not escape,
945   // then the call can not mod/ref the pointer unless the call takes the pointer
946   // as an argument, and itself doesn't capture it.
947   if (!isa<Constant>(Object) && Call != Object &&
948       AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
949 
950     // Optimistically assume that call doesn't touch Object and check this
951     // assumption in the following loop.
952     ModRefInfo Result = ModRefInfo::NoModRef;
953     bool IsMustAlias = true;
954 
955     unsigned OperandNo = 0;
956     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
957          CI != CE; ++CI, ++OperandNo) {
958       // Only look at the no-capture or byval pointer arguments.  If this
959       // pointer were passed to arguments that were neither of these, then it
960       // couldn't be no-capture.
961       if (!(*CI)->getType()->isPointerTy() ||
962           (!Call->doesNotCapture(OperandNo) &&
963            OperandNo < Call->getNumArgOperands() &&
964            !Call->isByValArgument(OperandNo)))
965         continue;
966 
967       // Call doesn't access memory through this operand, so we don't care
968       // if it aliases with Object.
969       if (Call->doesNotAccessMemory(OperandNo))
970         continue;
971 
972       // If this is a no-capture pointer argument, see if we can tell that it
973       // is impossible to alias the pointer we're checking.
974       AliasResult AR = getBestAAResults().alias(
975           MemoryLocation::getBeforeOrAfter(*CI),
976           MemoryLocation::getBeforeOrAfter(Object), AAQI);
977       if (AR != AliasResult::MustAlias)
978         IsMustAlias = false;
979       // Operand doesn't alias 'Object', continue looking for other aliases
980       if (AR == AliasResult::NoAlias)
981         continue;
982       // Operand aliases 'Object', but call doesn't modify it. Strengthen
983       // initial assumption and keep looking in case if there are more aliases.
984       if (Call->onlyReadsMemory(OperandNo)) {
985         Result = setRef(Result);
986         continue;
987       }
988       // Operand aliases 'Object' but call only writes into it.
989       if (Call->doesNotReadMemory(OperandNo)) {
990         Result = setMod(Result);
991         continue;
992       }
993       // This operand aliases 'Object' and call reads and writes into it.
994       // Setting ModRef will not yield an early return below, MustAlias is not
995       // used further.
996       Result = ModRefInfo::ModRef;
997       break;
998     }
999 
1000     // No operand aliases, reset Must bit. Add below if at least one aliases
1001     // and all aliases found are MustAlias.
1002     if (isNoModRef(Result))
1003       IsMustAlias = false;
1004 
1005     // Early return if we improved mod ref information
1006     if (!isModAndRefSet(Result)) {
1007       if (isNoModRef(Result))
1008         return ModRefInfo::NoModRef;
1009       return IsMustAlias ? setMust(Result) : clearMust(Result);
1010     }
1011   }
1012 
1013   // If the call is malloc/calloc like, we can assume that it doesn't
1014   // modify any IR visible value.  This is only valid because we assume these
1015   // routines do not read values visible in the IR.  TODO: Consider special
1016   // casing realloc and strdup routines which access only their arguments as
1017   // well.  Or alternatively, replace all of this with inaccessiblememonly once
1018   // that's implemented fully.
1019   if (isMallocOrCallocLikeFn(Call, &TLI)) {
1020     // Be conservative if the accessed pointer may alias the allocation -
1021     // fallback to the generic handling below.
1022     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
1023                                  AAQI) == AliasResult::NoAlias)
1024       return ModRefInfo::NoModRef;
1025   }
1026 
1027   // The semantics of memcpy intrinsics either exactly overlap or do not
1028   // overlap, i.e., source and destination of any given memcpy are either
1029   // no-alias or must-alias.
1030   if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
1031     AliasResult SrcAA =
1032         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
1033     AliasResult DestAA =
1034         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
1035     // It's also possible for Loc to alias both src and dest, or neither.
1036     ModRefInfo rv = ModRefInfo::NoModRef;
1037     if (SrcAA != AliasResult::NoAlias)
1038       rv = setRef(rv);
1039     if (DestAA != AliasResult::NoAlias)
1040       rv = setMod(rv);
1041     return rv;
1042   }
1043 
1044   // Guard intrinsics are marked as arbitrarily writing so that proper control
1045   // dependencies are maintained but they never mods any particular memory
1046   // location.
1047   //
1048   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1049   // heap state at the point the guard is issued needs to be consistent in case
1050   // the guard invokes the "deopt" continuation.
1051   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
1052     return ModRefInfo::Ref;
1053   // The same applies to deoptimize which is essentially a guard(false).
1054   if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
1055     return ModRefInfo::Ref;
1056 
1057   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1058   // writing so that proper control dependencies are maintained but they never
1059   // mod any particular memory location visible to the IR.
1060   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1061   // intrinsic is now modeled as reading memory. This prevents hoisting the
1062   // invariant.start intrinsic over stores. Consider:
1063   // *ptr = 40;
1064   // *ptr = 50;
1065   // invariant_start(ptr)
1066   // int val = *ptr;
1067   // print(val);
1068   //
1069   // This cannot be transformed to:
1070   //
1071   // *ptr = 40;
1072   // invariant_start(ptr)
1073   // *ptr = 50;
1074   // int val = *ptr;
1075   // print(val);
1076   //
1077   // The transformation will cause the second store to be ignored (based on
1078   // rules of invariant.start)  and print 40, while the first program always
1079   // prints 50.
1080   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1081     return ModRefInfo::Ref;
1082 
1083   // The AAResultBase base class has some smarts, lets use them.
1084   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
1085 }
1086 
1087 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1088                                         const CallBase *Call2,
1089                                         AAQueryInfo &AAQI) {
1090   // Guard intrinsics are marked as arbitrarily writing so that proper control
1091   // dependencies are maintained but they never mods any particular memory
1092   // location.
1093   //
1094   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1095   // heap state at the point the guard is issued needs to be consistent in case
1096   // the guard invokes the "deopt" continuation.
1097 
1098   // NB! This function is *not* commutative, so we special case two
1099   // possibilities for guard intrinsics.
1100 
1101   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1102     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1103                ? ModRefInfo::Ref
1104                : ModRefInfo::NoModRef;
1105 
1106   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1107     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1108                ? ModRefInfo::Mod
1109                : ModRefInfo::NoModRef;
1110 
1111   // The AAResultBase base class has some smarts, lets use them.
1112   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1113 }
1114 
1115 /// Return true if we know V to the base address of the corresponding memory
1116 /// object.  This implies that any address less than V must be out of bounds
1117 /// for the underlying object.  Note that just being isIdentifiedObject() is
1118 /// not enough - For example, a negative offset from a noalias argument or call
1119 /// can be inbounds w.r.t the actual underlying object.
1120 static bool isBaseOfObject(const Value *V) {
1121   // TODO: We can handle other cases here
1122   // 1) For GC languages, arguments to functions are often required to be
1123   //    base pointers.
1124   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1125   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1126 }
1127 
1128 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1129 /// another pointer.
1130 ///
1131 /// We know that V1 is a GEP, but we don't know anything about V2.
1132 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1133 /// V2.
1134 AliasResult BasicAAResult::aliasGEP(
1135     const GEPOperator *GEP1, LocationSize V1Size,
1136     const Value *V2, LocationSize V2Size,
1137     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1138   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1139     // TODO: This limitation exists for compile-time reasons. Relax it if we
1140     // can avoid exponential pathological cases.
1141     if (!isa<GEPOperator>(V2))
1142       return AliasResult::MayAlias;
1143 
1144     // If both accesses have unknown size, we can only check whether the base
1145     // objects don't alias.
1146     AliasResult BaseAlias = getBestAAResults().alias(
1147         MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1148         MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1149     return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1150                                              : AliasResult::MayAlias;
1151   }
1152 
1153   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1154   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1155 
1156   // Don't attempt to analyze the decomposed GEP if index scale is not a
1157   // compile-time constant.
1158   if (!DecompGEP1.HasCompileTimeConstantScale ||
1159       !DecompGEP2.HasCompileTimeConstantScale)
1160     return AliasResult::MayAlias;
1161 
1162   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1163          "DecomposeGEPExpression returned a result different from "
1164          "getUnderlyingObject");
1165 
1166   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1167   // symbolic difference.
1168   subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1169 
1170   // If an inbounds GEP would have to start from an out of bounds address
1171   // for the two to alias, then we can assume noalias.
1172   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1173       V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1174       isBaseOfObject(DecompGEP2.Base))
1175     return AliasResult::NoAlias;
1176 
1177   if (isa<GEPOperator>(V2)) {
1178     // Symmetric case to above.
1179     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1180         V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1181         isBaseOfObject(DecompGEP1.Base))
1182       return AliasResult::NoAlias;
1183   }
1184 
1185   // For GEPs with identical offsets, we can preserve the size and AAInfo
1186   // when performing the alias check on the underlying objects.
1187   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1188     return getBestAAResults().alias(
1189         MemoryLocation(UnderlyingV1, V1Size),
1190         MemoryLocation(UnderlyingV2, V2Size), AAQI);
1191 
1192   // Do the base pointers alias?
1193   AliasResult BaseAlias = getBestAAResults().alias(
1194       MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1195       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1196 
1197   // If we get a No or May, then return it immediately, no amount of analysis
1198   // will improve this situation.
1199   if (BaseAlias != AliasResult::MustAlias) {
1200     assert(BaseAlias == AliasResult::NoAlias ||
1201            BaseAlias == AliasResult::MayAlias);
1202     return BaseAlias;
1203   }
1204 
1205   // If there is a constant difference between the pointers, but the difference
1206   // is less than the size of the associated memory object, then we know
1207   // that the objects are partially overlapping.  If the difference is
1208   // greater, we know they do not overlap.
1209   if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1210     APInt &Off = DecompGEP1.Offset;
1211 
1212     // Initialize for Off >= 0 (V2 <= GEP1) case.
1213     const Value *LeftPtr = V2;
1214     const Value *RightPtr = GEP1;
1215     LocationSize VLeftSize = V2Size;
1216     LocationSize VRightSize = V1Size;
1217     const bool Swapped = Off.isNegative();
1218 
1219     if (Swapped) {
1220       // Swap if we have the situation where:
1221       // +                +
1222       // | BaseOffset     |
1223       // ---------------->|
1224       // |-->V1Size       |-------> V2Size
1225       // GEP1             V2
1226       std::swap(LeftPtr, RightPtr);
1227       std::swap(VLeftSize, VRightSize);
1228       Off = -Off;
1229     }
1230 
1231     if (VLeftSize.hasValue()) {
1232       const uint64_t LSize = VLeftSize.getValue();
1233       if (Off.ult(LSize)) {
1234         // Conservatively drop processing if a phi was visited and/or offset is
1235         // too big.
1236         AliasResult AR = AliasResult::PartialAlias;
1237         if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1238             (Off + VRightSize.getValue()).ule(LSize)) {
1239           // Memory referenced by right pointer is nested. Save the offset in
1240           // cache. Note that originally offset estimated as GEP1-V2, but
1241           // AliasResult contains the shift that represents GEP1+Offset=V2.
1242           AR.setOffset(-Off.getSExtValue());
1243           AR.swap(Swapped);
1244         }
1245         return AR;
1246       }
1247       return AliasResult::NoAlias;
1248     }
1249   }
1250 
1251   if (!DecompGEP1.VarIndices.empty()) {
1252     APInt GCD;
1253     bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
1254     bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
1255     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1256       APInt Scale = DecompGEP1.VarIndices[i].Scale;
1257       APInt ScaleForGCD = DecompGEP1.VarIndices[i].Scale;
1258       if (!DecompGEP1.VarIndices[i].IsNSW)
1259         ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1260                                           Scale.countTrailingZeros());
1261 
1262       if (i == 0)
1263         GCD = ScaleForGCD.abs();
1264       else
1265         GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1266 
1267       if (AllNonNegative || AllNonPositive) {
1268         // If the Value could change between cycles, then any reasoning about
1269         // the Value this cycle may not hold in the next cycle. We'll just
1270         // give up if we can't determine conditions that hold for every cycle:
1271         const Value *V = DecompGEP1.VarIndices[i].V;
1272         const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;
1273 
1274         KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
1275         bool SignKnownZero = Known.isNonNegative();
1276         bool SignKnownOne = Known.isNegative();
1277 
1278         // Zero-extension widens the variable, and so forces the sign
1279         // bit to zero.
1280         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1281         SignKnownZero |= IsZExt;
1282         SignKnownOne &= !IsZExt;
1283 
1284         AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1285                           (SignKnownOne && Scale.isNonPositive());
1286         AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1287                           (SignKnownOne && Scale.isNonNegative());
1288       }
1289     }
1290 
1291     // We now have accesses at two offsets from the same base:
1292     //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1293     //  2. 0 with size V2Size
1294     // Using arithmetic modulo GCD, the accesses are at
1295     // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1296     // into the range [V2Size..GCD), then we know they cannot overlap.
1297     APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1298     if (ModOffset.isNegative())
1299       ModOffset += GCD; // We want mod, not rem.
1300     if (V1Size.hasValue() && V2Size.hasValue() &&
1301         ModOffset.uge(V2Size.getValue()) &&
1302         (GCD - ModOffset).uge(V1Size.getValue()))
1303       return AliasResult::NoAlias;
1304 
1305     // If we know all the variables are non-negative, then the total offset is
1306     // also non-negative and >= DecompGEP1.Offset. We have the following layout:
1307     // [0, V2Size) ... [TotalOffset, TotalOffer+V1Size]
1308     // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
1309     if (AllNonNegative && V2Size.hasValue() &&
1310         DecompGEP1.Offset.uge(V2Size.getValue()))
1311       return AliasResult::NoAlias;
1312     // Similarly, if the variables are non-positive, then the total offset is
1313     // also non-positive and <= DecompGEP1.Offset. We have the following layout:
1314     // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1315     // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
1316     if (AllNonPositive && V1Size.hasValue() &&
1317         (-DecompGEP1.Offset).uge(V1Size.getValue()))
1318       return AliasResult::NoAlias;
1319 
1320     if (V1Size.hasValue() && V2Size.hasValue()) {
1321       // Try to determine whether abs(VarIndex) > 0.
1322       Optional<APInt> MinAbsVarIndex;
1323       if (DecompGEP1.VarIndices.size() == 1) {
1324         // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
1325         const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1326         if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
1327           MinAbsVarIndex = Var.Scale.abs();
1328       } else if (DecompGEP1.VarIndices.size() == 2) {
1329         // VarIndex = Scale*V0 + (-Scale)*V1.
1330         // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1331         // Check that VisitedPhiBBs is empty, to avoid reasoning about
1332         // inequality of values across loop iterations.
1333         const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1334         const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1335         if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
1336             Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
1337             isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
1338           MinAbsVarIndex = Var0.Scale.abs();
1339       }
1340 
1341       if (MinAbsVarIndex) {
1342         // The constant offset will have added at least +/-MinAbsVarIndex to it.
1343         APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1344         APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1345         // Check that an access at OffsetLo or lower, and an access at OffsetHi
1346         // or higher both do not alias.
1347         if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1348             OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1349           return AliasResult::NoAlias;
1350       }
1351     }
1352 
1353     if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1354       return AliasResult::NoAlias;
1355   }
1356 
1357   // Statically, we can see that the base objects are the same, but the
1358   // pointers have dynamic offsets which we can't resolve. And none of our
1359   // little tricks above worked.
1360   return AliasResult::MayAlias;
1361 }
1362 
1363 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1364   // If the results agree, take it.
1365   if (A == B)
1366     return A;
1367   // A mix of PartialAlias and MustAlias is PartialAlias.
1368   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1369       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1370     return AliasResult::PartialAlias;
1371   // Otherwise, we don't know anything.
1372   return AliasResult::MayAlias;
1373 }
1374 
1375 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1376 /// against another.
1377 AliasResult
1378 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1379                            const Value *V2, LocationSize V2Size,
1380                            AAQueryInfo &AAQI) {
1381   // If the values are Selects with the same condition, we can do a more precise
1382   // check: just check for aliases between the values on corresponding arms.
1383   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1384     if (SI->getCondition() == SI2->getCondition()) {
1385       AliasResult Alias = getBestAAResults().alias(
1386           MemoryLocation(SI->getTrueValue(), SISize),
1387           MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1388       if (Alias == AliasResult::MayAlias)
1389         return AliasResult::MayAlias;
1390       AliasResult ThisAlias = getBestAAResults().alias(
1391           MemoryLocation(SI->getFalseValue(), SISize),
1392           MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1393       return MergeAliasResults(ThisAlias, Alias);
1394     }
1395 
1396   // If both arms of the Select node NoAlias or MustAlias V2, then returns
1397   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1398   AliasResult Alias = getBestAAResults().alias(
1399       MemoryLocation(V2, V2Size),
1400       MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1401   if (Alias == AliasResult::MayAlias)
1402     return AliasResult::MayAlias;
1403 
1404   AliasResult ThisAlias = getBestAAResults().alias(
1405       MemoryLocation(V2, V2Size),
1406       MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1407   return MergeAliasResults(ThisAlias, Alias);
1408 }
1409 
1410 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1411 /// another.
1412 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1413                                     const Value *V2, LocationSize V2Size,
1414                                     AAQueryInfo &AAQI) {
1415   if (!PN->getNumIncomingValues())
1416     return AliasResult::NoAlias;
1417   // If the values are PHIs in the same block, we can do a more precise
1418   // as well as efficient check: just check for aliases between the values
1419   // on corresponding edges.
1420   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1421     if (PN2->getParent() == PN->getParent()) {
1422       Optional<AliasResult> Alias;
1423       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1424         AliasResult ThisAlias = getBestAAResults().alias(
1425             MemoryLocation(PN->getIncomingValue(i), PNSize),
1426             MemoryLocation(
1427                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1428             AAQI);
1429         if (Alias)
1430           *Alias = MergeAliasResults(*Alias, ThisAlias);
1431         else
1432           Alias = ThisAlias;
1433         if (*Alias == AliasResult::MayAlias)
1434           break;
1435       }
1436       return *Alias;
1437     }
1438 
1439   SmallVector<Value *, 4> V1Srcs;
1440   // If a phi operand recurses back to the phi, we can still determine NoAlias
1441   // if we don't alias the underlying objects of the other phi operands, as we
1442   // know that the recursive phi needs to be based on them in some way.
1443   bool isRecursive = false;
1444   auto CheckForRecPhi = [&](Value *PV) {
1445     if (!EnableRecPhiAnalysis)
1446       return false;
1447     if (getUnderlyingObject(PV) == PN) {
1448       isRecursive = true;
1449       return true;
1450     }
1451     return false;
1452   };
1453 
1454   if (PV) {
1455     // If we have PhiValues then use it to get the underlying phi values.
1456     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1457     // If we have more phi values than the search depth then return MayAlias
1458     // conservatively to avoid compile time explosion. The worst possible case
1459     // is if both sides are PHI nodes. In which case, this is O(m x n) time
1460     // where 'm' and 'n' are the number of PHI sources.
1461     if (PhiValueSet.size() > MaxLookupSearchDepth)
1462       return AliasResult::MayAlias;
1463     // Add the values to V1Srcs
1464     for (Value *PV1 : PhiValueSet) {
1465       if (CheckForRecPhi(PV1))
1466         continue;
1467       V1Srcs.push_back(PV1);
1468     }
1469   } else {
1470     // If we don't have PhiInfo then just look at the operands of the phi itself
1471     // FIXME: Remove this once we can guarantee that we have PhiInfo always
1472     SmallPtrSet<Value *, 4> UniqueSrc;
1473     Value *OnePhi = nullptr;
1474     for (Value *PV1 : PN->incoming_values()) {
1475       if (isa<PHINode>(PV1)) {
1476         if (OnePhi && OnePhi != PV1) {
1477           // To control potential compile time explosion, we choose to be
1478           // conserviate when we have more than one Phi input.  It is important
1479           // that we handle the single phi case as that lets us handle LCSSA
1480           // phi nodes and (combined with the recursive phi handling) simple
1481           // pointer induction variable patterns.
1482           return AliasResult::MayAlias;
1483         }
1484         OnePhi = PV1;
1485       }
1486 
1487       if (CheckForRecPhi(PV1))
1488         continue;
1489 
1490       if (UniqueSrc.insert(PV1).second)
1491         V1Srcs.push_back(PV1);
1492     }
1493 
1494     if (OnePhi && UniqueSrc.size() > 1)
1495       // Out of an abundance of caution, allow only the trivial lcssa and
1496       // recursive phi cases.
1497       return AliasResult::MayAlias;
1498   }
1499 
1500   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1501   // value. This should only be possible in blocks unreachable from the entry
1502   // block, but return MayAlias just in case.
1503   if (V1Srcs.empty())
1504     return AliasResult::MayAlias;
1505 
1506   // If this PHI node is recursive, indicate that the pointer may be moved
1507   // across iterations. We can only prove NoAlias if different underlying
1508   // objects are involved.
1509   if (isRecursive)
1510     PNSize = LocationSize::beforeOrAfterPointer();
1511 
1512   // In the recursive alias queries below, we may compare values from two
1513   // different loop iterations. Keep track of visited phi blocks, which will
1514   // be used when determining value equivalence.
1515   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1516   auto _ = make_scope_exit([&]() {
1517     if (BlockInserted)
1518       VisitedPhiBBs.erase(PN->getParent());
1519   });
1520 
1521   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1522   // have been cached earlier may no longer be valid. Perform recursive queries
1523   // with a new AAQueryInfo.
1524   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1525   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1526 
1527   AliasResult Alias = getBestAAResults().alias(
1528       MemoryLocation(V2, V2Size),
1529       MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1530 
1531   // Early exit if the check of the first PHI source against V2 is MayAlias.
1532   // Other results are not possible.
1533   if (Alias == AliasResult::MayAlias)
1534     return AliasResult::MayAlias;
1535   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1536   // remain valid to all elements and needs to conservatively return MayAlias.
1537   if (isRecursive && Alias != AliasResult::NoAlias)
1538     return AliasResult::MayAlias;
1539 
1540   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1541   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1542   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1543     Value *V = V1Srcs[i];
1544 
1545     AliasResult ThisAlias = getBestAAResults().alias(
1546         MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1547     Alias = MergeAliasResults(ThisAlias, Alias);
1548     if (Alias == AliasResult::MayAlias)
1549       break;
1550   }
1551 
1552   return Alias;
1553 }
1554 
1555 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1556 /// array references.
1557 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1558                                       const Value *V2, LocationSize V2Size,
1559                                       AAQueryInfo &AAQI) {
1560   // If either of the memory references is empty, it doesn't matter what the
1561   // pointer values are.
1562   if (V1Size.isZero() || V2Size.isZero())
1563     return AliasResult::NoAlias;
1564 
1565   // Strip off any casts if they exist.
1566   V1 = V1->stripPointerCastsForAliasAnalysis();
1567   V2 = V2->stripPointerCastsForAliasAnalysis();
1568 
1569   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1570   // value for undef that aliases nothing in the program.
1571   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1572     return AliasResult::NoAlias;
1573 
1574   // Are we checking for alias of the same value?
1575   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1576   // different iterations. We must therefore make sure that this is not the
1577   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1578   // happen by looking at the visited phi nodes and making sure they cannot
1579   // reach the value.
1580   if (isValueEqualInPotentialCycles(V1, V2))
1581     return AliasResult::MustAlias;
1582 
1583   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1584     return AliasResult::NoAlias; // Scalars cannot alias each other
1585 
1586   // Figure out what objects these things are pointing to if we can.
1587   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1588   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1589 
1590   // Null values in the default address space don't point to any object, so they
1591   // don't alias any other pointer.
1592   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1593     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1594       return AliasResult::NoAlias;
1595   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1596     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1597       return AliasResult::NoAlias;
1598 
1599   if (O1 != O2) {
1600     // If V1/V2 point to two different objects, we know that we have no alias.
1601     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1602       return AliasResult::NoAlias;
1603 
1604     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1605     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1606         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1607       return AliasResult::NoAlias;
1608 
1609     // Function arguments can't alias with things that are known to be
1610     // unambigously identified at the function level.
1611     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1612         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1613       return AliasResult::NoAlias;
1614 
1615     // If one pointer is the result of a call/invoke or load and the other is a
1616     // non-escaping local object within the same function, then we know the
1617     // object couldn't escape to a point where the call could return it.
1618     //
1619     // Note that if the pointers are in different functions, there are a
1620     // variety of complications. A call with a nocapture argument may still
1621     // temporary store the nocapture argument's value in a temporary memory
1622     // location if that memory location doesn't escape. Or it may pass a
1623     // nocapture value to other functions as long as they don't capture it.
1624     if (isEscapeSource(O1) &&
1625         AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1626       return AliasResult::NoAlias;
1627     if (isEscapeSource(O2) &&
1628         AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1629       return AliasResult::NoAlias;
1630   }
1631 
1632   // If the size of one access is larger than the entire object on the other
1633   // side, then we know such behavior is undefined and can assume no alias.
1634   bool NullIsValidLocation = NullPointerIsDefined(&F);
1635   if ((isObjectSmallerThan(
1636           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1637           TLI, NullIsValidLocation)) ||
1638       (isObjectSmallerThan(
1639           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1640           TLI, NullIsValidLocation)))
1641     return AliasResult::NoAlias;
1642 
1643   // If one the accesses may be before the accessed pointer, canonicalize this
1644   // by using unknown after-pointer sizes for both accesses. This is
1645   // equivalent, because regardless of which pointer is lower, one of them
1646   // will always came after the other, as long as the underlying objects aren't
1647   // disjoint. We do this so that the rest of BasicAA does not have to deal
1648   // with accesses before the base pointer, and to improve cache utilization by
1649   // merging equivalent states.
1650   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1651     V1Size = LocationSize::afterPointer();
1652     V2Size = LocationSize::afterPointer();
1653   }
1654 
1655   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1656   // for recursive queries. For this reason, this limit is chosen to be large
1657   // enough to be very rarely hit, while still being small enough to avoid
1658   // stack overflows.
1659   if (AAQI.Depth >= 512)
1660     return AliasResult::MayAlias;
1661 
1662   // Check the cache before climbing up use-def chains. This also terminates
1663   // otherwise infinitely recursive queries.
1664   AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1665   const bool Swapped = V1 > V2;
1666   if (Swapped)
1667     std::swap(Locs.first, Locs.second);
1668   const auto &Pair = AAQI.AliasCache.try_emplace(
1669       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1670   if (!Pair.second) {
1671     auto &Entry = Pair.first->second;
1672     if (!Entry.isDefinitive()) {
1673       // Remember that we used an assumption.
1674       ++Entry.NumAssumptionUses;
1675       ++AAQI.NumAssumptionUses;
1676     }
1677     // Cache contains sorted {V1,V2} pairs but we should return original order.
1678     auto Result = Entry.Result;
1679     Result.swap(Swapped);
1680     return Result;
1681   }
1682 
1683   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1684   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1685   AliasResult Result =
1686       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1687 
1688   auto It = AAQI.AliasCache.find(Locs);
1689   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1690   auto &Entry = It->second;
1691 
1692   // Check whether a NoAlias assumption has been used, but disproven.
1693   bool AssumptionDisproven =
1694       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1695   if (AssumptionDisproven)
1696     Result = AliasResult::MayAlias;
1697 
1698   // This is a definitive result now, when considered as a root query.
1699   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1700   Entry.Result = Result;
1701   // Cache contains sorted {V1,V2} pairs.
1702   Entry.Result.swap(Swapped);
1703   Entry.NumAssumptionUses = -1;
1704 
1705   // If the assumption has been disproven, remove any results that may have
1706   // been based on this assumption. Do this after the Entry updates above to
1707   // avoid iterator invalidation.
1708   if (AssumptionDisproven)
1709     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1710       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1711 
1712   // The result may still be based on assumptions higher up in the chain.
1713   // Remember it, so it can be purged from the cache later.
1714   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1715       Result != AliasResult::MayAlias)
1716     AAQI.AssumptionBasedResults.push_back(Locs);
1717   return Result;
1718 }
1719 
1720 AliasResult BasicAAResult::aliasCheckRecursive(
1721     const Value *V1, LocationSize V1Size,
1722     const Value *V2, LocationSize V2Size,
1723     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1724   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1725     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1726     if (Result != AliasResult::MayAlias)
1727       return Result;
1728   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1729     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1730     if (Result != AliasResult::MayAlias)
1731       return Result;
1732   }
1733 
1734   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1735     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1736     if (Result != AliasResult::MayAlias)
1737       return Result;
1738   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1739     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1740     if (Result != AliasResult::MayAlias)
1741       return Result;
1742   }
1743 
1744   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1745     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1746     if (Result != AliasResult::MayAlias)
1747       return Result;
1748   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1749     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1750     if (Result != AliasResult::MayAlias)
1751       return Result;
1752   }
1753 
1754   // If both pointers are pointing into the same object and one of them
1755   // accesses the entire object, then the accesses must overlap in some way.
1756   if (O1 == O2) {
1757     bool NullIsValidLocation = NullPointerIsDefined(&F);
1758     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1759         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1760          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1761       return AliasResult::PartialAlias;
1762   }
1763 
1764   return AliasResult::MayAlias;
1765 }
1766 
1767 /// Check whether two Values can be considered equivalent.
1768 ///
1769 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1770 /// they can not be part of a cycle in the value graph by looking at all
1771 /// visited phi nodes an making sure that the phis cannot reach the value. We
1772 /// have to do this because we are looking through phi nodes (That is we say
1773 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1774 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1775                                                   const Value *V2) {
1776   if (V != V2)
1777     return false;
1778 
1779   const Instruction *Inst = dyn_cast<Instruction>(V);
1780   if (!Inst)
1781     return true;
1782 
1783   if (VisitedPhiBBs.empty())
1784     return true;
1785 
1786   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1787     return false;
1788 
1789   // Make sure that the visited phis cannot reach the Value. This ensures that
1790   // the Values cannot come from different iterations of a potential cycle the
1791   // phi nodes could be involved in.
1792   for (auto *P : VisitedPhiBBs)
1793     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1794       return false;
1795 
1796   return true;
1797 }
1798 
1799 /// Computes the symbolic difference between two de-composed GEPs.
1800 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1801                                            const DecomposedGEP &SrcGEP) {
1802   DestGEP.Offset -= SrcGEP.Offset;
1803   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1804     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1805     // than a few variable indexes.
1806     bool Found = false;
1807     for (auto I : enumerate(DestGEP.VarIndices)) {
1808       VariableGEPIndex &Dest = I.value();
1809       if (!isValueEqualInPotentialCycles(Dest.V, Src.V) ||
1810           Dest.ZExtBits != Src.ZExtBits || Dest.SExtBits != Src.SExtBits)
1811         continue;
1812 
1813       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1814       // goes to zero, remove the entry.
1815       if (Dest.Scale != Src.Scale) {
1816         Dest.Scale -= Src.Scale;
1817         Dest.IsNSW = false;
1818       } else {
1819         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1820       }
1821       Found = true;
1822       break;
1823     }
1824 
1825     // If we didn't consume this entry, add it to the end of the Dest list.
1826     if (!Found) {
1827       VariableGEPIndex Entry = {Src.V,      Src.ZExtBits, Src.SExtBits,
1828                                 -Src.Scale, Src.CxtI,     Src.IsNSW};
1829       DestGEP.VarIndices.push_back(Entry);
1830     }
1831   }
1832 }
1833 
1834 bool BasicAAResult::constantOffsetHeuristic(
1835     const DecomposedGEP &GEP, LocationSize MaybeV1Size,
1836     LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
1837   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1838       !MaybeV2Size.hasValue())
1839     return false;
1840 
1841   const uint64_t V1Size = MaybeV1Size.getValue();
1842   const uint64_t V2Size = MaybeV2Size.getValue();
1843 
1844   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1845 
1846   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1847       Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
1848     return false;
1849 
1850   // We'll strip off the Extensions of Var0 and Var1 and do another round
1851   // of GetLinearExpression decomposition. In the example above, if Var0
1852   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1853 
1854   LinearExpression E0 =
1855       GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
1856   LinearExpression E1 =
1857       GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
1858   if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
1859       E0.Val.SExtBits != E1.Val.SExtBits ||
1860       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1861     return false;
1862 
1863   // We have a hit - Var0 and Var1 only differ by a constant offset!
1864 
1865   // If we've been sext'ed then zext'd the maximum difference between Var0 and
1866   // Var1 is possible to calculate, but we're just interested in the absolute
1867   // minimum difference between the two. The minimum distance may occur due to
1868   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1869   // the minimum distance between %i and %i + 5 is 3.
1870   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1871   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1872   APInt MinDiffBytes =
1873     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1874 
1875   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1876   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1877   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1878   // V2Size can fit in the MinDiffBytes gap.
1879   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1880          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1881 }
1882 
1883 //===----------------------------------------------------------------------===//
1884 // BasicAliasAnalysis Pass
1885 //===----------------------------------------------------------------------===//
1886 
1887 AnalysisKey BasicAA::Key;
1888 
1889 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1890   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1891   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1892   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1893   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1894   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1895 }
1896 
1897 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1898   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1899 }
1900 
1901 char BasicAAWrapperPass::ID = 0;
1902 
1903 void BasicAAWrapperPass::anchor() {}
1904 
1905 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1906                       "Basic Alias Analysis (stateless AA impl)", true, true)
1907 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1908 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1909 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1910 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1911 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1912                     "Basic Alias Analysis (stateless AA impl)", true, true)
1913 
1914 FunctionPass *llvm::createBasicAAWrapperPass() {
1915   return new BasicAAWrapperPass();
1916 }
1917 
1918 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1919   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1920   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1921   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1922   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1923 
1924   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1925                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1926                                  &DTWP.getDomTree(),
1927                                  PVWP ? &PVWP->getResult() : nullptr));
1928 
1929   return false;
1930 }
1931 
1932 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1933   AU.setPreservesAll();
1934   AU.addRequiredTransitive<AssumptionCacheTracker>();
1935   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1936   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1937   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1938 }
1939 
1940 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1941   return BasicAAResult(
1942       F.getParent()->getDataLayout(), F,
1943       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1944       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1945 }
1946