1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/MemoryBuiltins.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/Analysis/PhiValues.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/IR/Argument.h"
31 #include "llvm/IR/Attributes.h"
32 #include "llvm/IR/Constant.h"
33 #include "llvm/IR/ConstantRange.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Operator.h"
48 #include "llvm/IR/Type.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/InitializePasses.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Compiler.h"
56 #include "llvm/Support/KnownBits.h"
57 #include <cassert>
58 #include <cstdint>
59 #include <cstdlib>
60 #include <utility>
61 
62 #define DEBUG_TYPE "basicaa"
63 
64 using namespace llvm;
65 
66 /// Enable analysis of recursive PHI nodes.
67 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
68                                           cl::init(true));
69 
70 /// SearchLimitReached / SearchTimes shows how often the limit of
71 /// to decompose GEPs is reached. It will affect the precision
72 /// of basic alias analysis.
73 STATISTIC(SearchLimitReached, "Number of times the limit to "
74                               "decompose GEPs is reached");
75 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
76 
77 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
78 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
79 /// careful with value equivalence. We use reachability to make sure a value
80 /// cannot be involved in a cycle.
81 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
82 
83 // The max limit of the search depth in DecomposeGEPExpression() and
84 // getUnderlyingObject().
85 static const unsigned MaxLookupSearchDepth = 6;
86 
87 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
88                                FunctionAnalysisManager::Invalidator &Inv) {
89   // We don't care if this analysis itself is preserved, it has no state. But
90   // we need to check that the analyses it depends on have been. Note that we
91   // may be created without handles to some analyses and in that case don't
92   // depend on them.
93   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
94       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
95       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
96     return true;
97 
98   // Otherwise this analysis result remains valid.
99   return false;
100 }
101 
102 //===----------------------------------------------------------------------===//
103 // Useful predicates
104 //===----------------------------------------------------------------------===//
105 
106 /// Returns true if the pointer is one which would have been considered an
107 /// escape by isNonEscapingLocalObject.
108 static bool isEscapeSource(const Value *V) {
109   if (auto *CB = dyn_cast<CallBase>(V))
110     return !isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CB,
111                                                                         true);
112 
113   // The load case works because isNonEscapingLocalObject considers all
114   // stores to be escapes (it passes true for the StoreCaptures argument
115   // to PointerMayBeCaptured).
116   if (isa<LoadInst>(V))
117     return true;
118 
119   // The inttoptr case works because isNonEscapingLocalObject considers all
120   // means of converting or equating a pointer to an int (ptrtoint, ptr store
121   // which could be followed by an integer load, ptr<->int compare) as
122   // escaping, and objects located at well-known addresses via platform-specific
123   // means cannot be considered non-escaping local objects.
124   if (isa<IntToPtrInst>(V))
125     return true;
126 
127   return false;
128 }
129 
130 /// Returns the size of the object specified by V or UnknownSize if unknown.
131 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
132                               const TargetLibraryInfo &TLI,
133                               bool NullIsValidLoc,
134                               bool RoundToAlign = false) {
135   uint64_t Size;
136   ObjectSizeOpts Opts;
137   Opts.RoundToAlign = RoundToAlign;
138   Opts.NullIsUnknownSize = NullIsValidLoc;
139   if (getObjectSize(V, Size, DL, &TLI, Opts))
140     return Size;
141   return MemoryLocation::UnknownSize;
142 }
143 
144 /// Returns true if we can prove that the object specified by V is smaller than
145 /// Size.
146 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
147                                 const DataLayout &DL,
148                                 const TargetLibraryInfo &TLI,
149                                 bool NullIsValidLoc) {
150   // Note that the meanings of the "object" are slightly different in the
151   // following contexts:
152   //    c1: llvm::getObjectSize()
153   //    c2: llvm.objectsize() intrinsic
154   //    c3: isObjectSmallerThan()
155   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
156   // refers to the "entire object".
157   //
158   //  Consider this example:
159   //     char *p = (char*)malloc(100)
160   //     char *q = p+80;
161   //
162   //  In the context of c1 and c2, the "object" pointed by q refers to the
163   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
164   //
165   //  However, in the context of c3, the "object" refers to the chunk of memory
166   // being allocated. So, the "object" has 100 bytes, and q points to the middle
167   // the "object". In case q is passed to isObjectSmallerThan() as the 1st
168   // parameter, before the llvm::getObjectSize() is called to get the size of
169   // entire object, we should:
170   //    - either rewind the pointer q to the base-address of the object in
171   //      question (in this case rewind to p), or
172   //    - just give up. It is up to caller to make sure the pointer is pointing
173   //      to the base address the object.
174   //
175   // We go for 2nd option for simplicity.
176   if (!isIdentifiedObject(V))
177     return false;
178 
179   // This function needs to use the aligned object size because we allow
180   // reads a bit past the end given sufficient alignment.
181   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
182                                       /*RoundToAlign*/ true);
183 
184   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
185 }
186 
187 /// Return the minimal extent from \p V to the end of the underlying object,
188 /// assuming the result is used in an aliasing query. E.g., we do use the query
189 /// location size and the fact that null pointers cannot alias here.
190 static uint64_t getMinimalExtentFrom(const Value &V,
191                                      const LocationSize &LocSize,
192                                      const DataLayout &DL,
193                                      bool NullIsValidLoc) {
194   // If we have dereferenceability information we know a lower bound for the
195   // extent as accesses for a lower offset would be valid. We need to exclude
196   // the "or null" part if null is a valid pointer. We can ignore frees, as an
197   // access after free would be undefined behavior.
198   bool CanBeNull, CanBeFreed;
199   uint64_t DerefBytes =
200     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
201   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
202   // If queried with a precise location size, we assume that location size to be
203   // accessed, thus valid.
204   if (LocSize.isPrecise())
205     DerefBytes = std::max(DerefBytes, LocSize.getValue());
206   return DerefBytes;
207 }
208 
209 /// Returns true if we can prove that the object specified by V has size Size.
210 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
211                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
212   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
213   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
214 }
215 
216 //===----------------------------------------------------------------------===//
217 // CaptureInfo implementations
218 //===----------------------------------------------------------------------===//
219 
220 CaptureInfo::~CaptureInfo() = default;
221 
222 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
223                                                 const Instruction *I) {
224   return isNonEscapingLocalObject(Object, &IsCapturedCache);
225 }
226 
227 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
228                                                  const Instruction *I) {
229   if (!isIdentifiedFunctionLocal(Object))
230     return false;
231 
232   auto Iter = EarliestEscapes.insert({Object, nullptr});
233   if (Iter.second) {
234     Instruction *EarliestCapture = FindEarliestCapture(
235         Object, *const_cast<Function *>(I->getFunction()),
236         /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues);
237     if (EarliestCapture) {
238       auto Ins = Inst2Obj.insert({EarliestCapture, {}});
239       Ins.first->second.push_back(Object);
240     }
241     Iter.first->second = EarliestCapture;
242   }
243 
244   // No capturing instruction.
245   if (!Iter.first->second)
246     return true;
247 
248   return I != Iter.first->second &&
249          !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
250 }
251 
252 void EarliestEscapeInfo::removeInstruction(Instruction *I) {
253   auto Iter = Inst2Obj.find(I);
254   if (Iter != Inst2Obj.end()) {
255     for (const Value *Obj : Iter->second)
256       EarliestEscapes.erase(Obj);
257     Inst2Obj.erase(I);
258   }
259 }
260 
261 //===----------------------------------------------------------------------===//
262 // GetElementPtr Instruction Decomposition and Analysis
263 //===----------------------------------------------------------------------===//
264 
265 namespace {
266 /// Represents zext(sext(trunc(V))).
267 struct CastedValue {
268   const Value *V;
269   unsigned ZExtBits = 0;
270   unsigned SExtBits = 0;
271   unsigned TruncBits = 0;
272 
273   explicit CastedValue(const Value *V) : V(V) {}
274   explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
275                        unsigned TruncBits)
276       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
277 
278   unsigned getBitWidth() const {
279     return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
280            SExtBits;
281   }
282 
283   CastedValue withValue(const Value *NewV) const {
284     return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
285   }
286 
287   /// Replace V with zext(NewV)
288   CastedValue withZExtOfValue(const Value *NewV) const {
289     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
290                         NewV->getType()->getPrimitiveSizeInBits();
291     if (ExtendBy <= TruncBits)
292       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
293 
294     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
295     ExtendBy -= TruncBits;
296     return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
297   }
298 
299   /// Replace V with sext(NewV)
300   CastedValue withSExtOfValue(const Value *NewV) const {
301     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
302                         NewV->getType()->getPrimitiveSizeInBits();
303     if (ExtendBy <= TruncBits)
304       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
305 
306     // zext(sext(sext(NewV)))
307     ExtendBy -= TruncBits;
308     return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
309   }
310 
311   APInt evaluateWith(APInt N) const {
312     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
313            "Incompatible bit width");
314     if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
315     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
316     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
317     return N;
318   }
319 
320   ConstantRange evaluateWith(ConstantRange N) const {
321     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
322            "Incompatible bit width");
323     if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
324     if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
325     if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
326     return N;
327   }
328 
329   bool canDistributeOver(bool NUW, bool NSW) const {
330     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
331     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
332     // trunc(x op y) == trunc(x) op trunc(y)
333     return (!ZExtBits || NUW) && (!SExtBits || NSW);
334   }
335 
336   bool hasSameCastsAs(const CastedValue &Other) const {
337     return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
338            TruncBits == Other.TruncBits;
339   }
340 };
341 
342 /// Represents zext(sext(trunc(V))) * Scale + Offset.
343 struct LinearExpression {
344   CastedValue Val;
345   APInt Scale;
346   APInt Offset;
347 
348   /// True if all operations in this expression are NSW.
349   bool IsNSW;
350 
351   LinearExpression(const CastedValue &Val, const APInt &Scale,
352                    const APInt &Offset, bool IsNSW)
353       : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
354 
355   LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
356     unsigned BitWidth = Val.getBitWidth();
357     Scale = APInt(BitWidth, 1);
358     Offset = APInt(BitWidth, 0);
359   }
360 
361   LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
362     // The check for zero offset is necessary, because generally
363     // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
364     bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
365     return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
366   }
367 };
368 }
369 
370 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
371 /// B are constant integers.
372 static LinearExpression GetLinearExpression(
373     const CastedValue &Val,  const DataLayout &DL, unsigned Depth,
374     AssumptionCache *AC, DominatorTree *DT) {
375   // Limit our recursion depth.
376   if (Depth == 6)
377     return Val;
378 
379   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
380     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
381                             Val.evaluateWith(Const->getValue()), true);
382 
383   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
384     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
385       APInt RHS = Val.evaluateWith(RHSC->getValue());
386       // The only non-OBO case we deal with is or, and only limited to the
387       // case where it is both nuw and nsw.
388       bool NUW = true, NSW = true;
389       if (isa<OverflowingBinaryOperator>(BOp)) {
390         NUW &= BOp->hasNoUnsignedWrap();
391         NSW &= BOp->hasNoSignedWrap();
392       }
393       if (!Val.canDistributeOver(NUW, NSW))
394         return Val;
395 
396       // While we can distribute over trunc, we cannot preserve nowrap flags
397       // in that case.
398       if (Val.TruncBits)
399         NUW = NSW = false;
400 
401       LinearExpression E(Val);
402       switch (BOp->getOpcode()) {
403       default:
404         // We don't understand this instruction, so we can't decompose it any
405         // further.
406         return Val;
407       case Instruction::Or:
408         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
409         // analyze it.
410         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
411                                BOp, DT))
412           return Val;
413 
414         LLVM_FALLTHROUGH;
415       case Instruction::Add: {
416         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
417                                 Depth + 1, AC, DT);
418         E.Offset += RHS;
419         E.IsNSW &= NSW;
420         break;
421       }
422       case Instruction::Sub: {
423         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
424                                 Depth + 1, AC, DT);
425         E.Offset -= RHS;
426         E.IsNSW &= NSW;
427         break;
428       }
429       case Instruction::Mul:
430         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
431                                 Depth + 1, AC, DT)
432                 .mul(RHS, NSW);
433         break;
434       case Instruction::Shl:
435         // We're trying to linearize an expression of the kind:
436         //   shl i8 -128, 36
437         // where the shift count exceeds the bitwidth of the type.
438         // We can't decompose this further (the expression would return
439         // a poison value).
440         if (RHS.getLimitedValue() > Val.getBitWidth())
441           return Val;
442 
443         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
444                                 Depth + 1, AC, DT);
445         E.Offset <<= RHS.getLimitedValue();
446         E.Scale <<= RHS.getLimitedValue();
447         E.IsNSW &= NSW;
448         break;
449       }
450       return E;
451     }
452   }
453 
454   if (isa<ZExtInst>(Val.V))
455     return GetLinearExpression(
456         Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
457         DL, Depth + 1, AC, DT);
458 
459   if (isa<SExtInst>(Val.V))
460     return GetLinearExpression(
461         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
462         DL, Depth + 1, AC, DT);
463 
464   return Val;
465 }
466 
467 /// To ensure a pointer offset fits in an integer of size IndexSize
468 /// (in bits) when that size is smaller than the maximum index size. This is
469 /// an issue, for example, in particular for 32b pointers with negative indices
470 /// that rely on two's complement wrap-arounds for precise alias information
471 /// where the maximum index size is 64b.
472 static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
473   assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
474   unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
475   return (Offset << ShiftBits).ashr(ShiftBits);
476 }
477 
478 namespace {
479 // A linear transformation of a Value; this class represents
480 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
481 struct VariableGEPIndex {
482   CastedValue Val;
483   APInt Scale;
484 
485   // Context instruction to use when querying information about this index.
486   const Instruction *CxtI;
487 
488   /// True if all operations in this expression are NSW.
489   bool IsNSW;
490 
491   void dump() const {
492     print(dbgs());
493     dbgs() << "\n";
494   }
495   void print(raw_ostream &OS) const {
496     OS << "(V=" << Val.V->getName()
497        << ", zextbits=" << Val.ZExtBits
498        << ", sextbits=" << Val.SExtBits
499        << ", truncbits=" << Val.TruncBits
500        << ", scale=" << Scale << ")";
501   }
502 };
503 }
504 
505 // Represents the internal structure of a GEP, decomposed into a base pointer,
506 // constant offsets, and variable scaled indices.
507 struct BasicAAResult::DecomposedGEP {
508   // Base pointer of the GEP
509   const Value *Base;
510   // Total constant offset from base.
511   APInt Offset;
512   // Scaled variable (non-constant) indices.
513   SmallVector<VariableGEPIndex, 4> VarIndices;
514   // Are all operations inbounds GEPs or non-indexing operations?
515   // (None iff expression doesn't involve any geps)
516   Optional<bool> InBounds;
517 
518   void dump() const {
519     print(dbgs());
520     dbgs() << "\n";
521   }
522   void print(raw_ostream &OS) const {
523     OS << "(DecomposedGEP Base=" << Base->getName()
524        << ", Offset=" << Offset
525        << ", VarIndices=[";
526     for (size_t i = 0; i < VarIndices.size(); i++) {
527       if (i != 0)
528         OS << ", ";
529       VarIndices[i].print(OS);
530     }
531     OS << "])";
532   }
533 };
534 
535 
536 /// If V is a symbolic pointer expression, decompose it into a base pointer
537 /// with a constant offset and a number of scaled symbolic offsets.
538 ///
539 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
540 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
541 /// specified amount, but which may have other unrepresented high bits. As
542 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
543 BasicAAResult::DecomposedGEP
544 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
545                                       AssumptionCache *AC, DominatorTree *DT) {
546   // Limit recursion depth to limit compile time in crazy cases.
547   unsigned MaxLookup = MaxLookupSearchDepth;
548   SearchTimes++;
549   const Instruction *CxtI = dyn_cast<Instruction>(V);
550 
551   unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
552   DecomposedGEP Decomposed;
553   Decomposed.Offset = APInt(MaxIndexSize, 0);
554   do {
555     // See if this is a bitcast or GEP.
556     const Operator *Op = dyn_cast<Operator>(V);
557     if (!Op) {
558       // The only non-operator case we can handle are GlobalAliases.
559       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
560         if (!GA->isInterposable()) {
561           V = GA->getAliasee();
562           continue;
563         }
564       }
565       Decomposed.Base = V;
566       return Decomposed;
567     }
568 
569     if (Op->getOpcode() == Instruction::BitCast ||
570         Op->getOpcode() == Instruction::AddrSpaceCast) {
571       V = Op->getOperand(0);
572       continue;
573     }
574 
575     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
576     if (!GEPOp) {
577       if (const auto *PHI = dyn_cast<PHINode>(V)) {
578         // Look through single-arg phi nodes created by LCSSA.
579         if (PHI->getNumIncomingValues() == 1) {
580           V = PHI->getIncomingValue(0);
581           continue;
582         }
583       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
584         // CaptureTracking can know about special capturing properties of some
585         // intrinsics like launder.invariant.group, that can't be expressed with
586         // the attributes, but have properties like returning aliasing pointer.
587         // Because some analysis may assume that nocaptured pointer is not
588         // returned from some special intrinsic (because function would have to
589         // be marked with returns attribute), it is crucial to use this function
590         // because it should be in sync with CaptureTracking. Not using it may
591         // cause weird miscompilations where 2 aliasing pointers are assumed to
592         // noalias.
593         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
594           V = RP;
595           continue;
596         }
597       }
598 
599       Decomposed.Base = V;
600       return Decomposed;
601     }
602 
603     // Track whether we've seen at least one in bounds gep, and if so, whether
604     // all geps parsed were in bounds.
605     if (Decomposed.InBounds == None)
606       Decomposed.InBounds = GEPOp->isInBounds();
607     else if (!GEPOp->isInBounds())
608       Decomposed.InBounds = false;
609 
610     assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
611 
612     // Don't attempt to analyze GEPs if index scale is not a compile-time
613     // constant.
614     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
615       Decomposed.Base = V;
616       return Decomposed;
617     }
618 
619     unsigned AS = GEPOp->getPointerAddressSpace();
620     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
621     gep_type_iterator GTI = gep_type_begin(GEPOp);
622     unsigned IndexSize = DL.getIndexSizeInBits(AS);
623     // Assume all GEP operands are constants until proven otherwise.
624     bool GepHasConstantOffset = true;
625     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
626          I != E; ++I, ++GTI) {
627       const Value *Index = *I;
628       // Compute the (potentially symbolic) offset in bytes for this index.
629       if (StructType *STy = GTI.getStructTypeOrNull()) {
630         // For a struct, add the member offset.
631         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
632         if (FieldNo == 0)
633           continue;
634 
635         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
636         continue;
637       }
638 
639       // For an array/pointer, add the element offset, explicitly scaled.
640       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
641         if (CIdx->isZero())
642           continue;
643         Decomposed.Offset +=
644             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
645             CIdx->getValue().sextOrTrunc(MaxIndexSize);
646         continue;
647       }
648 
649       GepHasConstantOffset = false;
650 
651       // If the integer type is smaller than the index size, it is implicitly
652       // sign extended or truncated to index size.
653       unsigned Width = Index->getType()->getIntegerBitWidth();
654       unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
655       unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
656       LinearExpression LE = GetLinearExpression(
657           CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
658 
659       // Scale by the type size.
660       unsigned TypeSize =
661           DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
662       LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
663       Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
664       APInt Scale = LE.Scale.sext(MaxIndexSize);
665 
666       // If we already had an occurrence of this index variable, merge this
667       // scale into it.  For example, we want to handle:
668       //   A[x][x] -> x*16 + x*4 -> x*20
669       // This also ensures that 'x' only appears in the index list once.
670       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
671         if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
672             Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
673           Scale += Decomposed.VarIndices[i].Scale;
674           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
675           break;
676         }
677       }
678 
679       // Make sure that we have a scale that makes sense for this target's
680       // index size.
681       Scale = adjustToIndexSize(Scale, IndexSize);
682 
683       if (!!Scale) {
684         VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
685         Decomposed.VarIndices.push_back(Entry);
686       }
687     }
688 
689     // Take care of wrap-arounds
690     if (GepHasConstantOffset)
691       Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);
692 
693     // Analyze the base pointer next.
694     V = GEPOp->getOperand(0);
695   } while (--MaxLookup);
696 
697   // If the chain of expressions is too deep, just return early.
698   Decomposed.Base = V;
699   SearchLimitReached++;
700   return Decomposed;
701 }
702 
703 /// Returns whether the given pointer value points to memory that is local to
704 /// the function, with global constants being considered local to all
705 /// functions.
706 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
707                                            AAQueryInfo &AAQI, bool OrLocal) {
708   assert(Visited.empty() && "Visited must be cleared after use!");
709 
710   unsigned MaxLookup = 8;
711   SmallVector<const Value *, 16> Worklist;
712   Worklist.push_back(Loc.Ptr);
713   do {
714     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
715     if (!Visited.insert(V).second) {
716       Visited.clear();
717       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
718     }
719 
720     // An alloca instruction defines local memory.
721     if (OrLocal && isa<AllocaInst>(V))
722       continue;
723 
724     // A global constant counts as local memory for our purposes.
725     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
726       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
727       // global to be marked constant in some modules and non-constant in
728       // others.  GV may even be a declaration, not a definition.
729       if (!GV->isConstant()) {
730         Visited.clear();
731         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
732       }
733       continue;
734     }
735 
736     // If both select values point to local memory, then so does the select.
737     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
738       Worklist.push_back(SI->getTrueValue());
739       Worklist.push_back(SI->getFalseValue());
740       continue;
741     }
742 
743     // If all values incoming to a phi node point to local memory, then so does
744     // the phi.
745     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
746       // Don't bother inspecting phi nodes with many operands.
747       if (PN->getNumIncomingValues() > MaxLookup) {
748         Visited.clear();
749         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
750       }
751       append_range(Worklist, PN->incoming_values());
752       continue;
753     }
754 
755     // Otherwise be conservative.
756     Visited.clear();
757     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
758   } while (!Worklist.empty() && --MaxLookup);
759 
760   Visited.clear();
761   return Worklist.empty();
762 }
763 
764 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
765   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
766   return II && II->getIntrinsicID() == IID;
767 }
768 
769 /// Returns the behavior when calling the given call site.
770 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
771   if (Call->doesNotAccessMemory())
772     // Can't do better than this.
773     return FMRB_DoesNotAccessMemory;
774 
775   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
776 
777   // If the callsite knows it only reads memory, don't return worse
778   // than that.
779   if (Call->onlyReadsMemory())
780     Min = FMRB_OnlyReadsMemory;
781   else if (Call->onlyWritesMemory())
782     Min = FMRB_OnlyWritesMemory;
783 
784   if (Call->onlyAccessesArgMemory())
785     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
786   else if (Call->onlyAccessesInaccessibleMemory())
787     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
788   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
789     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
790 
791   // If the call has operand bundles then aliasing attributes from the function
792   // it calls do not directly apply to the call.  This can be made more precise
793   // in the future.
794   if (!Call->hasOperandBundles())
795     if (const Function *F = Call->getCalledFunction())
796       Min =
797           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
798 
799   return Min;
800 }
801 
802 /// Returns the behavior when calling the given function. For use when the call
803 /// site is not known.
804 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
805   // If the function declares it doesn't access memory, we can't do better.
806   if (F->doesNotAccessMemory())
807     return FMRB_DoesNotAccessMemory;
808 
809   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
810 
811   // If the function declares it only reads memory, go with that.
812   if (F->onlyReadsMemory())
813     Min = FMRB_OnlyReadsMemory;
814   else if (F->onlyWritesMemory())
815     Min = FMRB_OnlyWritesMemory;
816 
817   if (F->onlyAccessesArgMemory())
818     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
819   else if (F->onlyAccessesInaccessibleMemory())
820     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
821   else if (F->onlyAccessesInaccessibleMemOrArgMem())
822     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
823 
824   return Min;
825 }
826 
827 /// Returns true if this is a writeonly (i.e Mod only) parameter.
828 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
829                              const TargetLibraryInfo &TLI) {
830   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
831     return true;
832 
833   // We can bound the aliasing properties of memset_pattern16 just as we can
834   // for memcpy/memset.  This is particularly important because the
835   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
836   // whenever possible.
837   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
838   // attributes.
839   LibFunc F;
840   if (Call->getCalledFunction() &&
841       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
842       F == LibFunc_memset_pattern16 && TLI.has(F))
843     if (ArgIdx == 0)
844       return true;
845 
846   // TODO: memset_pattern4, memset_pattern8
847   // TODO: _chk variants
848   // TODO: strcmp, strcpy
849 
850   return false;
851 }
852 
853 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
854                                            unsigned ArgIdx) {
855   // Checking for known builtin intrinsics and target library functions.
856   if (isWriteOnlyParam(Call, ArgIdx, TLI))
857     return ModRefInfo::Mod;
858 
859   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
860     return ModRefInfo::Ref;
861 
862   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
863     return ModRefInfo::NoModRef;
864 
865   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
866 }
867 
868 #ifndef NDEBUG
869 static const Function *getParent(const Value *V) {
870   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
871     if (!inst->getParent())
872       return nullptr;
873     return inst->getParent()->getParent();
874   }
875 
876   if (const Argument *arg = dyn_cast<Argument>(V))
877     return arg->getParent();
878 
879   return nullptr;
880 }
881 
882 static bool notDifferentParent(const Value *O1, const Value *O2) {
883 
884   const Function *F1 = getParent(O1);
885   const Function *F2 = getParent(O2);
886 
887   return !F1 || !F2 || F1 == F2;
888 }
889 #endif
890 
891 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
892                                  const MemoryLocation &LocB,
893                                  AAQueryInfo &AAQI) {
894   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
895          "BasicAliasAnalysis doesn't support interprocedural queries.");
896   return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
897 }
898 
899 /// Checks to see if the specified callsite can clobber the specified memory
900 /// object.
901 ///
902 /// Since we only look at local properties of this function, we really can't
903 /// say much about this query.  We do, however, use simple "address taken"
904 /// analysis on local objects.
905 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
906                                         const MemoryLocation &Loc,
907                                         AAQueryInfo &AAQI) {
908   assert(notDifferentParent(Call, Loc.Ptr) &&
909          "AliasAnalysis query involving multiple functions!");
910 
911   const Value *Object = getUnderlyingObject(Loc.Ptr);
912 
913   // Calls marked 'tail' cannot read or write allocas from the current frame
914   // because the current frame might be destroyed by the time they run. However,
915   // a tail call may use an alloca with byval. Calling with byval copies the
916   // contents of the alloca into argument registers or stack slots, so there is
917   // no lifetime issue.
918   if (isa<AllocaInst>(Object))
919     if (const CallInst *CI = dyn_cast<CallInst>(Call))
920       if (CI->isTailCall() &&
921           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
922         return ModRefInfo::NoModRef;
923 
924   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
925   // modify them even though the alloca is not escaped.
926   if (auto *AI = dyn_cast<AllocaInst>(Object))
927     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
928       return ModRefInfo::Mod;
929 
930   // If the pointer is to a locally allocated object that does not escape,
931   // then the call can not mod/ref the pointer unless the call takes the pointer
932   // as an argument, and itself doesn't capture it.
933   if (!isa<Constant>(Object) && Call != Object &&
934       AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
935 
936     // Optimistically assume that call doesn't touch Object and check this
937     // assumption in the following loop.
938     ModRefInfo Result = ModRefInfo::NoModRef;
939     bool IsMustAlias = true;
940 
941     unsigned OperandNo = 0;
942     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
943          CI != CE; ++CI, ++OperandNo) {
944       // Only look at the no-capture or byval pointer arguments.  If this
945       // pointer were passed to arguments that were neither of these, then it
946       // couldn't be no-capture.
947       if (!(*CI)->getType()->isPointerTy() ||
948           (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
949            !Call->isByValArgument(OperandNo)))
950         continue;
951 
952       // Call doesn't access memory through this operand, so we don't care
953       // if it aliases with Object.
954       if (Call->doesNotAccessMemory(OperandNo))
955         continue;
956 
957       // If this is a no-capture pointer argument, see if we can tell that it
958       // is impossible to alias the pointer we're checking.
959       AliasResult AR = getBestAAResults().alias(
960           MemoryLocation::getBeforeOrAfter(*CI),
961           MemoryLocation::getBeforeOrAfter(Object), AAQI);
962       if (AR != AliasResult::MustAlias)
963         IsMustAlias = false;
964       // Operand doesn't alias 'Object', continue looking for other aliases
965       if (AR == AliasResult::NoAlias)
966         continue;
967       // Operand aliases 'Object', but call doesn't modify it. Strengthen
968       // initial assumption and keep looking in case if there are more aliases.
969       if (Call->onlyReadsMemory(OperandNo)) {
970         Result = setRef(Result);
971         continue;
972       }
973       // Operand aliases 'Object' but call only writes into it.
974       if (Call->onlyWritesMemory(OperandNo)) {
975         Result = setMod(Result);
976         continue;
977       }
978       // This operand aliases 'Object' and call reads and writes into it.
979       // Setting ModRef will not yield an early return below, MustAlias is not
980       // used further.
981       Result = ModRefInfo::ModRef;
982       break;
983     }
984 
985     // No operand aliases, reset Must bit. Add below if at least one aliases
986     // and all aliases found are MustAlias.
987     if (isNoModRef(Result))
988       IsMustAlias = false;
989 
990     // Early return if we improved mod ref information
991     if (!isModAndRefSet(Result)) {
992       if (isNoModRef(Result))
993         return ModRefInfo::NoModRef;
994       return IsMustAlias ? setMust(Result) : clearMust(Result);
995     }
996   }
997 
998   // If the call is malloc/calloc like, we can assume that it doesn't
999   // modify any IR visible value.  This is only valid because we assume these
1000   // routines do not read values visible in the IR.  TODO: Consider special
1001   // casing realloc and strdup routines which access only their arguments as
1002   // well.  Or alternatively, replace all of this with inaccessiblememonly once
1003   // that's implemented fully.
1004   if (isMallocOrCallocLikeFn(Call, &TLI)) {
1005     // Be conservative if the accessed pointer may alias the allocation -
1006     // fallback to the generic handling below.
1007     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
1008                                  AAQI) == AliasResult::NoAlias)
1009       return ModRefInfo::NoModRef;
1010   }
1011 
1012   // Ideally, there should be no need to special case for memcpy/memove
1013   // intrinsics here since general machinery (based on memory attributes) should
1014   // already handle it just fine. Unfortunately, it doesn't due to deficiency in
1015   // operand bundles support. At the moment it's not clear if complexity behind
1016   // enhancing general mechanism worths it.
1017   // TODO: Consider improving operand bundles support in general mechanism.
1018   if (auto *Inst = dyn_cast<AnyMemTransferInst>(Call)) {
1019     AliasResult SrcAA =
1020         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
1021     AliasResult DestAA =
1022         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
1023     // It's also possible for Loc to alias both src and dest, or neither.
1024     ModRefInfo rv = ModRefInfo::NoModRef;
1025     if (SrcAA != AliasResult::NoAlias || Call->hasReadingOperandBundles())
1026       rv = setRef(rv);
1027     if (DestAA != AliasResult::NoAlias || Call->hasClobberingOperandBundles())
1028       rv = setMod(rv);
1029     return rv;
1030   }
1031 
1032   // Guard intrinsics are marked as arbitrarily writing so that proper control
1033   // dependencies are maintained but they never mods any particular memory
1034   // location.
1035   //
1036   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1037   // heap state at the point the guard is issued needs to be consistent in case
1038   // the guard invokes the "deopt" continuation.
1039   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
1040     return ModRefInfo::Ref;
1041   // The same applies to deoptimize which is essentially a guard(false).
1042   if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
1043     return ModRefInfo::Ref;
1044 
1045   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1046   // writing so that proper control dependencies are maintained but they never
1047   // mod any particular memory location visible to the IR.
1048   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1049   // intrinsic is now modeled as reading memory. This prevents hoisting the
1050   // invariant.start intrinsic over stores. Consider:
1051   // *ptr = 40;
1052   // *ptr = 50;
1053   // invariant_start(ptr)
1054   // int val = *ptr;
1055   // print(val);
1056   //
1057   // This cannot be transformed to:
1058   //
1059   // *ptr = 40;
1060   // invariant_start(ptr)
1061   // *ptr = 50;
1062   // int val = *ptr;
1063   // print(val);
1064   //
1065   // The transformation will cause the second store to be ignored (based on
1066   // rules of invariant.start)  and print 40, while the first program always
1067   // prints 50.
1068   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1069     return ModRefInfo::Ref;
1070 
1071   // The AAResultBase base class has some smarts, lets use them.
1072   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
1073 }
1074 
1075 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1076                                         const CallBase *Call2,
1077                                         AAQueryInfo &AAQI) {
1078   // Guard intrinsics are marked as arbitrarily writing so that proper control
1079   // dependencies are maintained but they never mods any particular memory
1080   // location.
1081   //
1082   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1083   // heap state at the point the guard is issued needs to be consistent in case
1084   // the guard invokes the "deopt" continuation.
1085 
1086   // NB! This function is *not* commutative, so we special case two
1087   // possibilities for guard intrinsics.
1088 
1089   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1090     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1091                ? ModRefInfo::Ref
1092                : ModRefInfo::NoModRef;
1093 
1094   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1095     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1096                ? ModRefInfo::Mod
1097                : ModRefInfo::NoModRef;
1098 
1099   // The AAResultBase base class has some smarts, lets use them.
1100   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1101 }
1102 
1103 /// Return true if we know V to the base address of the corresponding memory
1104 /// object.  This implies that any address less than V must be out of bounds
1105 /// for the underlying object.  Note that just being isIdentifiedObject() is
1106 /// not enough - For example, a negative offset from a noalias argument or call
1107 /// can be inbounds w.r.t the actual underlying object.
1108 static bool isBaseOfObject(const Value *V) {
1109   // TODO: We can handle other cases here
1110   // 1) For GC languages, arguments to functions are often required to be
1111   //    base pointers.
1112   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1113   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1114 }
1115 
1116 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1117 /// another pointer.
1118 ///
1119 /// We know that V1 is a GEP, but we don't know anything about V2.
1120 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1121 /// V2.
1122 AliasResult BasicAAResult::aliasGEP(
1123     const GEPOperator *GEP1, LocationSize V1Size,
1124     const Value *V2, LocationSize V2Size,
1125     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1126   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1127     // TODO: This limitation exists for compile-time reasons. Relax it if we
1128     // can avoid exponential pathological cases.
1129     if (!isa<GEPOperator>(V2))
1130       return AliasResult::MayAlias;
1131 
1132     // If both accesses have unknown size, we can only check whether the base
1133     // objects don't alias.
1134     AliasResult BaseAlias = getBestAAResults().alias(
1135         MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1136         MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1137     return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1138                                              : AliasResult::MayAlias;
1139   }
1140 
1141   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1142   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1143 
1144   // Bail if we were not able to decompose anything.
1145   if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1146     return AliasResult::MayAlias;
1147 
1148   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1149   // symbolic difference.
1150   subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1151 
1152   // If an inbounds GEP would have to start from an out of bounds address
1153   // for the two to alias, then we can assume noalias.
1154   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1155       V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1156       isBaseOfObject(DecompGEP2.Base))
1157     return AliasResult::NoAlias;
1158 
1159   if (isa<GEPOperator>(V2)) {
1160     // Symmetric case to above.
1161     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1162         V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1163         isBaseOfObject(DecompGEP1.Base))
1164       return AliasResult::NoAlias;
1165   }
1166 
1167   // For GEPs with identical offsets, we can preserve the size and AAInfo
1168   // when performing the alias check on the underlying objects.
1169   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1170     return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
1171                                     MemoryLocation(DecompGEP2.Base, V2Size),
1172                                     AAQI);
1173 
1174   // Do the base pointers alias?
1175   AliasResult BaseAlias = getBestAAResults().alias(
1176       MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1177       MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1178 
1179   // If we get a No or May, then return it immediately, no amount of analysis
1180   // will improve this situation.
1181   if (BaseAlias != AliasResult::MustAlias) {
1182     assert(BaseAlias == AliasResult::NoAlias ||
1183            BaseAlias == AliasResult::MayAlias);
1184     return BaseAlias;
1185   }
1186 
1187   // If there is a constant difference between the pointers, but the difference
1188   // is less than the size of the associated memory object, then we know
1189   // that the objects are partially overlapping.  If the difference is
1190   // greater, we know they do not overlap.
1191   if (DecompGEP1.VarIndices.empty()) {
1192     APInt &Off = DecompGEP1.Offset;
1193 
1194     // Initialize for Off >= 0 (V2 <= GEP1) case.
1195     const Value *LeftPtr = V2;
1196     const Value *RightPtr = GEP1;
1197     LocationSize VLeftSize = V2Size;
1198     LocationSize VRightSize = V1Size;
1199     const bool Swapped = Off.isNegative();
1200 
1201     if (Swapped) {
1202       // Swap if we have the situation where:
1203       // +                +
1204       // | BaseOffset     |
1205       // ---------------->|
1206       // |-->V1Size       |-------> V2Size
1207       // GEP1             V2
1208       std::swap(LeftPtr, RightPtr);
1209       std::swap(VLeftSize, VRightSize);
1210       Off = -Off;
1211     }
1212 
1213     if (!VLeftSize.hasValue())
1214       return AliasResult::MayAlias;
1215 
1216     const uint64_t LSize = VLeftSize.getValue();
1217     if (Off.ult(LSize)) {
1218       // Conservatively drop processing if a phi was visited and/or offset is
1219       // too big.
1220       AliasResult AR = AliasResult::PartialAlias;
1221       if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1222           (Off + VRightSize.getValue()).ule(LSize)) {
1223         // Memory referenced by right pointer is nested. Save the offset in
1224         // cache. Note that originally offset estimated as GEP1-V2, but
1225         // AliasResult contains the shift that represents GEP1+Offset=V2.
1226         AR.setOffset(-Off.getSExtValue());
1227         AR.swap(Swapped);
1228       }
1229       return AR;
1230     }
1231     return AliasResult::NoAlias;
1232   }
1233 
1234   // We need to know both acess sizes for all the following heuristics.
1235   if (!V1Size.hasValue() || !V2Size.hasValue())
1236     return AliasResult::MayAlias;
1237 
1238   APInt GCD;
1239   ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1240   for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1241     const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1242     const APInt &Scale = Index.Scale;
1243     APInt ScaleForGCD = Scale;
1244     if (!Index.IsNSW)
1245       ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1246                                         Scale.countTrailingZeros());
1247 
1248     if (i == 0)
1249       GCD = ScaleForGCD.abs();
1250     else
1251       GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1252 
1253     ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
1254                                             true, &AC, Index.CxtI);
1255     KnownBits Known =
1256         computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1257     CR = CR.intersectWith(
1258         ConstantRange::fromKnownBits(Known, /* Signed */ true),
1259         ConstantRange::Signed);
1260     CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1261 
1262     assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1263            "Bit widths are normalized to MaxIndexSize");
1264     if (Index.IsNSW)
1265       OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
1266     else
1267       OffsetRange = OffsetRange.add(CR.smul_fast(ConstantRange(Scale)));
1268   }
1269 
1270   // We now have accesses at two offsets from the same base:
1271   //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1272   //  2. 0 with size V2Size
1273   // Using arithmetic modulo GCD, the accesses are at
1274   // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1275   // into the range [V2Size..GCD), then we know they cannot overlap.
1276   APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1277   if (ModOffset.isNegative())
1278     ModOffset += GCD; // We want mod, not rem.
1279   if (ModOffset.uge(V2Size.getValue()) &&
1280       (GCD - ModOffset).uge(V1Size.getValue()))
1281     return AliasResult::NoAlias;
1282 
1283   // Compute ranges of potentially accessed bytes for both accesses. If the
1284   // interseciton is empty, there can be no overlap.
1285   unsigned BW = OffsetRange.getBitWidth();
1286   ConstantRange Range1 = OffsetRange.add(
1287       ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1288   ConstantRange Range2 =
1289       ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1290   if (Range1.intersectWith(Range2).isEmptySet())
1291     return AliasResult::NoAlias;
1292 
1293   // Try to determine the range of values for VarIndex such that
1294   // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1295   Optional<APInt> MinAbsVarIndex;
1296   if (DecompGEP1.VarIndices.size() == 1) {
1297     // VarIndex = Scale*V.
1298     const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1299     if (Var.Val.TruncBits == 0 &&
1300         isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1301       // If V != 0, then abs(VarIndex) > 0.
1302       MinAbsVarIndex = APInt(Var.Scale.getBitWidth(), 1);
1303 
1304       // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1305       // potentially wrapping math.
1306       auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1307         if (Var.IsNSW)
1308           return true;
1309 
1310         int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1311         // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1312         // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1313         // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
1314         int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1315         if (MaxScaleValueBW <= 0)
1316           return false;
1317         return Var.Scale.ule(
1318             APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1319       };
1320       // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1321       // presence of potentially wrapping math.
1322       if (MultiplyByScaleNoWrap(Var)) {
1323         // If V != 0 then abs(VarIndex) >= abs(Scale).
1324         MinAbsVarIndex = Var.Scale.abs();
1325       }
1326     }
1327   } else if (DecompGEP1.VarIndices.size() == 2) {
1328     // VarIndex = Scale*V0 + (-Scale)*V1.
1329     // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1330     // Check that VisitedPhiBBs is empty, to avoid reasoning about
1331     // inequality of values across loop iterations.
1332     const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1333     const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1334     if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
1335         Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
1336         isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1337                         DT))
1338       MinAbsVarIndex = Var0.Scale.abs();
1339   }
1340 
1341   if (MinAbsVarIndex) {
1342     // The constant offset will have added at least +/-MinAbsVarIndex to it.
1343     APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1344     APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1345     // We know that Offset <= OffsetLo || Offset >= OffsetHi
1346     if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1347         OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1348       return AliasResult::NoAlias;
1349   }
1350 
1351   if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1352     return AliasResult::NoAlias;
1353 
1354   // Statically, we can see that the base objects are the same, but the
1355   // pointers have dynamic offsets which we can't resolve. And none of our
1356   // little tricks above worked.
1357   return AliasResult::MayAlias;
1358 }
1359 
1360 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1361   // If the results agree, take it.
1362   if (A == B)
1363     return A;
1364   // A mix of PartialAlias and MustAlias is PartialAlias.
1365   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1366       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1367     return AliasResult::PartialAlias;
1368   // Otherwise, we don't know anything.
1369   return AliasResult::MayAlias;
1370 }
1371 
1372 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1373 /// against another.
1374 AliasResult
1375 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1376                            const Value *V2, LocationSize V2Size,
1377                            AAQueryInfo &AAQI) {
1378   // If the values are Selects with the same condition, we can do a more precise
1379   // check: just check for aliases between the values on corresponding arms.
1380   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1381     if (SI->getCondition() == SI2->getCondition()) {
1382       AliasResult Alias = getBestAAResults().alias(
1383           MemoryLocation(SI->getTrueValue(), SISize),
1384           MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1385       if (Alias == AliasResult::MayAlias)
1386         return AliasResult::MayAlias;
1387       AliasResult ThisAlias = getBestAAResults().alias(
1388           MemoryLocation(SI->getFalseValue(), SISize),
1389           MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1390       return MergeAliasResults(ThisAlias, Alias);
1391     }
1392 
1393   // If both arms of the Select node NoAlias or MustAlias V2, then returns
1394   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1395   AliasResult Alias =
1396       getBestAAResults().alias(MemoryLocation(SI->getTrueValue(), SISize),
1397                                MemoryLocation(V2, V2Size), AAQI);
1398   if (Alias == AliasResult::MayAlias)
1399     return AliasResult::MayAlias;
1400 
1401   AliasResult ThisAlias =
1402       getBestAAResults().alias(MemoryLocation(SI->getFalseValue(), SISize),
1403                                MemoryLocation(V2, V2Size), AAQI);
1404   return MergeAliasResults(ThisAlias, Alias);
1405 }
1406 
1407 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1408 /// another.
1409 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1410                                     const Value *V2, LocationSize V2Size,
1411                                     AAQueryInfo &AAQI) {
1412   if (!PN->getNumIncomingValues())
1413     return AliasResult::NoAlias;
1414   // If the values are PHIs in the same block, we can do a more precise
1415   // as well as efficient check: just check for aliases between the values
1416   // on corresponding edges.
1417   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1418     if (PN2->getParent() == PN->getParent()) {
1419       Optional<AliasResult> Alias;
1420       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1421         AliasResult ThisAlias = getBestAAResults().alias(
1422             MemoryLocation(PN->getIncomingValue(i), PNSize),
1423             MemoryLocation(
1424                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1425             AAQI);
1426         if (Alias)
1427           *Alias = MergeAliasResults(*Alias, ThisAlias);
1428         else
1429           Alias = ThisAlias;
1430         if (*Alias == AliasResult::MayAlias)
1431           break;
1432       }
1433       return *Alias;
1434     }
1435 
1436   SmallVector<Value *, 4> V1Srcs;
1437   // If a phi operand recurses back to the phi, we can still determine NoAlias
1438   // if we don't alias the underlying objects of the other phi operands, as we
1439   // know that the recursive phi needs to be based on them in some way.
1440   bool isRecursive = false;
1441   auto CheckForRecPhi = [&](Value *PV) {
1442     if (!EnableRecPhiAnalysis)
1443       return false;
1444     if (getUnderlyingObject(PV) == PN) {
1445       isRecursive = true;
1446       return true;
1447     }
1448     return false;
1449   };
1450 
1451   if (PV) {
1452     // If we have PhiValues then use it to get the underlying phi values.
1453     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1454     // If we have more phi values than the search depth then return MayAlias
1455     // conservatively to avoid compile time explosion. The worst possible case
1456     // is if both sides are PHI nodes. In which case, this is O(m x n) time
1457     // where 'm' and 'n' are the number of PHI sources.
1458     if (PhiValueSet.size() > MaxLookupSearchDepth)
1459       return AliasResult::MayAlias;
1460     // Add the values to V1Srcs
1461     for (Value *PV1 : PhiValueSet) {
1462       if (CheckForRecPhi(PV1))
1463         continue;
1464       V1Srcs.push_back(PV1);
1465     }
1466   } else {
1467     // If we don't have PhiInfo then just look at the operands of the phi itself
1468     // FIXME: Remove this once we can guarantee that we have PhiInfo always
1469     SmallPtrSet<Value *, 4> UniqueSrc;
1470     Value *OnePhi = nullptr;
1471     for (Value *PV1 : PN->incoming_values()) {
1472       if (isa<PHINode>(PV1)) {
1473         if (OnePhi && OnePhi != PV1) {
1474           // To control potential compile time explosion, we choose to be
1475           // conserviate when we have more than one Phi input.  It is important
1476           // that we handle the single phi case as that lets us handle LCSSA
1477           // phi nodes and (combined with the recursive phi handling) simple
1478           // pointer induction variable patterns.
1479           return AliasResult::MayAlias;
1480         }
1481         OnePhi = PV1;
1482       }
1483 
1484       if (CheckForRecPhi(PV1))
1485         continue;
1486 
1487       if (UniqueSrc.insert(PV1).second)
1488         V1Srcs.push_back(PV1);
1489     }
1490 
1491     if (OnePhi && UniqueSrc.size() > 1)
1492       // Out of an abundance of caution, allow only the trivial lcssa and
1493       // recursive phi cases.
1494       return AliasResult::MayAlias;
1495   }
1496 
1497   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1498   // value. This should only be possible in blocks unreachable from the entry
1499   // block, but return MayAlias just in case.
1500   if (V1Srcs.empty())
1501     return AliasResult::MayAlias;
1502 
1503   // If this PHI node is recursive, indicate that the pointer may be moved
1504   // across iterations. We can only prove NoAlias if different underlying
1505   // objects are involved.
1506   if (isRecursive)
1507     PNSize = LocationSize::beforeOrAfterPointer();
1508 
1509   // In the recursive alias queries below, we may compare values from two
1510   // different loop iterations. Keep track of visited phi blocks, which will
1511   // be used when determining value equivalence.
1512   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1513   auto _ = make_scope_exit([&]() {
1514     if (BlockInserted)
1515       VisitedPhiBBs.erase(PN->getParent());
1516   });
1517 
1518   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1519   // have been cached earlier may no longer be valid. Perform recursive queries
1520   // with a new AAQueryInfo.
1521   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1522   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1523 
1524   AliasResult Alias = getBestAAResults().alias(
1525       MemoryLocation(V1Srcs[0], PNSize), MemoryLocation(V2, V2Size), *UseAAQI);
1526 
1527   // Early exit if the check of the first PHI source against V2 is MayAlias.
1528   // Other results are not possible.
1529   if (Alias == AliasResult::MayAlias)
1530     return AliasResult::MayAlias;
1531   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1532   // remain valid to all elements and needs to conservatively return MayAlias.
1533   if (isRecursive && Alias != AliasResult::NoAlias)
1534     return AliasResult::MayAlias;
1535 
1536   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1537   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1538   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1539     Value *V = V1Srcs[i];
1540 
1541     AliasResult ThisAlias = getBestAAResults().alias(
1542         MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), *UseAAQI);
1543     Alias = MergeAliasResults(ThisAlias, Alias);
1544     if (Alias == AliasResult::MayAlias)
1545       break;
1546   }
1547 
1548   return Alias;
1549 }
1550 
1551 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1552 /// array references.
1553 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1554                                       const Value *V2, LocationSize V2Size,
1555                                       AAQueryInfo &AAQI) {
1556   // If either of the memory references is empty, it doesn't matter what the
1557   // pointer values are.
1558   if (V1Size.isZero() || V2Size.isZero())
1559     return AliasResult::NoAlias;
1560 
1561   // Strip off any casts if they exist.
1562   V1 = V1->stripPointerCastsForAliasAnalysis();
1563   V2 = V2->stripPointerCastsForAliasAnalysis();
1564 
1565   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1566   // value for undef that aliases nothing in the program.
1567   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1568     return AliasResult::NoAlias;
1569 
1570   // Are we checking for alias of the same value?
1571   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1572   // different iterations. We must therefore make sure that this is not the
1573   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1574   // happen by looking at the visited phi nodes and making sure they cannot
1575   // reach the value.
1576   if (isValueEqualInPotentialCycles(V1, V2))
1577     return AliasResult::MustAlias;
1578 
1579   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1580     return AliasResult::NoAlias; // Scalars cannot alias each other
1581 
1582   // Figure out what objects these things are pointing to if we can.
1583   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1584   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1585 
1586   // Null values in the default address space don't point to any object, so they
1587   // don't alias any other pointer.
1588   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1589     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1590       return AliasResult::NoAlias;
1591   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1592     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1593       return AliasResult::NoAlias;
1594 
1595   if (O1 != O2) {
1596     // If V1/V2 point to two different objects, we know that we have no alias.
1597     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1598       return AliasResult::NoAlias;
1599 
1600     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1601     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1602         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1603       return AliasResult::NoAlias;
1604 
1605     // Function arguments can't alias with things that are known to be
1606     // unambigously identified at the function level.
1607     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1608         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1609       return AliasResult::NoAlias;
1610 
1611     // If one pointer is the result of a call/invoke or load and the other is a
1612     // non-escaping local object within the same function, then we know the
1613     // object couldn't escape to a point where the call could return it.
1614     //
1615     // Note that if the pointers are in different functions, there are a
1616     // variety of complications. A call with a nocapture argument may still
1617     // temporary store the nocapture argument's value in a temporary memory
1618     // location if that memory location doesn't escape. Or it may pass a
1619     // nocapture value to other functions as long as they don't capture it.
1620     if (isEscapeSource(O1) &&
1621         AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1622       return AliasResult::NoAlias;
1623     if (isEscapeSource(O2) &&
1624         AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1625       return AliasResult::NoAlias;
1626   }
1627 
1628   // If the size of one access is larger than the entire object on the other
1629   // side, then we know such behavior is undefined and can assume no alias.
1630   bool NullIsValidLocation = NullPointerIsDefined(&F);
1631   if ((isObjectSmallerThan(
1632           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1633           TLI, NullIsValidLocation)) ||
1634       (isObjectSmallerThan(
1635           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1636           TLI, NullIsValidLocation)))
1637     return AliasResult::NoAlias;
1638 
1639   // If one the accesses may be before the accessed pointer, canonicalize this
1640   // by using unknown after-pointer sizes for both accesses. This is
1641   // equivalent, because regardless of which pointer is lower, one of them
1642   // will always came after the other, as long as the underlying objects aren't
1643   // disjoint. We do this so that the rest of BasicAA does not have to deal
1644   // with accesses before the base pointer, and to improve cache utilization by
1645   // merging equivalent states.
1646   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1647     V1Size = LocationSize::afterPointer();
1648     V2Size = LocationSize::afterPointer();
1649   }
1650 
1651   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1652   // for recursive queries. For this reason, this limit is chosen to be large
1653   // enough to be very rarely hit, while still being small enough to avoid
1654   // stack overflows.
1655   if (AAQI.Depth >= 512)
1656     return AliasResult::MayAlias;
1657 
1658   // Check the cache before climbing up use-def chains. This also terminates
1659   // otherwise infinitely recursive queries.
1660   AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1661   const bool Swapped = V1 > V2;
1662   if (Swapped)
1663     std::swap(Locs.first, Locs.second);
1664   const auto &Pair = AAQI.AliasCache.try_emplace(
1665       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1666   if (!Pair.second) {
1667     auto &Entry = Pair.first->second;
1668     if (!Entry.isDefinitive()) {
1669       // Remember that we used an assumption.
1670       ++Entry.NumAssumptionUses;
1671       ++AAQI.NumAssumptionUses;
1672     }
1673     // Cache contains sorted {V1,V2} pairs but we should return original order.
1674     auto Result = Entry.Result;
1675     Result.swap(Swapped);
1676     return Result;
1677   }
1678 
1679   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1680   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1681   AliasResult Result =
1682       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1683 
1684   auto It = AAQI.AliasCache.find(Locs);
1685   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1686   auto &Entry = It->second;
1687 
1688   // Check whether a NoAlias assumption has been used, but disproven.
1689   bool AssumptionDisproven =
1690       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1691   if (AssumptionDisproven)
1692     Result = AliasResult::MayAlias;
1693 
1694   // This is a definitive result now, when considered as a root query.
1695   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1696   Entry.Result = Result;
1697   // Cache contains sorted {V1,V2} pairs.
1698   Entry.Result.swap(Swapped);
1699   Entry.NumAssumptionUses = -1;
1700 
1701   // If the assumption has been disproven, remove any results that may have
1702   // been based on this assumption. Do this after the Entry updates above to
1703   // avoid iterator invalidation.
1704   if (AssumptionDisproven)
1705     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1706       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1707 
1708   // The result may still be based on assumptions higher up in the chain.
1709   // Remember it, so it can be purged from the cache later.
1710   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1711       Result != AliasResult::MayAlias)
1712     AAQI.AssumptionBasedResults.push_back(Locs);
1713   return Result;
1714 }
1715 
1716 AliasResult BasicAAResult::aliasCheckRecursive(
1717     const Value *V1, LocationSize V1Size,
1718     const Value *V2, LocationSize V2Size,
1719     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1720   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1721     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1722     if (Result != AliasResult::MayAlias)
1723       return Result;
1724   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1725     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1726     Result.swap();
1727     if (Result != AliasResult::MayAlias)
1728       return Result;
1729   }
1730 
1731   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1732     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1733     if (Result != AliasResult::MayAlias)
1734       return Result;
1735   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1736     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1737     Result.swap();
1738     if (Result != AliasResult::MayAlias)
1739       return Result;
1740   }
1741 
1742   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1743     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1744     if (Result != AliasResult::MayAlias)
1745       return Result;
1746   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1747     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1748     Result.swap();
1749     if (Result != AliasResult::MayAlias)
1750       return Result;
1751   }
1752 
1753   // If both pointers are pointing into the same object and one of them
1754   // accesses the entire object, then the accesses must overlap in some way.
1755   if (O1 == O2) {
1756     bool NullIsValidLocation = NullPointerIsDefined(&F);
1757     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1758         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1759          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1760       return AliasResult::PartialAlias;
1761   }
1762 
1763   return AliasResult::MayAlias;
1764 }
1765 
1766 /// Check whether two Values can be considered equivalent.
1767 ///
1768 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1769 /// they can not be part of a cycle in the value graph by looking at all
1770 /// visited phi nodes an making sure that the phis cannot reach the value. We
1771 /// have to do this because we are looking through phi nodes (That is we say
1772 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1773 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1774                                                   const Value *V2) {
1775   if (V != V2)
1776     return false;
1777 
1778   const Instruction *Inst = dyn_cast<Instruction>(V);
1779   if (!Inst)
1780     return true;
1781 
1782   if (VisitedPhiBBs.empty())
1783     return true;
1784 
1785   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1786     return false;
1787 
1788   // Make sure that the visited phis cannot reach the Value. This ensures that
1789   // the Values cannot come from different iterations of a potential cycle the
1790   // phi nodes could be involved in.
1791   for (auto *P : VisitedPhiBBs)
1792     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1793       return false;
1794 
1795   return true;
1796 }
1797 
1798 /// Computes the symbolic difference between two de-composed GEPs.
1799 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1800                                            const DecomposedGEP &SrcGEP) {
1801   DestGEP.Offset -= SrcGEP.Offset;
1802   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1803     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1804     // than a few variable indexes.
1805     bool Found = false;
1806     for (auto I : enumerate(DestGEP.VarIndices)) {
1807       VariableGEPIndex &Dest = I.value();
1808       if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
1809           !Dest.Val.hasSameCastsAs(Src.Val))
1810         continue;
1811 
1812       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1813       // goes to zero, remove the entry.
1814       if (Dest.Scale != Src.Scale) {
1815         Dest.Scale -= Src.Scale;
1816         Dest.IsNSW = false;
1817       } else {
1818         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1819       }
1820       Found = true;
1821       break;
1822     }
1823 
1824     // If we didn't consume this entry, add it to the end of the Dest list.
1825     if (!Found) {
1826       VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1827       DestGEP.VarIndices.push_back(Entry);
1828     }
1829   }
1830 }
1831 
1832 bool BasicAAResult::constantOffsetHeuristic(
1833     const DecomposedGEP &GEP, LocationSize MaybeV1Size,
1834     LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
1835   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1836       !MaybeV2Size.hasValue())
1837     return false;
1838 
1839   const uint64_t V1Size = MaybeV1Size.getValue();
1840   const uint64_t V2Size = MaybeV2Size.getValue();
1841 
1842   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1843 
1844   if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1845       Var0.Scale != -Var1.Scale ||
1846       Var0.Val.V->getType() != Var1.Val.V->getType())
1847     return false;
1848 
1849   // We'll strip off the Extensions of Var0 and Var1 and do another round
1850   // of GetLinearExpression decomposition. In the example above, if Var0
1851   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1852 
1853   LinearExpression E0 =
1854       GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1855   LinearExpression E1 =
1856       GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1857   if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1858       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1859     return false;
1860 
1861   // We have a hit - Var0 and Var1 only differ by a constant offset!
1862 
1863   // If we've been sext'ed then zext'd the maximum difference between Var0 and
1864   // Var1 is possible to calculate, but we're just interested in the absolute
1865   // minimum difference between the two. The minimum distance may occur due to
1866   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1867   // the minimum distance between %i and %i + 5 is 3.
1868   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1869   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1870   APInt MinDiffBytes =
1871     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1872 
1873   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1874   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1875   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1876   // V2Size can fit in the MinDiffBytes gap.
1877   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1878          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1879 }
1880 
1881 //===----------------------------------------------------------------------===//
1882 // BasicAliasAnalysis Pass
1883 //===----------------------------------------------------------------------===//
1884 
1885 AnalysisKey BasicAA::Key;
1886 
1887 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1888   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1889   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1890   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1891   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1892   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1893 }
1894 
1895 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1896   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1897 }
1898 
1899 char BasicAAWrapperPass::ID = 0;
1900 
1901 void BasicAAWrapperPass::anchor() {}
1902 
1903 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1904                       "Basic Alias Analysis (stateless AA impl)", true, true)
1905 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1906 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1907 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1908 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1909 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1910                     "Basic Alias Analysis (stateless AA impl)", true, true)
1911 
1912 FunctionPass *llvm::createBasicAAWrapperPass() {
1913   return new BasicAAWrapperPass();
1914 }
1915 
1916 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1917   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1918   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1919   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1920   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1921 
1922   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1923                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1924                                  &DTWP.getDomTree(),
1925                                  PVWP ? &PVWP->getResult() : nullptr));
1926 
1927   return false;
1928 }
1929 
1930 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1931   AU.setPreservesAll();
1932   AU.addRequiredTransitive<AssumptionCacheTracker>();
1933   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1934   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1935   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1936 }
1937 
1938 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1939   return BasicAAResult(
1940       F.getParent()->getDataLayout(), F,
1941       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1942       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1943 }
1944