1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/ConstantRange.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GetElementPtrTypeIterator.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Operator.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/User.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/KnownBits.h"
59 #include <cassert>
60 #include <cstdint>
61 #include <cstdlib>
62 #include <utility>
63 
64 #define DEBUG_TYPE "basicaa"
65 
66 using namespace llvm;
67 
68 /// Enable analysis of recursive PHI nodes.
69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
70                                           cl::init(true));
71 
/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This allows us to more aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C), which is
/// common enough to worry about.
76 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
77                                         cl::Hidden, cl::init(true));
78 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
79                                     cl::Hidden, cl::init(false));
80 
/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
84 STATISTIC(SearchLimitReached, "Number of times the limit to "
85                               "decompose GEPs is reached");
86 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
87 
88 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
89 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
90 /// careful with value equivalence. We use reachability to make sure a value
91 /// cannot be involved in a cycle.
92 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
93 
// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(). Both functions need to use the same search
// depth; otherwise the algorithm in aliasGEP will assert.
97 static const unsigned MaxLookupSearchDepth = 6;
98 
99 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
100                                FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, as it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses, in which case we don't
  // depend on them.
105   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
106       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
107       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
108     return true;
109 
110   // Otherwise this analysis result remains valid.
111   return false;
112 }
113 
114 //===----------------------------------------------------------------------===//
115 // Useful predicates
116 //===----------------------------------------------------------------------===//
117 
118 /// Returns true if the pointer is one which would have been considered an
119 /// escape by isNonEscapingLocalObject.
120 static bool isEscapeSource(const Value *V) {
121   if (isa<CallBase>(V))
122     return true;
123 
124   // The load case works because isNonEscapingLocalObject considers all
125   // stores to be escapes (it passes true for the StoreCaptures argument
126   // to PointerMayBeCaptured).
127   if (isa<LoadInst>(V))
128     return true;
129 
130   // The inttoptr case works because isNonEscapingLocalObject considers all
131   // means of converting or equating a pointer to an int (ptrtoint, ptr store
132   // which could be followed by an integer load, ptr<->int compare) as
133   // escaping, and objects located at well-known addresses via platform-specific
134   // means cannot be considered non-escaping local objects.
135   if (isa<IntToPtrInst>(V))
136     return true;
137 
138   return false;
139 }
140 
141 /// Returns the size of the object specified by V or UnknownSize if unknown.
142 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
143                               const TargetLibraryInfo &TLI,
144                               bool NullIsValidLoc,
145                               bool RoundToAlign = false) {
146   uint64_t Size;
147   ObjectSizeOpts Opts;
148   Opts.RoundToAlign = RoundToAlign;
149   Opts.NullIsUnknownSize = NullIsValidLoc;
150   if (getObjectSize(V, Size, DL, &TLI, Opts))
151     return Size;
152   return MemoryLocation::UnknownSize;
153 }
154 
155 /// Returns true if we can prove that the object specified by V is smaller than
156 /// Size.
157 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
158                                 const DataLayout &DL,
159                                 const TargetLibraryInfo &TLI,
160                                 bool NullIsValidLoc) {
161   // Note that the meanings of the "object" are slightly different in the
162   // following contexts:
163   //    c1: llvm::getObjectSize()
164   //    c2: llvm.objectsize() intrinsic
165   //    c3: isObjectSmallerThan()
166   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
167   // refers to the "entire object".
168   //
169   //  Consider this example:
170   //     char *p = (char*)malloc(100)
171   //     char *q = p+80;
172   //
173   //  In the context of c1 and c2, the "object" pointed by q refers to the
174   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
175   //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the 1st
  // parameter, then before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //    - either rewind the pointer q to the base address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer points
  //      to the base address of the object.
  //
  // We go with the 2nd option for simplicity.
187   if (!isIdentifiedObject(V))
188     return false;
189 
190   // This function needs to use the aligned object size because we allow
191   // reads a bit past the end given sufficient alignment.
192   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
193                                       /*RoundToAlign*/ true);
194 
195   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
196 }
197 
198 /// Return the minimal extent from \p V to the end of the underlying object,
199 /// assuming the result is used in an aliasing query. E.g., we do use the query
200 /// location size and the fact that null pointers cannot alias here.
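/// For example, a dereferenceable(16) pointer that cannot be freed, queried
/// with a precise 4-byte location, yields an extent of 16; queried with a
/// precise 32-byte location, it yields 32.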
201 static uint64_t getMinimalExtentFrom(const Value &V,
202                                      const LocationSize &LocSize,
203                                      const DataLayout &DL,
204                                      bool NullIsValidLoc) {
205   // If we have dereferenceability information we know a lower bound for the
206   // extent as accesses for a lower offset would be valid. We need to exclude
207   // the "or null" part if null is a valid pointer.
208   bool CanBeNull, CanBeFreed;
209   uint64_t DerefBytes =
210     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
211   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
212   DerefBytes = CanBeFreed ? 0 : DerefBytes;
213   // If queried with a precise location size, we assume that location size to be
214   // accessed, thus valid.
215   if (LocSize.isPrecise())
216     DerefBytes = std::max(DerefBytes, LocSize.getValue());
217   return DerefBytes;
218 }
219 
220 /// Returns true if we can prove that the object specified by V has size Size.
221 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
222                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
223   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
224   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
225 }
226 
227 //===----------------------------------------------------------------------===//
228 // CaptureInfo implementations
229 //===----------------------------------------------------------------------===//
230 
231 CaptureInfo::~CaptureInfo() = default;
232 
233 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
234                                                 const Instruction *I) {
235   return isNonEscapingLocalObject(Object, &IsCapturedCache);
236 }
237 
238 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
239                                                  const Instruction *I) {
240   if (!isIdentifiedFunctionLocal(Object))
241     return false;
242 
243   auto Iter = EarliestEscapes.insert({Object, nullptr});
244   if (Iter.second) {
245     Instruction *EarliestCapture = FindEarliestCapture(
246         Object, *const_cast<Function *>(I->getFunction()),
247         /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
248     if (EarliestCapture) {
249       auto Ins = Inst2Obj.insert({EarliestCapture, {}});
250       Ins.first->second.push_back(Object);
251     }
252     Iter.first->second = EarliestCapture;
253   }
254 
255   // No capturing instruction.
256   if (!Iter.first->second)
257     return true;
258 
259   return I != Iter.first->second &&
260          !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
261 }
262 
263 void EarliestEscapeInfo::removeInstruction(Instruction *I) {
264   auto Iter = Inst2Obj.find(I);
265   if (Iter != Inst2Obj.end()) {
266     for (const Value *Obj : Iter->second)
267       EarliestEscapes.erase(Obj);
268     Inst2Obj.erase(I);
269   }
270 }
271 
272 //===----------------------------------------------------------------------===//
273 // GetElementPtr Instruction Decomposition and Analysis
274 //===----------------------------------------------------------------------===//
275 
276 namespace {
277 /// Represents zext(sext(V)).
278 struct ExtendedValue {
279   const Value *V;
280   unsigned ZExtBits;
281   unsigned SExtBits;
282 
283   explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
284                          unsigned SExtBits = 0)
285       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
286 
287   unsigned getBitWidth() const {
288     return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
289   }
290 
291   ExtendedValue withValue(const Value *NewV) const {
292     return ExtendedValue(NewV, ZExtBits, SExtBits);
293   }
294 
295   ExtendedValue withZExtOfValue(const Value *NewV) const {
296     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
297                         NewV->getType()->getPrimitiveSizeInBits();
298     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
299     return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
300   }
301 
302   ExtendedValue withSExtOfValue(const Value *NewV) const {
303     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
304                         NewV->getType()->getPrimitiveSizeInBits();
305     // zext(sext(sext(NewV)))
306     return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
307   }
308 
309   APInt evaluateWith(APInt N) const {
310     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
311            "Incompatible bit width");
312     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
313     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
314     return N;
315   }
316 
317   bool canDistributeOver(bool NUW, bool NSW) const {
318     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
319     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
320     return (!ZExtBits || NUW) && (!SExtBits || NSW);
321   }
322 
323   bool hasSameExtensionsAs(const ExtendedValue &Other) const {
324     return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits;
325   }
326 };
327 
328 /// Represents zext(sext(V)) * Scale + Offset.
329 struct LinearExpression {
330   ExtendedValue Val;
331   APInt Scale;
332   APInt Offset;
333 
334   /// True if all operations in this expression are NSW.
335   bool IsNSW;
336 
337   LinearExpression(const ExtendedValue &Val, const APInt &Scale,
338                    const APInt &Offset, bool IsNSW)
339       : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
340 
341   LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
342     unsigned BitWidth = Val.getBitWidth();
343     Scale = APInt(BitWidth, 1);
344     Offset = APInt(BitWidth, 0);
345   }
346 };
347 }
348 
349 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
350 /// B are constant integers.
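///
/// For example, "add nsw i32 %x, 4" decomposes to V = %x, Scale = 1,
/// Offset = 4 (with IsNSW set), while a plain constant "i32 7" decomposes to
/// Scale = 0, Offset = 7.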
351 static LinearExpression GetLinearExpression(
352     const ExtendedValue &Val,  const DataLayout &DL, unsigned Depth,
353     AssumptionCache *AC, DominatorTree *DT) {
354   // Limit our recursion depth.
355   if (Depth == 6)
356     return Val;
357 
358   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
359     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
360                             Val.evaluateWith(Const->getValue()), true);
361 
362   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
363     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
364       APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and it is treated as being
      // both nuw and nsw.
367       bool NUW = true, NSW = true;
368       if (isa<OverflowingBinaryOperator>(BOp)) {
369         NUW &= BOp->hasNoUnsignedWrap();
370         NSW &= BOp->hasNoSignedWrap();
371       }
372       if (!Val.canDistributeOver(NUW, NSW))
373         return Val;
374 
375       LinearExpression E(Val);
376       switch (BOp->getOpcode()) {
377       default:
378         // We don't understand this instruction, so we can't decompose it any
379         // further.
380         return Val;
381       case Instruction::Or:
382         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
383         // analyze it.
384         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
385                                BOp, DT))
386           return Val;
387 
388         LLVM_FALLTHROUGH;
389       case Instruction::Add: {
390         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
391                                 Depth + 1, AC, DT);
392         E.Offset += RHS;
393         E.IsNSW &= NSW;
394         break;
395       }
396       case Instruction::Sub: {
397         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
398                                 Depth + 1, AC, DT);
399         E.Offset -= RHS;
400         E.IsNSW &= NSW;
401         break;
402       }
403       case Instruction::Mul: {
404         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
405                                 Depth + 1, AC, DT);
406         E.Offset *= RHS;
407         E.Scale *= RHS;
408         E.IsNSW &= NSW;
409         break;
410       }
411       case Instruction::Shl:
412         // We're trying to linearize an expression of the kind:
413         //   shl i8 -128, 36
414         // where the shift count exceeds the bitwidth of the type.
415         // We can't decompose this further (the expression would return
416         // a poison value).
417         if (RHS.getLimitedValue() > Val.getBitWidth())
418           return Val;
419 
420         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
421                                 Depth + 1, AC, DT);
422         E.Offset <<= RHS.getLimitedValue();
423         E.Scale <<= RHS.getLimitedValue();
424         E.IsNSW &= NSW;
425         break;
426       }
427       return E;
428     }
429   }
430 
431   if (isa<ZExtInst>(Val.V))
432     return GetLinearExpression(
433         Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
434         DL, Depth + 1, AC, DT);
435 
436   if (isa<SExtInst>(Val.V))
437     return GetLinearExpression(
438         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
439         DL, Depth + 1, AC, DT);
440 
441   return Val;
442 }
443 
/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
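///
/// For example, with PointerSize = 32 and a 64-bit Offset of
/// 0x00000000FFFFFFFF, shifting left by 32 and arithmetic-shifting back
/// yields -1, matching the 32-bit two's complement wrap-around.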
449 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
450   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
451   unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
452   return (Offset << ShiftBits).ashr(ShiftBits);
453 }
454 
455 static unsigned getMaxPointerSize(const DataLayout &DL) {
456   unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
457   if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
458   if (DoubleCalcBits) MaxPointerSize *= 2;
459 
460   return MaxPointerSize;
461 }
462 
463 namespace {
464 // A linear transformation of a Value; this class represents
465 // ZExt(SExt(V, SExtBits), ZExtBits) * Scale.
466 struct VariableGEPIndex {
467   ExtendedValue Val;
468   APInt Scale;
469 
470   // Context instruction to use when querying information about this index.
471   const Instruction *CxtI;
472 
473   /// True if all operations in this expression are NSW.
474   bool IsNSW;
475 
476   void dump() const {
477     print(dbgs());
478     dbgs() << "\n";
479   }
480   void print(raw_ostream &OS) const {
481     OS << "(V=" << Val.V->getName()
482        << ", zextbits=" << Val.ZExtBits
483        << ", sextbits=" << Val.SExtBits
484        << ", scale=" << Scale << ")";
485   }
486 };
487 }
488 
489 // Represents the internal structure of a GEP, decomposed into a base pointer,
490 // constant offsets, and variable scaled indices.
491 struct BasicAAResult::DecomposedGEP {
492   // Base pointer of the GEP
493   const Value *Base;
494   // Total constant offset from base.
495   APInt Offset;
496   // Scaled variable (non-constant) indices.
497   SmallVector<VariableGEPIndex, 4> VarIndices;
  // Is the GEP index scale a compile-time constant?
499   bool HasCompileTimeConstantScale;
500   // Are all operations inbounds GEPs or non-indexing operations?
501   // (None iff expression doesn't involve any geps)
502   Optional<bool> InBounds;
503 
504   void dump() const {
505     print(dbgs());
506     dbgs() << "\n";
507   }
508   void print(raw_ostream &OS) const {
509     OS << "(DecomposedGEP Base=" << Base->getName()
510        << ", Offset=" << Offset
511        << ", VarIndices=[";
512     for (size_t i = 0; i < VarIndices.size(); i++) {
513       if (i != 0)
514         OS << ", ";
515       VarIndices[i].print(OS);
516     }
517     OS << "], HasCompileTimeConstantScale=" << HasCompileTimeConstantScale
518        << ")";
519   }
520 };
521 
522 
523 /// If V is a symbolic pointer expression, decompose it into a base pointer
524 /// with a constant offset and a number of scaled symbolic offsets.
525 ///
526 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
527 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
528 /// specified amount, but which may have other unrepresented high bits. As
529 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
530 ///
531 /// This function is capable of analyzing everything that getUnderlyingObject
532 /// can look through. To be able to do that getUnderlyingObject and
533 /// DecomposeGEPExpression must use the same search depth
534 /// (MaxLookupSearchDepth).
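///
/// For example, "getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0,
/// i64 %i" decomposes into Base = %arr, Offset = 0, and a single variable
/// index {V = %i, Scale = 4}.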
535 BasicAAResult::DecomposedGEP
536 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
537                                       AssumptionCache *AC, DominatorTree *DT) {
538   // Limit recursion depth to limit compile time in crazy cases.
539   unsigned MaxLookup = MaxLookupSearchDepth;
540   SearchTimes++;
541   const Instruction *CxtI = dyn_cast<Instruction>(V);
542 
543   unsigned MaxPointerSize = getMaxPointerSize(DL);
544   DecomposedGEP Decomposed;
545   Decomposed.Offset = APInt(MaxPointerSize, 0);
546   Decomposed.HasCompileTimeConstantScale = true;
547   do {
548     // See if this is a bitcast or GEP.
549     const Operator *Op = dyn_cast<Operator>(V);
550     if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
552       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
553         if (!GA->isInterposable()) {
554           V = GA->getAliasee();
555           continue;
556         }
557       }
558       Decomposed.Base = V;
559       return Decomposed;
560     }
561 
562     if (Op->getOpcode() == Instruction::BitCast ||
563         Op->getOpcode() == Instruction::AddrSpaceCast) {
564       V = Op->getOperand(0);
565       continue;
566     }
567 
568     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
569     if (!GEPOp) {
570       if (const auto *PHI = dyn_cast<PHINode>(V)) {
571         // Look through single-arg phi nodes created by LCSSA.
572         if (PHI->getNumIncomingValues() == 1) {
573           V = PHI->getIncomingValue(0);
574           continue;
575         }
576       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes, such as returning an aliasing pointer. Because some
        // analyses may assume that a nocapture pointer is not returned from a
        // special intrinsic (since the function would have to be marked with
        // the returned attribute), it is crucial to use this function, which
        // is kept in sync with CaptureTracking. Not using it may cause weird
        // miscompilations where two aliasing pointers are assumed to be
        // noalias.
586         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
587           V = RP;
588           continue;
589         }
590       }
591 
592       Decomposed.Base = V;
593       return Decomposed;
594     }
595 
596     // Track whether we've seen at least one in bounds gep, and if so, whether
597     // all geps parsed were in bounds.
598     if (Decomposed.InBounds == None)
599       Decomposed.InBounds = GEPOp->isInBounds();
600     else if (!GEPOp->isInBounds())
601       Decomposed.InBounds = false;
602 
603     assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
604 
    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
607     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
608       Decomposed.Base = V;
609       Decomposed.HasCompileTimeConstantScale = false;
610       return Decomposed;
611     }
612 
613     unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into Offset/VarIndices.
615     gep_type_iterator GTI = gep_type_begin(GEPOp);
616     unsigned PointerSize = DL.getPointerSizeInBits(AS);
617     // Assume all GEP operands are constants until proven otherwise.
618     bool GepHasConstantOffset = true;
619     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
620          I != E; ++I, ++GTI) {
621       const Value *Index = *I;
622       // Compute the (potentially symbolic) offset in bytes for this index.
623       if (StructType *STy = GTI.getStructTypeOrNull()) {
624         // For a struct, add the member offset.
625         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
626         if (FieldNo == 0)
627           continue;
628 
629         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
630         continue;
631       }
632 
633       // For an array/pointer, add the element offset, explicitly scaled.
634       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
635         if (CIdx->isZero())
636           continue;
637         Decomposed.Offset +=
638             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
639             CIdx->getValue().sextOrTrunc(MaxPointerSize);
640         continue;
641       }
642 
643       GepHasConstantOffset = false;
644 
645       APInt Scale(MaxPointerSize,
646                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
647       // If the integer type is smaller than the pointer size, it is implicitly
648       // sign extended to pointer size.
649       unsigned Width = Index->getType()->getIntegerBitWidth();
650       unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
651       LinearExpression LE = GetLinearExpression(
652           ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
653 
654       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
655       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
656 
      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
660       //
661       // FIXME: C1*Scale and the other operations in the decomposed
662       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
663       // possibility.
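      //
      // For example (illustrative): with a 64-bit calculation width, C2 = 2^62
      // and Scale = 8, C2*Scale wraps; in that case we reset LE to the raw
      // index (C1 = 1, C2 = 0) below instead of using the decomposition.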
664       bool Overflow;
665       APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
666                            .smul_ov(Scale, Overflow);
667       if (Overflow) {
668         LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
669       } else {
670         Decomposed.Offset += ScaledOffset;
671         Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
672       }
673 
674       // If we already had an occurrence of this index variable, merge this
675       // scale into it.  For example, we want to handle:
676       //   A[x][x] -> x*16 + x*4 -> x*20
677       // This also ensures that 'x' only appears in the index list once.
678       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
679         if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
680             Decomposed.VarIndices[i].Val.hasSameExtensionsAs(LE.Val)) {
681           Scale += Decomposed.VarIndices[i].Scale;
682           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
683           break;
684         }
685       }
686 
687       // Make sure that we have a scale that makes sense for this target's
688       // pointer size.
689       Scale = adjustToPointerSize(Scale, PointerSize);
690 
691       if (!!Scale) {
692         VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
693         Decomposed.VarIndices.push_back(Entry);
694       }
695     }
696 
697     // Take care of wrap-arounds
698     if (GepHasConstantOffset)
699       Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
700 
701     // Analyze the base pointer next.
702     V = GEPOp->getOperand(0);
703   } while (--MaxLookup);
704 
705   // If the chain of expressions is too deep, just return early.
706   Decomposed.Base = V;
707   SearchLimitReached++;
708   return Decomposed;
709 }
710 
711 /// Returns whether the given pointer value points to memory that is local to
712 /// the function, with global constants being considered local to all
713 /// functions.
714 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
715                                            AAQueryInfo &AAQI, bool OrLocal) {
716   assert(Visited.empty() && "Visited must be cleared after use!");
717 
718   unsigned MaxLookup = 8;
719   SmallVector<const Value *, 16> Worklist;
720   Worklist.push_back(Loc.Ptr);
721   do {
722     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
723     if (!Visited.insert(V).second) {
724       Visited.clear();
725       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
726     }
727 
728     // An alloca instruction defines local memory.
729     if (OrLocal && isa<AllocaInst>(V))
730       continue;
731 
732     // A global constant counts as local memory for our purposes.
733     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
734       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
735       // global to be marked constant in some modules and non-constant in
736       // others.  GV may even be a declaration, not a definition.
737       if (!GV->isConstant()) {
738         Visited.clear();
739         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
740       }
741       continue;
742     }
743 
744     // If both select values point to local memory, then so does the select.
745     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
746       Worklist.push_back(SI->getTrueValue());
747       Worklist.push_back(SI->getFalseValue());
748       continue;
749     }
750 
751     // If all values incoming to a phi node point to local memory, then so does
752     // the phi.
753     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
754       // Don't bother inspecting phi nodes with many operands.
755       if (PN->getNumIncomingValues() > MaxLookup) {
756         Visited.clear();
757         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
758       }
759       append_range(Worklist, PN->incoming_values());
760       continue;
761     }
762 
763     // Otherwise be conservative.
764     Visited.clear();
765     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
766   } while (!Worklist.empty() && --MaxLookup);
767 
768   Visited.clear();
769   return Worklist.empty();
770 }
771 
772 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
773   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
774   return II && II->getIntrinsicID() == IID;
775 }
776 
777 /// Returns the behavior when calling the given call site.
778 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
779   if (Call->doesNotAccessMemory())
780     // Can't do better than this.
781     return FMRB_DoesNotAccessMemory;
782 
783   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
784 
785   // If the callsite knows it only reads memory, don't return worse
786   // than that.
787   if (Call->onlyReadsMemory())
788     Min = FMRB_OnlyReadsMemory;
789   else if (Call->doesNotReadMemory())
790     Min = FMRB_OnlyWritesMemory;
791 
792   if (Call->onlyAccessesArgMemory())
793     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
794   else if (Call->onlyAccessesInaccessibleMemory())
795     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
796   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
797     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
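
  // For example (given the FMRB bit encoding), a readonly callsite that is
  // also argmemonly intersects to FMRB_OnlyReadsArgumentPointees.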
798 
799   // If the call has operand bundles then aliasing attributes from the function
800   // it calls do not directly apply to the call.  This can be made more precise
801   // in the future.
802   if (!Call->hasOperandBundles())
803     if (const Function *F = Call->getCalledFunction())
804       Min =
805           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
806 
807   return Min;
808 }
809 
810 /// Returns the behavior when calling the given function. For use when the call
811 /// site is not known.
812 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
813   // If the function declares it doesn't access memory, we can't do better.
814   if (F->doesNotAccessMemory())
815     return FMRB_DoesNotAccessMemory;
816 
817   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
818 
819   // If the function declares it only reads memory, go with that.
820   if (F->onlyReadsMemory())
821     Min = FMRB_OnlyReadsMemory;
822   else if (F->doesNotReadMemory())
823     Min = FMRB_OnlyWritesMemory;
824 
825   if (F->onlyAccessesArgMemory())
826     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
827   else if (F->onlyAccessesInaccessibleMemory())
828     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
829   else if (F->onlyAccessesInaccessibleMemOrArgMem())
830     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
831 
832   return Min;
833 }
834 
/// Returns true if this is a writeonly (i.e., Mod-only) parameter.
836 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
837                              const TargetLibraryInfo &TLI) {
838   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
839     return true;
840 
841   // We can bound the aliasing properties of memset_pattern16 just as we can
842   // for memcpy/memset.  This is particularly important because the
843   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
844   // whenever possible.
845   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
846   // attributes.
847   LibFunc F;
848   if (Call->getCalledFunction() &&
849       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
850       F == LibFunc_memset_pattern16 && TLI.has(F))
851     if (ArgIdx == 0)
852       return true;
853 
854   // TODO: memset_pattern4, memset_pattern8
855   // TODO: _chk variants
856   // TODO: strcmp, strcpy
857 
858   return false;
859 }
860 
861 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
862                                            unsigned ArgIdx) {
863   // Checking for known builtin intrinsics and target library functions.
864   if (isWriteOnlyParam(Call, ArgIdx, TLI))
865     return ModRefInfo::Mod;
866 
867   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
868     return ModRefInfo::Ref;
869 
870   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
871     return ModRefInfo::NoModRef;
872 
873   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
874 }
875 
876 #ifndef NDEBUG
877 static const Function *getParent(const Value *V) {
878   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
879     if (!inst->getParent())
880       return nullptr;
881     return inst->getParent()->getParent();
882   }
883 
884   if (const Argument *arg = dyn_cast<Argument>(V))
885     return arg->getParent();
886 
887   return nullptr;
888 }
889 
890 static bool notDifferentParent(const Value *O1, const Value *O2) {
891 
892   const Function *F1 = getParent(O1);
893   const Function *F2 = getParent(O2);
894 
895   return !F1 || !F2 || F1 == F2;
896 }
897 #endif
898 
899 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
900                                  const MemoryLocation &LocB,
901                                  AAQueryInfo &AAQI) {
902   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
903          "BasicAliasAnalysis doesn't support interprocedural queries.");
904   return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
905 }
906 
907 /// Checks to see if the specified callsite can clobber the specified memory
908 /// object.
909 ///
910 /// Since we only look at local properties of this function, we really can't
911 /// say much about this query.  We do, however, use simple "address taken"
912 /// analysis on local objects.
913 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
914                                         const MemoryLocation &Loc,
915                                         AAQueryInfo &AAQI) {
916   assert(notDifferentParent(Call, Loc.Ptr) &&
917          "AliasAnalysis query involving multiple functions!");
918 
919   const Value *Object = getUnderlyingObject(Loc.Ptr);
920 
921   // Calls marked 'tail' cannot read or write allocas from the current frame
922   // because the current frame might be destroyed by the time they run. However,
923   // a tail call may use an alloca with byval. Calling with byval copies the
924   // contents of the alloca into argument registers or stack slots, so there is
925   // no lifetime issue.
926   if (isa<AllocaInst>(Object))
927     if (const CallInst *CI = dyn_cast<CallInst>(Call))
928       if (CI->isTailCall() &&
929           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
930         return ModRefInfo::NoModRef;
931 
932   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
933   // modify them even though the alloca is not escaped.
934   if (auto *AI = dyn_cast<AllocaInst>(Object))
935     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
936       return ModRefInfo::Mod;
937 
  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument and doesn't itself capture it.
941   if (!isa<Constant>(Object) && Call != Object &&
942       AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
943 
    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
946     ModRefInfo Result = ModRefInfo::NoModRef;
947     bool IsMustAlias = true;
948 
949     unsigned OperandNo = 0;
950     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
951          CI != CE; ++CI, ++OperandNo) {
952       // Only look at the no-capture or byval pointer arguments.  If this
953       // pointer were passed to arguments that were neither of these, then it
954       // couldn't be no-capture.
955       if (!(*CI)->getType()->isPointerTy() ||
956           (!Call->doesNotCapture(OperandNo) &&
957            OperandNo < Call->getNumArgOperands() &&
958            !Call->isByValArgument(OperandNo)))
959         continue;
960 
961       // Call doesn't access memory through this operand, so we don't care
962       // if it aliases with Object.
963       if (Call->doesNotAccessMemory(OperandNo))
964         continue;
965 
966       // If this is a no-capture pointer argument, see if we can tell that it
967       // is impossible to alias the pointer we're checking.
968       AliasResult AR = getBestAAResults().alias(
969           MemoryLocation::getBeforeOrAfter(*CI),
970           MemoryLocation::getBeforeOrAfter(Object), AAQI);
971       if (AR != AliasResult::MustAlias)
972         IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
978       if (Call->onlyReadsMemory(OperandNo)) {
979         Result = setRef(Result);
980         continue;
981       }
      // Operand aliases 'Object', but the call only writes into it.
983       if (Call->doesNotReadMemory(OperandNo)) {
984         Result = setMod(Result);
985         continue;
986       }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
990       Result = ModRefInfo::ModRef;
991       break;
992     }
993 
    // If no operand aliases, reset the Must bit. It is added back below if at
    // least one operand aliases and all aliases found are MustAlias.
996     if (isNoModRef(Result))
997       IsMustAlias = false;
998 
999     // Early return if we improved mod ref information
1000     if (!isModAndRefSet(Result)) {
1001       if (isNoModRef(Result))
1002         return ModRefInfo::NoModRef;
1003       return IsMustAlias ? setMust(Result) : clearMust(Result);
1004     }
1005   }
1006 
1007   // If the call is malloc/calloc like, we can assume that it doesn't
1008   // modify any IR visible value.  This is only valid because we assume these
1009   // routines do not read values visible in the IR.  TODO: Consider special
1010   // casing realloc and strdup routines which access only their arguments as
1011   // well.  Or alternatively, replace all of this with inaccessiblememonly once
1012   // that's implemented fully.
1013   if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fall back to the generic handling below.
1016     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
1017                                  AAQI) == AliasResult::NoAlias)
1018       return ModRefInfo::NoModRef;
1019   }
1020 
1021   // The semantics of memcpy intrinsics either exactly overlap or do not
1022   // overlap, i.e., source and destination of any given memcpy are either
1023   // no-alias or must-alias.
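  //
  // For example, if Loc must-aliases the source but is no-alias with the
  // destination, the memcpy only reads Loc and we return Ref.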
1024   if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
1025     AliasResult SrcAA =
1026         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
1027     AliasResult DestAA =
1028         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
1029     // It's also possible for Loc to alias both src and dest, or neither.
1030     ModRefInfo rv = ModRefInfo::NoModRef;
1031     if (SrcAA != AliasResult::NoAlias)
1032       rv = setRef(rv);
1033     if (DestAA != AliasResult::NoAlias)
1034       rv = setMod(rv);
1035     return rv;
1036   }
1037 
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained, but they never mod any particular memory
  // location.
1041   //
1042   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1043   // heap state at the point the guard is issued needs to be consistent in case
1044   // the guard invokes the "deopt" continuation.
1045   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
1046     return ModRefInfo::Ref;
1047   // The same applies to deoptimize which is essentially a guard(false).
1048   if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
1049     return ModRefInfo::Ref;
1050 
1051   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1052   // writing so that proper control dependencies are maintained but they never
1053   // mod any particular memory location visible to the IR.
1054   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1055   // intrinsic is now modeled as reading memory. This prevents hoisting the
1056   // invariant.start intrinsic over stores. Consider:
1057   // *ptr = 40;
1058   // *ptr = 50;
1059   // invariant_start(ptr)
1060   // int val = *ptr;
1061   // print(val);
1062   //
1063   // This cannot be transformed to:
1064   //
1065   // *ptr = 40;
1066   // invariant_start(ptr)
1067   // *ptr = 50;
1068   // int val = *ptr;
1069   // print(val);
1070   //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
1074   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1075     return ModRefInfo::Ref;
1076 
  // The AAResultBase base class has some smarts, let's use them.
1078   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
1079 }
1080 
1081 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1082                                         const CallBase *Call2,
1083                                         AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained, but they never mod any particular memory
  // location.
1087   //
1088   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1089   // heap state at the point the guard is issued needs to be consistent in case
1090   // the guard invokes the "deopt" continuation.
1091 
1092   // NB! This function is *not* commutative, so we special case two
1093   // possibilities for guard intrinsics.
1094 
1095   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1096     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1097                ? ModRefInfo::Ref
1098                : ModRefInfo::NoModRef;
1099 
1100   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1101     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1102                ? ModRefInfo::Mod
1103                : ModRefInfo::NoModRef;
1104 
  // The AAResultBase base class has some smarts, let's use them.
1106   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1107 }
1108 
/// Return true if we know V to be the base address of the corresponding memory
/// object.  This implies that any address less than V must be out of bounds
/// for the underlying object.  Note that just being isIdentifiedObject() is
/// not enough - for example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t. the actual underlying object.
1114 static bool isBaseOfObject(const Value *V) {
1115   // TODO: We can handle other cases here
1116   // 1) For GC languages, arguments to functions are often required to be
1117   //    base pointers.
1118   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1119   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1120 }
1121 
1122 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1123 /// another pointer.
1124 ///
1125 /// We know that V1 is a GEP, but we don't know anything about V2.
1126 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1127 /// V2.
1128 AliasResult BasicAAResult::aliasGEP(
1129     const GEPOperator *GEP1, LocationSize V1Size,
1130     const Value *V2, LocationSize V2Size,
1131     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1132   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1133     // TODO: This limitation exists for compile-time reasons. Relax it if we
1134     // can avoid exponential pathological cases.
1135     if (!isa<GEPOperator>(V2))
1136       return AliasResult::MayAlias;
1137 
1138     // If both accesses have unknown size, we can only check whether the base
1139     // objects don't alias.
1140     AliasResult BaseAlias = getBestAAResults().alias(
1141         MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1142         MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1143     return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1144                                              : AliasResult::MayAlias;
1145   }
1146 
1147   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1148   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1149 
  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
1152   if (!DecompGEP1.HasCompileTimeConstantScale ||
1153       !DecompGEP2.HasCompileTimeConstantScale)
1154     return AliasResult::MayAlias;
1155 
1156   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1157          "DecomposeGEPExpression returned a result different from "
1158          "getUnderlyingObject");
1159 
1160   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1161   // symbolic difference.
1162   subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1163 
1164   // If an inbounds GEP would have to start from an out of bounds address
1165   // for the two to alias, then we can assume noalias.
1166   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1167       V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1168       isBaseOfObject(DecompGEP2.Base))
1169     return AliasResult::NoAlias;
1170 
1171   if (isa<GEPOperator>(V2)) {
1172     // Symmetric case to above.
1173     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1174         V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1175         isBaseOfObject(DecompGEP1.Base))
1176       return AliasResult::NoAlias;
1177   }
1178 
1179   // For GEPs with identical offsets, we can preserve the size and AAInfo
1180   // when performing the alias check on the underlying objects.
1181   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1182     return getBestAAResults().alias(
1183         MemoryLocation(UnderlyingV1, V1Size),
1184         MemoryLocation(UnderlyingV2, V2Size), AAQI);
1185 
1186   // Do the base pointers alias?
1187   AliasResult BaseAlias = getBestAAResults().alias(
1188       MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1189       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1190 
  // If we get a No or May, then return it immediately; no amount of analysis
  // will improve this situation.
1193   if (BaseAlias != AliasResult::MustAlias) {
1194     assert(BaseAlias == AliasResult::NoAlias ||
1195            BaseAlias == AliasResult::MayAlias);
1196     return BaseAlias;
1197   }
1198 
1199   // If there is a constant difference between the pointers, but the difference
1200   // is less than the size of the associated memory object, then we know
1201   // that the objects are partially overlapping.  If the difference is
1202   // greater, we know they do not overlap.
1203   if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1204     APInt &Off = DecompGEP1.Offset;
1205 
1206     // Initialize for Off >= 0 (V2 <= GEP1) case.
1207     const Value *LeftPtr = V2;
1208     const Value *RightPtr = GEP1;
1209     LocationSize VLeftSize = V2Size;
1210     LocationSize VRightSize = V1Size;
1211     const bool Swapped = Off.isNegative();
1212 
1213     if (Swapped) {
1214       // Swap if we have the situation where:
1215       // +                +
1216       // | BaseOffset     |
1217       // ---------------->|
1218       // |-->V1Size       |-------> V2Size
1219       // GEP1             V2
1220       std::swap(LeftPtr, RightPtr);
1221       std::swap(VLeftSize, VRightSize);
1222       Off = -Off;
1223     }
1224 
1225     if (VLeftSize.hasValue()) {
1226       const uint64_t LSize = VLeftSize.getValue();
1227       if (Off.ult(LSize)) {
1228         // Conservatively drop processing if a phi was visited and/or offset is
1229         // too big.
1230         AliasResult AR = AliasResult::PartialAlias;
1231         if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1232             (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by the right pointer is nested. Save the offset
          // in the cache. Note that the offset was originally computed as
          // GEP1-V2, but AliasResult contains the shift that represents
          // GEP1+Offset=V2.
1236           AR.setOffset(-Off.getSExtValue());
1237           AR.swap(Swapped);
1238         }
1239         return AR;
1240       }
1241       return AliasResult::NoAlias;
1242     }
1243   }
1244 
1245   if (!DecompGEP1.VarIndices.empty()) {
1246     APInt GCD;
1247     bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
1248     bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
1249     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1250       APInt Scale = DecompGEP1.VarIndices[i].Scale;
1251       APInt ScaleForGCD = DecompGEP1.VarIndices[i].Scale;
1252       if (!DecompGEP1.VarIndices[i].IsNSW)
1253         ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1254                                           Scale.countTrailingZeros());
1255 
1256       if (i == 0)
1257         GCD = ScaleForGCD.abs();
1258       else
1259         GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1260 
1261       if (AllNonNegative || AllNonPositive) {
1262         // If the Value could change between cycles, then any reasoning about
1263         // the Value this cycle may not hold in the next cycle. We'll just
1264         // give up if we can't determine conditions that hold for every cycle:
1265         const Value *V = DecompGEP1.VarIndices[i].Val.V;
1266         const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;
1267 
1268         KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
1269         bool SignKnownZero = Known.isNonNegative();
1270         bool SignKnownOne = Known.isNegative();
1271 
1272         // Zero-extension widens the variable, and so forces the sign
1273         // bit to zero.
1274         bool IsZExt =
1275             DecompGEP1.VarIndices[i].Val.ZExtBits > 0 || isa<ZExtInst>(V);
1276         SignKnownZero |= IsZExt;
1277         SignKnownOne &= !IsZExt;
1278 
1279         AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1280                           (SignKnownOne && Scale.isNonPositive());
1281         AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1282                           (SignKnownOne && Scale.isNonNegative());
1283       }
1284     }
1285 
1286     // We now have accesses at two offsets from the same base:
1287     //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1288     //  2. 0 with size V2Size
1289     // Using arithmetic modulo GCD, the accesses are at
1290     // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1291     // into the range [V2Size..GCD), then we know they cannot overlap.
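    //
    // For example, with GCD = 8, Offset = 14, V1Size = 2 and V2Size = 4:
    // ModOffset = 6, so the first access occupies [6, 8) modulo 8 and the
    // second [0, 4), and they cannot overlap.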
1292     APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1293     if (ModOffset.isNegative())
1294       ModOffset += GCD; // We want mod, not rem.
1295     if (V1Size.hasValue() && V2Size.hasValue() &&
1296         ModOffset.uge(V2Size.getValue()) &&
1297         (GCD - ModOffset).uge(V1Size.getValue()))
1298       return AliasResult::NoAlias;
1299 
1300     // If we know all the variables are non-negative, then the total offset is
1301     // also non-negative and >= DecompGEP1.Offset. We have the following layout:
    // [0, V2Size) ... [TotalOffset, TotalOffset+V1Size]
1303     // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
1304     if (AllNonNegative && V2Size.hasValue() &&
1305         DecompGEP1.Offset.uge(V2Size.getValue()))
1306       return AliasResult::NoAlias;
1307     // Similarly, if the variables are non-positive, then the total offset is
1308     // also non-positive and <= DecompGEP1.Offset. We have the following layout:
1309     // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1310     // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
1311     if (AllNonPositive && V1Size.hasValue() &&
1312         (-DecompGEP1.Offset).uge(V1Size.getValue()))
1313       return AliasResult::NoAlias;
1314 
1315     if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine constraints on VarIndex, the sum of the variable
      // index terms. MinAbsVarIndex is such that:
      //    VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex
      // and VarIndexRange is such that:
      //    VarIndexRange.contains(VarIndex)
1320       Optional<APInt> MinAbsVarIndex;
1321       Optional<ConstantRange> VarIndexRange;
1322       if (DecompGEP1.VarIndices.size() == 1) {
1323         // VarIndex = Scale*V.
1324         const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1325         if (isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1326           // If V != 0 then abs(VarIndex) >= abs(Scale).
1327           MinAbsVarIndex = Var.Scale.abs();
1328         }
1329         ConstantRange R = computeConstantRange(Var.Val.V, true, &AC, Var.CxtI);
1330         if (!R.isFullSet() && !R.isEmptySet()) {
1331           if (Var.Val.SExtBits)
1332             R = R.signExtend(R.getBitWidth() + Var.Val.SExtBits);
1333           if (Var.Val.ZExtBits)
1334             R = R.zeroExtend(R.getBitWidth() + Var.Val.ZExtBits);
1335           VarIndexRange = R.sextOrTrunc(Var.Scale.getBitWidth())
1336                               .multiply(ConstantRange(Var.Scale));
1337         }
1338       } else if (DecompGEP1.VarIndices.size() == 2) {
1339         // VarIndex = Scale*V0 + (-Scale)*V1.
1340         // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1341         // Check that VisitedPhiBBs is empty, to avoid reasoning about
1342         // inequality of values across loop iterations.
1343         const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1344         const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1345         if (Var0.Scale == -Var1.Scale &&
1346             Var0.Val.hasSameExtensionsAs(Var1.Val) && VisitedPhiBBs.empty() &&
1347             isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1348                             DT))
1349           MinAbsVarIndex = Var0.Scale.abs();
1350       }
1351 
1352       if (MinAbsVarIndex) {
1353         // The constant offset will have added at least +/-MinAbsVarIndex to it.
1354         APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1355         APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1356         // We know that Offset <= OffsetLo || Offset >= OffsetHi
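        // For example (illustrative values): Offset = 2 and MinAbsVarIndex = 8
        // give OffsetLo = -6 and OffsetHi = 10, so the accesses cannot
        // overlap whenever V1Size <= 6 and V2Size <= 10.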
1357         if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1358             OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1359           return AliasResult::NoAlias;
1360       }
1361 
1362       if (VarIndexRange) {
1363         ConstantRange OffsetRange =
1364             VarIndexRange->add(ConstantRange(DecompGEP1.Offset));
1365 
1366         // We know that Offset >= MinOffset.
1367         // (MinOffset >= V2Size) => (Offset >= V2Size) => NoAlias.
1368         if (OffsetRange.getSignedMin().sge(V2Size.getValue()))
1369           return AliasResult::NoAlias;
1370 
1371         // We know that Offset <= MaxOffset.
1372         // (MaxOffset <= -V1Size) => (Offset <= -V1Size) => NoAlias.
1373         if (OffsetRange.getSignedMax().sle(-V1Size.getValue()))
1374           return AliasResult::NoAlias;
1375       }
1376     }
1377 
1378     if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1379       return AliasResult::NoAlias;
1380   }
1381 
1382   // Statically, we can see that the base objects are the same, but the
1383   // pointers have dynamic offsets which we can't resolve. And none of our
1384   // little tricks above worked.
1385   return AliasResult::MayAlias;
1386 }
1387 
1388 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1389   // If the results agree, take it.
1390   if (A == B)
1391     return A;
1392   // A mix of PartialAlias and MustAlias is PartialAlias.
1393   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1394       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1395     return AliasResult::PartialAlias;
1396   // Otherwise, we don't know anything.
1397   return AliasResult::MayAlias;
1398 }
1399 
1400 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1401 /// against another.
1402 AliasResult
1403 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1404                            const Value *V2, LocationSize V2Size,
1405                            AAQueryInfo &AAQI) {
1406   // If the values are Selects with the same condition, we can do a more precise
1407   // check: just check for aliases between the values on corresponding arms.
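  // For example (illustrative IR):
  //   %s1 = select i1 %c, i8* %a, i8* %b
  //   %s2 = select i1 %c, i8* %p, i8* %q
  // alias(%s1, %s2) can be refined by merging alias(%a, %p) and alias(%b, %q).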
1408   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1409     if (SI->getCondition() == SI2->getCondition()) {
1410       AliasResult Alias = getBestAAResults().alias(
1411           MemoryLocation(SI->getTrueValue(), SISize),
1412           MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1413       if (Alias == AliasResult::MayAlias)
1414         return AliasResult::MayAlias;
1415       AliasResult ThisAlias = getBestAAResults().alias(
1416           MemoryLocation(SI->getFalseValue(), SISize),
1417           MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1418       return MergeAliasResults(ThisAlias, Alias);
1419     }
1420 
  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
1423   AliasResult Alias = getBestAAResults().alias(
1424       MemoryLocation(V2, V2Size),
1425       MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1426   if (Alias == AliasResult::MayAlias)
1427     return AliasResult::MayAlias;
1428 
1429   AliasResult ThisAlias = getBestAAResults().alias(
1430       MemoryLocation(V2, V2Size),
1431       MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1432   return MergeAliasResults(ThisAlias, Alias);
1433 }
1434 
1435 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1436 /// another.
1437 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1438                                     const Value *V2, LocationSize V2Size,
1439                                     AAQueryInfo &AAQI) {
1440   if (!PN->getNumIncomingValues())
1441     return AliasResult::NoAlias;
1442   // If the values are PHIs in the same block, we can do a more precise
1443   // as well as efficient check: just check for aliases between the values
1444   // on corresponding edges.
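  // For example (illustrative IR), given two PHIs in the same block:
  //   %p = phi i8* [ %a, %bb1 ], [ %b, %bb2 ]
  //   %q = phi i8* [ %x, %bb1 ], [ %y, %bb2 ]
  // alias(%p, %q) can be refined by merging alias(%a, %x) and alias(%b, %y).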
1445   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1446     if (PN2->getParent() == PN->getParent()) {
1447       Optional<AliasResult> Alias;
1448       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1449         AliasResult ThisAlias = getBestAAResults().alias(
1450             MemoryLocation(PN->getIncomingValue(i), PNSize),
1451             MemoryLocation(
1452                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1453             AAQI);
1454         if (Alias)
1455           *Alias = MergeAliasResults(*Alias, ThisAlias);
1456         else
1457           Alias = ThisAlias;
1458         if (*Alias == AliasResult::MayAlias)
1459           break;
1460       }
1461       return *Alias;
1462     }
1463 
1464   SmallVector<Value *, 4> V1Srcs;
1465   // If a phi operand recurses back to the phi, we can still determine NoAlias
1466   // if we don't alias the underlying objects of the other phi operands, as we
1467   // know that the recursive phi needs to be based on them in some way.
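  // For example (illustrative IR), a simple pointer induction variable:
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr i8, i8* %p, i64 1
  // Here getUnderlyingObject(%p.next) is %p itself, so the query reduces to
  // the remaining operand %base (with the movement across iterations
  // accounted for below).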
1468   bool isRecursive = false;
1469   auto CheckForRecPhi = [&](Value *PV) {
1470     if (!EnableRecPhiAnalysis)
1471       return false;
1472     if (getUnderlyingObject(PV) == PN) {
1473       isRecursive = true;
1474       return true;
1475     }
1476     return false;
1477   };
1478 
1479   if (PV) {
1480     // If we have PhiValues then use it to get the underlying phi values.
1481     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1482     // If we have more phi values than the search depth then return MayAlias
1483     // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
1485     // where 'm' and 'n' are the number of PHI sources.
1486     if (PhiValueSet.size() > MaxLookupSearchDepth)
1487       return AliasResult::MayAlias;
1488     // Add the values to V1Srcs
1489     for (Value *PV1 : PhiValueSet) {
1490       if (CheckForRecPhi(PV1))
1491         continue;
1492       V1Srcs.push_back(PV1);
1493     }
1494   } else {
1495     // If we don't have PhiInfo then just look at the operands of the phi itself
1496     // FIXME: Remove this once we can guarantee that we have PhiInfo always
1497     SmallPtrSet<Value *, 4> UniqueSrc;
1498     Value *OnePhi = nullptr;
1499     for (Value *PV1 : PN->incoming_values()) {
1500       if (isa<PHINode>(PV1)) {
1501         if (OnePhi && OnePhi != PV1) {
1502           // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input.  It is important
1504           // that we handle the single phi case as that lets us handle LCSSA
1505           // phi nodes and (combined with the recursive phi handling) simple
1506           // pointer induction variable patterns.
1507           return AliasResult::MayAlias;
1508         }
1509         OnePhi = PV1;
1510       }
1511 
1512       if (CheckForRecPhi(PV1))
1513         continue;
1514 
1515       if (UniqueSrc.insert(PV1).second)
1516         V1Srcs.push_back(PV1);
1517     }
1518 
1519     if (OnePhi && UniqueSrc.size() > 1)
1520       // Out of an abundance of caution, allow only the trivial lcssa and
1521       // recursive phi cases.
1522       return AliasResult::MayAlias;
1523   }
1524 
1525   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1526   // value. This should only be possible in blocks unreachable from the entry
1527   // block, but return MayAlias just in case.
1528   if (V1Srcs.empty())
1529     return AliasResult::MayAlias;
1530 
1531   // If this PHI node is recursive, indicate that the pointer may be moved
1532   // across iterations. We can only prove NoAlias if different underlying
1533   // objects are involved.
1534   if (isRecursive)
1535     PNSize = LocationSize::beforeOrAfterPointer();
1536 
1537   // In the recursive alias queries below, we may compare values from two
1538   // different loop iterations. Keep track of visited phi blocks, which will
1539   // be used when determining value equivalence.
1540   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1541   auto _ = make_scope_exit([&]() {
1542     if (BlockInserted)
1543       VisitedPhiBBs.erase(PN->getParent());
1544   });
1545 
1546   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1547   // have been cached earlier may no longer be valid. Perform recursive queries
1548   // with a new AAQueryInfo.
1549   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1550   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1551 
1552   AliasResult Alias = getBestAAResults().alias(
1553       MemoryLocation(V2, V2Size),
1554       MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1555 
1556   // Early exit if the check of the first PHI source against V2 is MayAlias.
1557   // Other results are not possible.
1558   if (Alias == AliasResult::MayAlias)
1559     return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we must conservatively return MayAlias.
1562   if (isRecursive && Alias != AliasResult::NoAlias)
1563     return AliasResult::MayAlias;
1564 
  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
1567   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1568     Value *V = V1Srcs[i];
1569 
1570     AliasResult ThisAlias = getBestAAResults().alias(
1571         MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1572     Alias = MergeAliasResults(ThisAlias, Alias);
1573     if (Alias == AliasResult::MayAlias)
1574       break;
1575   }
1576 
1577   return Alias;
1578 }
1579 
1580 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1581 /// array references.
1582 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1583                                       const Value *V2, LocationSize V2Size,
1584                                       AAQueryInfo &AAQI) {
1585   // If either of the memory references is empty, it doesn't matter what the
1586   // pointer values are.
1587   if (V1Size.isZero() || V2Size.isZero())
1588     return AliasResult::NoAlias;
1589 
1590   // Strip off any casts if they exist.
1591   V1 = V1->stripPointerCastsForAliasAnalysis();
1592   V2 = V2->stripPointerCastsForAliasAnalysis();
1593 
1594   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1595   // value for undef that aliases nothing in the program.
1596   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1597     return AliasResult::NoAlias;
1598 
1599   // Are we checking for alias of the same value?
1600   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1601   // different iterations. We must therefore make sure that this is not the
1602   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1603   // happen by looking at the visited phi nodes and making sure they cannot
1604   // reach the value.
1605   if (isValueEqualInPotentialCycles(V1, V2))
1606     return AliasResult::MustAlias;
1607 
1608   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1609     return AliasResult::NoAlias; // Scalars cannot alias each other
1610 
1611   // Figure out what objects these things are pointing to if we can.
1612   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1613   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1614 
1615   // Null values in the default address space don't point to any object, so they
1616   // don't alias any other pointer.
1617   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1618     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1619       return AliasResult::NoAlias;
1620   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1621     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1622       return AliasResult::NoAlias;
1623 
1624   if (O1 != O2) {
1625     // If V1/V2 point to two different objects, we know that we have no alias.
1626     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1627       return AliasResult::NoAlias;
1628 
1629     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1630     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1631         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1632       return AliasResult::NoAlias;
1633 
1634     // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
1636     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1637         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1638       return AliasResult::NoAlias;
1639 
1640     // If one pointer is the result of a call/invoke or load and the other is a
1641     // non-escaping local object within the same function, then we know the
1642     // object couldn't escape to a point where the call could return it.
1643     //
1644     // Note that if the pointers are in different functions, there are a
1645     // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
1647     // location if that memory location doesn't escape. Or it may pass a
1648     // nocapture value to other functions as long as they don't capture it.
1649     if (isEscapeSource(O1) &&
1650         AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1651       return AliasResult::NoAlias;
1652     if (isEscapeSource(O2) &&
1653         AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1654       return AliasResult::NoAlias;
1655   }
1656 
1657   // If the size of one access is larger than the entire object on the other
1658   // side, then we know such behavior is undefined and can assume no alias.
1659   bool NullIsValidLocation = NullPointerIsDefined(&F);
1660   if ((isObjectSmallerThan(
1661           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1662           TLI, NullIsValidLocation)) ||
1663       (isObjectSmallerThan(
1664           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1665           TLI, NullIsValidLocation)))
1666     return AliasResult::NoAlias;
1667 
  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
1672   // disjoint. We do this so that the rest of BasicAA does not have to deal
1673   // with accesses before the base pointer, and to improve cache utilization by
1674   // merging equivalent states.
1675   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1676     V1Size = LocationSize::afterPointer();
1677     V2Size = LocationSize::afterPointer();
1678   }
1679 
1680   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1681   // for recursive queries. For this reason, this limit is chosen to be large
1682   // enough to be very rarely hit, while still being small enough to avoid
1683   // stack overflows.
1684   if (AAQI.Depth >= 512)
1685     return AliasResult::MayAlias;
1686 
1687   // Check the cache before climbing up use-def chains. This also terminates
1688   // otherwise infinitely recursive queries.
1689   AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1690   const bool Swapped = V1 > V2;
1691   if (Swapped)
1692     std::swap(Locs.first, Locs.second);
1693   const auto &Pair = AAQI.AliasCache.try_emplace(
1694       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1695   if (!Pair.second) {
1696     auto &Entry = Pair.first->second;
1697     if (!Entry.isDefinitive()) {
1698       // Remember that we used an assumption.
1699       ++Entry.NumAssumptionUses;
1700       ++AAQI.NumAssumptionUses;
1701     }
1702     // Cache contains sorted {V1,V2} pairs but we should return original order.
1703     auto Result = Entry.Result;
1704     Result.swap(Swapped);
1705     return Result;
1706   }
1707 
1708   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1709   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1710   AliasResult Result =
1711       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1712 
1713   auto It = AAQI.AliasCache.find(Locs);
1714   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1715   auto &Entry = It->second;
1716 
1717   // Check whether a NoAlias assumption has been used, but disproven.
1718   bool AssumptionDisproven =
1719       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1720   if (AssumptionDisproven)
1721     Result = AliasResult::MayAlias;
1722 
1723   // This is a definitive result now, when considered as a root query.
1724   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1725   Entry.Result = Result;
1726   // Cache contains sorted {V1,V2} pairs.
1727   Entry.Result.swap(Swapped);
1728   Entry.NumAssumptionUses = -1;
1729 
1730   // If the assumption has been disproven, remove any results that may have
1731   // been based on this assumption. Do this after the Entry updates above to
1732   // avoid iterator invalidation.
1733   if (AssumptionDisproven)
1734     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1735       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1736 
1737   // The result may still be based on assumptions higher up in the chain.
1738   // Remember it, so it can be purged from the cache later.
1739   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1740       Result != AliasResult::MayAlias)
1741     AAQI.AssumptionBasedResults.push_back(Locs);
1742   return Result;
1743 }
1744 
1745 AliasResult BasicAAResult::aliasCheckRecursive(
1746     const Value *V1, LocationSize V1Size,
1747     const Value *V2, LocationSize V2Size,
1748     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1749   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1750     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1751     if (Result != AliasResult::MayAlias)
1752       return Result;
1753   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1754     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1755     if (Result != AliasResult::MayAlias)
1756       return Result;
1757   }
1758 
1759   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1760     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1761     if (Result != AliasResult::MayAlias)
1762       return Result;
1763   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1764     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1765     if (Result != AliasResult::MayAlias)
1766       return Result;
1767   }
1768 
1769   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1770     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1771     if (Result != AliasResult::MayAlias)
1772       return Result;
1773   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1774     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1775     if (Result != AliasResult::MayAlias)
1776       return Result;
1777   }
1778 
1779   // If both pointers are pointing into the same object and one of them
1780   // accesses the entire object, then the accesses must overlap in some way.
1781   if (O1 == O2) {
1782     bool NullIsValidLocation = NullPointerIsDefined(&F);
1783     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1784         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1785          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1786       return AliasResult::PartialAlias;
1787   }
1788 
1789   return AliasResult::MayAlias;
1790 }
1791 
1792 /// Check whether two Values can be considered equivalent.
1793 ///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
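/// For example (illustrative), if V is defined inside a loop whose header
/// phi is in VisitedPhiBBs, the same Value may denote different addresses
/// on the two iterations being compared, so syntactic equality alone is
/// not sufficient.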
1799 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1800                                                   const Value *V2) {
1801   if (V != V2)
1802     return false;
1803 
1804   const Instruction *Inst = dyn_cast<Instruction>(V);
1805   if (!Inst)
1806     return true;
1807 
1808   if (VisitedPhiBBs.empty())
1809     return true;
1810 
1811   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1812     return false;
1813 
1814   // Make sure that the visited phis cannot reach the Value. This ensures that
1815   // the Values cannot come from different iterations of a potential cycle the
1816   // phi nodes could be involved in.
1817   for (auto *P : VisitedPhiBBs)
1818     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1819       return false;
1820 
1821   return true;
1822 }
1823 
1824 /// Computes the symbolic difference between two de-composed GEPs.
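/// For example (illustrative): subtracting {Offset = 4, [%x * 2]} from
/// {Offset = 10, [%x * 2, %y * 1]} leaves DestGEP as {Offset = 6, [%y * 1]}.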
1825 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1826                                            const DecomposedGEP &SrcGEP) {
1827   DestGEP.Offset -= SrcGEP.Offset;
1828   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find Src.Val.V in Dest.  This is N^2, but pointer indices almost never
    // have more than a few variable indices.
1831     bool Found = false;
1832     for (auto I : enumerate(DestGEP.VarIndices)) {
1833       VariableGEPIndex &Dest = I.value();
1834       if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
1835           !Dest.Val.hasSameExtensionsAs(Src.Val))
1836         continue;
1837 
1838       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1839       // goes to zero, remove the entry.
1840       if (Dest.Scale != Src.Scale) {
1841         Dest.Scale -= Src.Scale;
1842         Dest.IsNSW = false;
1843       } else {
1844         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1845       }
1846       Found = true;
1847       break;
1848     }
1849 
1850     // If we didn't consume this entry, add it to the end of the Dest list.
1851     if (!Found) {
1852       VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1853       DestGEP.VarIndices.push_back(Entry);
1854     }
1855   }
1856 }
1857 
1858 bool BasicAAResult::constantOffsetHeuristic(
1859     const DecomposedGEP &GEP, LocationSize MaybeV1Size,
1860     LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
1861   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1862       !MaybeV2Size.hasValue())
1863     return false;
1864 
1865   const uint64_t V1Size = MaybeV1Size.getValue();
1866   const uint64_t V2Size = MaybeV2Size.getValue();
1867 
1868   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1869 
1870   if (!Var0.Val.hasSameExtensionsAs(Var1.Val) || Var0.Scale != -Var1.Scale ||
1871       Var0.Val.V->getType() != Var1.Val.V->getType())
1872     return false;
1873 
  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get E0.Val == %x and E0.Offset == 1.
1877 
1878   LinearExpression E0 =
1879       GetLinearExpression(ExtendedValue(Var0.Val.V), DL, 0, AC, DT);
1880   LinearExpression E1 =
1881       GetLinearExpression(ExtendedValue(Var1.Val.V), DL, 0, AC, DT);
1882   if (E0.Scale != E1.Scale || !E0.Val.hasSameExtensionsAs(E1.Val) ||
1883       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1884     return false;
1885 
1886   // We have a hit - Var0 and Var1 only differ by a constant offset!
1887 
  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 can be calculated, but we're just interested in the absolute
1890   // minimum difference between the two. The minimum distance may occur due to
1891   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1892   // the minimum distance between %i and %i + 5 is 3.
1893   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1894   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1895   APInt MinDiffBytes =
1896     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1897 
1898   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1899   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1900   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1901   // V2Size can fit in the MinDiffBytes gap.
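  // For example (illustrative values): MinDiff = 3 with |Var0.Scale| = 4
  // gives MinDiffBytes = 12; with GEP.Offset = 0 this proves NoAlias for
  // any pair of access sizes of at most 12 bytes each.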
1902   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1903          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1904 }
1905 
1906 //===----------------------------------------------------------------------===//
1907 // BasicAliasAnalysis Pass
1908 //===----------------------------------------------------------------------===//
1909 
1910 AnalysisKey BasicAA::Key;
1911 
1912 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1913   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1914   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1915   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1916   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1917   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1918 }
1919 
1920 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1921   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1922 }
1923 
1924 char BasicAAWrapperPass::ID = 0;
1925 
1926 void BasicAAWrapperPass::anchor() {}
1927 
1928 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1929                       "Basic Alias Analysis (stateless AA impl)", true, true)
1930 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1931 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1932 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1933 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1934 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1935                     "Basic Alias Analysis (stateless AA impl)", true, true)
1936 
1937 FunctionPass *llvm::createBasicAAWrapperPass() {
1938   return new BasicAAWrapperPass();
1939 }
1940 
1941 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1942   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1943   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1944   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1945   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1946 
1947   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1948                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1949                                  &DTWP.getDomTree(),
1950                                  PVWP ? &PVWP->getResult() : nullptr));
1951 
1952   return false;
1953 }
1954 
1955 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1956   AU.setPreservesAll();
1957   AU.addRequiredTransitive<AssumptionCacheTracker>();
1958   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1959   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1960   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1961 }
1962 
1963 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1964   return BasicAAResult(
1965       F.getParent()->getDataLayout(), F,
1966       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1967       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1968 }
1969