1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/ConstantRange.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GetElementPtrTypeIterator.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Operator.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/User.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/KnownBits.h"
59 #include <cassert>
60 #include <cstdint>
61 #include <cstdlib>
62 #include <utility>
63 
64 #define DEBUG_TYPE "basicaa"
65 
66 using namespace llvm;
67 
68 /// Enable analysis of recursive PHI nodes.
69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
70                                           cl::init(true));
71 
72 /// SearchLimitReached / SearchTimes shows how often the limit of
73 /// to decompose GEPs is reached. It will affect the precision
74 /// of basic alias analysis.
75 STATISTIC(SearchLimitReached, "Number of times the limit to "
76                               "decompose GEPs is reached");
77 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
78 
79 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
80 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
81 /// careful with value equivalence. We use reachability to make sure a value
82 /// cannot be involved in a cycle.
83 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
84 
85 // The max limit of the search depth in DecomposeGEPExpression() and
86 // getUnderlyingObject().
87 static const unsigned MaxLookupSearchDepth = 6;
88 
89 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
90                                FunctionAnalysisManager::Invalidator &Inv) {
91   // We don't care if this analysis itself is preserved, it has no state. But
92   // we need to check that the analyses it depends on have been. Note that we
93   // may be created without handles to some analyses and in that case don't
94   // depend on them.
95   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
96       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
97       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
98     return true;
99 
100   // Otherwise this analysis result remains valid.
101   return false;
102 }
103 
104 //===----------------------------------------------------------------------===//
105 // Useful predicates
106 //===----------------------------------------------------------------------===//
107 
108 /// Returns true if the pointer is one which would have been considered an
109 /// escape by isNonEscapingLocalObject.
110 static bool isEscapeSource(const Value *V) {
111   if (isa<CallBase>(V))
112     return true;
113 
114   // The load case works because isNonEscapingLocalObject considers all
115   // stores to be escapes (it passes true for the StoreCaptures argument
116   // to PointerMayBeCaptured).
117   if (isa<LoadInst>(V))
118     return true;
119 
120   // The inttoptr case works because isNonEscapingLocalObject considers all
121   // means of converting or equating a pointer to an int (ptrtoint, ptr store
122   // which could be followed by an integer load, ptr<->int compare) as
123   // escaping, and objects located at well-known addresses via platform-specific
124   // means cannot be considered non-escaping local objects.
125   if (isa<IntToPtrInst>(V))
126     return true;
127 
128   return false;
129 }
130 
131 /// Returns the size of the object specified by V or UnknownSize if unknown.
132 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
133                               const TargetLibraryInfo &TLI,
134                               bool NullIsValidLoc,
135                               bool RoundToAlign = false) {
136   uint64_t Size;
137   ObjectSizeOpts Opts;
138   Opts.RoundToAlign = RoundToAlign;
139   Opts.NullIsUnknownSize = NullIsValidLoc;
140   if (getObjectSize(V, Size, DL, &TLI, Opts))
141     return Size;
142   return MemoryLocation::UnknownSize;
143 }
144 
145 /// Returns true if we can prove that the object specified by V is smaller than
146 /// Size.
147 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
148                                 const DataLayout &DL,
149                                 const TargetLibraryInfo &TLI,
150                                 bool NullIsValidLoc) {
151   // Note that the meanings of the "object" are slightly different in the
152   // following contexts:
153   //    c1: llvm::getObjectSize()
154   //    c2: llvm.objectsize() intrinsic
155   //    c3: isObjectSmallerThan()
156   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
157   // refers to the "entire object".
158   //
159   //  Consider this example:
160   //     char *p = (char*)malloc(100)
161   //     char *q = p+80;
162   //
163   //  In the context of c1 and c2, the "object" pointed by q refers to the
164   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
165   //
166   //  However, in the context of c3, the "object" refers to the chunk of memory
167   // being allocated. So, the "object" has 100 bytes, and q points to the middle
168   // the "object". In case q is passed to isObjectSmallerThan() as the 1st
169   // parameter, before the llvm::getObjectSize() is called to get the size of
170   // entire object, we should:
171   //    - either rewind the pointer q to the base-address of the object in
172   //      question (in this case rewind to p), or
173   //    - just give up. It is up to caller to make sure the pointer is pointing
174   //      to the base address the object.
175   //
176   // We go for 2nd option for simplicity.
177   if (!isIdentifiedObject(V))
178     return false;
179 
180   // This function needs to use the aligned object size because we allow
181   // reads a bit past the end given sufficient alignment.
182   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
183                                       /*RoundToAlign*/ true);
184 
185   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
186 }
187 
188 /// Return the minimal extent from \p V to the end of the underlying object,
189 /// assuming the result is used in an aliasing query. E.g., we do use the query
190 /// location size and the fact that null pointers cannot alias here.
191 static uint64_t getMinimalExtentFrom(const Value &V,
192                                      const LocationSize &LocSize,
193                                      const DataLayout &DL,
194                                      bool NullIsValidLoc) {
195   // If we have dereferenceability information we know a lower bound for the
196   // extent as accesses for a lower offset would be valid. We need to exclude
197   // the "or null" part if null is a valid pointer. We can ignore frees, as an
198   // access after free would be undefined behavior.
199   bool CanBeNull, CanBeFreed;
200   uint64_t DerefBytes =
201     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
202   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
203   // If queried with a precise location size, we assume that location size to be
204   // accessed, thus valid.
205   if (LocSize.isPrecise())
206     DerefBytes = std::max(DerefBytes, LocSize.getValue());
207   return DerefBytes;
208 }
209 
210 /// Returns true if we can prove that the object specified by V has size Size.
211 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
212                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
213   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
214   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
215 }
216 
217 //===----------------------------------------------------------------------===//
218 // CaptureInfo implementations
219 //===----------------------------------------------------------------------===//
220 
221 CaptureInfo::~CaptureInfo() = default;
222 
223 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
224                                                 const Instruction *I) {
225   return isNonEscapingLocalObject(Object, &IsCapturedCache);
226 }
227 
228 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
229                                                  const Instruction *I) {
230   if (!isIdentifiedFunctionLocal(Object))
231     return false;
232 
233   auto Iter = EarliestEscapes.insert({Object, nullptr});
234   if (Iter.second) {
235     Instruction *EarliestCapture = FindEarliestCapture(
236         Object, *const_cast<Function *>(I->getFunction()),
237         /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
238     if (EarliestCapture) {
239       auto Ins = Inst2Obj.insert({EarliestCapture, {}});
240       Ins.first->second.push_back(Object);
241     }
242     Iter.first->second = EarliestCapture;
243   }
244 
245   // No capturing instruction.
246   if (!Iter.first->second)
247     return true;
248 
249   return I != Iter.first->second &&
250          !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
251 }
252 
253 void EarliestEscapeInfo::removeInstruction(Instruction *I) {
254   auto Iter = Inst2Obj.find(I);
255   if (Iter != Inst2Obj.end()) {
256     for (const Value *Obj : Iter->second)
257       EarliestEscapes.erase(Obj);
258     Inst2Obj.erase(I);
259   }
260 }
261 
262 //===----------------------------------------------------------------------===//
263 // GetElementPtr Instruction Decomposition and Analysis
264 //===----------------------------------------------------------------------===//
265 
266 namespace {
267 /// Represents zext(sext(trunc(V))).
268 struct CastedValue {
269   const Value *V;
270   unsigned ZExtBits = 0;
271   unsigned SExtBits = 0;
272   unsigned TruncBits = 0;
273 
274   explicit CastedValue(const Value *V) : V(V) {}
275   explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
276                        unsigned TruncBits)
277       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}
278 
279   unsigned getBitWidth() const {
280     return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
281            SExtBits;
282   }
283 
284   CastedValue withValue(const Value *NewV) const {
285     return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
286   }
287 
288   /// Replace V with zext(NewV)
289   CastedValue withZExtOfValue(const Value *NewV) const {
290     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
291                         NewV->getType()->getPrimitiveSizeInBits();
292     if (ExtendBy <= TruncBits)
293       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
294 
295     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
296     ExtendBy -= TruncBits;
297     return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
298   }
299 
300   /// Replace V with sext(NewV)
301   CastedValue withSExtOfValue(const Value *NewV) const {
302     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
303                         NewV->getType()->getPrimitiveSizeInBits();
304     if (ExtendBy <= TruncBits)
305       return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);
306 
307     // zext(sext(sext(NewV)))
308     ExtendBy -= TruncBits;
309     return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
310   }
311 
312   APInt evaluateWith(APInt N) const {
313     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
314            "Incompatible bit width");
315     if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
316     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
317     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
318     return N;
319   }
320 
321   KnownBits evaluateWith(KnownBits N) const {
322     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
323            "Incompatible bit width");
324     if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
325     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
326     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
327     return N;
328   }
329 
330   ConstantRange evaluateWith(ConstantRange N) const {
331     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
332            "Incompatible bit width");
333     if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
334     if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
335     if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
336     return N;
337   }
338 
339   bool canDistributeOver(bool NUW, bool NSW) const {
340     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
341     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
342     // trunc(x op y) == trunc(x) op trunc(y)
343     return (!ZExtBits || NUW) && (!SExtBits || NSW);
344   }
345 
346   bool hasSameCastsAs(const CastedValue &Other) const {
347     return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
348            TruncBits == Other.TruncBits;
349   }
350 };
351 
352 /// Represents zext(sext(trunc(V))) * Scale + Offset.
353 struct LinearExpression {
354   CastedValue Val;
355   APInt Scale;
356   APInt Offset;
357 
358   /// True if all operations in this expression are NSW.
359   bool IsNSW;
360 
361   LinearExpression(const CastedValue &Val, const APInt &Scale,
362                    const APInt &Offset, bool IsNSW)
363       : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
364 
365   LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
366     unsigned BitWidth = Val.getBitWidth();
367     Scale = APInt(BitWidth, 1);
368     Offset = APInt(BitWidth, 0);
369   }
370 };
371 }
372 
373 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
374 /// B are constant integers.
375 static LinearExpression GetLinearExpression(
376     const CastedValue &Val,  const DataLayout &DL, unsigned Depth,
377     AssumptionCache *AC, DominatorTree *DT) {
378   // Limit our recursion depth.
379   if (Depth == 6)
380     return Val;
381 
382   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
383     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
384                             Val.evaluateWith(Const->getValue()), true);
385 
386   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
387     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
388       APInt RHS = Val.evaluateWith(RHSC->getValue());
389       // The only non-OBO case we deal with is or, and only limited to the
390       // case where it is both nuw and nsw.
391       bool NUW = true, NSW = true;
392       if (isa<OverflowingBinaryOperator>(BOp)) {
393         NUW &= BOp->hasNoUnsignedWrap();
394         NSW &= BOp->hasNoSignedWrap();
395       }
396       if (!Val.canDistributeOver(NUW, NSW))
397         return Val;
398 
399       // While we can distribute over trunc, we cannot preserve nowrap flags
400       // in that case.
401       if (Val.TruncBits)
402         NUW = NSW = false;
403 
404       LinearExpression E(Val);
405       switch (BOp->getOpcode()) {
406       default:
407         // We don't understand this instruction, so we can't decompose it any
408         // further.
409         return Val;
410       case Instruction::Or:
411         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
412         // analyze it.
413         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
414                                BOp, DT))
415           return Val;
416 
417         LLVM_FALLTHROUGH;
418       case Instruction::Add: {
419         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
420                                 Depth + 1, AC, DT);
421         E.Offset += RHS;
422         E.IsNSW &= NSW;
423         break;
424       }
425       case Instruction::Sub: {
426         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
427                                 Depth + 1, AC, DT);
428         E.Offset -= RHS;
429         E.IsNSW &= NSW;
430         break;
431       }
432       case Instruction::Mul: {
433         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
434                                 Depth + 1, AC, DT);
435         E.Offset *= RHS;
436         E.Scale *= RHS;
437         E.IsNSW &= NSW;
438         break;
439       }
440       case Instruction::Shl:
441         // We're trying to linearize an expression of the kind:
442         //   shl i8 -128, 36
443         // where the shift count exceeds the bitwidth of the type.
444         // We can't decompose this further (the expression would return
445         // a poison value).
446         if (RHS.getLimitedValue() > Val.getBitWidth())
447           return Val;
448 
449         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
450                                 Depth + 1, AC, DT);
451         E.Offset <<= RHS.getLimitedValue();
452         E.Scale <<= RHS.getLimitedValue();
453         E.IsNSW &= NSW;
454         break;
455       }
456       return E;
457     }
458   }
459 
460   if (isa<ZExtInst>(Val.V))
461     return GetLinearExpression(
462         Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
463         DL, Depth + 1, AC, DT);
464 
465   if (isa<SExtInst>(Val.V))
466     return GetLinearExpression(
467         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
468         DL, Depth + 1, AC, DT);
469 
470   return Val;
471 }
472 
473 /// To ensure a pointer offset fits in an integer of size PointerSize
474 /// (in bits) when that size is smaller than the maximum pointer size. This is
475 /// an issue, for example, in particular for 32b pointers with negative indices
476 /// that rely on two's complement wrap-arounds for precise alias information
477 /// where the maximum pointer size is 64b.
478 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
479   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
480   unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
481   return (Offset << ShiftBits).ashr(ShiftBits);
482 }
483 
484 namespace {
485 // A linear transformation of a Value; this class represents
486 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
487 struct VariableGEPIndex {
488   CastedValue Val;
489   APInt Scale;
490 
491   // Context instruction to use when querying information about this index.
492   const Instruction *CxtI;
493 
494   /// True if all operations in this expression are NSW.
495   bool IsNSW;
496 
497   void dump() const {
498     print(dbgs());
499     dbgs() << "\n";
500   }
501   void print(raw_ostream &OS) const {
502     OS << "(V=" << Val.V->getName()
503        << ", zextbits=" << Val.ZExtBits
504        << ", sextbits=" << Val.SExtBits
505        << ", truncbits=" << Val.TruncBits
506        << ", scale=" << Scale << ")";
507   }
508 };
509 }
510 
511 // Represents the internal structure of a GEP, decomposed into a base pointer,
512 // constant offsets, and variable scaled indices.
513 struct BasicAAResult::DecomposedGEP {
514   // Base pointer of the GEP
515   const Value *Base;
516   // Total constant offset from base.
517   APInt Offset;
518   // Scaled variable (non-constant) indices.
519   SmallVector<VariableGEPIndex, 4> VarIndices;
520   // Are all operations inbounds GEPs or non-indexing operations?
521   // (None iff expression doesn't involve any geps)
522   Optional<bool> InBounds;
523 
524   void dump() const {
525     print(dbgs());
526     dbgs() << "\n";
527   }
528   void print(raw_ostream &OS) const {
529     OS << "(DecomposedGEP Base=" << Base->getName()
530        << ", Offset=" << Offset
531        << ", VarIndices=[";
532     for (size_t i = 0; i < VarIndices.size(); i++) {
533       if (i != 0)
534         OS << ", ";
535       VarIndices[i].print(OS);
536     }
537     OS << "])";
538   }
539 };
540 
541 
542 /// If V is a symbolic pointer expression, decompose it into a base pointer
543 /// with a constant offset and a number of scaled symbolic offsets.
544 ///
545 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
546 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
547 /// specified amount, but which may have other unrepresented high bits. As
548 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
549 BasicAAResult::DecomposedGEP
550 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
551                                       AssumptionCache *AC, DominatorTree *DT) {
552   // Limit recursion depth to limit compile time in crazy cases.
553   unsigned MaxLookup = MaxLookupSearchDepth;
554   SearchTimes++;
555   const Instruction *CxtI = dyn_cast<Instruction>(V);
556 
557   unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
558   DecomposedGEP Decomposed;
559   Decomposed.Offset = APInt(MaxPointerSize, 0);
560   do {
561     // See if this is a bitcast or GEP.
562     const Operator *Op = dyn_cast<Operator>(V);
563     if (!Op) {
564       // The only non-operator case we can handle are GlobalAliases.
565       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
566         if (!GA->isInterposable()) {
567           V = GA->getAliasee();
568           continue;
569         }
570       }
571       Decomposed.Base = V;
572       return Decomposed;
573     }
574 
575     if (Op->getOpcode() == Instruction::BitCast ||
576         Op->getOpcode() == Instruction::AddrSpaceCast) {
577       V = Op->getOperand(0);
578       continue;
579     }
580 
581     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
582     if (!GEPOp) {
583       if (const auto *PHI = dyn_cast<PHINode>(V)) {
584         // Look through single-arg phi nodes created by LCSSA.
585         if (PHI->getNumIncomingValues() == 1) {
586           V = PHI->getIncomingValue(0);
587           continue;
588         }
589       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
590         // CaptureTracking can know about special capturing properties of some
591         // intrinsics like launder.invariant.group, that can't be expressed with
592         // the attributes, but have properties like returning aliasing pointer.
593         // Because some analysis may assume that nocaptured pointer is not
594         // returned from some special intrinsic (because function would have to
595         // be marked with returns attribute), it is crucial to use this function
596         // because it should be in sync with CaptureTracking. Not using it may
597         // cause weird miscompilations where 2 aliasing pointers are assumed to
598         // noalias.
599         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
600           V = RP;
601           continue;
602         }
603       }
604 
605       Decomposed.Base = V;
606       return Decomposed;
607     }
608 
609     // Track whether we've seen at least one in bounds gep, and if so, whether
610     // all geps parsed were in bounds.
611     if (Decomposed.InBounds == None)
612       Decomposed.InBounds = GEPOp->isInBounds();
613     else if (!GEPOp->isInBounds())
614       Decomposed.InBounds = false;
615 
616     assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
617 
618     // Don't attempt to analyze GEPs if index scale is not a compile-time
619     // constant.
620     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
621       Decomposed.Base = V;
622       return Decomposed;
623     }
624 
625     unsigned AS = GEPOp->getPointerAddressSpace();
626     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
627     gep_type_iterator GTI = gep_type_begin(GEPOp);
628     unsigned PointerSize = DL.getPointerSizeInBits(AS);
629     // Assume all GEP operands are constants until proven otherwise.
630     bool GepHasConstantOffset = true;
631     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
632          I != E; ++I, ++GTI) {
633       const Value *Index = *I;
634       // Compute the (potentially symbolic) offset in bytes for this index.
635       if (StructType *STy = GTI.getStructTypeOrNull()) {
636         // For a struct, add the member offset.
637         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
638         if (FieldNo == 0)
639           continue;
640 
641         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
642         continue;
643       }
644 
645       // For an array/pointer, add the element offset, explicitly scaled.
646       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
647         if (CIdx->isZero())
648           continue;
649         Decomposed.Offset +=
650             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
651             CIdx->getValue().sextOrTrunc(MaxPointerSize);
652         continue;
653       }
654 
655       GepHasConstantOffset = false;
656 
657       APInt Scale(MaxPointerSize,
658                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
659       // If the integer type is smaller than the pointer size, it is implicitly
660       // sign extended to pointer size.
661       unsigned Width = Index->getType()->getIntegerBitWidth();
662       unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
663       unsigned TruncBits = PointerSize < Width ? Width - PointerSize : 0;
664       LinearExpression LE = GetLinearExpression(
665           CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
666 
667       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
668       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
669 
670       // It can be the case that, even through C1*V+C2 does not overflow for
671       // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
672       // decompose the expression in this way.
673       //
674       // FIXME: C1*Scale and the other operations in the decomposed
675       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
676       // possibility.
677       bool Overflow;
678       APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
679                            .smul_ov(Scale, Overflow);
680       if (Overflow) {
681         LE = LinearExpression(CastedValue(Index, 0, SExtBits, TruncBits));
682       } else {
683         Decomposed.Offset += ScaledOffset;
684         Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
685       }
686 
687       // If we already had an occurrence of this index variable, merge this
688       // scale into it.  For example, we want to handle:
689       //   A[x][x] -> x*16 + x*4 -> x*20
690       // This also ensures that 'x' only appears in the index list once.
691       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
692         if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
693             Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
694           Scale += Decomposed.VarIndices[i].Scale;
695           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
696           break;
697         }
698       }
699 
700       // Make sure that we have a scale that makes sense for this target's
701       // pointer size.
702       Scale = adjustToPointerSize(Scale, PointerSize);
703 
704       if (!!Scale) {
705         VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
706         Decomposed.VarIndices.push_back(Entry);
707       }
708     }
709 
710     // Take care of wrap-arounds
711     if (GepHasConstantOffset)
712       Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
713 
714     // Analyze the base pointer next.
715     V = GEPOp->getOperand(0);
716   } while (--MaxLookup);
717 
718   // If the chain of expressions is too deep, just return early.
719   Decomposed.Base = V;
720   SearchLimitReached++;
721   return Decomposed;
722 }
723 
724 /// Returns whether the given pointer value points to memory that is local to
725 /// the function, with global constants being considered local to all
726 /// functions.
727 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
728                                            AAQueryInfo &AAQI, bool OrLocal) {
729   assert(Visited.empty() && "Visited must be cleared after use!");
730 
731   unsigned MaxLookup = 8;
732   SmallVector<const Value *, 16> Worklist;
733   Worklist.push_back(Loc.Ptr);
734   do {
735     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
736     if (!Visited.insert(V).second) {
737       Visited.clear();
738       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
739     }
740 
741     // An alloca instruction defines local memory.
742     if (OrLocal && isa<AllocaInst>(V))
743       continue;
744 
745     // A global constant counts as local memory for our purposes.
746     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
747       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
748       // global to be marked constant in some modules and non-constant in
749       // others.  GV may even be a declaration, not a definition.
750       if (!GV->isConstant()) {
751         Visited.clear();
752         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
753       }
754       continue;
755     }
756 
757     // If both select values point to local memory, then so does the select.
758     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
759       Worklist.push_back(SI->getTrueValue());
760       Worklist.push_back(SI->getFalseValue());
761       continue;
762     }
763 
764     // If all values incoming to a phi node point to local memory, then so does
765     // the phi.
766     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
767       // Don't bother inspecting phi nodes with many operands.
768       if (PN->getNumIncomingValues() > MaxLookup) {
769         Visited.clear();
770         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
771       }
772       append_range(Worklist, PN->incoming_values());
773       continue;
774     }
775 
776     // Otherwise be conservative.
777     Visited.clear();
778     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
779   } while (!Worklist.empty() && --MaxLookup);
780 
781   Visited.clear();
782   return Worklist.empty();
783 }
784 
785 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
786   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
787   return II && II->getIntrinsicID() == IID;
788 }
789 
790 /// Returns the behavior when calling the given call site.
791 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
792   if (Call->doesNotAccessMemory())
793     // Can't do better than this.
794     return FMRB_DoesNotAccessMemory;
795 
796   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
797 
798   // If the callsite knows it only reads memory, don't return worse
799   // than that.
800   if (Call->onlyReadsMemory())
801     Min = FMRB_OnlyReadsMemory;
802   else if (Call->doesNotReadMemory())
803     Min = FMRB_OnlyWritesMemory;
804 
805   if (Call->onlyAccessesArgMemory())
806     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
807   else if (Call->onlyAccessesInaccessibleMemory())
808     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
809   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
810     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
811 
812   // If the call has operand bundles then aliasing attributes from the function
813   // it calls do not directly apply to the call.  This can be made more precise
814   // in the future.
815   if (!Call->hasOperandBundles())
816     if (const Function *F = Call->getCalledFunction())
817       Min =
818           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
819 
820   return Min;
821 }
822 
823 /// Returns the behavior when calling the given function. For use when the call
824 /// site is not known.
825 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
826   // If the function declares it doesn't access memory, we can't do better.
827   if (F->doesNotAccessMemory())
828     return FMRB_DoesNotAccessMemory;
829 
830   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
831 
832   // If the function declares it only reads memory, go with that.
833   if (F->onlyReadsMemory())
834     Min = FMRB_OnlyReadsMemory;
835   else if (F->doesNotReadMemory())
836     Min = FMRB_OnlyWritesMemory;
837 
838   if (F->onlyAccessesArgMemory())
839     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
840   else if (F->onlyAccessesInaccessibleMemory())
841     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
842   else if (F->onlyAccessesInaccessibleMemOrArgMem())
843     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
844 
845   return Min;
846 }
847 
848 /// Returns true if this is a writeonly (i.e Mod only) parameter.
849 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
850                              const TargetLibraryInfo &TLI) {
851   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
852     return true;
853 
854   // We can bound the aliasing properties of memset_pattern16 just as we can
855   // for memcpy/memset.  This is particularly important because the
856   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
857   // whenever possible.
858   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
859   // attributes.
860   LibFunc F;
861   if (Call->getCalledFunction() &&
862       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
863       F == LibFunc_memset_pattern16 && TLI.has(F))
864     if (ArgIdx == 0)
865       return true;
866 
867   // TODO: memset_pattern4, memset_pattern8
868   // TODO: _chk variants
869   // TODO: strcmp, strcpy
870 
871   return false;
872 }
873 
874 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
875                                            unsigned ArgIdx) {
876   // Checking for known builtin intrinsics and target library functions.
877   if (isWriteOnlyParam(Call, ArgIdx, TLI))
878     return ModRefInfo::Mod;
879 
880   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
881     return ModRefInfo::Ref;
882 
883   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
884     return ModRefInfo::NoModRef;
885 
886   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
887 }
888 
889 #ifndef NDEBUG
890 static const Function *getParent(const Value *V) {
891   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
892     if (!inst->getParent())
893       return nullptr;
894     return inst->getParent()->getParent();
895   }
896 
897   if (const Argument *arg = dyn_cast<Argument>(V))
898     return arg->getParent();
899 
900   return nullptr;
901 }
902 
903 static bool notDifferentParent(const Value *O1, const Value *O2) {
904 
905   const Function *F1 = getParent(O1);
906   const Function *F2 = getParent(O2);
907 
908   return !F1 || !F2 || F1 == F2;
909 }
910 #endif
911 
912 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
913                                  const MemoryLocation &LocB,
914                                  AAQueryInfo &AAQI) {
915   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
916          "BasicAliasAnalysis doesn't support interprocedural queries.");
917   return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
918 }
919 
920 /// Checks to see if the specified callsite can clobber the specified memory
921 /// object.
922 ///
923 /// Since we only look at local properties of this function, we really can't
924 /// say much about this query.  We do, however, use simple "address taken"
925 /// analysis on local objects.
926 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
927                                         const MemoryLocation &Loc,
928                                         AAQueryInfo &AAQI) {
929   assert(notDifferentParent(Call, Loc.Ptr) &&
930          "AliasAnalysis query involving multiple functions!");
931 
932   const Value *Object = getUnderlyingObject(Loc.Ptr);
933 
934   // Calls marked 'tail' cannot read or write allocas from the current frame
935   // because the current frame might be destroyed by the time they run. However,
936   // a tail call may use an alloca with byval. Calling with byval copies the
937   // contents of the alloca into argument registers or stack slots, so there is
938   // no lifetime issue.
939   if (isa<AllocaInst>(Object))
940     if (const CallInst *CI = dyn_cast<CallInst>(Call))
941       if (CI->isTailCall() &&
942           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
943         return ModRefInfo::NoModRef;
944 
945   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
946   // modify them even though the alloca is not escaped.
947   if (auto *AI = dyn_cast<AllocaInst>(Object))
948     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
949       return ModRefInfo::Mod;
950 
951   // If the pointer is to a locally allocated object that does not escape,
952   // then the call can not mod/ref the pointer unless the call takes the pointer
953   // as an argument, and itself doesn't capture it.
954   if (!isa<Constant>(Object) && Call != Object &&
955       AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {
956 
957     // Optimistically assume that call doesn't touch Object and check this
958     // assumption in the following loop.
959     ModRefInfo Result = ModRefInfo::NoModRef;
960     bool IsMustAlias = true;
961 
962     unsigned OperandNo = 0;
963     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
964          CI != CE; ++CI, ++OperandNo) {
965       // Only look at the no-capture or byval pointer arguments.  If this
966       // pointer were passed to arguments that were neither of these, then it
967       // couldn't be no-capture.
968       if (!(*CI)->getType()->isPointerTy() ||
969           (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
970            !Call->isByValArgument(OperandNo)))
971         continue;
972 
973       // Call doesn't access memory through this operand, so we don't care
974       // if it aliases with Object.
975       if (Call->doesNotAccessMemory(OperandNo))
976         continue;
977 
978       // If this is a no-capture pointer argument, see if we can tell that it
979       // is impossible to alias the pointer we're checking.
980       AliasResult AR = getBestAAResults().alias(
981           MemoryLocation::getBeforeOrAfter(*CI),
982           MemoryLocation::getBeforeOrAfter(Object), AAQI);
983       if (AR != AliasResult::MustAlias)
984         IsMustAlias = false;
985       // Operand doesn't alias 'Object', continue looking for other aliases
986       if (AR == AliasResult::NoAlias)
987         continue;
988       // Operand aliases 'Object', but call doesn't modify it. Strengthen
989       // initial assumption and keep looking in case if there are more aliases.
990       if (Call->onlyReadsMemory(OperandNo)) {
991         Result = setRef(Result);
992         continue;
993       }
994       // Operand aliases 'Object' but call only writes into it.
995       if (Call->doesNotReadMemory(OperandNo)) {
996         Result = setMod(Result);
997         continue;
998       }
999       // This operand aliases 'Object' and call reads and writes into it.
1000       // Setting ModRef will not yield an early return below, MustAlias is not
1001       // used further.
1002       Result = ModRefInfo::ModRef;
1003       break;
1004     }
1005 
1006     // No operand aliases, reset Must bit. Add below if at least one aliases
1007     // and all aliases found are MustAlias.
1008     if (isNoModRef(Result))
1009       IsMustAlias = false;
1010 
1011     // Early return if we improved mod ref information
1012     if (!isModAndRefSet(Result)) {
1013       if (isNoModRef(Result))
1014         return ModRefInfo::NoModRef;
1015       return IsMustAlias ? setMust(Result) : clearMust(Result);
1016     }
1017   }
1018 
1019   // If the call is malloc/calloc like, we can assume that it doesn't
1020   // modify any IR visible value.  This is only valid because we assume these
1021   // routines do not read values visible in the IR.  TODO: Consider special
1022   // casing realloc and strdup routines which access only their arguments as
1023   // well.  Or alternatively, replace all of this with inaccessiblememonly once
1024   // that's implemented fully.
1025   if (isMallocOrCallocLikeFn(Call, &TLI)) {
1026     // Be conservative if the accessed pointer may alias the allocation -
1027     // fallback to the generic handling below.
1028     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
1029                                  AAQI) == AliasResult::NoAlias)
1030       return ModRefInfo::NoModRef;
1031   }
1032 
1033   // The semantics of memcpy intrinsics either exactly overlap or do not
1034   // overlap, i.e., source and destination of any given memcpy are either
1035   // no-alias or must-alias.
1036   if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
1037     AliasResult SrcAA =
1038         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
1039     AliasResult DestAA =
1040         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
1041     // It's also possible for Loc to alias both src and dest, or neither.
1042     ModRefInfo rv = ModRefInfo::NoModRef;
1043     if (SrcAA != AliasResult::NoAlias)
1044       rv = setRef(rv);
1045     if (DestAA != AliasResult::NoAlias)
1046       rv = setMod(rv);
1047     return rv;
1048   }
1049 
1050   // Guard intrinsics are marked as arbitrarily writing so that proper control
1051   // dependencies are maintained but they never mods any particular memory
1052   // location.
1053   //
1054   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1055   // heap state at the point the guard is issued needs to be consistent in case
1056   // the guard invokes the "deopt" continuation.
1057   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
1058     return ModRefInfo::Ref;
1059   // The same applies to deoptimize which is essentially a guard(false).
1060   if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
1061     return ModRefInfo::Ref;
1062 
1063   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
1064   // writing so that proper control dependencies are maintained but they never
1065   // mod any particular memory location visible to the IR.
1066   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1067   // intrinsic is now modeled as reading memory. This prevents hoisting the
1068   // invariant.start intrinsic over stores. Consider:
1069   // *ptr = 40;
1070   // *ptr = 50;
1071   // invariant_start(ptr)
1072   // int val = *ptr;
1073   // print(val);
1074   //
1075   // This cannot be transformed to:
1076   //
1077   // *ptr = 40;
1078   // invariant_start(ptr)
1079   // *ptr = 50;
1080   // int val = *ptr;
1081   // print(val);
1082   //
1083   // The transformation will cause the second store to be ignored (based on
1084   // rules of invariant.start)  and print 40, while the first program always
1085   // prints 50.
1086   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1087     return ModRefInfo::Ref;
1088 
1089   // The AAResultBase base class has some smarts, lets use them.
1090   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
1091 }
1092 
1093 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1094                                         const CallBase *Call2,
1095                                         AAQueryInfo &AAQI) {
1096   // Guard intrinsics are marked as arbitrarily writing so that proper control
1097   // dependencies are maintained but they never mods any particular memory
1098   // location.
1099   //
1100   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1101   // heap state at the point the guard is issued needs to be consistent in case
1102   // the guard invokes the "deopt" continuation.
1103 
1104   // NB! This function is *not* commutative, so we special case two
1105   // possibilities for guard intrinsics.
1106 
1107   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1108     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1109                ? ModRefInfo::Ref
1110                : ModRefInfo::NoModRef;
1111 
1112   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1113     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1114                ? ModRefInfo::Mod
1115                : ModRefInfo::NoModRef;
1116 
1117   // The AAResultBase base class has some smarts, lets use them.
1118   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1119 }
1120 
1121 /// Return true if we know V to the base address of the corresponding memory
1122 /// object.  This implies that any address less than V must be out of bounds
1123 /// for the underlying object.  Note that just being isIdentifiedObject() is
1124 /// not enough - For example, a negative offset from a noalias argument or call
1125 /// can be inbounds w.r.t the actual underlying object.
1126 static bool isBaseOfObject(const Value *V) {
1127   // TODO: We can handle other cases here
1128   // 1) For GC languages, arguments to functions are often required to be
1129   //    base pointers.
1130   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1131   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1132 }
1133 
1134 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1135 /// another pointer.
1136 ///
1137 /// We know that V1 is a GEP, but we don't know anything about V2.
1138 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1139 /// V2.
1140 AliasResult BasicAAResult::aliasGEP(
1141     const GEPOperator *GEP1, LocationSize V1Size,
1142     const Value *V2, LocationSize V2Size,
1143     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1144   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1145     // TODO: This limitation exists for compile-time reasons. Relax it if we
1146     // can avoid exponential pathological cases.
1147     if (!isa<GEPOperator>(V2))
1148       return AliasResult::MayAlias;
1149 
1150     // If both accesses have unknown size, we can only check whether the base
1151     // objects don't alias.
1152     AliasResult BaseAlias = getBestAAResults().alias(
1153         MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1154         MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1155     return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1156                                              : AliasResult::MayAlias;
1157   }
1158 
1159   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1160   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1161 
1162   // Bail if we were not able to decompose anything.
1163   if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1164     return AliasResult::MayAlias;
1165 
1166   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1167   // symbolic difference.
1168   subtractDecomposedGEPs(DecompGEP1, DecompGEP2);
1169 
1170   // If an inbounds GEP would have to start from an out of bounds address
1171   // for the two to alias, then we can assume noalias.
1172   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1173       V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1174       isBaseOfObject(DecompGEP2.Base))
1175     return AliasResult::NoAlias;
1176 
1177   if (isa<GEPOperator>(V2)) {
1178     // Symmetric case to above.
1179     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1180         V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1181         isBaseOfObject(DecompGEP1.Base))
1182       return AliasResult::NoAlias;
1183   }
1184 
1185   // For GEPs with identical offsets, we can preserve the size and AAInfo
1186   // when performing the alias check on the underlying objects.
1187   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1188     return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
1189                                     MemoryLocation(DecompGEP2.Base, V2Size),
1190                                     AAQI);
1191 
1192   // Do the base pointers alias?
1193   AliasResult BaseAlias = getBestAAResults().alias(
1194       MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1195       MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1196 
1197   // If we get a No or May, then return it immediately, no amount of analysis
1198   // will improve this situation.
1199   if (BaseAlias != AliasResult::MustAlias) {
1200     assert(BaseAlias == AliasResult::NoAlias ||
1201            BaseAlias == AliasResult::MayAlias);
1202     return BaseAlias;
1203   }
1204 
1205   // If there is a constant difference between the pointers, but the difference
1206   // is less than the size of the associated memory object, then we know
1207   // that the objects are partially overlapping.  If the difference is
1208   // greater, we know they do not overlap.
1209   if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1210     APInt &Off = DecompGEP1.Offset;
1211 
1212     // Initialize for Off >= 0 (V2 <= GEP1) case.
1213     const Value *LeftPtr = V2;
1214     const Value *RightPtr = GEP1;
1215     LocationSize VLeftSize = V2Size;
1216     LocationSize VRightSize = V1Size;
1217     const bool Swapped = Off.isNegative();
1218 
1219     if (Swapped) {
1220       // Swap if we have the situation where:
1221       // +                +
1222       // | BaseOffset     |
1223       // ---------------->|
1224       // |-->V1Size       |-------> V2Size
1225       // GEP1             V2
1226       std::swap(LeftPtr, RightPtr);
1227       std::swap(VLeftSize, VRightSize);
1228       Off = -Off;
1229     }
1230 
1231     if (VLeftSize.hasValue()) {
1232       const uint64_t LSize = VLeftSize.getValue();
1233       if (Off.ult(LSize)) {
1234         // Conservatively drop processing if a phi was visited and/or offset is
1235         // too big.
1236         AliasResult AR = AliasResult::PartialAlias;
1237         if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1238             (Off + VRightSize.getValue()).ule(LSize)) {
1239           // Memory referenced by right pointer is nested. Save the offset in
1240           // cache. Note that originally offset estimated as GEP1-V2, but
1241           // AliasResult contains the shift that represents GEP1+Offset=V2.
1242           AR.setOffset(-Off.getSExtValue());
1243           AR.swap(Swapped);
1244         }
1245         return AR;
1246       }
1247       return AliasResult::NoAlias;
1248     }
1249   }
1250 
1251   if (!DecompGEP1.VarIndices.empty()) {
1252     APInt GCD;
1253     bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
1254     bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
1255     ConstantRange Range = ConstantRange(DecompGEP1.Offset);
1256     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1257       const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1258       const APInt &Scale = Index.Scale;
1259       APInt ScaleForGCD = Scale;
1260       if (!Index.IsNSW)
1261         ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
1262                                           Scale.countTrailingZeros());
1263 
1264       if (i == 0)
1265         GCD = ScaleForGCD.abs();
1266       else
1267         GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1268 
1269       if (AllNonNegative || AllNonPositive) {
1270         KnownBits Known = Index.Val.evaluateWith(
1271             computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT));
1272         bool SignKnownZero = Known.isNonNegative();
1273         bool SignKnownOne = Known.isNegative();
1274         AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1275                           (SignKnownOne && Scale.isNonPositive());
1276         AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1277                           (SignKnownOne && Scale.isNonNegative());
1278       }
1279 
1280       assert(Range.getBitWidth() == Scale.getBitWidth() &&
1281              "Bit widths are normalized to MaxPointerSize");
1282       Range = Range.add(Index.Val
1283                             .evaluateWith(computeConstantRange(
1284                                 Index.Val.V, true, &AC, Index.CxtI))
1285                             .sextOrTrunc(Range.getBitWidth())
1286                             .smul_fast(ConstantRange(Scale)));
1287     }
1288 
1289     // We now have accesses at two offsets from the same base:
1290     //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1291     //  2. 0 with size V2Size
1292     // Using arithmetic modulo GCD, the accesses are at
1293     // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1294     // into the range [V2Size..GCD), then we know they cannot overlap.
1295     APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1296     if (ModOffset.isNegative())
1297       ModOffset += GCD; // We want mod, not rem.
1298     if (V1Size.hasValue() && V2Size.hasValue() &&
1299         ModOffset.uge(V2Size.getValue()) &&
1300         (GCD - ModOffset).uge(V1Size.getValue()))
1301       return AliasResult::NoAlias;
1302 
1303     // If we know all the variables are non-negative, then the total offset is
1304     // also non-negative and >= DecompGEP1.Offset. We have the following layout:
1305     // [0, V2Size) ... [TotalOffset, TotalOffer+V1Size]
1306     // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
1307     if (AllNonNegative && V2Size.hasValue() &&
1308         DecompGEP1.Offset.uge(V2Size.getValue()))
1309       return AliasResult::NoAlias;
1310     // Similarly, if the variables are non-positive, then the total offset is
1311     // also non-positive and <= DecompGEP1.Offset. We have the following layout:
1312     // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1313     // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
1314     if (AllNonPositive && V1Size.hasValue() &&
1315         (-DecompGEP1.Offset).uge(V1Size.getValue()))
1316       return AliasResult::NoAlias;
1317 
1318     if (!Range.isEmptySet()) {
1319       // We know that Offset >= MinOffset.
1320       // (MinOffset >= V2Size) => (Offset >= V2Size) => NoAlias.
1321       if (V2Size.hasValue() && Range.getSignedMin().sge(V2Size.getValue()))
1322         return AliasResult::NoAlias;
1323 
1324       // We know that Offset <= MaxOffset.
1325       // (MaxOffset <= -V1Size) => (Offset <= -V1Size) => NoAlias.
1326       if (V1Size.hasValue() && Range.getSignedMax().sle(-V1Size.getValue()))
1327         return AliasResult::NoAlias;
1328     }
1329 
1330     if (V1Size.hasValue() && V2Size.hasValue()) {
1331       // Try to determine the range of values for VarIndex such that
1332       // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1333       Optional<APInt> MinAbsVarIndex;
1334       if (DecompGEP1.VarIndices.size() == 1) {
1335         // VarIndex = Scale*V.
1336         const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1337         if (Var.Val.TruncBits == 0 &&
1338             isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
1339           // If V != 0 then abs(VarIndex) >= abs(Scale).
1340           MinAbsVarIndex = Var.Scale.abs();
1341         }
1342       } else if (DecompGEP1.VarIndices.size() == 2) {
1343         // VarIndex = Scale*V0 + (-Scale)*V1.
1344         // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1345         // Check that VisitedPhiBBs is empty, to avoid reasoning about
1346         // inequality of values across loop iterations.
1347         const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1348         const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1349         if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
1350             Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
1351             isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1352                             DT))
1353           MinAbsVarIndex = Var0.Scale.abs();
1354       }
1355 
1356       if (MinAbsVarIndex) {
1357         // The constant offset will have added at least +/-MinAbsVarIndex to it.
1358         APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1359         APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1360         // We know that Offset <= OffsetLo || Offset >= OffsetHi
1361         if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1362             OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1363           return AliasResult::NoAlias;
1364       }
1365     }
1366 
1367     if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
1368       return AliasResult::NoAlias;
1369   }
1370 
1371   // Statically, we can see that the base objects are the same, but the
1372   // pointers have dynamic offsets which we can't resolve. And none of our
1373   // little tricks above worked.
1374   return AliasResult::MayAlias;
1375 }
1376 
1377 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1378   // If the results agree, take it.
1379   if (A == B)
1380     return A;
1381   // A mix of PartialAlias and MustAlias is PartialAlias.
1382   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1383       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1384     return AliasResult::PartialAlias;
1385   // Otherwise, we don't know anything.
1386   return AliasResult::MayAlias;
1387 }
1388 
1389 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1390 /// against another.
1391 AliasResult
1392 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1393                            const Value *V2, LocationSize V2Size,
1394                            AAQueryInfo &AAQI) {
1395   // If the values are Selects with the same condition, we can do a more precise
1396   // check: just check for aliases between the values on corresponding arms.
1397   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1398     if (SI->getCondition() == SI2->getCondition()) {
1399       AliasResult Alias = getBestAAResults().alias(
1400           MemoryLocation(SI->getTrueValue(), SISize),
1401           MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1402       if (Alias == AliasResult::MayAlias)
1403         return AliasResult::MayAlias;
1404       AliasResult ThisAlias = getBestAAResults().alias(
1405           MemoryLocation(SI->getFalseValue(), SISize),
1406           MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1407       return MergeAliasResults(ThisAlias, Alias);
1408     }
1409 
1410   // If both arms of the Select node NoAlias or MustAlias V2, then returns
1411   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1412   AliasResult Alias = getBestAAResults().alias(
1413       MemoryLocation(V2, V2Size),
1414       MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1415   if (Alias == AliasResult::MayAlias)
1416     return AliasResult::MayAlias;
1417 
1418   AliasResult ThisAlias = getBestAAResults().alias(
1419       MemoryLocation(V2, V2Size),
1420       MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1421   return MergeAliasResults(ThisAlias, Alias);
1422 }
1423 
1424 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1425 /// another.
1426 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1427                                     const Value *V2, LocationSize V2Size,
1428                                     AAQueryInfo &AAQI) {
1429   if (!PN->getNumIncomingValues())
1430     return AliasResult::NoAlias;
1431   // If the values are PHIs in the same block, we can do a more precise
1432   // as well as efficient check: just check for aliases between the values
1433   // on corresponding edges.
1434   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1435     if (PN2->getParent() == PN->getParent()) {
1436       Optional<AliasResult> Alias;
1437       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1438         AliasResult ThisAlias = getBestAAResults().alias(
1439             MemoryLocation(PN->getIncomingValue(i), PNSize),
1440             MemoryLocation(
1441                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1442             AAQI);
1443         if (Alias)
1444           *Alias = MergeAliasResults(*Alias, ThisAlias);
1445         else
1446           Alias = ThisAlias;
1447         if (*Alias == AliasResult::MayAlias)
1448           break;
1449       }
1450       return *Alias;
1451     }
1452 
1453   SmallVector<Value *, 4> V1Srcs;
1454   // If a phi operand recurses back to the phi, we can still determine NoAlias
1455   // if we don't alias the underlying objects of the other phi operands, as we
1456   // know that the recursive phi needs to be based on them in some way.
1457   bool isRecursive = false;
1458   auto CheckForRecPhi = [&](Value *PV) {
1459     if (!EnableRecPhiAnalysis)
1460       return false;
1461     if (getUnderlyingObject(PV) == PN) {
1462       isRecursive = true;
1463       return true;
1464     }
1465     return false;
1466   };
1467 
1468   if (PV) {
1469     // If we have PhiValues then use it to get the underlying phi values.
1470     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1471     // If we have more phi values than the search depth then return MayAlias
1472     // conservatively to avoid compile time explosion. The worst possible case
1473     // is if both sides are PHI nodes. In which case, this is O(m x n) time
1474     // where 'm' and 'n' are the number of PHI sources.
1475     if (PhiValueSet.size() > MaxLookupSearchDepth)
1476       return AliasResult::MayAlias;
1477     // Add the values to V1Srcs
1478     for (Value *PV1 : PhiValueSet) {
1479       if (CheckForRecPhi(PV1))
1480         continue;
1481       V1Srcs.push_back(PV1);
1482     }
1483   } else {
1484     // If we don't have PhiInfo then just look at the operands of the phi itself
1485     // FIXME: Remove this once we can guarantee that we have PhiInfo always
1486     SmallPtrSet<Value *, 4> UniqueSrc;
1487     Value *OnePhi = nullptr;
1488     for (Value *PV1 : PN->incoming_values()) {
1489       if (isa<PHINode>(PV1)) {
1490         if (OnePhi && OnePhi != PV1) {
1491           // To control potential compile time explosion, we choose to be
1492           // conserviate when we have more than one Phi input.  It is important
1493           // that we handle the single phi case as that lets us handle LCSSA
1494           // phi nodes and (combined with the recursive phi handling) simple
1495           // pointer induction variable patterns.
1496           return AliasResult::MayAlias;
1497         }
1498         OnePhi = PV1;
1499       }
1500 
1501       if (CheckForRecPhi(PV1))
1502         continue;
1503 
1504       if (UniqueSrc.insert(PV1).second)
1505         V1Srcs.push_back(PV1);
1506     }
1507 
1508     if (OnePhi && UniqueSrc.size() > 1)
1509       // Out of an abundance of caution, allow only the trivial lcssa and
1510       // recursive phi cases.
1511       return AliasResult::MayAlias;
1512   }
1513 
1514   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1515   // value. This should only be possible in blocks unreachable from the entry
1516   // block, but return MayAlias just in case.
1517   if (V1Srcs.empty())
1518     return AliasResult::MayAlias;
1519 
1520   // If this PHI node is recursive, indicate that the pointer may be moved
1521   // across iterations. We can only prove NoAlias if different underlying
1522   // objects are involved.
1523   if (isRecursive)
1524     PNSize = LocationSize::beforeOrAfterPointer();
1525 
1526   // In the recursive alias queries below, we may compare values from two
1527   // different loop iterations. Keep track of visited phi blocks, which will
1528   // be used when determining value equivalence.
1529   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1530   auto _ = make_scope_exit([&]() {
1531     if (BlockInserted)
1532       VisitedPhiBBs.erase(PN->getParent());
1533   });
1534 
1535   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1536   // have been cached earlier may no longer be valid. Perform recursive queries
1537   // with a new AAQueryInfo.
1538   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1539   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1540 
1541   AliasResult Alias = getBestAAResults().alias(
1542       MemoryLocation(V2, V2Size),
1543       MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1544 
1545   // Early exit if the check of the first PHI source against V2 is MayAlias.
1546   // Other results are not possible.
1547   if (Alias == AliasResult::MayAlias)
1548     return AliasResult::MayAlias;
1549   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1550   // remain valid to all elements and needs to conservatively return MayAlias.
1551   if (isRecursive && Alias != AliasResult::NoAlias)
1552     return AliasResult::MayAlias;
1553 
1554   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1555   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1556   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1557     Value *V = V1Srcs[i];
1558 
1559     AliasResult ThisAlias = getBestAAResults().alias(
1560         MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1561     Alias = MergeAliasResults(ThisAlias, Alias);
1562     if (Alias == AliasResult::MayAlias)
1563       break;
1564   }
1565 
1566   return Alias;
1567 }
1568 
1569 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1570 /// array references.
1571 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1572                                       const Value *V2, LocationSize V2Size,
1573                                       AAQueryInfo &AAQI) {
1574   // If either of the memory references is empty, it doesn't matter what the
1575   // pointer values are.
1576   if (V1Size.isZero() || V2Size.isZero())
1577     return AliasResult::NoAlias;
1578 
1579   // Strip off any casts if they exist.
1580   V1 = V1->stripPointerCastsForAliasAnalysis();
1581   V2 = V2->stripPointerCastsForAliasAnalysis();
1582 
1583   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1584   // value for undef that aliases nothing in the program.
1585   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1586     return AliasResult::NoAlias;
1587 
1588   // Are we checking for alias of the same value?
1589   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1590   // different iterations. We must therefore make sure that this is not the
1591   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1592   // happen by looking at the visited phi nodes and making sure they cannot
1593   // reach the value.
1594   if (isValueEqualInPotentialCycles(V1, V2))
1595     return AliasResult::MustAlias;
1596 
1597   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1598     return AliasResult::NoAlias; // Scalars cannot alias each other
1599 
1600   // Figure out what objects these things are pointing to if we can.
1601   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1602   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1603 
1604   // Null values in the default address space don't point to any object, so they
1605   // don't alias any other pointer.
1606   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1607     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1608       return AliasResult::NoAlias;
1609   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1610     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1611       return AliasResult::NoAlias;
1612 
1613   if (O1 != O2) {
1614     // If V1/V2 point to two different objects, we know that we have no alias.
1615     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1616       return AliasResult::NoAlias;
1617 
1618     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1619     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1620         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1621       return AliasResult::NoAlias;
1622 
1623     // Function arguments can't alias with things that are known to be
1624     // unambigously identified at the function level.
1625     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1626         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1627       return AliasResult::NoAlias;
1628 
1629     // If one pointer is the result of a call/invoke or load and the other is a
1630     // non-escaping local object within the same function, then we know the
1631     // object couldn't escape to a point where the call could return it.
1632     //
1633     // Note that if the pointers are in different functions, there are a
1634     // variety of complications. A call with a nocapture argument may still
1635     // temporary store the nocapture argument's value in a temporary memory
1636     // location if that memory location doesn't escape. Or it may pass a
1637     // nocapture value to other functions as long as they don't capture it.
1638     if (isEscapeSource(O1) &&
1639         AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1640       return AliasResult::NoAlias;
1641     if (isEscapeSource(O2) &&
1642         AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1643       return AliasResult::NoAlias;
1644   }
1645 
1646   // If the size of one access is larger than the entire object on the other
1647   // side, then we know such behavior is undefined and can assume no alias.
1648   bool NullIsValidLocation = NullPointerIsDefined(&F);
1649   if ((isObjectSmallerThan(
1650           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1651           TLI, NullIsValidLocation)) ||
1652       (isObjectSmallerThan(
1653           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1654           TLI, NullIsValidLocation)))
1655     return AliasResult::NoAlias;
1656 
1657   // If one the accesses may be before the accessed pointer, canonicalize this
1658   // by using unknown after-pointer sizes for both accesses. This is
1659   // equivalent, because regardless of which pointer is lower, one of them
1660   // will always came after the other, as long as the underlying objects aren't
1661   // disjoint. We do this so that the rest of BasicAA does not have to deal
1662   // with accesses before the base pointer, and to improve cache utilization by
1663   // merging equivalent states.
1664   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1665     V1Size = LocationSize::afterPointer();
1666     V2Size = LocationSize::afterPointer();
1667   }
1668 
1669   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1670   // for recursive queries. For this reason, this limit is chosen to be large
1671   // enough to be very rarely hit, while still being small enough to avoid
1672   // stack overflows.
1673   if (AAQI.Depth >= 512)
1674     return AliasResult::MayAlias;
1675 
1676   // Check the cache before climbing up use-def chains. This also terminates
1677   // otherwise infinitely recursive queries.
1678   AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1679   const bool Swapped = V1 > V2;
1680   if (Swapped)
1681     std::swap(Locs.first, Locs.second);
1682   const auto &Pair = AAQI.AliasCache.try_emplace(
1683       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1684   if (!Pair.second) {
1685     auto &Entry = Pair.first->second;
1686     if (!Entry.isDefinitive()) {
1687       // Remember that we used an assumption.
1688       ++Entry.NumAssumptionUses;
1689       ++AAQI.NumAssumptionUses;
1690     }
1691     // Cache contains sorted {V1,V2} pairs but we should return original order.
1692     auto Result = Entry.Result;
1693     Result.swap(Swapped);
1694     return Result;
1695   }
1696 
1697   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1698   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1699   AliasResult Result =
1700       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1701 
1702   auto It = AAQI.AliasCache.find(Locs);
1703   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1704   auto &Entry = It->second;
1705 
1706   // Check whether a NoAlias assumption has been used, but disproven.
1707   bool AssumptionDisproven =
1708       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1709   if (AssumptionDisproven)
1710     Result = AliasResult::MayAlias;
1711 
1712   // This is a definitive result now, when considered as a root query.
1713   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1714   Entry.Result = Result;
1715   // Cache contains sorted {V1,V2} pairs.
1716   Entry.Result.swap(Swapped);
1717   Entry.NumAssumptionUses = -1;
1718 
1719   // If the assumption has been disproven, remove any results that may have
1720   // been based on this assumption. Do this after the Entry updates above to
1721   // avoid iterator invalidation.
1722   if (AssumptionDisproven)
1723     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1724       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1725 
1726   // The result may still be based on assumptions higher up in the chain.
1727   // Remember it, so it can be purged from the cache later.
1728   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1729       Result != AliasResult::MayAlias)
1730     AAQI.AssumptionBasedResults.push_back(Locs);
1731   return Result;
1732 }
1733 
1734 AliasResult BasicAAResult::aliasCheckRecursive(
1735     const Value *V1, LocationSize V1Size,
1736     const Value *V2, LocationSize V2Size,
1737     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1738   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1739     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1740     if (Result != AliasResult::MayAlias)
1741       return Result;
1742   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1743     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1744     if (Result != AliasResult::MayAlias)
1745       return Result;
1746   }
1747 
1748   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1749     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1750     if (Result != AliasResult::MayAlias)
1751       return Result;
1752   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1753     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1754     if (Result != AliasResult::MayAlias)
1755       return Result;
1756   }
1757 
1758   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1759     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1760     if (Result != AliasResult::MayAlias)
1761       return Result;
1762   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1763     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1764     if (Result != AliasResult::MayAlias)
1765       return Result;
1766   }
1767 
1768   // If both pointers are pointing into the same object and one of them
1769   // accesses the entire object, then the accesses must overlap in some way.
1770   if (O1 == O2) {
1771     bool NullIsValidLocation = NullPointerIsDefined(&F);
1772     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1773         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1774          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1775       return AliasResult::PartialAlias;
1776   }
1777 
1778   return AliasResult::MayAlias;
1779 }
1780 
1781 /// Check whether two Values can be considered equivalent.
1782 ///
1783 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1784 /// they can not be part of a cycle in the value graph by looking at all
1785 /// visited phi nodes an making sure that the phis cannot reach the value. We
1786 /// have to do this because we are looking through phi nodes (That is we say
1787 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1788 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1789                                                   const Value *V2) {
1790   if (V != V2)
1791     return false;
1792 
1793   const Instruction *Inst = dyn_cast<Instruction>(V);
1794   if (!Inst)
1795     return true;
1796 
1797   if (VisitedPhiBBs.empty())
1798     return true;
1799 
1800   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1801     return false;
1802 
1803   // Make sure that the visited phis cannot reach the Value. This ensures that
1804   // the Values cannot come from different iterations of a potential cycle the
1805   // phi nodes could be involved in.
1806   for (auto *P : VisitedPhiBBs)
1807     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1808       return false;
1809 
1810   return true;
1811 }
1812 
1813 /// Computes the symbolic difference between two de-composed GEPs.
1814 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1815                                            const DecomposedGEP &SrcGEP) {
1816   DestGEP.Offset -= SrcGEP.Offset;
1817   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1818     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1819     // than a few variable indexes.
1820     bool Found = false;
1821     for (auto I : enumerate(DestGEP.VarIndices)) {
1822       VariableGEPIndex &Dest = I.value();
1823       if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
1824           !Dest.Val.hasSameCastsAs(Src.Val))
1825         continue;
1826 
1827       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1828       // goes to zero, remove the entry.
1829       if (Dest.Scale != Src.Scale) {
1830         Dest.Scale -= Src.Scale;
1831         Dest.IsNSW = false;
1832       } else {
1833         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1834       }
1835       Found = true;
1836       break;
1837     }
1838 
1839     // If we didn't consume this entry, add it to the end of the Dest list.
1840     if (!Found) {
1841       VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
1842       DestGEP.VarIndices.push_back(Entry);
1843     }
1844   }
1845 }
1846 
1847 bool BasicAAResult::constantOffsetHeuristic(
1848     const DecomposedGEP &GEP, LocationSize MaybeV1Size,
1849     LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
1850   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1851       !MaybeV2Size.hasValue())
1852     return false;
1853 
1854   const uint64_t V1Size = MaybeV1Size.getValue();
1855   const uint64_t V2Size = MaybeV2Size.getValue();
1856 
1857   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1858 
1859   if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1860       Var0.Scale != -Var1.Scale ||
1861       Var0.Val.V->getType() != Var1.Val.V->getType())
1862     return false;
1863 
1864   // We'll strip off the Extensions of Var0 and Var1 and do another round
1865   // of GetLinearExpression decomposition. In the example above, if Var0
1866   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1867 
1868   LinearExpression E0 =
1869       GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1870   LinearExpression E1 =
1871       GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1872   if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1873       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1874     return false;
1875 
1876   // We have a hit - Var0 and Var1 only differ by a constant offset!
1877 
1878   // If we've been sext'ed then zext'd the maximum difference between Var0 and
1879   // Var1 is possible to calculate, but we're just interested in the absolute
1880   // minimum difference between the two. The minimum distance may occur due to
1881   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1882   // the minimum distance between %i and %i + 5 is 3.
1883   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1884   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1885   APInt MinDiffBytes =
1886     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1887 
1888   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1889   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1890   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1891   // V2Size can fit in the MinDiffBytes gap.
1892   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1893          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1894 }
1895 
1896 //===----------------------------------------------------------------------===//
1897 // BasicAliasAnalysis Pass
1898 //===----------------------------------------------------------------------===//
1899 
1900 AnalysisKey BasicAA::Key;
1901 
1902 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1903   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1904   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1905   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1906   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1907   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1908 }
1909 
1910 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1911   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1912 }
1913 
1914 char BasicAAWrapperPass::ID = 0;
1915 
1916 void BasicAAWrapperPass::anchor() {}
1917 
1918 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1919                       "Basic Alias Analysis (stateless AA impl)", true, true)
1920 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1921 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1922 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1923 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1924 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1925                     "Basic Alias Analysis (stateless AA impl)", true, true)
1926 
1927 FunctionPass *llvm::createBasicAAWrapperPass() {
1928   return new BasicAAWrapperPass();
1929 }
1930 
1931 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1932   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1933   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1934   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1935   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1936 
1937   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1938                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1939                                  &DTWP.getDomTree(),
1940                                  PVWP ? &PVWP->getResult() : nullptr));
1941 
1942   return false;
1943 }
1944 
1945 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1946   AU.setPreservesAll();
1947   AU.addRequiredTransitive<AssumptionCacheTracker>();
1948   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1949   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1950   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1951 }
1952 
1953 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1954   return BasicAAResult(
1955       F.getParent()->getDataLayout(), F,
1956       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1957       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1958 }
1959