1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/LoopInfo.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Analysis/PhiValues.h"
30 #include "llvm/Analysis/TargetLibraryInfo.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/IR/Argument.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/Constant.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GetElementPtrTypeIterator.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Operator.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/User.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/InitializePasses.h"
54 #include "llvm/Pass.h"
55 #include "llvm/Support/Casting.h"
56 #include "llvm/Support/CommandLine.h"
57 #include "llvm/Support/Compiler.h"
58 #include "llvm/Support/KnownBits.h"
59 #include <cassert>
60 #include <cstdint>
61 #include <cstdlib>
62 #include <utility>
63 
64 #define DEBUG_TYPE "basicaa"
65 
66 using namespace llvm;
67 
68 /// Enable analysis of recursive PHI nodes.
69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
70                                           cl::init(true));
71 
/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This allows us to more aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C), which are
/// common enough to worry about.
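///
/// As an illustrative (not authoritative) example: with 32-bit pointers, an
/// index expression computed in i64 such as
///   %idx = mul nsw i64 %i, 24
///   %gep = getelementptr inbounds i8, i8* %base, i64 %idx
/// can be decomposed in 64-bit arithmetic and is only truncated back to the
/// pointer width at the end (see adjustToPointerSize()).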
76 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
77                                         cl::Hidden, cl::init(true));
78 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
79                                     cl::Hidden, cl::init(false));
80 
/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. This affects the precision of basic alias analysis.
84 STATISTIC(SearchLimitReached, "Number of times the limit to "
85                               "decompose GEPs is reached");
86 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
87 
88 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
89 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
90 /// careful with value equivalence. We use reachability to make sure a value
91 /// cannot be involved in a cycle.
92 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
93 
// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(). Both functions need to use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
97 static const unsigned MaxLookupSearchDepth = 6;
98 
99 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
100                                FunctionAnalysisManager::Invalidator &Inv) {
101   // We don't care if this analysis itself is preserved, it has no state. But
102   // we need to check that the analyses it depends on have been. Note that we
103   // may be created without handles to some analyses and in that case don't
104   // depend on them.
105   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
106       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
107       (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
108       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
109     return true;
110 
111   // Otherwise this analysis result remains valid.
112   return false;
113 }
114 
115 //===----------------------------------------------------------------------===//
116 // Useful predicates
117 //===----------------------------------------------------------------------===//
118 
119 /// Returns true if the pointer is one which would have been considered an
120 /// escape by isNonEscapingLocalObject.
121 static bool isEscapeSource(const Value *V) {
122   if (isa<CallBase>(V))
123     return true;
124 
125   if (isa<Argument>(V))
126     return true;
127 
128   // The load case works because isNonEscapingLocalObject considers all
129   // stores to be escapes (it passes true for the StoreCaptures argument
130   // to PointerMayBeCaptured).
131   if (isa<LoadInst>(V))
132     return true;
133 
134   return false;
135 }
136 
137 /// Returns the size of the object specified by V or UnknownSize if unknown.
138 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
139                               const TargetLibraryInfo &TLI,
140                               bool NullIsValidLoc,
141                               bool RoundToAlign = false) {
142   uint64_t Size;
143   ObjectSizeOpts Opts;
144   Opts.RoundToAlign = RoundToAlign;
145   Opts.NullIsUnknownSize = NullIsValidLoc;
146   if (getObjectSize(V, Size, DL, &TLI, Opts))
147     return Size;
148   return MemoryLocation::UnknownSize;
149 }
150 
151 /// Returns true if we can prove that the object specified by V is smaller than
152 /// Size.
153 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
154                                 const DataLayout &DL,
155                                 const TargetLibraryInfo &TLI,
156                                 bool NullIsValidLoc) {
157   // Note that the meanings of the "object" are slightly different in the
158   // following contexts:
159   //    c1: llvm::getObjectSize()
160   //    c2: llvm.objectsize() intrinsic
161   //    c3: isObjectSmallerThan()
162   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
163   // refers to the "entire object".
164   //
165   //  Consider this example:
166   //     char *p = (char*)malloc(100)
167   //     char *q = p+80;
168   //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". If q is passed to isObjectSmallerThan() as the 1st
  // parameter, then before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //    - either rewind the pointer q to the base address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the second option for simplicity.
183   if (!isIdentifiedObject(V))
184     return false;
185 
186   // This function needs to use the aligned object size because we allow
187   // reads a bit past the end given sufficient alignment.
188   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
189                                       /*RoundToAlign*/ true);
190 
191   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
192 }
193 
194 /// Return the minimal extent from \p V to the end of the underlying object,
195 /// assuming the result is used in an aliasing query. E.g., we do use the query
196 /// location size and the fact that null pointers cannot alias here.
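///
/// For example (illustrative): for a dereferenceable(16) argument queried
/// with a precise 4-byte location size, the result is max(16, 4) = 16 bytes;
/// for a dereferenceable_or_null(16) argument with null being a valid
/// location, the dereferenceability is ignored and only the 4 accessed bytes
/// remain.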
197 static uint64_t getMinimalExtentFrom(const Value &V,
198                                      const LocationSize &LocSize,
199                                      const DataLayout &DL,
200                                      bool NullIsValidLoc) {
201   // If we have dereferenceability information we know a lower bound for the
202   // extent as accesses for a lower offset would be valid. We need to exclude
203   // the "or null" part if null is a valid pointer.
204   bool CanBeNull;
205   uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
206   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, and thus valid.
209   if (LocSize.isPrecise())
210     DerefBytes = std::max(DerefBytes, LocSize.getValue());
211   return DerefBytes;
212 }
213 
214 /// Returns true if we can prove that the object specified by V has size Size.
215 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
216                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
217   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
218   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
219 }
220 
221 //===----------------------------------------------------------------------===//
222 // GetElementPtr Instruction Decomposition and Analysis
223 //===----------------------------------------------------------------------===//
224 
225 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
226 /// B are constant integers.
227 ///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether we looked through any sign or zero extends.  The incoming
230 /// Value is known to have IntegerType, and it may already be sign or zero
231 /// extended.
232 ///
233 /// Note that this looks through extends, so the high bits may not be
234 /// represented in the result.
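///
/// For example (illustrative IR):
///   %a = shl i32 %x, 2
///   %b = add i32 %a, 4
/// analyzed with V = %b yields V = %x, Scale = 4, Offset = 4.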
235 /*static*/ const Value *BasicAAResult::GetLinearExpression(
236     const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
237     unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
238     AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
239   assert(V->getType()->isIntegerTy() && "Not an integer value");
240 
241   // Limit our recursion depth.
242   if (Depth == 6) {
243     Scale = 1;
244     Offset = 0;
245     return V;
246   }
247 
248   if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
249     // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset is always as wide as in the outermost
    // call), so we'll zext here and process any extension in the
    // isa<SExtInst> & isa<ZExtInst> cases below.
254     Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
255     assert(Scale == 0 && "Constant values don't have a scale");
256     return V;
257   }
258 
259   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
260     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
261       // If we've been called recursively, then Offset and Scale will be wider
262       // than the BOp operands. We'll always zext it here as we'll process sign
263       // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
264       APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());
265 
266       switch (BOp->getOpcode()) {
267       default:
268         // We don't understand this instruction, so we can't decompose it any
269         // further.
270         Scale = 1;
271         Offset = 0;
272         return V;
273       case Instruction::Or:
274         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
275         // analyze it.
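        // For example (illustratively), (%x | 3) == %x + 3 when the low two
        // bits of %x are known to be zero.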
276         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
277                                BOp, DT)) {
278           Scale = 1;
279           Offset = 0;
280           return V;
281         }
282         LLVM_FALLTHROUGH;
283       case Instruction::Add:
284         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
285                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
286         Offset += RHS;
287         break;
288       case Instruction::Sub:
289         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
290                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
291         Offset -= RHS;
292         break;
293       case Instruction::Mul:
294         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
295                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
296         Offset *= RHS;
297         Scale *= RHS;
298         break;
299       case Instruction::Shl:
300         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
301                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
302 
303         // We're trying to linearize an expression of the kind:
304         //   shl i8 -128, 36
305         // where the shift count exceeds the bitwidth of the type.
306         // We can't decompose this further (the expression would return
307         // a poison value).
308         if (Offset.getBitWidth() < RHS.getLimitedValue() ||
309             Scale.getBitWidth() < RHS.getLimitedValue()) {
310           Scale = 1;
311           Offset = 0;
312           return V;
313         }
314 
315         Offset <<= RHS.getLimitedValue();
316         Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
319         NSW = NUW = false;
320         return V;
321       }
322 
323       if (isa<OverflowingBinaryOperator>(BOp)) {
324         NUW &= BOp->hasNoUnsignedWrap();
325         NSW &= BOp->hasNoSignedWrap();
326       }
327       return V;
328     }
329   }
330 
331   // Since GEP indices are sign extended anyway, we don't care about the high
332   // bits of a sign or zero extended value - just scales and offsets.  The
333   // extensions have to be consistent though.
334   if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
335     Value *CastOp = cast<CastInst>(V)->getOperand(0);
336     unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
337     unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
338     unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
339     const Value *Result =
340         GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
341                             Depth + 1, AC, DT, NSW, NUW);
342 
343     // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
344     // by just incrementing the number of bits we've extended by.
345     unsigned ExtendedBy = NewWidth - SmallWidth;
346 
347     if (isa<SExtInst>(V) && ZExtBits == 0) {
348       // sext(sext(%x, a), b) == sext(%x, a + b)
349 
350       if (NSW) {
351         // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
352         // into sext(%x) + sext(c). We'll sext the Offset ourselves:
353         unsigned OldWidth = Offset.getBitWidth();
354         Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
355       } else {
356         // We may have signed-wrapped, so don't decompose sext(%x + c) into
357         // sext(%x) + sext(c)
358         Scale = 1;
359         Offset = 0;
360         Result = CastOp;
361         ZExtBits = OldZExtBits;
362         SExtBits = OldSExtBits;
363       }
364       SExtBits += ExtendedBy;
365     } else {
366       // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)
367 
368       if (!NUW) {
369         // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
370         // zext(%x) + zext(c)
371         Scale = 1;
372         Offset = 0;
373         Result = CastOp;
374         ZExtBits = OldZExtBits;
375         SExtBits = OldSExtBits;
376       }
377       ZExtBits += ExtendedBy;
378     }
379 
380     return Result;
381   }
382 
383   Scale = 1;
384   Offset = 0;
385   return V;
386 }
387 
/// Sign-extend the lower PointerSize bits of \p Offset, to ensure a pointer
/// offset fits in an integer of size PointerSize (in bits) when that size is
/// smaller than the maximum pointer size. This is an issue, in particular,
/// for 32b pointers with negative indices that rely on two's complement
/// wrap-arounds for precise alias information where the maximum pointer size
/// is 64b.
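///
/// For example (illustrative): with a 64-bit calculation width and a 32-bit
/// pointer, an accumulated offset of 0x00000000FFFFFFFF is shifted left by 32
/// and arithmetically shifted back, yielding -1, i.e., the offset is
/// reinterpreted as the signed 32-bit value it represents.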
393 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
394   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
395   unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
396   return (Offset << ShiftBits).ashr(ShiftBits);
397 }
398 
399 static unsigned getMaxPointerSize(const DataLayout &DL) {
400   unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
401   if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
402   if (DoubleCalcBits) MaxPointerSize *= 2;
403 
404   return MaxPointerSize;
405 }
406 
407 /// If V is a symbolic pointer expression, decompose it into a base pointer
408 /// with a constant offset and a number of scaled symbolic offsets.
409 ///
410 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
411 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
412 /// specified amount, but which may have other unrepresented high bits. As
413 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
414 ///
415 /// This function is capable of analyzing everything that getUnderlyingObject
416 /// can look through. To be able to do that getUnderlyingObject and
417 /// DecomposeGEPExpression must use the same search depth
418 /// (MaxLookupSearchDepth).
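///
/// For example (illustrative IR):
///   %gep = getelementptr { i32, [4 x i16] }, { i32, [4 x i16] }* %base,
///                        i64 1, i32 1, i64 %i
/// decomposes into Base = %base, a constant Offset of 12 + 4 = 16 bytes, and
/// a single variable index {%i, Scale = 2}.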
419 bool BasicAAResult::DecomposeGEPExpression(const Value *V,
420        DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
421        DominatorTree *DT) {
422   // Limit recursion depth to limit compile time in crazy cases.
423   unsigned MaxLookup = MaxLookupSearchDepth;
424   SearchTimes++;
425 
426   unsigned MaxPointerSize = getMaxPointerSize(DL);
427   Decomposed.VarIndices.clear();
428   do {
429     // See if this is a bitcast or GEP.
430     const Operator *Op = dyn_cast<Operator>(V);
431     if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
433       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
434         if (!GA->isInterposable()) {
435           V = GA->getAliasee();
436           continue;
437         }
438       }
439       Decomposed.Base = V;
440       return false;
441     }
442 
443     if (Op->getOpcode() == Instruction::BitCast ||
444         Op->getOpcode() == Instruction::AddrSpaceCast) {
445       V = Op->getOperand(0);
446       continue;
447     }
448 
449     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
450     if (!GEPOp) {
451       if (const auto *PHI = dyn_cast<PHINode>(V)) {
452         // Look through single-arg phi nodes created by LCSSA.
453         if (PHI->getNumIncomingValues() == 1) {
454           V = PHI->getIncomingValue(0);
455           continue;
456         }
457       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics, such as launder.invariant.group, that can't be expressed
        // with attributes but that return an aliasing pointer. Because some
        // analyses may assume that a nocapture pointer is not returned from
        // such special intrinsics (the function would otherwise have to be
        // marked with the returned attribute), it is crucial to use this
        // helper so that we stay in sync with CaptureTracking. Not using it
        // may cause weird miscompilations where two aliasing pointers are
        // assumed to be noalias.
467         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
468           V = RP;
469           continue;
470         }
471       }
472 
473       Decomposed.Base = V;
474       return false;
475     }
476 
477     // Don't attempt to analyze GEPs over unsized objects.
478     if (!GEPOp->getSourceElementType()->isSized()) {
479       Decomposed.Base = V;
480       return false;
481     }
482 
483     // Don't attempt to analyze GEPs if index scale is not a compile-time
484     // constant.
485     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
486       Decomposed.Base = V;
487       Decomposed.HasCompileTimeConstantScale = false;
488       return false;
489     }
490 
491     unsigned AS = GEPOp->getPointerAddressSpace();
492     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
493     gep_type_iterator GTI = gep_type_begin(GEPOp);
494     unsigned PointerSize = DL.getPointerSizeInBits(AS);
495     // Assume all GEP operands are constants until proven otherwise.
496     bool GepHasConstantOffset = true;
497     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
498          I != E; ++I, ++GTI) {
499       const Value *Index = *I;
500       // Compute the (potentially symbolic) offset in bytes for this index.
501       if (StructType *STy = GTI.getStructTypeOrNull()) {
502         // For a struct, add the member offset.
503         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
504         if (FieldNo == 0)
505           continue;
506 
507         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
508         continue;
509       }
510 
511       // For an array/pointer, add the element offset, explicitly scaled.
512       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
513         if (CIdx->isZero())
514           continue;
515         Decomposed.Offset +=
516             (DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
517              CIdx->getValue().sextOrSelf(MaxPointerSize))
518                 .sextOrTrunc(MaxPointerSize);
519         continue;
520       }
521 
522       GepHasConstantOffset = false;
523 
524       APInt Scale(MaxPointerSize,
525                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
526       unsigned ZExtBits = 0, SExtBits = 0;
527 
528       // If the integer type is smaller than the pointer size, it is implicitly
529       // sign extended to pointer size.
530       unsigned Width = Index->getType()->getIntegerBitWidth();
531       if (PointerSize > Width)
532         SExtBits += PointerSize - Width;
533 
534       // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
535       APInt IndexScale(Width, 0), IndexOffset(Width, 0);
536       bool NSW = true, NUW = true;
537       const Value *OrigIndex = Index;
538       Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
539                                   SExtBits, DL, 0, AC, DT, NSW, NUW);
540 
541       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
542       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
543 
      // It can be the case that, even though C1*V+C2 does not overflow for
545       // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
546       // decompose the expression in this way.
547       //
548       // FIXME: C1*Scale and the other operations in the decomposed
549       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
550       // possibility.
551       bool Overflow;
552       APInt ScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize)
553                            .smul_ov(Scale, Overflow);
554       if (Overflow) {
555         Index = OrigIndex;
556         IndexScale = 1;
557         IndexOffset = 0;
558 
559         ZExtBits = SExtBits = 0;
560         if (PointerSize > Width)
561           SExtBits += PointerSize - Width;
562       } else {
563         Decomposed.Offset += ScaledOffset;
564         Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
565       }
566 
567       // If we already had an occurrence of this index variable, merge this
568       // scale into it.  For example, we want to handle:
569       //   A[x][x] -> x*16 + x*4 -> x*20
570       // This also ensures that 'x' only appears in the index list once.
571       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
572         if (Decomposed.VarIndices[i].V == Index &&
573             Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
574             Decomposed.VarIndices[i].SExtBits == SExtBits) {
575           Scale += Decomposed.VarIndices[i].Scale;
576           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
577           break;
578         }
579       }
580 
581       // Make sure that we have a scale that makes sense for this target's
582       // pointer size.
583       Scale = adjustToPointerSize(Scale, PointerSize);
584 
585       if (!!Scale) {
586         VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
587         Decomposed.VarIndices.push_back(Entry);
588       }
589     }
590 
591     // Take care of wrap-arounds
592     if (GepHasConstantOffset)
593       Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
594 
595     // Analyze the base pointer next.
596     V = GEPOp->getOperand(0);
597   } while (--MaxLookup);
598 
599   // If the chain of expressions is too deep, just return early.
600   Decomposed.Base = V;
601   SearchLimitReached++;
602   return true;
603 }
604 
605 /// Returns whether the given pointer value points to memory that is local to
606 /// the function, with global constants being considered local to all
607 /// functions.
608 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
609                                            AAQueryInfo &AAQI, bool OrLocal) {
610   assert(Visited.empty() && "Visited must be cleared after use!");
611 
612   unsigned MaxLookup = 8;
613   SmallVector<const Value *, 16> Worklist;
614   Worklist.push_back(Loc.Ptr);
615   do {
616     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
617     if (!Visited.insert(V).second) {
618       Visited.clear();
619       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
620     }
621 
622     // An alloca instruction defines local memory.
623     if (OrLocal && isa<AllocaInst>(V))
624       continue;
625 
626     // A global constant counts as local memory for our purposes.
627     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
628       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
629       // global to be marked constant in some modules and non-constant in
630       // others.  GV may even be a declaration, not a definition.
631       if (!GV->isConstant()) {
632         Visited.clear();
633         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
634       }
635       continue;
636     }
637 
638     // If both select values point to local memory, then so does the select.
639     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
640       Worklist.push_back(SI->getTrueValue());
641       Worklist.push_back(SI->getFalseValue());
642       continue;
643     }
644 
645     // If all values incoming to a phi node point to local memory, then so does
646     // the phi.
647     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
648       // Don't bother inspecting phi nodes with many operands.
649       if (PN->getNumIncomingValues() > MaxLookup) {
650         Visited.clear();
651         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
652       }
653       for (Value *IncValue : PN->incoming_values())
654         Worklist.push_back(IncValue);
655       continue;
656     }
657 
658     // Otherwise be conservative.
659     Visited.clear();
660     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
661   } while (!Worklist.empty() && --MaxLookup);
662 
663   Visited.clear();
664   return Worklist.empty();
665 }
666 
667 /// Returns the behavior when calling the given call site.
668 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
669   if (Call->doesNotAccessMemory())
670     // Can't do better than this.
671     return FMRB_DoesNotAccessMemory;
672 
673   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
674 
675   // If the callsite knows it only reads memory, don't return worse
676   // than that.
677   if (Call->onlyReadsMemory())
678     Min = FMRB_OnlyReadsMemory;
679   else if (Call->doesNotReadMemory())
680     Min = FMRB_OnlyWritesMemory;
681 
682   if (Call->onlyAccessesArgMemory())
683     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
684   else if (Call->onlyAccessesInaccessibleMemory())
685     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
686   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
687     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
688 
689   // If the call has operand bundles then aliasing attributes from the function
690   // it calls do not directly apply to the call.  This can be made more precise
691   // in the future.
692   if (!Call->hasOperandBundles())
693     if (const Function *F = Call->getCalledFunction())
694       Min =
695           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
696 
697   return Min;
698 }
699 
700 /// Returns the behavior when calling the given function. For use when the call
701 /// site is not known.
702 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
703   // If the function declares it doesn't access memory, we can't do better.
704   if (F->doesNotAccessMemory())
705     return FMRB_DoesNotAccessMemory;
706 
707   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
708 
709   // If the function declares it only reads memory, go with that.
710   if (F->onlyReadsMemory())
711     Min = FMRB_OnlyReadsMemory;
712   else if (F->doesNotReadMemory())
713     Min = FMRB_OnlyWritesMemory;
714 
715   if (F->onlyAccessesArgMemory())
716     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
717   else if (F->onlyAccessesInaccessibleMemory())
718     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
719   else if (F->onlyAccessesInaccessibleMemOrArgMem())
720     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
721 
722   return Min;
723 }
724 
/// Returns true if this is a writeonly (i.e., Mod only) parameter.
726 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
727                              const TargetLibraryInfo &TLI) {
728   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
729     return true;
730 
731   // We can bound the aliasing properties of memset_pattern16 just as we can
732   // for memcpy/memset.  This is particularly important because the
733   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
734   // whenever possible.
  // FIXME Consider handling this in InferFunctionAttrs.cpp together with
  // other attributes.
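  //
  // For reference, the Darwin libc declaration is (roughly):
  //   void memset_pattern16(void *b, const void *pattern16, size_t len);
  // so only the first argument (ArgIdx == 0) is written through.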
737   LibFunc F;
738   if (Call->getCalledFunction() &&
739       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
740       F == LibFunc_memset_pattern16 && TLI.has(F))
741     if (ArgIdx == 0)
742       return true;
743 
744   // TODO: memset_pattern4, memset_pattern8
745   // TODO: _chk variants
746   // TODO: strcmp, strcpy
747 
748   return false;
749 }
750 
751 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
752                                            unsigned ArgIdx) {
  // Check for known builtin intrinsics and target library functions.
754   if (isWriteOnlyParam(Call, ArgIdx, TLI))
755     return ModRefInfo::Mod;
756 
757   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
758     return ModRefInfo::Ref;
759 
760   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
761     return ModRefInfo::NoModRef;
762 
763   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
764 }
765 
766 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
767   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
768   return II && II->getIntrinsicID() == IID;
769 }
770 
771 #ifndef NDEBUG
772 static const Function *getParent(const Value *V) {
773   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
774     if (!inst->getParent())
775       return nullptr;
776     return inst->getParent()->getParent();
777   }
778 
779   if (const Argument *arg = dyn_cast<Argument>(V))
780     return arg->getParent();
781 
782   return nullptr;
783 }
784 
785 static bool notDifferentParent(const Value *O1, const Value *O2) {
786 
787   const Function *F1 = getParent(O1);
788   const Function *F2 = getParent(O2);
789 
790   return !F1 || !F2 || F1 == F2;
791 }
792 #endif
793 
794 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
795                                  const MemoryLocation &LocB,
796                                  AAQueryInfo &AAQI) {
797   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
798          "BasicAliasAnalysis doesn't support interprocedural queries.");
799 
800   // If we have a directly cached entry for these locations, we have recursed
801   // through this once, so just return the cached results. Notably, when this
802   // happens, we don't clear the cache.
803   AAQueryInfo::LocPair Locs(LocA, LocB);
804   if (Locs.first.Ptr > Locs.second.Ptr)
805     std::swap(Locs.first, Locs.second);
806   auto CacheIt = AAQI.AliasCache.find(Locs);
807   if (CacheIt != AAQI.AliasCache.end())
808     return CacheIt->second;
809 
810   AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
811                                  LocB.Size, LocB.AATags, AAQI);
812 
813   assert(VisitedPhiBBs.empty());
814   return Alias;
815 }
816 
817 /// Checks to see if the specified callsite can clobber the specified memory
818 /// object.
819 ///
820 /// Since we only look at local properties of this function, we really can't
821 /// say much about this query.  We do, however, use simple "address taken"
822 /// analysis on local objects.
823 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
824                                         const MemoryLocation &Loc,
825                                         AAQueryInfo &AAQI) {
826   assert(notDifferentParent(Call, Loc.Ptr) &&
827          "AliasAnalysis query involving multiple functions!");
828 
829   const Value *Object = getUnderlyingObject(Loc.Ptr);
830 
831   // Calls marked 'tail' cannot read or write allocas from the current frame
832   // because the current frame might be destroyed by the time they run. However,
833   // a tail call may use an alloca with byval. Calling with byval copies the
834   // contents of the alloca into argument registers or stack slots, so there is
835   // no lifetime issue.
836   if (isa<AllocaInst>(Object))
837     if (const CallInst *CI = dyn_cast<CallInst>(Call))
838       if (CI->isTailCall() &&
839           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
840         return ModRefInfo::NoModRef;
841 
  // Stack restore is able to modify dynamic allocas even when they have not
  // escaped, so assume it may modify them.
844   if (auto *AI = dyn_cast<AllocaInst>(Object))
845     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
846       return ModRefInfo::Mod;
847 
848   // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
850   // as an argument, and itself doesn't capture it.
851   if (!isa<Constant>(Object) && Call != Object &&
852       isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
853 
    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
856     ModRefInfo Result = ModRefInfo::NoModRef;
857     bool IsMustAlias = true;
858 
859     unsigned OperandNo = 0;
860     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
861          CI != CE; ++CI, ++OperandNo) {
862       // Only look at the no-capture or byval pointer arguments.  If this
863       // pointer were passed to arguments that were neither of these, then it
864       // couldn't be no-capture.
865       if (!(*CI)->getType()->isPointerTy() ||
866           (!Call->doesNotCapture(OperandNo) &&
867            OperandNo < Call->getNumArgOperands() &&
868            !Call->isByValArgument(OperandNo)))
869         continue;
870 
871       // Call doesn't access memory through this operand, so we don't care
872       // if it aliases with Object.
873       if (Call->doesNotAccessMemory(OperandNo))
874         continue;
875 
876       // If this is a no-capture pointer argument, see if we can tell that it
877       // is impossible to alias the pointer we're checking.
878       AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
879                                                 MemoryLocation(Object), AAQI);
880       if (AR != MustAlias)
881         IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
883       if (AR == NoAlias)
884         continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
887       if (Call->onlyReadsMemory(OperandNo)) {
888         Result = setRef(Result);
889         continue;
890       }
      // Operand aliases 'Object', but the call only writes into it.
892       if (Call->doesNotReadMemory(OperandNo)) {
893         Result = setMod(Result);
894         continue;
895       }
      // This operand aliases 'Object', and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
899       Result = ModRefInfo::ModRef;
900       break;
901     }
902 
    // If no operand aliases, reset the Must bit. It is added below if at least
    // one operand aliases and all aliases found are MustAlias.
905     if (isNoModRef(Result))
906       IsMustAlias = false;
907 
908     // Early return if we improved mod ref information
909     if (!isModAndRefSet(Result)) {
910       if (isNoModRef(Result))
911         return ModRefInfo::NoModRef;
912       return IsMustAlias ? setMust(Result) : clearMust(Result);
913     }
914   }
915 
916   // If the call is malloc/calloc like, we can assume that it doesn't
917   // modify any IR visible value.  This is only valid because we assume these
918   // routines do not read values visible in the IR.  TODO: Consider special
919   // casing realloc and strdup routines which access only their arguments as
920   // well.  Or alternatively, replace all of this with inaccessiblememonly once
921   // that's implemented fully.
922   if (isMallocOrCallocLikeFn(Call, &TLI)) {
923     // Be conservative if the accessed pointer may alias the allocation -
924     // fallback to the generic handling below.
925     if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
926       return ModRefInfo::NoModRef;
927   }
928 
  // The accesses of a memcpy intrinsic either exactly overlap or do not
  // overlap at all, i.e., the source and destination of any given memcpy are
  // either no-alias or must-alias.
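  // For example (illustrative), for memcpy(%dst, %src, 64), a location that
  // must-aliases %src but does not alias %dst can only be read (Ref) by the
  // call, not written.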
932   if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
933     AliasResult SrcAA =
934         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
935     AliasResult DestAA =
936         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
937     // It's also possible for Loc to alias both src and dest, or neither.
938     ModRefInfo rv = ModRefInfo::NoModRef;
939     if (SrcAA != NoAlias)
940       rv = setRef(rv);
941     if (DestAA != NoAlias)
942       rv = setMod(rv);
943     return rv;
944   }
945 
946   // While the assume intrinsic is marked as arbitrarily writing so that
947   // proper control dependencies will be maintained, it never aliases any
948   // particular memory location.
949   if (isIntrinsicCall(Call, Intrinsic::assume))
950     return ModRefInfo::NoModRef;
951 
  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained, but they never mod any
  // particular memory location.
955   //
956   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
957   // heap state at the point the guard is issued needs to be consistent in case
958   // the guard invokes the "deopt" continuation.
959   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
960     return ModRefInfo::Ref;
961 
962   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
963   // writing so that proper control dependencies are maintained but they never
964   // mod any particular memory location visible to the IR.
965   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
966   // intrinsic is now modeled as reading memory. This prevents hoisting the
967   // invariant.start intrinsic over stores. Consider:
968   // *ptr = 40;
969   // *ptr = 50;
970   // invariant_start(ptr)
971   // int val = *ptr;
972   // print(val);
973   //
974   // This cannot be transformed to:
975   //
976   // *ptr = 40;
977   // invariant_start(ptr)
978   // *ptr = 50;
979   // int val = *ptr;
980   // print(val);
981   //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
985   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
986     return ModRefInfo::Ref;
987 
  // The AAResultBase base class has some smarts; let's use them.
989   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
990 }
991 
992 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
993                                         const CallBase *Call2,
994                                         AAQueryInfo &AAQI) {
995   // While the assume intrinsic is marked as arbitrarily writing so that
996   // proper control dependencies will be maintained, it never aliases any
997   // particular memory location.
998   if (isIntrinsicCall(Call1, Intrinsic::assume) ||
999       isIntrinsicCall(Call2, Intrinsic::assume))
1000     return ModRefInfo::NoModRef;
1001 
1002   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
1003   // that proper control dependencies are maintained but they never mod any
1004   // particular memory location.
1005   //
1006   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1007   // heap state at the point the guard is issued needs to be consistent in case
1008   // the guard invokes the "deopt" continuation.
1009 
1010   // NB! This function is *not* commutative, so we special case two
1011   // possibilities for guard intrinsics.
1012 
1013   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1014     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
1015                ? ModRefInfo::Ref
1016                : ModRefInfo::NoModRef;
1017 
1018   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1019     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1020                ? ModRefInfo::Mod
1021                : ModRefInfo::NoModRef;
1022 
  // The AAResultBase base class has some smarts; let's use them.
1024   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1025 }
1026 
1027 /// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
1028 /// both having the exact same pointer operand.
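///
/// For example (illustrative IR), for 4-byte accesses:
///   %a = getelementptr [8 x [8 x i32]], [8 x [8 x i32]]* %p,
///                      i64 0, i64 %i, i64 2
///   %b = getelementptr [8 x [8 x i32]], [8 x [8 x i32]]* %p,
///                      i64 0, i64 %i, i64 5
/// cannot alias: the intermediate indices are identical and the constant last
/// indices differ.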
1029 static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
1030                                             LocationSize MaybeV1Size,
1031                                             const GEPOperator *GEP2,
1032                                             LocationSize MaybeV2Size,
1033                                             const DataLayout &DL) {
1034   assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1035              GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1036          GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
1037          "Expected GEPs with the same pointer operand");
1038 
1039   // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
1040   // such that the struct field accesses provably cannot alias.
1041   // We also need at least two indices (the pointer, and the struct field).
1042   if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
1043       GEP1->getNumIndices() < 2)
1044     return MayAlias;
1045 
1046   // If we don't know the size of the accesses through both GEPs, we can't
1047   // determine whether the struct fields accessed can't alias.
1048   if (MaybeV1Size == LocationSize::unknown() ||
1049       MaybeV2Size == LocationSize::unknown())
1050     return MayAlias;
1051 
1052   const uint64_t V1Size = MaybeV1Size.getValue();
1053   const uint64_t V2Size = MaybeV2Size.getValue();
1054 
1055   ConstantInt *C1 =
1056       dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
1057   ConstantInt *C2 =
1058       dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));
1059 
  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
1062   if (C1 && C2) {
1063     unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
1064     if (C1->getValue().sextOrSelf(BitWidth) ==
1065         C2->getValue().sextOrSelf(BitWidth))
1066       return MayAlias;
1067   }
1068 
1069   // Find the last-indexed type of the GEP, i.e., the type you'd get if
1070   // you stripped the last index.
1071   // On the way, look at each indexed type.  If there's something other
1072   // than an array, different indices can lead to different final types.
1073   SmallVector<Value *, 8> IntermediateIndices;
1074 
1075   // Insert the first index; we don't need to check the type indexed
1076   // through it as it only drops the pointer indirection.
1077   assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
1078   IntermediateIndices.push_back(GEP1->getOperand(1));
1079 
1080   // Insert all the remaining indices but the last one.
1081   // Also, check that they all index through arrays.
1082   for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
1083     if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
1084             GEP1->getSourceElementType(), IntermediateIndices)))
1085       return MayAlias;
1086     IntermediateIndices.push_back(GEP1->getOperand(i + 1));
1087   }
1088 
1089   auto *Ty = GetElementPtrInst::getIndexedType(
1090     GEP1->getSourceElementType(), IntermediateIndices);
1091   if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
1092     // We know that:
1093     // - both GEPs begin indexing from the exact same pointer;
1094     // - the last indices in both GEPs are constants, indexing into a sequential
1095     //   type (array or vector);
1096     // - both GEPs only index through arrays prior to that.
1097     //
1098     // Because array indices greater than the number of elements are valid in
1099     // GEPs, unless we know the intermediate indices are identical between
1100     // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
1101     // partially overlap. We also need to check that the loaded size matches
1102     // the element size, otherwise we could still have overlap.
1103     Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
1104     const uint64_t ElementSize =
1105         DL.getTypeStoreSize(LastElementTy).getFixedSize();
1106     if (V1Size != ElementSize || V2Size != ElementSize)
1107       return MayAlias;
1108 
1109     for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
1110       if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
1111         return MayAlias;
1112 
    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
1115     // Because they cannot partially overlap and because fields in an array
1116     // cannot overlap, if we can prove the final indices are different between
1117     // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
1118 
1119     // If the last indices are constants, we've already checked they don't
1120     // equal each other so we can exit early.
1121     if (C1 && C2)
1122       return NoAlias;
1123     {
1124       Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
1125       Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
1126       if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
1127         // If one of the indices is a PHI node, be safe and only use
1128         // computeKnownBits so we don't make any assumptions about the
1129         // relationships between the two indices. This is important if we're
1130         // asking about values from different loop iterations. See PR32314.
1131         // TODO: We may be able to change the check so we only do this when
1132         // we definitely looked through a PHINode.
1133         if (GEP1LastIdx != GEP2LastIdx &&
1134             GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
1135           KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
1136           KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
1137           if (Known1.Zero.intersects(Known2.One) ||
1138               Known1.One.intersects(Known2.Zero))
1139             return NoAlias;
1140         }
1141       } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
1142         return NoAlias;
1143     }
1144   }
1145   return MayAlias;
1146 }
1147 
// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
1151 // Note that the pointer based on the alloca may not be a GEP. For
1152 // example, it may be the alloca itself.
1153 // The same applies if (b) is based on a GlobalVariable. Note that just being
1154 // based on isIdentifiedObject() is not enough - we need an identified object
1155 // that does not permit access to negative offsets. For example, a negative
1156 // offset from a noalias argument or call can be inbounds w.r.t the actual
1157 // underlying object.
1158 //
1159 // For example, consider:
1160 //
//   struct { int f0; int f1; ... } foo;
//   foo alloca;
//   foo *random = bar(&alloca);
//   int *f0 = &alloca.f0;
//   int *f1 = &random->f1;
1166 //
1167 // Which is lowered, approximately, to:
1168 //
1169 //  %alloca = alloca %struct.foo
1170 //  %random = call %struct.foo* @random(%struct.foo* %alloca)
1171 //  %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
1172 //  %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
1173 //
1174 // Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
1175 // by %alloca. Since the %f1 GEP is inbounds, that means %random must also
1176 // point into the same object. But since %f0 points to the beginning of %alloca,
1177 // the highest %f1 can be is (%alloca + 3). This means %random can not be higher
1178 // than (%alloca - 1), and so is not inbounds, a contradiction.
1179 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
1180       const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
1181       LocationSize MaybeObjectAccessSize) {
1182   // If the object access size is unknown, or the GEP isn't inbounds, bail.
1183   if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
1184     return false;
1185 
1186   const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();
1187 
  // We need the object to be an alloca or a GlobalVariable, and want to know
1189   // the offset of the pointer from the object precisely, so no variable
1190   // indices are allowed.
1191   if (!(isa<AllocaInst>(DecompObject.Base) ||
1192         isa<GlobalVariable>(DecompObject.Base)) ||
1193       !DecompObject.VarIndices.empty())
1194     return false;
1195 
  // If the GEP has no variable indices, we know the precise offset
  // from the base and can use it. If the GEP has variable indices, we can't
  // get an exact GEP offset to identify a pointer alias, so return
  // false in that case.
1200   if (!DecompGEP.VarIndices.empty())
1201     return false;
1202 
1203   return DecompGEP.Offset.sge(DecompObject.Offset + (int64_t)ObjectAccessSize);
1204 }
1205 
1206 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1207 /// another pointer.
1208 ///
1209 /// We know that V1 is a GEP, but we don't know anything about V2.
1210 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1211 /// V2.
1212 AliasResult BasicAAResult::aliasGEP(
1213     const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
1214     const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
1215     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1216   DecomposedGEP DecompGEP1, DecompGEP2;
1217   unsigned MaxPointerSize = getMaxPointerSize(DL);
1218   DecompGEP1.Offset = APInt(MaxPointerSize, 0);
1219   DecompGEP2.Offset = APInt(MaxPointerSize, 0);
1220   DecompGEP1.HasCompileTimeConstantScale =
1221       DecompGEP2.HasCompileTimeConstantScale = true;
1222 
1223   DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1224   DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1225 
1226   // Don't attempt to analyze the decomposed GEP if index scale is not a
1227   // compile-time constant.
1228   if (!DecompGEP1.HasCompileTimeConstantScale ||
1229       !DecompGEP2.HasCompileTimeConstantScale)
1230     return MayAlias;
1231 
1232   APInt GEP1BaseOffset = DecompGEP1.Offset;
1233   APInt GEP2BaseOffset = DecompGEP2.Offset;
1234 
1235   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1236          "DecomposeGEPExpression returned a result different from "
1237          "getUnderlyingObject");
1238 
1239   // If the GEP's offset relative to its base is such that the base would
1240   // fall below the start of the object underlying V2, then the GEP and V2
1241   // cannot alias.
1242   if (isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1243     return NoAlias;
  // If we have two gep instructions with must-alias or no-alias base
1245   // pointers, figure out if the indexes to the GEP tell us anything about the
1246   // derived pointer.
1247   if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1248     // Check for the GEP base being at a negative offset, this time in the other
1249     // direction.
1250     if (isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1251       return NoAlias;
1252     // Do the base pointers alias?
1253     AliasResult BaseAlias =
1254         aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
1255                    UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);
1256 
1257     // For GEPs with identical offsets, we can preserve the size and AAInfo
1258     // when performing the alias check on the underlying objects.
1259     if (BaseAlias == MayAlias && GEP1BaseOffset == GEP2BaseOffset &&
1260         DecompGEP1.VarIndices == DecompGEP2.VarIndices) {
1261       AliasResult PreciseBaseAlias = aliasCheck(
1262           UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
1263       if (PreciseBaseAlias == NoAlias)
1264         return NoAlias;
1265     }
1266 
    // If we get a NoAlias or MayAlias result, return it immediately; no
    // amount of further analysis will improve the situation.
1269     if (BaseAlias != MustAlias) {
1270       assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1271       return BaseAlias;
1272     }
1273 
1274     // Otherwise, we have a MustAlias.  Since the base pointers alias each other
1275     // exactly, see if the computed offset from the common pointer tells us
1276     // about the relation of the resulting pointer.
1277     // If we know the two GEPs are based off of the exact same pointer (and not
1278     // just the same underlying object), see if that tells us anything about
1279     // the resulting pointers.
1280     if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1281             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1282         GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1283       AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1284       // If we couldn't find anything interesting, don't abandon just yet.
1285       if (R != MayAlias)
1286         return R;
1287     }
1288 
1289     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1290     // symbolic difference.
1291     GEP1BaseOffset -= GEP2BaseOffset;
1292     GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1293 
1294   } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction.  If one pointer is a GEP of the other pointer with a
    // non-zero offset, we know they cannot alias.
1298 
1299     // If both accesses are unknown size, we can't do anything useful here.
1300     if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
1301       return MayAlias;
1302 
1303     AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
1304                                AAMDNodes(), V2, LocationSize::unknown(),
1305                                V2AAInfo, AAQI, nullptr, UnderlyingV2);
1306     if (R != MustAlias) {
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done
      // through a pointer value associated with an address range of the
      // memory access, otherwise the behavior is undefined."
1312       assert(R == NoAlias || R == MayAlias);
1313       return R;
1314     }
1315   }
1316 
  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias.  This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
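  // A hypothetical IR sketch of the first case:
  //   %g1 = getelementptr i32, i32* %p, i64 %i
  //   %g2 = getelementptr i32, i32* %p, i64 %i
  // The constant offsets and the variable indices cancel out exactly, so
  // GEP1BaseOffset is 0, VarIndices is empty, and the GEPs must alias.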
1323   if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
1324     return MustAlias;
1325 
1326   // If there is a constant difference between the pointers, but the difference
1327   // is less than the size of the associated memory object, then we know
1328   // that the objects are partially overlapping.  If the difference is
1329   // greater, we know they do not overlap.
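  // For instance (hypothetical numbers): with GEP1BaseOffset == 4,
  // V1Size == 4 and V2Size == 8, the accesses cover [4, 8) and [0, 8) and
  // partially overlap; with GEP1BaseOffset == 8 they cover [8, 12) and
  // [0, 8) and cannot alias.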
1330   if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
1331     if (GEP1BaseOffset.sge(0)) {
1332       if (V2Size != LocationSize::unknown()) {
1333         if (GEP1BaseOffset.ult(V2Size.getValue()))
1334           return PartialAlias;
1335         return NoAlias;
1336       }
1337     } else {
1338       // We have the situation where:
1339       // +                +
1340       // | BaseOffset     |
1341       // ---------------->|
1342       // |-->V1Size       |-------> V2Size
1343       // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
1346       if (V1Size != LocationSize::unknown() &&
1347           V2Size != LocationSize::unknown()) {
1348         if ((-GEP1BaseOffset).ult(V1Size.getValue()))
1349           return PartialAlias;
1350         return NoAlias;
1351       }
1352     }
1353   }
1354 
1355   if (!DecompGEP1.VarIndices.empty()) {
1356     APInt GCD;
1357     bool AllNonNegative = GEP1BaseOffset.isNonNegative();
1358     bool AllNonPositive = GEP1BaseOffset.isNonPositive();
1359     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1360       const APInt &Scale = DecompGEP1.VarIndices[i].Scale;
1361       if (i == 0)
1362         GCD = Scale.abs();
1363       else
1364         GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());
1365 
1366       if (AllNonNegative || AllNonPositive) {
1367         // If the Value could change between cycles, then any reasoning about
1368         // the Value this cycle may not hold in the next cycle. We'll just
1369         // give up if we can't determine conditions that hold for every cycle:
1370         const Value *V = DecompGEP1.VarIndices[i].V;
1371 
1372         KnownBits Known =
1373             computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
1374         bool SignKnownZero = Known.isNonNegative();
1375         bool SignKnownOne = Known.isNegative();
1376 
1377         // Zero-extension widens the variable, and so forces the sign
1378         // bit to zero.
1379         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1380         SignKnownZero |= IsZExt;
1381         SignKnownOne &= !IsZExt;
1382 
1383         AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1384                           (SignKnownOne && Scale.isNonPositive());
1385         AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1386                           (SignKnownOne && Scale.isNonNegative());
1387       }
1388     }
1389 
1390     // We now have accesses at two offsets from the same base:
1391     //  1. (...)*GCD + GEP1BaseOffset with size V1Size
1392     //  2. 0 with size V2Size
1393     // Using arithmetic modulo GCD, the accesses are at
1394     // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1395     // into the range [V2Size..GCD), then we know they cannot overlap.
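    // For example (hypothetical numbers): with GCD == 8, ModOffset == 2,
    // V1Size == 4 and V2Size == 2, the accesses occupy [2..6) and [0..2)
    // modulo 8, so they cannot overlap and we may return NoAlias.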
1396     APInt ModOffset = GEP1BaseOffset.srem(GCD);
1397     if (ModOffset.isNegative())
1398       ModOffset += GCD; // We want mod, not rem.
1399     if (V1Size != LocationSize::unknown() &&
1400         V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
1401         (GCD - ModOffset).uge(V1Size.getValue()))
1402       return NoAlias;
1403 
    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= GEP1BaseOffset. We have the following layout:
    // [0, V2Size) ... [TotalOffset, TotalOffset+V1Size)
1407     // If GEP1BaseOffset >= V2Size, the accesses don't alias.
1408     if (AllNonNegative && V2Size != LocationSize::unknown() &&
1409         GEP1BaseOffset.uge(V2Size.getValue()))
1410       return NoAlias;
1411     // Similarly, if the variables are non-positive, then the total offset is
1412     // also non-positive and <= GEP1BaseOffset. We have the following layout:
1413     // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1414     // If -GEP1BaseOffset >= V1Size, the accesses don't alias.
1415     if (AllNonPositive && V1Size != LocationSize::unknown() &&
1416         (-GEP1BaseOffset).uge(V1Size.getValue()))
1417       return NoAlias;
1418 
1419     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1420                                 GEP1BaseOffset, &AC, DT))
1421       return NoAlias;
1422   }
1423 
1424   // Statically, we can see that the base objects are the same, but the
1425   // pointers have dynamic offsets which we can't resolve. And none of our
1426   // little tricks above worked.
1427   return MayAlias;
1428 }
1429 
1430 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1431   // If the results agree, take it.
1432   if (A == B)
1433     return A;
1434   // A mix of PartialAlias and MustAlias is PartialAlias.
1435   if ((A == PartialAlias && B == MustAlias) ||
1436       (B == PartialAlias && A == MustAlias))
1437     return PartialAlias;
1438   // Otherwise, we don't know anything.
1439   return MayAlias;
1440 }
1441 
1442 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1443 /// against another.
1444 AliasResult
1445 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1446                            const AAMDNodes &SIAAInfo, const Value *V2,
1447                            LocationSize V2Size, const AAMDNodes &V2AAInfo,
1448                            const Value *UnderV2, AAQueryInfo &AAQI) {
1449   // If the values are Selects with the same condition, we can do a more precise
1450   // check: just check for aliases between the values on corresponding arms.
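  // A hypothetical IR sketch of that situation:
  //   %p = select i1 %c, i8* %a, i8* %b
  //   %q = select i1 %c, i8* %x, i8* %y
  // Because %p and %q always take the same arm, alias(%p, %q) follows from
  // merging alias(%a, %x) and alias(%b, %y).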
1451   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1452     if (SI->getCondition() == SI2->getCondition()) {
1453       AliasResult Alias =
1454           aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
1455                      V2Size, V2AAInfo, AAQI);
1456       if (Alias == MayAlias)
1457         return MayAlias;
1458       AliasResult ThisAlias =
1459           aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1460                      SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
1461       return MergeAliasResults(ThisAlias, Alias);
1462     }
1463 
  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
1466   AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1467                                  SISize, SIAAInfo, AAQI, UnderV2);
1468   if (Alias == MayAlias)
1469     return MayAlias;
1470 
1471   AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
1472                                      SISize, SIAAInfo, AAQI, UnderV2);
1473   return MergeAliasResults(ThisAlias, Alias);
1474 }
1475 
/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction
/// against another.
1478 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1479                                     const AAMDNodes &PNAAInfo, const Value *V2,
1480                                     LocationSize V2Size,
1481                                     const AAMDNodes &V2AAInfo,
1482                                     const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are PHIs in the same block, we can do a more precise and
  // more efficient check: just check for aliases between the values on
  // corresponding edges.
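  // A hypothetical IR sketch: for two PHIs in the same block,
  //   %p = phi i8* [ %a, %bb1 ], [ %b, %bb2 ]
  //   %q = phi i8* [ %x, %bb1 ], [ %y, %bb2 ]
  // alias(%p, %q) follows from merging alias(%a, %x) and alias(%b, %y),
  // since both PHIs select their values along the same incoming edge.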
1486   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1487     if (PN2->getParent() == PN->getParent()) {
1488       AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1489                                 MemoryLocation(V2, V2Size, V2AAInfo));
1490       if (PN > V2)
1491         std::swap(Locs.first, Locs.second);
1492       // Analyse the PHIs' inputs under the assumption that the PHIs are
1493       // NoAlias.
1494       // If the PHIs are May/MustAlias there must be (recursively) an input
1495       // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1496       // there must be an operation on the PHIs within the PHIs' value cycle
1497       // that causes a MayAlias.
1498       // Pretend the phis do not alias.
1499       AliasResult Alias = NoAlias;
1500       AliasResult OrigAliasResult;
1501       {
1502         // Limited lifetime iterator invalidated by the aliasCheck call below.
1503         auto CacheIt = AAQI.AliasCache.find(Locs);
1504         assert((CacheIt != AAQI.AliasCache.end()) &&
1505                "There must exist an entry for the phi node");
1506         OrigAliasResult = CacheIt->second;
1507         CacheIt->second = NoAlias;
1508       }
1509 
1510       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1511         AliasResult ThisAlias =
1512             aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1513                        PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1514                        V2Size, V2AAInfo, AAQI);
1515         Alias = MergeAliasResults(ThisAlias, Alias);
1516         if (Alias == MayAlias)
1517           break;
1518       }
1519 
1520       // Reset if speculation failed.
1521       if (Alias != NoAlias)
1522         AAQI.updateResult(Locs, OrigAliasResult);
1523       return Alias;
1524     }
1525 
1526   SmallVector<Value *, 4> V1Srcs;
  // For a recursive phi that recurses through a constant gep, we can perform
  // aliasing calculations using the other phi operands with an unknown size
  // to specify that an unknown number of elements after the initial value are
  // potentially accessed.
1531   bool isRecursive = false;
1532   auto CheckForRecPhi = [&](Value *PV) {
1533     if (!EnableRecPhiAnalysis)
1534       return false;
1535     if (GEPOperator *PVGEP = dyn_cast<GEPOperator>(PV)) {
1536       // Check whether the incoming value is a GEP that advances the pointer
1537       // result of this PHI node (e.g. in a loop). If this is the case, we
1538       // would recurse and always get a MayAlias. Handle this case specially
1539       // below. We need to ensure that the phi is inbounds and has a constant
1540       // positive operand so that we can check for alias with the initial value
1541       // and an unknown but positive size.
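      // A hypothetical IR sketch of the pattern being recognized:
      //   loop:
      //     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
      //     %p.next = getelementptr inbounds i8, i8* %p, i64 1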
1542       if (PVGEP->getPointerOperand() == PN && PVGEP->isInBounds() &&
1543           PVGEP->getNumIndices() == 1 && isa<ConstantInt>(PVGEP->idx_begin()) &&
1544           !cast<ConstantInt>(PVGEP->idx_begin())->isNegative()) {
1545         isRecursive = true;
1546         return true;
1547       }
1548     }
1549     return false;
1550   };
1551 
1552   if (PV) {
1553     // If we have PhiValues then use it to get the underlying phi values.
1554     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time,
    // where 'm' and 'n' are the number of PHI sources.
1559     if (PhiValueSet.size() > MaxLookupSearchDepth)
1560       return MayAlias;
1561     // Add the values to V1Srcs
1562     for (Value *PV1 : PhiValueSet) {
1563       if (CheckForRecPhi(PV1))
1564         continue;
1565       V1Srcs.push_back(PV1);
1566     }
1567   } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
1570     SmallPtrSet<Value *, 4> UniqueSrc;
1571     for (Value *PV1 : PN->incoming_values()) {
1572       if (isa<PHINode>(PV1))
        // If any source itself is a PHI, return MayAlias conservatively to
        // avoid compile time explosion. The worst possible case is if both
        // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
        // and 'n' are the number of PHI sources.
1577         return MayAlias;
1578 
1579       if (CheckForRecPhi(PV1))
1580         continue;
1581 
1582       if (UniqueSrc.insert(PV1).second)
1583         V1Srcs.push_back(PV1);
1584     }
1585   }
1586 
1587   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1588   // value. This should only be possible in blocks unreachable from the entry
1589   // block, but return MayAlias just in case.
1590   if (V1Srcs.empty())
1591     return MayAlias;
1592 
1593   // If this PHI node is recursive, set the size of the accessed memory to
1594   // unknown to represent all the possible values the GEP could advance the
1595   // pointer to.
1596   if (isRecursive)
1597     PNSize = LocationSize::unknown();
1598 
1599   // In the recursive alias queries below, we may compare values from two
1600   // different loop iterations. Keep track of visited phi blocks, which will
1601   // be used when determining value equivalence.
1602   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1603   auto _ = make_scope_exit([&]() {
1604     if (BlockInserted)
1605       VisitedPhiBBs.erase(PN->getParent());
1606   });
1607 
1608   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1609   // have been cached earlier may no longer be valid. Perform recursive queries
1610   // with a new AAQueryInfo.
1611   AAQueryInfo NewAAQI;
1612   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1613 
1614   AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
1615                                  PNAAInfo, *UseAAQI, UnderV2);
1616 
1617   // Early exit if the check of the first PHI source against V2 is MayAlias.
1618   // Other results are not possible.
1619   if (Alias == MayAlias)
1620     return MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we conservatively return MayAlias.
1623   if (isRecursive && Alias != NoAlias)
1624     return MayAlias;
1625 
  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
1628   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1629     Value *V = V1Srcs[i];
1630 
1631     AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, V, PNSize,
1632                                        PNAAInfo, *UseAAQI, UnderV2);
1633     Alias = MergeAliasResults(ThisAlias, Alias);
1634     if (Alias == MayAlias)
1635       break;
1636   }
1637 
1638   return Alias;
1639 }
1640 
1641 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1642 /// array references.
1643 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1644                                       const AAMDNodes &V1AAInfo,
1645                                       const Value *V2, LocationSize V2Size,
1646                                       const AAMDNodes &V2AAInfo,
1647                                       AAQueryInfo &AAQI, const Value *O1,
1648                                       const Value *O2) {
1649   // If either of the memory references is empty, it doesn't matter what the
1650   // pointer values are.
1651   if (V1Size.isZero() || V2Size.isZero())
1652     return NoAlias;
1653 
1654   // Strip off any casts if they exist.
1655   V1 = V1->stripPointerCastsAndInvariantGroups();
1656   V2 = V2->stripPointerCastsAndInvariantGroups();
1657 
1658   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1659   // value for undef that aliases nothing in the program.
1660   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1661     return NoAlias;
1662 
1663   // Are we checking for alias of the same value?
1664   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1665   // different iterations. We must therefore make sure that this is not the
1666   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1667   // happen by looking at the visited phi nodes and making sure they cannot
1668   // reach the value.
1669   if (isValueEqualInPotentialCycles(V1, V2))
1670     return MustAlias;
1671 
1672   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1673     return NoAlias; // Scalars cannot alias each other
1674 
1675   // Figure out what objects these things are pointing to if we can.
1676   if (O1 == nullptr)
1677     O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1678 
1679   if (O2 == nullptr)
1680     O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1681 
1682   // Null values in the default address space don't point to any object, so they
1683   // don't alias any other pointer.
1684   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1685     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1686       return NoAlias;
1687   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1688     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1689       return NoAlias;
1690 
1691   if (O1 != O2) {
1692     // If V1/V2 point to two different objects, we know that we have no alias.
1693     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1694       return NoAlias;
1695 
1696     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1697     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1698         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1699       return NoAlias;
1700 
    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
1703     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1704         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1705       return NoAlias;
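    // For instance, in a hypothetical function
    //   define void @f(i8* %arg) { %a = alloca i8 ... }
    // %arg cannot alias %a, since no caller can pass a pointer to a local
    // that does not exist until the function runs.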
1706 
1707     // If one pointer is the result of a call/invoke or load and the other is a
1708     // non-escaping local object within the same function, then we know the
1709     // object couldn't escape to a point where the call could return it.
1710     //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
1716     if (isEscapeSource(O1) &&
1717         isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
1718       return NoAlias;
1719     if (isEscapeSource(O2) &&
1720         isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
1721       return NoAlias;
1722   }
1723 
1724   // If the size of one access is larger than the entire object on the other
1725   // side, then we know such behavior is undefined and can assume no alias.
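  // For example, a precise 16-byte access cannot legitimately be made through
  // a pointer into a 4-byte alloca, so the two cannot alias.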
1726   bool NullIsValidLocation = NullPointerIsDefined(&F);
1727   if ((isObjectSmallerThan(
1728           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1729           TLI, NullIsValidLocation)) ||
1730       (isObjectSmallerThan(
1731           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1732           TLI, NullIsValidLocation)))
1733     return NoAlias;
1734 
1735   // Check the cache before climbing up use-def chains. This also terminates
1736   // otherwise infinitely recursive queries.
1737   AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1738                             MemoryLocation(V2, V2Size, V2AAInfo));
1739   if (V1 > V2)
1740     std::swap(Locs.first, Locs.second);
1741   std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
1742       AAQI.AliasCache.try_emplace(Locs, MayAlias);
1743   if (!Pair.second)
1744     return Pair.first->second;
1745 
1746   // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1747   // GEP can't simplify, we don't even look at the PHI cases.
1748   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1749     AliasResult Result =
1750         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
1751     if (Result != MayAlias)
1752       return AAQI.updateResult(Locs, Result);
1753   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1754     AliasResult Result =
1755         aliasGEP(GV2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O2, O1, AAQI);
1756     if (Result != MayAlias)
1757       return AAQI.updateResult(Locs, Result);
1758   }
1759 
1760   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1761     AliasResult Result =
1762         aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
1763     if (Result != MayAlias)
1764       return AAQI.updateResult(Locs, Result);
1765   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1766     AliasResult Result =
1767         aliasPHI(PN, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O1, AAQI);
1768     if (Result != MayAlias)
1769       return AAQI.updateResult(Locs, Result);
1770   }
1771 
1772   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1773     AliasResult Result =
1774         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
1775     if (Result != MayAlias)
1776       return AAQI.updateResult(Locs, Result);
1777   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1778     AliasResult Result =
1779         aliasSelect(S2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O1, AAQI);
1780     if (Result != MayAlias)
1781       return AAQI.updateResult(Locs, Result);
1782   }
1783 
1784   // If both pointers are pointing into the same object and one of them
1785   // accesses the entire object, then the accesses must overlap in some way.
1786   if (O1 == O2)
1787     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1788         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1789          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1790       return AAQI.updateResult(Locs, PartialAlias);
1791 
1792   // Recurse back into the best AA results we have, potentially with refined
1793   // memory locations. We have already ensured that BasicAA has a MayAlias
1794   // cache result for these, so any recursion back into BasicAA won't loop.
1795   AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
1796   return AAQI.updateResult(Locs, Result);
1797 }
1798 
1799 /// Check whether two Values can be considered equivalent.
1800 ///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1806 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1807                                                   const Value *V2) {
1808   if (V != V2)
1809     return false;
1810 
1811   const Instruction *Inst = dyn_cast<Instruction>(V);
1812   if (!Inst)
1813     return true;
1814 
1815   if (VisitedPhiBBs.empty())
1816     return true;
1817 
1818   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1819     return false;
1820 
1821   // Make sure that the visited phis cannot reach the Value. This ensures that
1822   // the Values cannot come from different iterations of a potential cycle the
1823   // phi nodes could be involved in.
1824   for (auto *P : VisitedPhiBBs)
1825     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
1826       return false;
1827 
1828   return true;
1829 }
1830 
1831 /// Computes the symbolic difference between two de-composed GEPs.
1832 ///
1833 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1834 /// instructions GEP1 and GEP2 which have common base pointers.
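/// For example (hypothetical indices, assuming matching extension bits): if
/// Dest holds {%x, Scale 4} and Src holds {%x, Scale 4}, the terms cancel and
/// Dest becomes empty; if Src instead holds {%x, Scale 1}, Dest becomes
/// {%x, Scale 3}; an entry present only in Src is appended to Dest with its
/// Scale negated.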
1835 void BasicAAResult::GetIndexDifference(
1836     SmallVectorImpl<VariableGEPIndex> &Dest,
1837     const SmallVectorImpl<VariableGEPIndex> &Src) {
1838   if (Src.empty())
1839     return;
1840 
1841   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1842     const Value *V = Src[i].V;
1843     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1844     APInt Scale = Src[i].Scale;
1845 
    // Find V in Dest.  This is O(N^2), but GEPs almost never have more than
    // a few variable indices.
1848     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1849       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1850           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1851         continue;
1852 
1853       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1854       // goes to zero, remove the entry.
1855       if (Dest[j].Scale != Scale)
1856         Dest[j].Scale -= Scale;
1857       else
1858         Dest.erase(Dest.begin() + j);
1859       Scale = 0;
1860       break;
1861     }
1862 
1863     // If we didn't consume this entry, add it to the end of the Dest list.
1864     if (!!Scale) {
1865       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1866       Dest.push_back(Entry);
1867     }
1868   }
1869 }
1870 
1871 bool BasicAAResult::constantOffsetHeuristic(
1872     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1873     LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
1874     AssumptionCache *AC, DominatorTree *DT) {
1875   if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
1876       MaybeV2Size == LocationSize::unknown())
1877     return false;
1878 
1879   const uint64_t V1Size = MaybeV1Size.getValue();
1880   const uint64_t V2Size = MaybeV2Size.getValue();
1881 
1882   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1883 
1884   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1885       Var0.Scale != -Var1.Scale)
1886     return false;
1887 
1888   unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1889 
  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1), we should get V0 == %x and V0Offset == 1.
1893 
1894   APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1895       V1Offset(Width, 0);
1896   bool NSW = true, NUW = true;
1897   unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1898   const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1899                                         V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1900   NSW = true;
1901   NUW = true;
1902   const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1903                                         V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1904 
1905   if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1906       V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1907     return false;
1908 
1909   // We have a hit - Var0 and Var1 only differ by a constant offset!
1910 
  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
1916   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1917   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1918   APInt MinDiffBytes =
1919     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1920 
1921   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1922   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1923   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1924   // V2Size can fit in the MinDiffBytes gap.
1925   return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
1926          MinDiffBytes.uge(V2Size + BaseOffset.abs());
1927 }
1928 
1929 //===----------------------------------------------------------------------===//
1930 // BasicAliasAnalysis Pass
1931 //===----------------------------------------------------------------------===//
1932 
1933 AnalysisKey BasicAA::Key;
1934 
1935 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(), F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
1943 }
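// A minimal usage sketch (illustrative only, assuming the standard new pass
// manager setup); the analyses BasicAA::run requests must be registered
// before asking for the result:
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return PassInstrumentationAnalysis(); });
//   FAM.registerPass([] { return TargetLibraryAnalysis(); });
//   FAM.registerPass([] { return AssumptionAnalysis(); });
//   FAM.registerPass([] { return DominatorTreeAnalysis(); });
//   FAM.registerPass([] { return BasicAA(); });
//   BasicAAResult &BAR = FAM.getResult<BasicAA>(F);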
1944 
1945 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1946   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1947 }
1948 
1949 char BasicAAWrapperPass::ID = 0;
1950 
1951 void BasicAAWrapperPass::anchor() {}
1952 
1953 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1954                       "Basic Alias Analysis (stateless AA impl)", true, true)
1955 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1956 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1957 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1958 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1959 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1960                     "Basic Alias Analysis (stateless AA impl)", true, true)
1961 
1962 FunctionPass *llvm::createBasicAAWrapperPass() {
1963   return new BasicAAWrapperPass();
1964 }
1965 
1966 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1967   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1968   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1969   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1970   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1971   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1972 
1973   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1974                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1975                                  &DTWP.getDomTree(),
1976                                  LIWP ? &LIWP->getLoopInfo() : nullptr,
1977                                  PVWP ? &PVWP->getResult() : nullptr));
1978 
1979   return false;
1980 }
1981 
1982 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1983   AU.setPreservesAll();
1984   AU.addRequired<AssumptionCacheTracker>();
1985   AU.addRequired<DominatorTreeWrapperPass>();
1986   AU.addRequired<TargetLibraryInfoWrapperPass>();
1987   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1988 }
1989 
1990 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1991   return BasicAAResult(
1992       F.getParent()->getDataLayout(), F,
1993       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1994       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1995 }
1996