1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/InstrTypes.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Metadata.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/InitializePasses.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/KnownBits.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <utility>
62 
63 #define DEBUG_TYPE "basicaa"
64 
65 using namespace llvm;
66 
67 /// Enable analysis of recursive PHI nodes.
68 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
69                                           cl::init(true));
70 
71 /// By default, even on 32-bit architectures we use 64-bit integers for
72 /// calculations. This will allow us to more-aggressively decompose indexing
73 /// expressions calculated using i64 values (e.g., long long in C) which is
74 /// common enough to worry about.
75 static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
76                                         cl::Hidden, cl::init(true));
77 static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
78                                     cl::Hidden, cl::init(false));
79 
80 /// SearchLimitReached / SearchTimes shows how often the limit of
81 /// to decompose GEPs is reached. It will affect the precision
82 /// of basic alias analysis.
83 STATISTIC(SearchLimitReached, "Number of times the limit to "
84                               "decompose GEPs is reached");
85 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
86 
87 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
88 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
89 /// careful with value equivalence. We use reachability to make sure a value
90 /// cannot be involved in a cycle.
91 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
92 
93 // The max limit of the search depth in DecomposeGEPExpression() and
94 // getUnderlyingObject(), both functions need to use the same search
95 // depth otherwise the algorithm in aliasGEP will assert.
96 static const unsigned MaxLookupSearchDepth = 6;
97 
98 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
99                                FunctionAnalysisManager::Invalidator &Inv) {
100   // We don't care if this analysis itself is preserved, it has no state. But
101   // we need to check that the analyses it depends on have been. Note that we
102   // may be created without handles to some analyses and in that case don't
103   // depend on them.
104   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
105       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
106       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
107     return true;
108 
109   // Otherwise this analysis result remains valid.
110   return false;
111 }
112 
113 //===----------------------------------------------------------------------===//
114 // Useful predicates
115 //===----------------------------------------------------------------------===//
116 
117 /// Returns true if the pointer is one which would have been considered an
118 /// escape by isNonEscapingLocalObject.
119 static bool isEscapeSource(const Value *V) {
120   if (isa<CallBase>(V))
121     return true;
122 
123   if (isa<Argument>(V))
124     return true;
125 
126   // The load case works because isNonEscapingLocalObject considers all
127   // stores to be escapes (it passes true for the StoreCaptures argument
128   // to PointerMayBeCaptured).
129   if (isa<LoadInst>(V))
130     return true;
131 
132   // The inttoptr case works because isNonEscapingLocalObject considers all
133   // means of converting or equating a pointer to an int (ptrtoint, ptr store
134   // which could be followed by an integer load, ptr<->int compare) as
135   // escaping, and objects located at well-known addresses via platform-specific
136   // means cannot be considered non-escaping local objects.
137   if (isa<IntToPtrInst>(V))
138     return true;
139 
140   return false;
141 }
142 
143 /// Returns the size of the object specified by V or UnknownSize if unknown.
144 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
145                               const TargetLibraryInfo &TLI,
146                               bool NullIsValidLoc,
147                               bool RoundToAlign = false) {
148   uint64_t Size;
149   ObjectSizeOpts Opts;
150   Opts.RoundToAlign = RoundToAlign;
151   Opts.NullIsUnknownSize = NullIsValidLoc;
152   if (getObjectSize(V, Size, DL, &TLI, Opts))
153     return Size;
154   return MemoryLocation::UnknownSize;
155 }
156 
157 /// Returns true if we can prove that the object specified by V is smaller than
158 /// Size.
159 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
160                                 const DataLayout &DL,
161                                 const TargetLibraryInfo &TLI,
162                                 bool NullIsValidLoc) {
163   // Note that the meanings of the "object" are slightly different in the
164   // following contexts:
165   //    c1: llvm::getObjectSize()
166   //    c2: llvm.objectsize() intrinsic
167   //    c3: isObjectSmallerThan()
168   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
169   // refers to the "entire object".
170   //
171   //  Consider this example:
172   //     char *p = (char*)malloc(100)
173   //     char *q = p+80;
174   //
175   //  In the context of c1 and c2, the "object" pointed by q refers to the
176   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
177   //
178   //  However, in the context of c3, the "object" refers to the chunk of memory
179   // being allocated. So, the "object" has 100 bytes, and q points to the middle
180   // the "object". In case q is passed to isObjectSmallerThan() as the 1st
181   // parameter, before the llvm::getObjectSize() is called to get the size of
182   // entire object, we should:
183   //    - either rewind the pointer q to the base-address of the object in
184   //      question (in this case rewind to p), or
185   //    - just give up. It is up to caller to make sure the pointer is pointing
186   //      to the base address the object.
187   //
188   // We go for 2nd option for simplicity.
189   if (!isIdentifiedObject(V))
190     return false;
191 
192   // This function needs to use the aligned object size because we allow
193   // reads a bit past the end given sufficient alignment.
194   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
195                                       /*RoundToAlign*/ true);
196 
197   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
198 }
199 
200 /// Return the minimal extent from \p V to the end of the underlying object,
201 /// assuming the result is used in an aliasing query. E.g., we do use the query
202 /// location size and the fact that null pointers cannot alias here.
203 static uint64_t getMinimalExtentFrom(const Value &V,
204                                      const LocationSize &LocSize,
205                                      const DataLayout &DL,
206                                      bool NullIsValidLoc) {
207   // If we have dereferenceability information we know a lower bound for the
208   // extent as accesses for a lower offset would be valid. We need to exclude
209   // the "or null" part if null is a valid pointer.
210   bool CanBeNull, CanBeFreed;
211   uint64_t DerefBytes =
212     V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
213   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
214   DerefBytes = CanBeFreed ? 0 : DerefBytes;
215   // If queried with a precise location size, we assume that location size to be
216   // accessed, thus valid.
217   if (LocSize.isPrecise())
218     DerefBytes = std::max(DerefBytes, LocSize.getValue());
219   return DerefBytes;
220 }
221 
222 /// Returns true if we can prove that the object specified by V has size Size.
223 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
224                          const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
225   uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
226   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
227 }
228 
229 //===----------------------------------------------------------------------===//
230 // GetElementPtr Instruction Decomposition and Analysis
231 //===----------------------------------------------------------------------===//
232 
233 namespace {
234 /// Represents zext(sext(V)).
235 struct ExtendedValue {
236   const Value *V;
237   unsigned ZExtBits;
238   unsigned SExtBits;
239 
240   explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
241                          unsigned SExtBits = 0)
242       : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
243 
244   unsigned getBitWidth() const {
245     return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
246   }
247 
248   ExtendedValue withValue(const Value *NewV) const {
249     return ExtendedValue(NewV, ZExtBits, SExtBits);
250   }
251 
252   ExtendedValue withZExtOfValue(const Value *NewV) const {
253     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
254                         NewV->getType()->getPrimitiveSizeInBits();
255     // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
256     return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
257   }
258 
259   ExtendedValue withSExtOfValue(const Value *NewV) const {
260     unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
261                         NewV->getType()->getPrimitiveSizeInBits();
262     // zext(sext(sext(NewV)))
263     return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
264   }
265 
266   APInt evaluateWith(APInt N) const {
267     assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
268            "Incompatible bit width");
269     if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
270     if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
271     return N;
272   }
273 
274   bool canDistributeOver(bool NUW, bool NSW) const {
275     // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
276     // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
277     return (!ZExtBits || NUW) && (!SExtBits || NSW);
278   }
279 };
280 
281 /// Represents zext(sext(V)) * Scale + Offset.
282 struct LinearExpression {
283   ExtendedValue Val;
284   APInt Scale;
285   APInt Offset;
286 
287   /// True if all operations in this expression are NSW.
288   bool IsNSW;
289 
290   LinearExpression(const ExtendedValue &Val, const APInt &Scale,
291                    const APInt &Offset, bool IsNSW)
292       : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
293 
294   LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
295     unsigned BitWidth = Val.getBitWidth();
296     Scale = APInt(BitWidth, 1);
297     Offset = APInt(BitWidth, 0);
298   }
299 };
300 }
301 
302 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
303 /// B are constant integers.
304 static LinearExpression GetLinearExpression(
305     const ExtendedValue &Val,  const DataLayout &DL, unsigned Depth,
306     AssumptionCache *AC, DominatorTree *DT) {
307   // Limit our recursion depth.
308   if (Depth == 6)
309     return Val;
310 
311   if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
312     return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
313                             Val.evaluateWith(Const->getValue()), true);
314 
315   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
316     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
317       APInt RHS = Val.evaluateWith(RHSC->getValue());
318       // The only non-OBO case we deal with is or, and only limited to the
319       // case where it is both nuw and nsw.
320       bool NUW = true, NSW = true;
321       if (isa<OverflowingBinaryOperator>(BOp)) {
322         NUW &= BOp->hasNoUnsignedWrap();
323         NSW &= BOp->hasNoSignedWrap();
324       }
325       if (!Val.canDistributeOver(NUW, NSW))
326         return Val;
327 
328       LinearExpression E(Val);
329       switch (BOp->getOpcode()) {
330       default:
331         // We don't understand this instruction, so we can't decompose it any
332         // further.
333         return Val;
334       case Instruction::Or:
335         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
336         // analyze it.
337         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
338                                BOp, DT))
339           return Val;
340 
341         LLVM_FALLTHROUGH;
342       case Instruction::Add: {
343         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
344                                 Depth + 1, AC, DT);
345         E.Offset += RHS;
346         E.IsNSW &= NSW;
347         break;
348       }
349       case Instruction::Sub: {
350         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
351                                 Depth + 1, AC, DT);
352         E.Offset -= RHS;
353         E.IsNSW &= NSW;
354         break;
355       }
356       case Instruction::Mul: {
357         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
358                                 Depth + 1, AC, DT);
359         E.Offset *= RHS;
360         E.Scale *= RHS;
361         E.IsNSW &= NSW;
362         break;
363       }
364       case Instruction::Shl:
365         // We're trying to linearize an expression of the kind:
366         //   shl i8 -128, 36
367         // where the shift count exceeds the bitwidth of the type.
368         // We can't decompose this further (the expression would return
369         // a poison value).
370         if (RHS.getLimitedValue() > Val.getBitWidth())
371           return Val;
372 
373         E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
374                                 Depth + 1, AC, DT);
375         E.Offset <<= RHS.getLimitedValue();
376         E.Scale <<= RHS.getLimitedValue();
377         E.IsNSW &= NSW;
378         break;
379       }
380       return E;
381     }
382   }
383 
384   if (isa<ZExtInst>(Val.V))
385     return GetLinearExpression(
386         Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
387         DL, Depth + 1, AC, DT);
388 
389   if (isa<SExtInst>(Val.V))
390     return GetLinearExpression(
391         Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
392         DL, Depth + 1, AC, DT);
393 
394   return Val;
395 }
396 
397 /// To ensure a pointer offset fits in an integer of size PointerSize
398 /// (in bits) when that size is smaller than the maximum pointer size. This is
399 /// an issue, for example, in particular for 32b pointers with negative indices
400 /// that rely on two's complement wrap-arounds for precise alias information
401 /// where the maximum pointer size is 64b.
402 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
403   assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
404   unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
405   return (Offset << ShiftBits).ashr(ShiftBits);
406 }
407 
408 static unsigned getMaxPointerSize(const DataLayout &DL) {
409   unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
410   if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
411   if (DoubleCalcBits) MaxPointerSize *= 2;
412 
413   return MaxPointerSize;
414 }
415 
416 /// If V is a symbolic pointer expression, decompose it into a base pointer
417 /// with a constant offset and a number of scaled symbolic offsets.
418 ///
419 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
420 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
421 /// specified amount, but which may have other unrepresented high bits. As
422 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
423 ///
424 /// This function is capable of analyzing everything that getUnderlyingObject
425 /// can look through. To be able to do that getUnderlyingObject and
426 /// DecomposeGEPExpression must use the same search depth
427 /// (MaxLookupSearchDepth).
428 BasicAAResult::DecomposedGEP
429 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
430                                       AssumptionCache *AC, DominatorTree *DT) {
431   // Limit recursion depth to limit compile time in crazy cases.
432   unsigned MaxLookup = MaxLookupSearchDepth;
433   SearchTimes++;
434   const Instruction *CxtI = dyn_cast<Instruction>(V);
435 
436   unsigned MaxPointerSize = getMaxPointerSize(DL);
437   DecomposedGEP Decomposed;
438   Decomposed.Offset = APInt(MaxPointerSize, 0);
439   Decomposed.HasCompileTimeConstantScale = true;
440   do {
441     // See if this is a bitcast or GEP.
442     const Operator *Op = dyn_cast<Operator>(V);
443     if (!Op) {
444       // The only non-operator case we can handle are GlobalAliases.
445       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
446         if (!GA->isInterposable()) {
447           V = GA->getAliasee();
448           continue;
449         }
450       }
451       Decomposed.Base = V;
452       return Decomposed;
453     }
454 
455     if (Op->getOpcode() == Instruction::BitCast ||
456         Op->getOpcode() == Instruction::AddrSpaceCast) {
457       V = Op->getOperand(0);
458       continue;
459     }
460 
461     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
462     if (!GEPOp) {
463       if (const auto *PHI = dyn_cast<PHINode>(V)) {
464         // Look through single-arg phi nodes created by LCSSA.
465         if (PHI->getNumIncomingValues() == 1) {
466           V = PHI->getIncomingValue(0);
467           continue;
468         }
469       } else if (const auto *Call = dyn_cast<CallBase>(V)) {
470         // CaptureTracking can know about special capturing properties of some
471         // intrinsics like launder.invariant.group, that can't be expressed with
472         // the attributes, but have properties like returning aliasing pointer.
473         // Because some analysis may assume that nocaptured pointer is not
474         // returned from some special intrinsic (because function would have to
475         // be marked with returns attribute), it is crucial to use this function
476         // because it should be in sync with CaptureTracking. Not using it may
477         // cause weird miscompilations where 2 aliasing pointers are assumed to
478         // noalias.
479         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
480           V = RP;
481           continue;
482         }
483       }
484 
485       Decomposed.Base = V;
486       return Decomposed;
487     }
488 
489     // Track whether we've seen at least one in bounds gep, and if so, whether
490     // all geps parsed were in bounds.
491     if (Decomposed.InBounds == None)
492       Decomposed.InBounds = GEPOp->isInBounds();
493     else if (!GEPOp->isInBounds())
494       Decomposed.InBounds = false;
495 
496     // Don't attempt to analyze GEPs over unsized objects.
497     if (!GEPOp->getSourceElementType()->isSized()) {
498       Decomposed.Base = V;
499       return Decomposed;
500     }
501 
502     // Don't attempt to analyze GEPs if index scale is not a compile-time
503     // constant.
504     if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
505       Decomposed.Base = V;
506       Decomposed.HasCompileTimeConstantScale = false;
507       return Decomposed;
508     }
509 
510     unsigned AS = GEPOp->getPointerAddressSpace();
511     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
512     gep_type_iterator GTI = gep_type_begin(GEPOp);
513     unsigned PointerSize = DL.getPointerSizeInBits(AS);
514     // Assume all GEP operands are constants until proven otherwise.
515     bool GepHasConstantOffset = true;
516     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
517          I != E; ++I, ++GTI) {
518       const Value *Index = *I;
519       // Compute the (potentially symbolic) offset in bytes for this index.
520       if (StructType *STy = GTI.getStructTypeOrNull()) {
521         // For a struct, add the member offset.
522         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
523         if (FieldNo == 0)
524           continue;
525 
526         Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
527         continue;
528       }
529 
530       // For an array/pointer, add the element offset, explicitly scaled.
531       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
532         if (CIdx->isZero())
533           continue;
534         Decomposed.Offset +=
535             DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
536             CIdx->getValue().sextOrTrunc(MaxPointerSize);
537         continue;
538       }
539 
540       GepHasConstantOffset = false;
541 
542       APInt Scale(MaxPointerSize,
543                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
544       // If the integer type is smaller than the pointer size, it is implicitly
545       // sign extended to pointer size.
546       unsigned Width = Index->getType()->getIntegerBitWidth();
547       unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
548       LinearExpression LE = GetLinearExpression(
549           ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
550 
551       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
552       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
553 
554       // It can be the case that, even through C1*V+C2 does not overflow for
555       // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
556       // decompose the expression in this way.
557       //
558       // FIXME: C1*Scale and the other operations in the decomposed
559       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
560       // possibility.
561       bool Overflow;
562       APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
563                            .smul_ov(Scale, Overflow);
564       if (Overflow) {
565         LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
566       } else {
567         Decomposed.Offset += ScaledOffset;
568         Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
569       }
570 
571       // If we already had an occurrence of this index variable, merge this
572       // scale into it.  For example, we want to handle:
573       //   A[x][x] -> x*16 + x*4 -> x*20
574       // This also ensures that 'x' only appears in the index list once.
575       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
576         if (Decomposed.VarIndices[i].V == LE.Val.V &&
577             Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
578             Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
579           Scale += Decomposed.VarIndices[i].Scale;
580           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
581           break;
582         }
583       }
584 
585       // Make sure that we have a scale that makes sense for this target's
586       // pointer size.
587       Scale = adjustToPointerSize(Scale, PointerSize);
588 
589       if (!!Scale) {
590         VariableGEPIndex Entry = {
591             LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits, Scale, CxtI, LE.IsNSW};
592         Decomposed.VarIndices.push_back(Entry);
593       }
594     }
595 
596     // Take care of wrap-arounds
597     if (GepHasConstantOffset)
598       Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);
599 
600     // Analyze the base pointer next.
601     V = GEPOp->getOperand(0);
602   } while (--MaxLookup);
603 
604   // If the chain of expressions is too deep, just return early.
605   Decomposed.Base = V;
606   SearchLimitReached++;
607   return Decomposed;
608 }
609 
610 /// Returns whether the given pointer value points to memory that is local to
611 /// the function, with global constants being considered local to all
612 /// functions.
613 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
614                                            AAQueryInfo &AAQI, bool OrLocal) {
615   assert(Visited.empty() && "Visited must be cleared after use!");
616 
617   unsigned MaxLookup = 8;
618   SmallVector<const Value *, 16> Worklist;
619   Worklist.push_back(Loc.Ptr);
620   do {
621     const Value *V = getUnderlyingObject(Worklist.pop_back_val());
622     if (!Visited.insert(V).second) {
623       Visited.clear();
624       return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
625     }
626 
627     // An alloca instruction defines local memory.
628     if (OrLocal && isa<AllocaInst>(V))
629       continue;
630 
631     // A global constant counts as local memory for our purposes.
632     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
633       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
634       // global to be marked constant in some modules and non-constant in
635       // others.  GV may even be a declaration, not a definition.
636       if (!GV->isConstant()) {
637         Visited.clear();
638         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
639       }
640       continue;
641     }
642 
643     // If both select values point to local memory, then so does the select.
644     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
645       Worklist.push_back(SI->getTrueValue());
646       Worklist.push_back(SI->getFalseValue());
647       continue;
648     }
649 
650     // If all values incoming to a phi node point to local memory, then so does
651     // the phi.
652     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
653       // Don't bother inspecting phi nodes with many operands.
654       if (PN->getNumIncomingValues() > MaxLookup) {
655         Visited.clear();
656         return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
657       }
658       append_range(Worklist, PN->incoming_values());
659       continue;
660     }
661 
662     // Otherwise be conservative.
663     Visited.clear();
664     return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
665   } while (!Worklist.empty() && --MaxLookup);
666 
667   Visited.clear();
668   return Worklist.empty();
669 }
670 
671 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
672   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
673   return II && II->getIntrinsicID() == IID;
674 }
675 
676 /// Returns the behavior when calling the given call site.
677 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
678   if (Call->doesNotAccessMemory())
679     // Can't do better than this.
680     return FMRB_DoesNotAccessMemory;
681 
682   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
683 
684   // If the callsite knows it only reads memory, don't return worse
685   // than that.
686   if (Call->onlyReadsMemory())
687     Min = FMRB_OnlyReadsMemory;
688   else if (Call->doesNotReadMemory())
689     Min = FMRB_OnlyWritesMemory;
690 
691   if (Call->onlyAccessesArgMemory())
692     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
693   else if (Call->onlyAccessesInaccessibleMemory())
694     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
695   else if (Call->onlyAccessesInaccessibleMemOrArgMem())
696     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
697 
698   // If the call has operand bundles then aliasing attributes from the function
699   // it calls do not directly apply to the call.  This can be made more precise
700   // in the future.
701   if (!Call->hasOperandBundles())
702     if (const Function *F = Call->getCalledFunction())
703       Min =
704           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
705 
706   return Min;
707 }
708 
709 /// Returns the behavior when calling the given function. For use when the call
710 /// site is not known.
711 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
712   // If the function declares it doesn't access memory, we can't do better.
713   if (F->doesNotAccessMemory())
714     return FMRB_DoesNotAccessMemory;
715 
716   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
717 
718   // If the function declares it only reads memory, go with that.
719   if (F->onlyReadsMemory())
720     Min = FMRB_OnlyReadsMemory;
721   else if (F->doesNotReadMemory())
722     Min = FMRB_OnlyWritesMemory;
723 
724   if (F->onlyAccessesArgMemory())
725     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
726   else if (F->onlyAccessesInaccessibleMemory())
727     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
728   else if (F->onlyAccessesInaccessibleMemOrArgMem())
729     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
730 
731   return Min;
732 }
733 
734 /// Returns true if this is a writeonly (i.e Mod only) parameter.
735 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
736                              const TargetLibraryInfo &TLI) {
737   if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
738     return true;
739 
740   // We can bound the aliasing properties of memset_pattern16 just as we can
741   // for memcpy/memset.  This is particularly important because the
742   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
743   // whenever possible.
744   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
745   // attributes.
746   LibFunc F;
747   if (Call->getCalledFunction() &&
748       TLI.getLibFunc(*Call->getCalledFunction(), F) &&
749       F == LibFunc_memset_pattern16 && TLI.has(F))
750     if (ArgIdx == 0)
751       return true;
752 
753   // TODO: memset_pattern4, memset_pattern8
754   // TODO: _chk variants
755   // TODO: strcmp, strcpy
756 
757   return false;
758 }
759 
760 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
761                                            unsigned ArgIdx) {
762   // Checking for known builtin intrinsics and target library functions.
763   if (isWriteOnlyParam(Call, ArgIdx, TLI))
764     return ModRefInfo::Mod;
765 
766   if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
767     return ModRefInfo::Ref;
768 
769   if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
770     return ModRefInfo::NoModRef;
771 
772   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
773 }
774 
775 #ifndef NDEBUG
776 static const Function *getParent(const Value *V) {
777   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
778     if (!inst->getParent())
779       return nullptr;
780     return inst->getParent()->getParent();
781   }
782 
783   if (const Argument *arg = dyn_cast<Argument>(V))
784     return arg->getParent();
785 
786   return nullptr;
787 }
788 
789 static bool notDifferentParent(const Value *O1, const Value *O2) {
790 
791   const Function *F1 = getParent(O1);
792   const Function *F2 = getParent(O2);
793 
794   return !F1 || !F2 || F1 == F2;
795 }
796 #endif
797 
798 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
799                                  const MemoryLocation &LocB,
800                                  AAQueryInfo &AAQI) {
801   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
802          "BasicAliasAnalysis doesn't support interprocedural queries.");
803   return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
804 }
805 
806 /// Checks to see if the specified callsite can clobber the specified memory
807 /// object.
808 ///
809 /// Since we only look at local properties of this function, we really can't
810 /// say much about this query.  We do, however, use simple "address taken"
811 /// analysis on local objects.
812 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
813                                         const MemoryLocation &Loc,
814                                         AAQueryInfo &AAQI) {
815   assert(notDifferentParent(Call, Loc.Ptr) &&
816          "AliasAnalysis query involving multiple functions!");
817 
818   const Value *Object = getUnderlyingObject(Loc.Ptr);
819 
820   // Calls marked 'tail' cannot read or write allocas from the current frame
821   // because the current frame might be destroyed by the time they run. However,
822   // a tail call may use an alloca with byval. Calling with byval copies the
823   // contents of the alloca into argument registers or stack slots, so there is
824   // no lifetime issue.
825   if (isa<AllocaInst>(Object))
826     if (const CallInst *CI = dyn_cast<CallInst>(Call))
827       if (CI->isTailCall() &&
828           !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
829         return ModRefInfo::NoModRef;
830 
831   // Stack restore is able to modify unescaped dynamic allocas. Assume it may
832   // modify them even though the alloca is not escaped.
833   if (auto *AI = dyn_cast<AllocaInst>(Object))
834     if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
835       return ModRefInfo::Mod;
836 
837   // If the pointer is to a locally allocated object that does not escape,
838   // then the call can not mod/ref the pointer unless the call takes the pointer
839   // as an argument, and itself doesn't capture it.
840   if (!isa<Constant>(Object) && Call != Object &&
841       isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {
842 
843     // Optimistically assume that call doesn't touch Object and check this
844     // assumption in the following loop.
845     ModRefInfo Result = ModRefInfo::NoModRef;
846     bool IsMustAlias = true;
847 
848     unsigned OperandNo = 0;
849     for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
850          CI != CE; ++CI, ++OperandNo) {
851       // Only look at the no-capture or byval pointer arguments.  If this
852       // pointer were passed to arguments that were neither of these, then it
853       // couldn't be no-capture.
854       if (!(*CI)->getType()->isPointerTy() ||
855           (!Call->doesNotCapture(OperandNo) &&
856            OperandNo < Call->getNumArgOperands() &&
857            !Call->isByValArgument(OperandNo)))
858         continue;
859 
860       // Call doesn't access memory through this operand, so we don't care
861       // if it aliases with Object.
862       if (Call->doesNotAccessMemory(OperandNo))
863         continue;
864 
865       // If this is a no-capture pointer argument, see if we can tell that it
866       // is impossible to alias the pointer we're checking.
867       AliasResult AR = getBestAAResults().alias(
868           MemoryLocation::getBeforeOrAfter(*CI),
869           MemoryLocation::getBeforeOrAfter(Object), AAQI);
870       if (AR != AliasResult::MustAlias)
871         IsMustAlias = false;
872       // Operand doesn't alias 'Object', continue looking for other aliases
873       if (AR == AliasResult::NoAlias)
874         continue;
875       // Operand aliases 'Object', but call doesn't modify it. Strengthen
876       // initial assumption and keep looking in case if there are more aliases.
877       if (Call->onlyReadsMemory(OperandNo)) {
878         Result = setRef(Result);
879         continue;
880       }
881       // Operand aliases 'Object' but call only writes into it.
882       if (Call->doesNotReadMemory(OperandNo)) {
883         Result = setMod(Result);
884         continue;
885       }
886       // This operand aliases 'Object' and call reads and writes into it.
887       // Setting ModRef will not yield an early return below, MustAlias is not
888       // used further.
889       Result = ModRefInfo::ModRef;
890       break;
891     }
892 
893     // No operand aliases, reset Must bit. Add below if at least one aliases
894     // and all aliases found are MustAlias.
895     if (isNoModRef(Result))
896       IsMustAlias = false;
897 
898     // Early return if we improved mod ref information
899     if (!isModAndRefSet(Result)) {
900       if (isNoModRef(Result))
901         return ModRefInfo::NoModRef;
902       return IsMustAlias ? setMust(Result) : clearMust(Result);
903     }
904   }
905 
906   // If the call is malloc/calloc like, we can assume that it doesn't
907   // modify any IR visible value.  This is only valid because we assume these
908   // routines do not read values visible in the IR.  TODO: Consider special
909   // casing realloc and strdup routines which access only their arguments as
910   // well.  Or alternatively, replace all of this with inaccessiblememonly once
911   // that's implemented fully.
912   if (isMallocOrCallocLikeFn(Call, &TLI)) {
913     // Be conservative if the accessed pointer may alias the allocation -
914     // fallback to the generic handling below.
915     if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
916                                  AAQI) == AliasResult::NoAlias)
917       return ModRefInfo::NoModRef;
918   }
919 
920   // The semantics of memcpy intrinsics either exactly overlap or do not
921   // overlap, i.e., source and destination of any given memcpy are either
922   // no-alias or must-alias.
923   if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
924     AliasResult SrcAA =
925         getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
926     AliasResult DestAA =
927         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
928     // It's also possible for Loc to alias both src and dest, or neither.
929     ModRefInfo rv = ModRefInfo::NoModRef;
930     if (SrcAA != AliasResult::NoAlias)
931       rv = setRef(rv);
932     if (DestAA != AliasResult::NoAlias)
933       rv = setMod(rv);
934     return rv;
935   }
936 
937   // Guard intrinsics are marked as arbitrarily writing so that proper control
938   // dependencies are maintained but they never mods any particular memory
939   // location.
940   //
941   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
942   // heap state at the point the guard is issued needs to be consistent in case
943   // the guard invokes the "deopt" continuation.
944   if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
945     return ModRefInfo::Ref;
946   // The same applies to deoptimize which is essentially a guard(false).
947   if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
948     return ModRefInfo::Ref;
949 
950   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
951   // writing so that proper control dependencies are maintained but they never
952   // mod any particular memory location visible to the IR.
953   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
954   // intrinsic is now modeled as reading memory. This prevents hoisting the
955   // invariant.start intrinsic over stores. Consider:
956   // *ptr = 40;
957   // *ptr = 50;
958   // invariant_start(ptr)
959   // int val = *ptr;
960   // print(val);
961   //
962   // This cannot be transformed to:
963   //
964   // *ptr = 40;
965   // invariant_start(ptr)
966   // *ptr = 50;
967   // int val = *ptr;
968   // print(val);
969   //
970   // The transformation will cause the second store to be ignored (based on
971   // rules of invariant.start)  and print 40, while the first program always
972   // prints 50.
973   if (isIntrinsicCall(Call, Intrinsic::invariant_start))
974     return ModRefInfo::Ref;
975 
976   // The AAResultBase base class has some smarts, lets use them.
977   return AAResultBase::getModRefInfo(Call, Loc, AAQI);
978 }
979 
980 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
981                                         const CallBase *Call2,
982                                         AAQueryInfo &AAQI) {
983   // Guard intrinsics are marked as arbitrarily writing so that proper control
984   // dependencies are maintained but they never mods any particular memory
985   // location.
986   //
987   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
988   // heap state at the point the guard is issued needs to be consistent in case
989   // the guard invokes the "deopt" continuation.
990 
991   // NB! This function is *not* commutative, so we special case two
992   // possibilities for guard intrinsics.
993 
994   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
995     return isModSet(createModRefInfo(getModRefBehavior(Call2)))
996                ? ModRefInfo::Ref
997                : ModRefInfo::NoModRef;
998 
999   if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1000     return isModSet(createModRefInfo(getModRefBehavior(Call1)))
1001                ? ModRefInfo::Mod
1002                : ModRefInfo::NoModRef;
1003 
1004   // The AAResultBase base class has some smarts, lets use them.
1005   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
1006 }
1007 
1008 /// Return true if we know V to the base address of the corresponding memory
1009 /// object.  This implies that any address less than V must be out of bounds
1010 /// for the underlying object.  Note that just being isIdentifiedObject() is
1011 /// not enough - For example, a negative offset from a noalias argument or call
1012 /// can be inbounds w.r.t the actual underlying object.
1013 static bool isBaseOfObject(const Value *V) {
1014   // TODO: We can handle other cases here
1015   // 1) For GC languages, arguments to functions are often required to be
1016   //    base pointers.
1017   // 2) Result of allocation routines are often base pointers.  Leverage TLI.
1018   return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
1019 }
1020 
1021 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1022 /// another pointer.
1023 ///
1024 /// We know that V1 is a GEP, but we don't know anything about V2.
1025 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1026 /// V2.
1027 AliasResult BasicAAResult::aliasGEP(
1028     const GEPOperator *GEP1, LocationSize V1Size,
1029     const Value *V2, LocationSize V2Size,
1030     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1031   if (!V1Size.hasValue() && !V2Size.hasValue()) {
1032     // TODO: This limitation exists for compile-time reasons. Relax it if we
1033     // can avoid exponential pathological cases.
1034     if (!isa<GEPOperator>(V2))
1035       return AliasResult::MayAlias;
1036 
1037     // If both accesses have unknown size, we can only check whether the base
1038     // objects don't alias.
1039     AliasResult BaseAlias = getBestAAResults().alias(
1040         MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1041         MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1042     return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1043                                              : AliasResult::MayAlias;
1044   }
1045 
1046   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1047   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1048 
1049   // Don't attempt to analyze the decomposed GEP if index scale is not a
1050   // compile-time constant.
1051   if (!DecompGEP1.HasCompileTimeConstantScale ||
1052       !DecompGEP2.HasCompileTimeConstantScale)
1053     return AliasResult::MayAlias;
1054 
1055   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1056          "DecomposeGEPExpression returned a result different from "
1057          "getUnderlyingObject");
1058 
1059   // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1060   // symbolic difference.
1061   DecompGEP1.Offset -= DecompGEP2.Offset;
1062   GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1063 
1064   // If an inbounds GEP would have to start from an out of bounds address
1065   // for the two to alias, then we can assume noalias.
1066   if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
1067       V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
1068       isBaseOfObject(DecompGEP2.Base))
1069     return AliasResult::NoAlias;
1070 
1071   if (isa<GEPOperator>(V2)) {
1072     // Symmetric case to above.
1073     if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
1074         V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1075         isBaseOfObject(DecompGEP1.Base))
1076       return AliasResult::NoAlias;
1077   }
1078 
1079   // For GEPs with identical offsets, we can preserve the size and AAInfo
1080   // when performing the alias check on the underlying objects.
1081   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1082     return getBestAAResults().alias(
1083         MemoryLocation(UnderlyingV1, V1Size),
1084         MemoryLocation(UnderlyingV2, V2Size), AAQI);
1085 
1086   // Do the base pointers alias?
1087   AliasResult BaseAlias = getBestAAResults().alias(
1088       MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1089       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1090 
1091   // If we get a No or May, then return it immediately, no amount of analysis
1092   // will improve this situation.
1093   if (BaseAlias != AliasResult::MustAlias) {
1094     assert(BaseAlias == AliasResult::NoAlias ||
1095            BaseAlias == AliasResult::MayAlias);
1096     return BaseAlias;
1097   }
1098 
1099   // If there is a constant difference between the pointers, but the difference
1100   // is less than the size of the associated memory object, then we know
1101   // that the objects are partially overlapping.  If the difference is
1102   // greater, we know they do not overlap.
1103   if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
1104     APInt &Off = DecompGEP1.Offset;
1105 
1106     // Initialize for Off >= 0 (V2 <= GEP1) case.
1107     const Value *LeftPtr = V2;
1108     const Value *RightPtr = GEP1;
1109     LocationSize VLeftSize = V2Size;
1110     LocationSize VRightSize = V1Size;
1111     const bool Swapped = Off.isNegative();
1112 
1113     if (Swapped) {
1114       // Swap if we have the situation where:
1115       // +                +
1116       // | BaseOffset     |
1117       // ---------------->|
1118       // |-->V1Size       |-------> V2Size
1119       // GEP1             V2
1120       std::swap(LeftPtr, RightPtr);
1121       std::swap(VLeftSize, VRightSize);
1122       Off = -Off;
1123     }
1124 
1125     if (VLeftSize.hasValue()) {
1126       const uint64_t LSize = VLeftSize.getValue();
1127       if (Off.ult(LSize)) {
1128         // Conservatively drop processing if a phi was visited and/or offset is
1129         // too big.
1130         AliasResult AR = AliasResult::PartialAlias;
1131         if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
1132             (Off + VRightSize.getValue()).ule(LSize)) {
1133           // Memory referenced by right pointer is nested. Save the offset in
1134           // cache. Note that originally offset estimated as GEP1-V2, but
1135           // AliasResult contains the shift that represents GEP1+Offset=V2.
1136           AR.setOffset(-Off.getSExtValue());
1137           AR.swap(Swapped);
1138         }
1139         return AR;
1140       }
1141       return AliasResult::NoAlias;
1142     }
1143   }
1144 
1145   if (!DecompGEP1.VarIndices.empty()) {
1146     APInt GCD;
1147     bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
1148     bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
1149     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1150       APInt Scale = DecompGEP1.VarIndices[i].Scale;
1151       if (!DecompGEP1.VarIndices[i].IsNSW)
1152         Scale = APInt::getOneBitSet(Scale.getBitWidth(),
1153                                     Scale.countTrailingZeros());
1154 
1155       if (i == 0)
1156         GCD = Scale.abs();
1157       else
1158         GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());
1159 
1160       if (AllNonNegative || AllNonPositive) {
1161         // If the Value could change between cycles, then any reasoning about
1162         // the Value this cycle may not hold in the next cycle. We'll just
1163         // give up if we can't determine conditions that hold for every cycle:
1164         const Value *V = DecompGEP1.VarIndices[i].V;
1165         const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;
1166 
1167         KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
1168         bool SignKnownZero = Known.isNonNegative();
1169         bool SignKnownOne = Known.isNegative();
1170 
1171         // Zero-extension widens the variable, and so forces the sign
1172         // bit to zero.
1173         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1174         SignKnownZero |= IsZExt;
1175         SignKnownOne &= !IsZExt;
1176 
1177         AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
1178                           (SignKnownOne && Scale.isNonPositive());
1179         AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
1180                           (SignKnownOne && Scale.isNonNegative());
1181       }
1182     }
1183 
1184     // We now have accesses at two offsets from the same base:
1185     //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
1186     //  2. 0 with size V2Size
1187     // Using arithmetic modulo GCD, the accesses are at
1188     // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1189     // into the range [V2Size..GCD), then we know they cannot overlap.
1190     APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1191     if (ModOffset.isNegative())
1192       ModOffset += GCD; // We want mod, not rem.
1193     if (V1Size.hasValue() && V2Size.hasValue() &&
1194         ModOffset.uge(V2Size.getValue()) &&
1195         (GCD - ModOffset).uge(V1Size.getValue()))
1196       return AliasResult::NoAlias;
1197 
1198     // If we know all the variables are non-negative, then the total offset is
1199     // also non-negative and >= DecompGEP1.Offset. We have the following layout:
1200     // [0, V2Size) ... [TotalOffset, TotalOffer+V1Size]
1201     // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
1202     if (AllNonNegative && V2Size.hasValue() &&
1203         DecompGEP1.Offset.uge(V2Size.getValue()))
1204       return AliasResult::NoAlias;
1205     // Similarly, if the variables are non-positive, then the total offset is
1206     // also non-positive and <= DecompGEP1.Offset. We have the following layout:
1207     // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
1208     // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
1209     if (AllNonPositive && V1Size.hasValue() &&
1210         (-DecompGEP1.Offset).uge(V1Size.getValue()))
1211       return AliasResult::NoAlias;
1212 
1213     if (V1Size.hasValue() && V2Size.hasValue()) {
1214       // Try to determine whether abs(VarIndex) > 0.
1215       Optional<APInt> MinAbsVarIndex;
1216       if (DecompGEP1.VarIndices.size() == 1) {
1217         // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
1218         const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1219         if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
1220           MinAbsVarIndex = Var.Scale.abs();
1221       } else if (DecompGEP1.VarIndices.size() == 2) {
1222         // VarIndex = Scale*V0 + (-Scale)*V1.
1223         // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1224         // Check that VisitedPhiBBs is empty, to avoid reasoning about
1225         // inequality of values across loop iterations.
1226         const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1227         const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1228         if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
1229             Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
1230             isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
1231           MinAbsVarIndex = Var0.Scale.abs();
1232       }
1233 
1234       if (MinAbsVarIndex) {
1235         // The constant offset will have added at least +/-MinAbsVarIndex to it.
1236         APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1237         APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1238         // Check that an access at OffsetLo or lower, and an access at OffsetHi
1239         // or higher both do not alias.
1240         if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1241             OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1242           return AliasResult::NoAlias;
1243       }
1244     }
1245 
1246     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1247                                 DecompGEP1.Offset, &AC, DT))
1248       return AliasResult::NoAlias;
1249   }
1250 
1251   // Statically, we can see that the base objects are the same, but the
1252   // pointers have dynamic offsets which we can't resolve. And none of our
1253   // little tricks above worked.
1254   return AliasResult::MayAlias;
1255 }
1256 
1257 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1258   // If the results agree, take it.
1259   if (A == B)
1260     return A;
1261   // A mix of PartialAlias and MustAlias is PartialAlias.
1262   if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1263       (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1264     return AliasResult::PartialAlias;
1265   // Otherwise, we don't know anything.
1266   return AliasResult::MayAlias;
1267 }
1268 
1269 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1270 /// against another.
1271 AliasResult
1272 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1273                            const Value *V2, LocationSize V2Size,
1274                            AAQueryInfo &AAQI) {
1275   // If the values are Selects with the same condition, we can do a more precise
1276   // check: just check for aliases between the values on corresponding arms.
1277   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1278     if (SI->getCondition() == SI2->getCondition()) {
1279       AliasResult Alias = getBestAAResults().alias(
1280           MemoryLocation(SI->getTrueValue(), SISize),
1281           MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1282       if (Alias == AliasResult::MayAlias)
1283         return AliasResult::MayAlias;
1284       AliasResult ThisAlias = getBestAAResults().alias(
1285           MemoryLocation(SI->getFalseValue(), SISize),
1286           MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1287       return MergeAliasResults(ThisAlias, Alias);
1288     }
1289 
1290   // If both arms of the Select node NoAlias or MustAlias V2, then returns
1291   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1292   AliasResult Alias = getBestAAResults().alias(
1293       MemoryLocation(V2, V2Size),
1294       MemoryLocation(SI->getTrueValue(), SISize), AAQI);
1295   if (Alias == AliasResult::MayAlias)
1296     return AliasResult::MayAlias;
1297 
1298   AliasResult ThisAlias = getBestAAResults().alias(
1299       MemoryLocation(V2, V2Size),
1300       MemoryLocation(SI->getFalseValue(), SISize), AAQI);
1301   return MergeAliasResults(ThisAlias, Alias);
1302 }
1303 
1304 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1305 /// another.
1306 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1307                                     const Value *V2, LocationSize V2Size,
1308                                     AAQueryInfo &AAQI) {
1309   if (!PN->getNumIncomingValues())
1310     return AliasResult::NoAlias;
1311   // If the values are PHIs in the same block, we can do a more precise
1312   // as well as efficient check: just check for aliases between the values
1313   // on corresponding edges.
1314   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1315     if (PN2->getParent() == PN->getParent()) {
1316       Optional<AliasResult> Alias;
1317       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1318         AliasResult ThisAlias = getBestAAResults().alias(
1319             MemoryLocation(PN->getIncomingValue(i), PNSize),
1320             MemoryLocation(
1321                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1322             AAQI);
1323         if (Alias)
1324           *Alias = MergeAliasResults(*Alias, ThisAlias);
1325         else
1326           Alias = ThisAlias;
1327         if (*Alias == AliasResult::MayAlias)
1328           break;
1329       }
1330       return *Alias;
1331     }
1332 
1333   SmallVector<Value *, 4> V1Srcs;
1334   // If a phi operand recurses back to the phi, we can still determine NoAlias
1335   // if we don't alias the underlying objects of the other phi operands, as we
1336   // know that the recursive phi needs to be based on them in some way.
1337   bool isRecursive = false;
1338   auto CheckForRecPhi = [&](Value *PV) {
1339     if (!EnableRecPhiAnalysis)
1340       return false;
1341     if (getUnderlyingObject(PV) == PN) {
1342       isRecursive = true;
1343       return true;
1344     }
1345     return false;
1346   };
1347 
1348   if (PV) {
1349     // If we have PhiValues then use it to get the underlying phi values.
1350     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1351     // If we have more phi values than the search depth then return MayAlias
1352     // conservatively to avoid compile time explosion. The worst possible case
1353     // is if both sides are PHI nodes. In which case, this is O(m x n) time
1354     // where 'm' and 'n' are the number of PHI sources.
1355     if (PhiValueSet.size() > MaxLookupSearchDepth)
1356       return AliasResult::MayAlias;
1357     // Add the values to V1Srcs
1358     for (Value *PV1 : PhiValueSet) {
1359       if (CheckForRecPhi(PV1))
1360         continue;
1361       V1Srcs.push_back(PV1);
1362     }
1363   } else {
1364     // If we don't have PhiInfo then just look at the operands of the phi itself
1365     // FIXME: Remove this once we can guarantee that we have PhiInfo always
1366     SmallPtrSet<Value *, 4> UniqueSrc;
1367     Value *OnePhi = nullptr;
1368     for (Value *PV1 : PN->incoming_values()) {
1369       if (isa<PHINode>(PV1)) {
1370         if (OnePhi && OnePhi != PV1) {
1371           // To control potential compile time explosion, we choose to be
1372           // conserviate when we have more than one Phi input.  It is important
1373           // that we handle the single phi case as that lets us handle LCSSA
1374           // phi nodes and (combined with the recursive phi handling) simple
1375           // pointer induction variable patterns.
1376           return AliasResult::MayAlias;
1377         }
1378         OnePhi = PV1;
1379       }
1380 
1381       if (CheckForRecPhi(PV1))
1382         continue;
1383 
1384       if (UniqueSrc.insert(PV1).second)
1385         V1Srcs.push_back(PV1);
1386     }
1387 
1388     if (OnePhi && UniqueSrc.size() > 1)
1389       // Out of an abundance of caution, allow only the trivial lcssa and
1390       // recursive phi cases.
1391       return AliasResult::MayAlias;
1392   }
1393 
1394   // If V1Srcs is empty then that means that the phi has no underlying non-phi
1395   // value. This should only be possible in blocks unreachable from the entry
1396   // block, but return MayAlias just in case.
1397   if (V1Srcs.empty())
1398     return AliasResult::MayAlias;
1399 
1400   // If this PHI node is recursive, indicate that the pointer may be moved
1401   // across iterations. We can only prove NoAlias if different underlying
1402   // objects are involved.
1403   if (isRecursive)
1404     PNSize = LocationSize::beforeOrAfterPointer();
1405 
1406   // In the recursive alias queries below, we may compare values from two
1407   // different loop iterations. Keep track of visited phi blocks, which will
1408   // be used when determining value equivalence.
1409   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1410   auto _ = make_scope_exit([&]() {
1411     if (BlockInserted)
1412       VisitedPhiBBs.erase(PN->getParent());
1413   });
1414 
1415   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1416   // have been cached earlier may no longer be valid. Perform recursive queries
1417   // with a new AAQueryInfo.
1418   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1419   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1420 
1421   AliasResult Alias = getBestAAResults().alias(
1422       MemoryLocation(V2, V2Size),
1423       MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1424 
1425   // Early exit if the check of the first PHI source against V2 is MayAlias.
1426   // Other results are not possible.
1427   if (Alias == AliasResult::MayAlias)
1428     return AliasResult::MayAlias;
1429   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1430   // remain valid to all elements and needs to conservatively return MayAlias.
1431   if (isRecursive && Alias != AliasResult::NoAlias)
1432     return AliasResult::MayAlias;
1433 
1434   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1435   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1436   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1437     Value *V = V1Srcs[i];
1438 
1439     AliasResult ThisAlias = getBestAAResults().alias(
1440         MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1441     Alias = MergeAliasResults(ThisAlias, Alias);
1442     if (Alias == AliasResult::MayAlias)
1443       break;
1444   }
1445 
1446   return Alias;
1447 }
1448 
1449 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1450 /// array references.
1451 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1452                                       const Value *V2, LocationSize V2Size,
1453                                       AAQueryInfo &AAQI) {
1454   // If either of the memory references is empty, it doesn't matter what the
1455   // pointer values are.
1456   if (V1Size.isZero() || V2Size.isZero())
1457     return AliasResult::NoAlias;
1458 
1459   // Strip off any casts if they exist.
1460   V1 = V1->stripPointerCastsForAliasAnalysis();
1461   V2 = V2->stripPointerCastsForAliasAnalysis();
1462 
1463   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1464   // value for undef that aliases nothing in the program.
1465   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1466     return AliasResult::NoAlias;
1467 
1468   // Are we checking for alias of the same value?
1469   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1470   // different iterations. We must therefore make sure that this is not the
1471   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1472   // happen by looking at the visited phi nodes and making sure they cannot
1473   // reach the value.
1474   if (isValueEqualInPotentialCycles(V1, V2))
1475     return AliasResult::MustAlias;
1476 
1477   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1478     return AliasResult::NoAlias; // Scalars cannot alias each other
1479 
1480   // Figure out what objects these things are pointing to if we can.
1481   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1482   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1483 
1484   // Null values in the default address space don't point to any object, so they
1485   // don't alias any other pointer.
1486   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1487     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1488       return AliasResult::NoAlias;
1489   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1490     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1491       return AliasResult::NoAlias;
1492 
1493   if (O1 != O2) {
1494     // If V1/V2 point to two different objects, we know that we have no alias.
1495     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1496       return AliasResult::NoAlias;
1497 
1498     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1499     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1500         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1501       return AliasResult::NoAlias;
1502 
1503     // Function arguments can't alias with things that are known to be
1504     // unambigously identified at the function level.
1505     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1506         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1507       return AliasResult::NoAlias;
1508 
1509     // If one pointer is the result of a call/invoke or load and the other is a
1510     // non-escaping local object within the same function, then we know the
1511     // object couldn't escape to a point where the call could return it.
1512     //
1513     // Note that if the pointers are in different functions, there are a
1514     // variety of complications. A call with a nocapture argument may still
1515     // temporary store the nocapture argument's value in a temporary memory
1516     // location if that memory location doesn't escape. Or it may pass a
1517     // nocapture value to other functions as long as they don't capture it.
1518     if (isEscapeSource(O1) &&
1519         isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
1520       return AliasResult::NoAlias;
1521     if (isEscapeSource(O2) &&
1522         isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
1523       return AliasResult::NoAlias;
1524   }
1525 
1526   // If the size of one access is larger than the entire object on the other
1527   // side, then we know such behavior is undefined and can assume no alias.
1528   bool NullIsValidLocation = NullPointerIsDefined(&F);
1529   if ((isObjectSmallerThan(
1530           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1531           TLI, NullIsValidLocation)) ||
1532       (isObjectSmallerThan(
1533           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1534           TLI, NullIsValidLocation)))
1535     return AliasResult::NoAlias;
1536 
1537   // If one the accesses may be before the accessed pointer, canonicalize this
1538   // by using unknown after-pointer sizes for both accesses. This is
1539   // equivalent, because regardless of which pointer is lower, one of them
1540   // will always came after the other, as long as the underlying objects aren't
1541   // disjoint. We do this so that the rest of BasicAA does not have to deal
1542   // with accesses before the base pointer, and to improve cache utilization by
1543   // merging equivalent states.
1544   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1545     V1Size = LocationSize::afterPointer();
1546     V2Size = LocationSize::afterPointer();
1547   }
1548 
1549   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1550   // for recursive queries. For this reason, this limit is chosen to be large
1551   // enough to be very rarely hit, while still being small enough to avoid
1552   // stack overflows.
1553   if (AAQI.Depth >= 512)
1554     return AliasResult::MayAlias;
1555 
1556   // Check the cache before climbing up use-def chains. This also terminates
1557   // otherwise infinitely recursive queries.
1558   AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1559   const bool Swapped = V1 > V2;
1560   if (Swapped)
1561     std::swap(Locs.first, Locs.second);
1562   const auto &Pair = AAQI.AliasCache.try_emplace(
1563       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1564   if (!Pair.second) {
1565     auto &Entry = Pair.first->second;
1566     if (!Entry.isDefinitive()) {
1567       // Remember that we used an assumption.
1568       ++Entry.NumAssumptionUses;
1569       ++AAQI.NumAssumptionUses;
1570     }
1571     // Cache contains sorted {V1,V2} pairs but we should return original order.
1572     auto Result = Entry.Result;
1573     Result.swap(Swapped);
1574     return Result;
1575   }
1576 
1577   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1578   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1579   AliasResult Result =
1580       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1581 
1582   auto It = AAQI.AliasCache.find(Locs);
1583   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1584   auto &Entry = It->second;
1585 
1586   // Check whether a NoAlias assumption has been used, but disproven.
1587   bool AssumptionDisproven =
1588       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1589   if (AssumptionDisproven)
1590     Result = AliasResult::MayAlias;
1591 
1592   // This is a definitive result now, when considered as a root query.
1593   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1594   Entry.Result = Result;
1595   // Cache contains sorted {V1,V2} pairs.
1596   Entry.Result.swap(Swapped);
1597   Entry.NumAssumptionUses = -1;
1598 
1599   // If the assumption has been disproven, remove any results that may have
1600   // been based on this assumption. Do this after the Entry updates above to
1601   // avoid iterator invalidation.
1602   if (AssumptionDisproven)
1603     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1604       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1605 
1606   // The result may still be based on assumptions higher up in the chain.
1607   // Remember it, so it can be purged from the cache later.
1608   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1609       Result != AliasResult::MayAlias)
1610     AAQI.AssumptionBasedResults.push_back(Locs);
1611   return Result;
1612 }
1613 
1614 AliasResult BasicAAResult::aliasCheckRecursive(
1615     const Value *V1, LocationSize V1Size,
1616     const Value *V2, LocationSize V2Size,
1617     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1618   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1619     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1620     if (Result != AliasResult::MayAlias)
1621       return Result;
1622   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1623     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1624     if (Result != AliasResult::MayAlias)
1625       return Result;
1626   }
1627 
1628   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1629     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1630     if (Result != AliasResult::MayAlias)
1631       return Result;
1632   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1633     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1634     if (Result != AliasResult::MayAlias)
1635       return Result;
1636   }
1637 
1638   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1639     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1640     if (Result != AliasResult::MayAlias)
1641       return Result;
1642   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1643     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1644     if (Result != AliasResult::MayAlias)
1645       return Result;
1646   }
1647 
1648   // If both pointers are pointing into the same object and one of them
1649   // accesses the entire object, then the accesses must overlap in some way.
1650   if (O1 == O2) {
1651     bool NullIsValidLocation = NullPointerIsDefined(&F);
1652     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1653         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1654          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1655       return AliasResult::PartialAlias;
1656   }
1657 
1658   return AliasResult::MayAlias;
1659 }
1660 
1661 /// Check whether two Values can be considered equivalent.
1662 ///
1663 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1664 /// they can not be part of a cycle in the value graph by looking at all
1665 /// visited phi nodes an making sure that the phis cannot reach the value. We
1666 /// have to do this because we are looking through phi nodes (That is we say
1667 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1668 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1669                                                   const Value *V2) {
1670   if (V != V2)
1671     return false;
1672 
1673   const Instruction *Inst = dyn_cast<Instruction>(V);
1674   if (!Inst)
1675     return true;
1676 
1677   if (VisitedPhiBBs.empty())
1678     return true;
1679 
1680   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1681     return false;
1682 
1683   // Make sure that the visited phis cannot reach the Value. This ensures that
1684   // the Values cannot come from different iterations of a potential cycle the
1685   // phi nodes could be involved in.
1686   for (auto *P : VisitedPhiBBs)
1687     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1688       return false;
1689 
1690   return true;
1691 }
1692 
1693 /// Computes the symbolic difference between two de-composed GEPs.
1694 ///
1695 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1696 /// instructions GEP1 and GEP2 which have common base pointers.
1697 void BasicAAResult::GetIndexDifference(
1698     SmallVectorImpl<VariableGEPIndex> &Dest,
1699     const SmallVectorImpl<VariableGEPIndex> &Src) {
1700   if (Src.empty())
1701     return;
1702 
1703   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1704     const Value *V = Src[i].V;
1705     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1706     APInt Scale = Src[i].Scale;
1707 
1708     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1709     // than a few variable indexes.
1710     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1711       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1712           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1713         continue;
1714 
1715       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1716       // goes to zero, remove the entry.
1717       if (Dest[j].Scale != Scale) {
1718         Dest[j].Scale -= Scale;
1719         Dest[j].IsNSW = false;
1720       } else
1721         Dest.erase(Dest.begin() + j);
1722       Scale = 0;
1723       break;
1724     }
1725 
1726     // If we didn't consume this entry, add it to the end of the Dest list.
1727     if (!!Scale) {
1728       VariableGEPIndex Entry = {V,      ZExtBits,    SExtBits,
1729                                 -Scale, Src[i].CxtI, Src[i].IsNSW};
1730       Dest.push_back(Entry);
1731     }
1732   }
1733 }
1734 
1735 bool BasicAAResult::constantOffsetHeuristic(
1736     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1737     LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
1738     AssumptionCache *AC, DominatorTree *DT) {
1739   if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1740       !MaybeV2Size.hasValue())
1741     return false;
1742 
1743   const uint64_t V1Size = MaybeV1Size.getValue();
1744   const uint64_t V2Size = MaybeV2Size.getValue();
1745 
1746   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1747 
1748   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1749       Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
1750     return false;
1751 
1752   // We'll strip off the Extensions of Var0 and Var1 and do another round
1753   // of GetLinearExpression decomposition. In the example above, if Var0
1754   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1755 
1756   LinearExpression E0 =
1757       GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
1758   LinearExpression E1 =
1759       GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
1760   if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
1761       E0.Val.SExtBits != E1.Val.SExtBits ||
1762       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
1763     return false;
1764 
1765   // We have a hit - Var0 and Var1 only differ by a constant offset!
1766 
1767   // If we've been sext'ed then zext'd the maximum difference between Var0 and
1768   // Var1 is possible to calculate, but we're just interested in the absolute
1769   // minimum difference between the two. The minimum distance may occur due to
1770   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1771   // the minimum distance between %i and %i + 5 is 3.
1772   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1773   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1774   APInt MinDiffBytes =
1775     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1776 
1777   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1778   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1779   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1780   // V2Size can fit in the MinDiffBytes gap.
1781   return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
1782          MinDiffBytes.uge(V2Size + BaseOffset.abs());
1783 }
1784 
1785 //===----------------------------------------------------------------------===//
1786 // BasicAliasAnalysis Pass
1787 //===----------------------------------------------------------------------===//
1788 
1789 AnalysisKey BasicAA::Key;
1790 
1791 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1792   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1793   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1794   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1795   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
1796   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
1797 }
1798 
1799 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1800   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1801 }
1802 
1803 char BasicAAWrapperPass::ID = 0;
1804 
1805 void BasicAAWrapperPass::anchor() {}
1806 
1807 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1808                       "Basic Alias Analysis (stateless AA impl)", true, true)
1809 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1810 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1811 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1812 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1813 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1814                     "Basic Alias Analysis (stateless AA impl)", true, true)
1815 
1816 FunctionPass *llvm::createBasicAAWrapperPass() {
1817   return new BasicAAWrapperPass();
1818 }
1819 
1820 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1821   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1822   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1823   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1824   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
1825 
1826   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1827                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1828                                  &DTWP.getDomTree(),
1829                                  PVWP ? &PVWP->getResult() : nullptr));
1830 
1831   return false;
1832 }
1833 
1834 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1835   AU.setPreservesAll();
1836   AU.addRequiredTransitive<AssumptionCacheTracker>();
1837   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1838   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1839   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
1840 }
1841 
1842 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1843   return BasicAAResult(
1844       F.getParent()->getDataLayout(), F,
1845       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
1846       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1847 }
1848