1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the primary stateless implementation of the
11 // Alias Analysis interface that implements identities (two different
12 // globals cannot alias, etc), but does no stateful analysis.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Analysis/BasicAliasAnalysis.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/LoopInfo.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/CallSite.h"
34 #include "llvm/IR/Constant.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GetElementPtrTypeIterator.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/InstrTypes.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/Metadata.h"
49 #include "llvm/IR/Operator.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/User.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/Pass.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/KnownBits.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <utility>
62 
63 #define DEBUG_TYPE "basicaa"
64 
65 using namespace llvm;
66 
67 /// Enable analysis of recursive PHI nodes.
68 static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
69                                           cl::init(false));
70 /// SearchLimitReached / SearchTimes shows how often the limit of
71 /// to decompose GEPs is reached. It will affect the precision
72 /// of basic alias analysis.
73 STATISTIC(SearchLimitReached, "Number of times the limit to "
74                               "decompose GEPs is reached");
75 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
76 
77 /// Cutoff after which to stop analysing a set of phi nodes potentially involved
78 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be
79 /// careful with value equivalence. We use reachability to make sure a value
80 /// cannot be involved in a cycle.
81 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
82 
83 // The max limit of the search depth in DecomposeGEPExpression() and
84 // GetUnderlyingObject(), both functions need to use the same search
85 // depth otherwise the algorithm in aliasGEP will assert.
86 static const unsigned MaxLookupSearchDepth = 6;
87 
88 bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
89                                FunctionAnalysisManager::Invalidator &Inv) {
90   // We don't care if this analysis itself is preserved, it has no state. But
91   // we need to check that the analyses it depends on have been. Note that we
92   // may be created without handles to some analyses and in that case don't
93   // depend on them.
94   if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
95       (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
96       (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
97     return true;
98 
99   // Otherwise this analysis result remains valid.
100   return false;
101 }
102 
103 //===----------------------------------------------------------------------===//
104 // Useful predicates
105 //===----------------------------------------------------------------------===//
106 
107 /// Returns true if the pointer is to a function-local object that never
108 /// escapes from the function.
109 static bool isNonEscapingLocalObject(const Value *V) {
110   // If this is a local allocation, check to see if it escapes.
111   if (isa<AllocaInst>(V) || isNoAliasCall(V))
112     // Set StoreCaptures to True so that we can assume in our callers that the
113     // pointer is not the result of a load instruction. Currently
114     // PointerMayBeCaptured doesn't have any special analysis for the
115     // StoreCaptures=false case; if it did, our callers could be refined to be
116     // more precise.
117     return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
118 
119   // If this is an argument that corresponds to a byval or noalias argument,
120   // then it has not escaped before entering the function.  Check if it escapes
121   // inside the function.
122   if (const Argument *A = dyn_cast<Argument>(V))
123     if (A->hasByValAttr() || A->hasNoAliasAttr())
124       // Note even if the argument is marked nocapture, we still need to check
125       // for copies made inside the function. The nocapture attribute only
126       // specifies that there are no copies made that outlive the function.
127       return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
128 
129   return false;
130 }
131 
132 /// Returns true if the pointer is one which would have been considered an
133 /// escape by isNonEscapingLocalObject.
134 static bool isEscapeSource(const Value *V) {
135   if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
136     return true;
137 
138   // The load case works because isNonEscapingLocalObject considers all
139   // stores to be escapes (it passes true for the StoreCaptures argument
140   // to PointerMayBeCaptured).
141   if (isa<LoadInst>(V))
142     return true;
143 
144   return false;
145 }
146 
147 /// Returns the size of the object specified by V or UnknownSize if unknown.
148 static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
149                               const TargetLibraryInfo &TLI,
150                               bool RoundToAlign = false) {
151   uint64_t Size;
152   ObjectSizeOpts Opts;
153   Opts.RoundToAlign = RoundToAlign;
154   if (getObjectSize(V, Size, DL, &TLI, Opts))
155     return Size;
156   return MemoryLocation::UnknownSize;
157 }
158 
159 /// Returns true if we can prove that the object specified by V is smaller than
160 /// Size.
161 static bool isObjectSmallerThan(const Value *V, uint64_t Size,
162                                 const DataLayout &DL,
163                                 const TargetLibraryInfo &TLI) {
164   // Note that the meanings of the "object" are slightly different in the
165   // following contexts:
166   //    c1: llvm::getObjectSize()
167   //    c2: llvm.objectsize() intrinsic
168   //    c3: isObjectSmallerThan()
169   // c1 and c2 share the same meaning; however, the meaning of "object" in c3
170   // refers to the "entire object".
171   //
172   //  Consider this example:
173   //     char *p = (char*)malloc(100)
174   //     char *q = p+80;
175   //
176   //  In the context of c1 and c2, the "object" pointed by q refers to the
177   // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
178   //
179   //  However, in the context of c3, the "object" refers to the chunk of memory
180   // being allocated. So, the "object" has 100 bytes, and q points to the middle
181   // the "object". In case q is passed to isObjectSmallerThan() as the 1st
182   // parameter, before the llvm::getObjectSize() is called to get the size of
183   // entire object, we should:
184   //    - either rewind the pointer q to the base-address of the object in
185   //      question (in this case rewind to p), or
186   //    - just give up. It is up to caller to make sure the pointer is pointing
187   //      to the base address the object.
188   //
189   // We go for 2nd option for simplicity.
190   if (!isIdentifiedObject(V))
191     return false;
192 
193   // This function needs to use the aligned object size because we allow
194   // reads a bit past the end given sufficient alignment.
195   uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);
196 
197   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
198 }
199 
200 /// Returns true if we can prove that the object specified by V has size Size.
201 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
202                          const TargetLibraryInfo &TLI) {
203   uint64_t ObjectSize = getObjectSize(V, DL, TLI);
204   return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
205 }
206 
207 //===----------------------------------------------------------------------===//
208 // GetElementPtr Instruction Decomposition and Analysis
209 //===----------------------------------------------------------------------===//
210 
211 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
212 /// B are constant integers.
213 ///
214 /// Returns the scale and offset values as APInts and return V as a Value*, and
215 /// return whether we looked through any sign or zero extends.  The incoming
216 /// Value is known to have IntegerType, and it may already be sign or zero
217 /// extended.
218 ///
219 /// Note that this looks through extends, so the high bits may not be
220 /// represented in the result.
221 /*static*/ const Value *BasicAAResult::GetLinearExpression(
222     const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
223     unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
224     AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
225   assert(V->getType()->isIntegerTy() && "Not an integer value");
226 
227   // Limit our recursion depth.
228   if (Depth == 6) {
229     Scale = 1;
230     Offset = 0;
231     return V;
232   }
233 
234   if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
235     // If it's a constant, just convert it to an offset and remove the variable.
236     // If we've been called recursively, the Offset bit width will be greater
237     // than the constant's (the Offset's always as wide as the outermost call),
238     // so we'll zext here and process any extension in the isa<SExtInst> &
239     // isa<ZExtInst> cases below.
240     Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
241     assert(Scale == 0 && "Constant values don't have a scale");
242     return V;
243   }
244 
245   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
246     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
247       // If we've been called recursively, then Offset and Scale will be wider
248       // than the BOp operands. We'll always zext it here as we'll process sign
249       // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
250       APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());
251 
252       switch (BOp->getOpcode()) {
253       default:
254         // We don't understand this instruction, so we can't decompose it any
255         // further.
256         Scale = 1;
257         Offset = 0;
258         return V;
259       case Instruction::Or:
260         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
261         // analyze it.
262         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
263                                BOp, DT)) {
264           Scale = 1;
265           Offset = 0;
266           return V;
267         }
268         LLVM_FALLTHROUGH;
269       case Instruction::Add:
270         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
271                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
272         Offset += RHS;
273         break;
274       case Instruction::Sub:
275         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
276                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
277         Offset -= RHS;
278         break;
279       case Instruction::Mul:
280         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
281                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
282         Offset *= RHS;
283         Scale *= RHS;
284         break;
285       case Instruction::Shl:
286         V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
287                                 SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
288         Offset <<= RHS.getLimitedValue();
289         Scale <<= RHS.getLimitedValue();
290         // the semantics of nsw and nuw for left shifts don't match those of
291         // multiplications, so we won't propagate them.
292         NSW = NUW = false;
293         return V;
294       }
295 
296       if (isa<OverflowingBinaryOperator>(BOp)) {
297         NUW &= BOp->hasNoUnsignedWrap();
298         NSW &= BOp->hasNoSignedWrap();
299       }
300       return V;
301     }
302   }
303 
304   // Since GEP indices are sign extended anyway, we don't care about the high
305   // bits of a sign or zero extended value - just scales and offsets.  The
306   // extensions have to be consistent though.
307   if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
308     Value *CastOp = cast<CastInst>(V)->getOperand(0);
309     unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
310     unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
311     unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
312     const Value *Result =
313         GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
314                             Depth + 1, AC, DT, NSW, NUW);
315 
316     // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
317     // by just incrementing the number of bits we've extended by.
318     unsigned ExtendedBy = NewWidth - SmallWidth;
319 
320     if (isa<SExtInst>(V) && ZExtBits == 0) {
321       // sext(sext(%x, a), b) == sext(%x, a + b)
322 
323       if (NSW) {
324         // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
325         // into sext(%x) + sext(c). We'll sext the Offset ourselves:
326         unsigned OldWidth = Offset.getBitWidth();
327         Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
328       } else {
329         // We may have signed-wrapped, so don't decompose sext(%x + c) into
330         // sext(%x) + sext(c)
331         Scale = 1;
332         Offset = 0;
333         Result = CastOp;
334         ZExtBits = OldZExtBits;
335         SExtBits = OldSExtBits;
336       }
337       SExtBits += ExtendedBy;
338     } else {
339       // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)
340 
341       if (!NUW) {
342         // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
343         // zext(%x) + zext(c)
344         Scale = 1;
345         Offset = 0;
346         Result = CastOp;
347         ZExtBits = OldZExtBits;
348         SExtBits = OldSExtBits;
349       }
350       ZExtBits += ExtendedBy;
351     }
352 
353     return Result;
354   }
355 
356   Scale = 1;
357   Offset = 0;
358   return V;
359 }
360 
361 /// To ensure a pointer offset fits in an integer of size PointerSize
362 /// (in bits) when that size is smaller than 64. This is an issue in
363 /// particular for 32b programs with negative indices that rely on two's
364 /// complement wrap-arounds for precise alias information.
365 static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
366   assert(PointerSize <= 64 && "Invalid PointerSize!");
367   unsigned ShiftBits = 64 - PointerSize;
368   return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
369 }
370 
371 /// If V is a symbolic pointer expression, decompose it into a base pointer
372 /// with a constant offset and a number of scaled symbolic offsets.
373 ///
374 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
375 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
376 /// specified amount, but which may have other unrepresented high bits. As
377 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
378 ///
379 /// When DataLayout is around, this function is capable of analyzing everything
380 /// that GetUnderlyingObject can look through. To be able to do that
381 /// GetUnderlyingObject and DecomposeGEPExpression must use the same search
382 /// depth (MaxLookupSearchDepth). When DataLayout not is around, it just looks
383 /// through pointer casts.
384 bool BasicAAResult::DecomposeGEPExpression(const Value *V,
385        DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
386        DominatorTree *DT) {
387   // Limit recursion depth to limit compile time in crazy cases.
388   unsigned MaxLookup = MaxLookupSearchDepth;
389   SearchTimes++;
390 
391   Decomposed.StructOffset = 0;
392   Decomposed.OtherOffset = 0;
393   Decomposed.VarIndices.clear();
394   do {
395     // See if this is a bitcast or GEP.
396     const Operator *Op = dyn_cast<Operator>(V);
397     if (!Op) {
398       // The only non-operator case we can handle are GlobalAliases.
399       if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
400         if (!GA->isInterposable()) {
401           V = GA->getAliasee();
402           continue;
403         }
404       }
405       Decomposed.Base = V;
406       return false;
407     }
408 
409     if (Op->getOpcode() == Instruction::BitCast ||
410         Op->getOpcode() == Instruction::AddrSpaceCast) {
411       V = Op->getOperand(0);
412       continue;
413     }
414 
415     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
416     if (!GEPOp) {
417       if (auto CS = ImmutableCallSite(V))
418         if (const Value *RV = CS.getReturnedArgOperand()) {
419           V = RV;
420           continue;
421         }
422 
423       // If it's not a GEP, hand it off to SimplifyInstruction to see if it
424       // can come up with something. This matches what GetUnderlyingObject does.
425       if (const Instruction *I = dyn_cast<Instruction>(V))
426         // TODO: Get a DominatorTree and AssumptionCache and use them here
427         // (these are both now available in this function, but this should be
428         // updated when GetUnderlyingObject is updated). TLI should be
429         // provided also.
430         if (const Value *Simplified =
431                 SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
432           V = Simplified;
433           continue;
434         }
435 
436       Decomposed.Base = V;
437       return false;
438     }
439 
440     // Don't attempt to analyze GEPs over unsized objects.
441     if (!GEPOp->getSourceElementType()->isSized()) {
442       Decomposed.Base = V;
443       return false;
444     }
445 
446     unsigned AS = GEPOp->getPointerAddressSpace();
447     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
448     gep_type_iterator GTI = gep_type_begin(GEPOp);
449     unsigned PointerSize = DL.getPointerSizeInBits(AS);
450     // Assume all GEP operands are constants until proven otherwise.
451     bool GepHasConstantOffset = true;
452     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
453          I != E; ++I, ++GTI) {
454       const Value *Index = *I;
455       // Compute the (potentially symbolic) offset in bytes for this index.
456       if (StructType *STy = GTI.getStructTypeOrNull()) {
457         // For a struct, add the member offset.
458         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
459         if (FieldNo == 0)
460           continue;
461 
462         Decomposed.StructOffset +=
463           DL.getStructLayout(STy)->getElementOffset(FieldNo);
464         continue;
465       }
466 
467       // For an array/pointer, add the element offset, explicitly scaled.
468       if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
469         if (CIdx->isZero())
470           continue;
471         Decomposed.OtherOffset +=
472           DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
473         continue;
474       }
475 
476       GepHasConstantOffset = false;
477 
478       uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
479       unsigned ZExtBits = 0, SExtBits = 0;
480 
481       // If the integer type is smaller than the pointer size, it is implicitly
482       // sign extended to pointer size.
483       unsigned Width = Index->getType()->getIntegerBitWidth();
484       if (PointerSize > Width)
485         SExtBits += PointerSize - Width;
486 
487       // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
488       APInt IndexScale(Width, 0), IndexOffset(Width, 0);
489       bool NSW = true, NUW = true;
490       Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
491                                   SExtBits, DL, 0, AC, DT, NSW, NUW);
492 
493       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
494       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
495       Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
496       Scale *= IndexScale.getSExtValue();
497 
498       // If we already had an occurrence of this index variable, merge this
499       // scale into it.  For example, we want to handle:
500       //   A[x][x] -> x*16 + x*4 -> x*20
501       // This also ensures that 'x' only appears in the index list once.
502       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
503         if (Decomposed.VarIndices[i].V == Index &&
504             Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
505             Decomposed.VarIndices[i].SExtBits == SExtBits) {
506           Scale += Decomposed.VarIndices[i].Scale;
507           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
508           break;
509         }
510       }
511 
512       // Make sure that we have a scale that makes sense for this target's
513       // pointer size.
514       Scale = adjustToPointerSize(Scale, PointerSize);
515 
516       if (Scale) {
517         VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
518                                   static_cast<int64_t>(Scale)};
519         Decomposed.VarIndices.push_back(Entry);
520       }
521     }
522 
523     // Take care of wrap-arounds
524     if (GepHasConstantOffset) {
525       Decomposed.StructOffset =
526           adjustToPointerSize(Decomposed.StructOffset, PointerSize);
527       Decomposed.OtherOffset =
528           adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
529     }
530 
531     // Analyze the base pointer next.
532     V = GEPOp->getOperand(0);
533   } while (--MaxLookup);
534 
535   // If the chain of expressions is too deep, just return early.
536   Decomposed.Base = V;
537   SearchLimitReached++;
538   return true;
539 }
540 
541 /// Returns whether the given pointer value points to memory that is local to
542 /// the function, with global constants being considered local to all
543 /// functions.
544 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
545                                            bool OrLocal) {
546   assert(Visited.empty() && "Visited must be cleared after use!");
547 
548   unsigned MaxLookup = 8;
549   SmallVector<const Value *, 16> Worklist;
550   Worklist.push_back(Loc.Ptr);
551   do {
552     const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
553     if (!Visited.insert(V).second) {
554       Visited.clear();
555       return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
556     }
557 
558     // An alloca instruction defines local memory.
559     if (OrLocal && isa<AllocaInst>(V))
560       continue;
561 
562     // A global constant counts as local memory for our purposes.
563     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
564       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
565       // global to be marked constant in some modules and non-constant in
566       // others.  GV may even be a declaration, not a definition.
567       if (!GV->isConstant()) {
568         Visited.clear();
569         return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
570       }
571       continue;
572     }
573 
574     // If both select values point to local memory, then so does the select.
575     if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
576       Worklist.push_back(SI->getTrueValue());
577       Worklist.push_back(SI->getFalseValue());
578       continue;
579     }
580 
581     // If all values incoming to a phi node point to local memory, then so does
582     // the phi.
583     if (const PHINode *PN = dyn_cast<PHINode>(V)) {
584       // Don't bother inspecting phi nodes with many operands.
585       if (PN->getNumIncomingValues() > MaxLookup) {
586         Visited.clear();
587         return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
588       }
589       for (Value *IncValue : PN->incoming_values())
590         Worklist.push_back(IncValue);
591       continue;
592     }
593 
594     // Otherwise be conservative.
595     Visited.clear();
596     return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
597   } while (!Worklist.empty() && --MaxLookup);
598 
599   Visited.clear();
600   return Worklist.empty();
601 }
602 
603 /// Returns the behavior when calling the given call site.
604 FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
605   if (CS.doesNotAccessMemory())
606     // Can't do better than this.
607     return FMRB_DoesNotAccessMemory;
608 
609   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
610 
611   // If the callsite knows it only reads memory, don't return worse
612   // than that.
613   if (CS.onlyReadsMemory())
614     Min = FMRB_OnlyReadsMemory;
615   else if (CS.doesNotReadMemory())
616     Min = FMRB_DoesNotReadMemory;
617 
618   if (CS.onlyAccessesArgMemory())
619     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
620   else if (CS.onlyAccessesInaccessibleMemory())
621     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
622   else if (CS.onlyAccessesInaccessibleMemOrArgMem())
623     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
624 
625   // If CS has operand bundles then aliasing attributes from the function it
626   // calls do not directly apply to the CallSite.  This can be made more
627   // precise in the future.
628   if (!CS.hasOperandBundles())
629     if (const Function *F = CS.getCalledFunction())
630       Min =
631           FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));
632 
633   return Min;
634 }
635 
636 /// Returns the behavior when calling the given function. For use when the call
637 /// site is not known.
638 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
639   // If the function declares it doesn't access memory, we can't do better.
640   if (F->doesNotAccessMemory())
641     return FMRB_DoesNotAccessMemory;
642 
643   FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
644 
645   // If the function declares it only reads memory, go with that.
646   if (F->onlyReadsMemory())
647     Min = FMRB_OnlyReadsMemory;
648   else if (F->doesNotReadMemory())
649     Min = FMRB_DoesNotReadMemory;
650 
651   if (F->onlyAccessesArgMemory())
652     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
653   else if (F->onlyAccessesInaccessibleMemory())
654     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
655   else if (F->onlyAccessesInaccessibleMemOrArgMem())
656     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
657 
658   return Min;
659 }
660 
661 /// Returns true if this is a writeonly (i.e Mod only) parameter.
662 static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
663                              const TargetLibraryInfo &TLI) {
664   if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
665     return true;
666 
667   // We can bound the aliasing properties of memset_pattern16 just as we can
668   // for memcpy/memset.  This is particularly important because the
669   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
670   // whenever possible.
671   // FIXME Consider handling this in InferFunctionAttr.cpp together with other
672   // attributes.
673   LibFunc F;
674   if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
675       F == LibFunc_memset_pattern16 && TLI.has(F))
676     if (ArgIdx == 0)
677       return true;
678 
679   // TODO: memset_pattern4, memset_pattern8
680   // TODO: _chk variants
681   // TODO: strcmp, strcpy
682 
683   return false;
684 }
685 
686 ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
687                                            unsigned ArgIdx) {
688   // Checking for known builtin intrinsics and target library functions.
689   if (isWriteOnlyParam(CS, ArgIdx, TLI))
690     return MRI_Mod;
691 
692   if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
693     return MRI_Ref;
694 
695   if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
696     return MRI_NoModRef;
697 
698   return AAResultBase::getArgModRefInfo(CS, ArgIdx);
699 }
700 
701 static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
702   const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
703   return II && II->getIntrinsicID() == IID;
704 }
705 
706 #ifndef NDEBUG
707 static const Function *getParent(const Value *V) {
708   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
709     if (!inst->getParent())
710       return nullptr;
711     return inst->getParent()->getParent();
712   }
713 
714   if (const Argument *arg = dyn_cast<Argument>(V))
715     return arg->getParent();
716 
717   return nullptr;
718 }
719 
720 static bool notDifferentParent(const Value *O1, const Value *O2) {
721 
722   const Function *F1 = getParent(O1);
723   const Function *F2 = getParent(O2);
724 
725   return !F1 || !F2 || F1 == F2;
726 }
727 #endif
728 
729 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
730                                  const MemoryLocation &LocB) {
731   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
732          "BasicAliasAnalysis doesn't support interprocedural queries.");
733 
734   // If we have a directly cached entry for these locations, we have recursed
735   // through this once, so just return the cached results. Notably, when this
736   // happens, we don't clear the cache.
737   auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
738   if (CacheIt != AliasCache.end())
739     return CacheIt->second;
740 
741   AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
742                                  LocB.Size, LocB.AATags);
743   // AliasCache rarely has more than 1 or 2 elements, always use
744   // shrink_and_clear so it quickly returns to the inline capacity of the
745   // SmallDenseMap if it ever grows larger.
746   // FIXME: This should really be shrink_to_inline_capacity_and_clear().
747   AliasCache.shrink_and_clear();
748   VisitedPhiBBs.clear();
749   return Alias;
750 }
751 
752 /// Checks to see if the specified callsite can clobber the specified memory
753 /// object.
754 ///
755 /// Since we only look at local properties of this function, we really can't
756 /// say much about this query.  We do, however, use simple "address taken"
757 /// analysis on local objects.
758 ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
759                                         const MemoryLocation &Loc) {
760   assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
761          "AliasAnalysis query involving multiple functions!");
762 
763   const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
764 
765   // If this is a tail call and Loc.Ptr points to a stack location, we know that
766   // the tail call cannot access or modify the local stack.
767   // We cannot exclude byval arguments here; these belong to the caller of
768   // the current function not to the current function, and a tail callee
769   // may reference them.
770   if (isa<AllocaInst>(Object))
771     if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
772       if (CI->isTailCall())
773         return MRI_NoModRef;
774 
775   // If the pointer is to a locally allocated object that does not escape,
776   // then the call can not mod/ref the pointer unless the call takes the pointer
777   // as an argument, and itself doesn't capture it.
778   if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
779       isNonEscapingLocalObject(Object)) {
780 
781     // Optimistically assume that call doesn't touch Object and check this
782     // assumption in the following loop.
783     ModRefInfo Result = MRI_NoModRef;
784 
785     unsigned OperandNo = 0;
786     for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
787          CI != CE; ++CI, ++OperandNo) {
788       // Only look at the no-capture or byval pointer arguments.  If this
789       // pointer were passed to arguments that were neither of these, then it
790       // couldn't be no-capture.
791       if (!(*CI)->getType()->isPointerTy() ||
792           (!CS.doesNotCapture(OperandNo) &&
793            OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
794         continue;
795 
796       // Call doesn't access memory through this operand, so we don't care
797       // if it aliases with Object.
798       if (CS.doesNotAccessMemory(OperandNo))
799         continue;
800 
801       // If this is a no-capture pointer argument, see if we can tell that it
802       // is impossible to alias the pointer we're checking.
803       AliasResult AR =
804           getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
805 
806       // Operand doesnt alias 'Object', continue looking for other aliases
807       if (AR == NoAlias)
808         continue;
809       // Operand aliases 'Object', but call doesn't modify it. Strengthen
810       // initial assumption and keep looking in case if there are more aliases.
811       if (CS.onlyReadsMemory(OperandNo)) {
812         Result = static_cast<ModRefInfo>(Result | MRI_Ref);
813         continue;
814       }
815       // Operand aliases 'Object' but call only writes into it.
816       if (CS.doesNotReadMemory(OperandNo)) {
817         Result = static_cast<ModRefInfo>(Result | MRI_Mod);
818         continue;
819       }
820       // This operand aliases 'Object' and call reads and writes into it.
821       Result = MRI_ModRef;
822       break;
823     }
824 
825     // Early return if we improved mod ref information
826     if (Result != MRI_ModRef)
827       return Result;
828   }
829 
830   // If the CallSite is to malloc or calloc, we can assume that it doesn't
831   // modify any IR visible value.  This is only valid because we assume these
832   // routines do not read values visible in the IR.  TODO: Consider special
833   // casing realloc and strdup routines which access only their arguments as
834   // well.  Or alternatively, replace all of this with inaccessiblememonly once
835   // that's implemented fully.
836   auto *Inst = CS.getInstruction();
837   if (isMallocOrCallocLikeFn(Inst, &TLI)) {
838     // Be conservative if the accessed pointer may alias the allocation -
839     // fallback to the generic handling below.
840     if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
841       return MRI_NoModRef;
842   }
843 
844   // The semantics of memcpy intrinsics forbid overlap between their respective
845   // operands, i.e., source and destination of any given memcpy must no-alias.
846   // If Loc must-aliases either one of these two locations, then it necessarily
847   // no-aliases the other.
848   if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
849     AliasResult SrcAA, DestAA;
850 
851     if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
852                                           Loc)) == MustAlias)
853       // Loc is exactly the memcpy source thus disjoint from memcpy dest.
854       return MRI_Ref;
855     if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
856                                            Loc)) == MustAlias)
857       // The converse case.
858       return MRI_Mod;
859 
860     // It's also possible for Loc to alias both src and dest, or neither.
861     ModRefInfo rv = MRI_NoModRef;
862     if (SrcAA != NoAlias)
863       rv = static_cast<ModRefInfo>(rv | MRI_Ref);
864     if (DestAA != NoAlias)
865       rv = static_cast<ModRefInfo>(rv | MRI_Mod);
866     return rv;
867   }
868 
869   // While the assume intrinsic is marked as arbitrarily writing so that
870   // proper control dependencies will be maintained, it never aliases any
871   // particular memory location.
872   if (isIntrinsicCall(CS, Intrinsic::assume))
873     return MRI_NoModRef;
874 
875   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
876   // that proper control dependencies are maintained but they never mods any
877   // particular memory location.
878   //
879   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
880   // heap state at the point the guard is issued needs to be consistent in case
881   // the guard invokes the "deopt" continuation.
882   if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
883     return MRI_Ref;
884 
885   // Like assumes, invariant.start intrinsics were also marked as arbitrarily
886   // writing so that proper control dependencies are maintained but they never
887   // mod any particular memory location visible to the IR.
888   // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
889   // intrinsic is now modeled as reading memory. This prevents hoisting the
890   // invariant.start intrinsic over stores. Consider:
891   // *ptr = 40;
892   // *ptr = 50;
893   // invariant_start(ptr)
894   // int val = *ptr;
895   // print(val);
896   //
897   // This cannot be transformed to:
898   //
899   // *ptr = 40;
900   // invariant_start(ptr)
901   // *ptr = 50;
902   // int val = *ptr;
903   // print(val);
904   //
905   // The transformation will cause the second store to be ignored (based on
906   // rules of invariant.start)  and print 40, while the first program always
907   // prints 50.
908   if (isIntrinsicCall(CS, Intrinsic::invariant_start))
909     return MRI_Ref;
910 
911   // The AAResultBase base class has some smarts, lets use them.
912   return AAResultBase::getModRefInfo(CS, Loc);
913 }
914 
915 ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
916                                         ImmutableCallSite CS2) {
917   // While the assume intrinsic is marked as arbitrarily writing so that
918   // proper control dependencies will be maintained, it never aliases any
919   // particular memory location.
920   if (isIntrinsicCall(CS1, Intrinsic::assume) ||
921       isIntrinsicCall(CS2, Intrinsic::assume))
922     return MRI_NoModRef;
923 
924   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
925   // that proper control dependencies are maintained but they never mod any
926   // particular memory location.
927   //
928   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
929   // heap state at the point the guard is issued needs to be consistent in case
930   // the guard invokes the "deopt" continuation.
931 
932   // NB! This function is *not* commutative, so we specical case two
933   // possibilities for guard intrinsics.
934 
935   if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
936     return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;
937 
938   if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
939     return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;
940 
941   // The AAResultBase base class has some smarts, lets use them.
942   return AAResultBase::getModRefInfo(CS1, CS2);
943 }
944 
945 /// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
946 /// both having the exact same pointer operand.
947 static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
948                                             uint64_t V1Size,
949                                             const GEPOperator *GEP2,
950                                             uint64_t V2Size,
951                                             const DataLayout &DL) {
952   assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
953              GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
954          GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
955          "Expected GEPs with the same pointer operand");
956 
957   // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
958   // such that the struct field accesses provably cannot alias.
959   // We also need at least two indices (the pointer, and the struct field).
960   if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
961       GEP1->getNumIndices() < 2)
962     return MayAlias;
963 
964   // If we don't know the size of the accesses through both GEPs, we can't
965   // determine whether the struct fields accessed can't alias.
966   if (V1Size == MemoryLocation::UnknownSize ||
967       V2Size == MemoryLocation::UnknownSize)
968     return MayAlias;
969 
970   ConstantInt *C1 =
971       dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
972   ConstantInt *C2 =
973       dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));
974 
975   // If the last (struct) indices are constants and are equal, the other indices
976   // might be also be dynamically equal, so the GEPs can alias.
977   if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
978     return MayAlias;
979 
980   // Find the last-indexed type of the GEP, i.e., the type you'd get if
981   // you stripped the last index.
982   // On the way, look at each indexed type.  If there's something other
983   // than an array, different indices can lead to different final types.
984   SmallVector<Value *, 8> IntermediateIndices;
985 
986   // Insert the first index; we don't need to check the type indexed
987   // through it as it only drops the pointer indirection.
988   assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
989   IntermediateIndices.push_back(GEP1->getOperand(1));
990 
991   // Insert all the remaining indices but the last one.
992   // Also, check that they all index through arrays.
993   for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
994     if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
995             GEP1->getSourceElementType(), IntermediateIndices)))
996       return MayAlias;
997     IntermediateIndices.push_back(GEP1->getOperand(i + 1));
998   }
999 
1000   auto *Ty = GetElementPtrInst::getIndexedType(
1001     GEP1->getSourceElementType(), IntermediateIndices);
1002   StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);
1003 
1004   if (isa<SequentialType>(Ty)) {
1005     // We know that:
1006     // - both GEPs begin indexing from the exact same pointer;
1007     // - the last indices in both GEPs are constants, indexing into a sequential
1008     //   type (array or pointer);
1009     // - both GEPs only index through arrays prior to that.
1010     //
1011     // Because array indices greater than the number of elements are valid in
1012     // GEPs, unless we know the intermediate indices are identical between
1013     // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
1014     // partially overlap. We also need to check that the loaded size matches
1015     // the element size, otherwise we could still have overlap.
1016     const uint64_t ElementSize =
1017         DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
1018     if (V1Size != ElementSize || V2Size != ElementSize)
1019       return MayAlias;
1020 
1021     for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
1022       if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
1023         return MayAlias;
1024 
1025     // Now we know that the array/pointer that GEP1 indexes into and that
1026     // that GEP2 indexes into must either precisely overlap or be disjoint.
1027     // Because they cannot partially overlap and because fields in an array
1028     // cannot overlap, if we can prove the final indices are different between
1029     // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
1030 
1031     // If the last indices are constants, we've already checked they don't
1032     // equal each other so we can exit early.
1033     if (C1 && C2)
1034       return NoAlias;
1035     {
1036       Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
1037       Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
1038       if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
1039         // If one of the indices is a PHI node, be safe and only use
1040         // computeKnownBits so we don't make any assumptions about the
1041         // relationships between the two indices. This is important if we're
1042         // asking about values from different loop iterations. See PR32314.
1043         // TODO: We may be able to change the check so we only do this when
1044         // we definitely looked through a PHINode.
1045         if (GEP1LastIdx != GEP2LastIdx &&
1046             GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
1047           KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
1048           KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
1049           if (Known1.Zero.intersects(Known2.One) ||
1050               Known1.One.intersects(Known2.Zero))
1051             return NoAlias;
1052         }
1053       } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
1054         return NoAlias;
1055     }
1056     return MayAlias;
1057   } else if (!LastIndexedStruct || !C1 || !C2) {
1058     return MayAlias;
1059   }
1060 
1061   // We know that:
1062   // - both GEPs begin indexing from the exact same pointer;
1063   // - the last indices in both GEPs are constants, indexing into a struct;
1064   // - said indices are different, hence, the pointed-to fields are different;
1065   // - both GEPs only index through arrays prior to that.
1066   //
1067   // This lets us determine that the struct that GEP1 indexes into and the
1068   // struct that GEP2 indexes into must either precisely overlap or be
1069   // completely disjoint.  Because they cannot partially overlap, indexing into
1070   // different non-overlapping fields of the struct will never alias.
1071 
1072   // Therefore, the only remaining thing needed to show that both GEPs can't
1073   // alias is that the fields are not overlapping.
1074   const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
1075   const uint64_t StructSize = SL->getSizeInBytes();
1076   const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
1077   const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());
1078 
1079   auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
1080                                       uint64_t V2Off, uint64_t V2Size) {
1081     return V1Off < V2Off && V1Off + V1Size <= V2Off &&
1082            ((V2Off + V2Size <= StructSize) ||
1083             (V2Off + V2Size - StructSize <= V1Off));
1084   };
1085 
1086   if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
1087       EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
1088     return NoAlias;
1089 
1090   return MayAlias;
1091 }
1092 
1093 // If a we have (a) a GEP and (b) a pointer based on an alloca, and the
1094 // beginning of the object the GEP points would have a negative offset with
1095 // repsect to the alloca, that means the GEP can not alias pointer (b).
1096 // Note that the pointer based on the alloca may not be a GEP. For
1097 // example, it may be the alloca itself.
1098 // The same applies if (b) is based on a GlobalVariable. Note that just being
1099 // based on isIdentifiedObject() is not enough - we need an identified object
1100 // that does not permit access to negative offsets. For example, a negative
1101 // offset from a noalias argument or call can be inbounds w.r.t the actual
1102 // underlying object.
1103 //
1104 // For example, consider:
1105 //
1106 //   struct { int f0, int f1, ...} foo;
1107 //   foo alloca;
1108 //   foo* random = bar(alloca);
1109 //   int *f0 = &alloca.f0
1110 //   int *f1 = &random->f1;
1111 //
1112 // Which is lowered, approximately, to:
1113 //
1114 //  %alloca = alloca %struct.foo
1115 //  %random = call %struct.foo* @random(%struct.foo* %alloca)
1116 //  %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
1117 //  %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
1118 //
1119 // Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
1120 // by %alloca. Since the %f1 GEP is inbounds, that means %random must also
1121 // point into the same object. But since %f0 points to the beginning of %alloca,
1122 // the highest %f1 can be is (%alloca + 3). This means %random can not be higher
1123 // than (%alloca - 1), and so is not inbounds, a contradiction.
1124 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
1125       const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
1126       uint64_t ObjectAccessSize) {
1127   // If the object access size is unknown, or the GEP isn't inbounds, bail.
1128   if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
1129     return false;
1130 
1131   // We need the object to be an alloca or a globalvariable, and want to know
1132   // the offset of the pointer from the object precisely, so no variable
1133   // indices are allowed.
1134   if (!(isa<AllocaInst>(DecompObject.Base) ||
1135         isa<GlobalVariable>(DecompObject.Base)) ||
1136       !DecompObject.VarIndices.empty())
1137     return false;
1138 
1139   int64_t ObjectBaseOffset = DecompObject.StructOffset +
1140                              DecompObject.OtherOffset;
1141 
1142   // If the GEP has no variable indices, we know the precise offset
1143   // from the base, then use it. If the GEP has variable indices, we're in
1144   // a bit more trouble: we can't count on the constant offsets that come
1145   // from non-struct sources, since these can be "rewound" by a negative
1146   // variable offset. So use only offsets that came from structs.
1147   int64_t GEPBaseOffset = DecompGEP.StructOffset;
1148   if (DecompGEP.VarIndices.empty())
1149     GEPBaseOffset += DecompGEP.OtherOffset;
1150 
1151   return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
1152 }
1153 
1154 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1155 /// another pointer.
1156 ///
1157 /// We know that V1 is a GEP, but we don't know anything about V2.
1158 /// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
1159 /// V2.
1160 AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
1161                                     const AAMDNodes &V1AAInfo, const Value *V2,
1162                                     uint64_t V2Size, const AAMDNodes &V2AAInfo,
1163                                     const Value *UnderlyingV1,
1164                                     const Value *UnderlyingV2) {
1165   DecomposedGEP DecompGEP1, DecompGEP2;
1166   bool GEP1MaxLookupReached =
1167     DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1168   bool GEP2MaxLookupReached =
1169     DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1170 
1171   int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
1172   int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
1173 
1174   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1175          "DecomposeGEPExpression returned a result different from "
1176          "GetUnderlyingObject");
1177 
1178   // If the GEP's offset relative to its base is such that the base would
1179   // fall below the start of the object underlying V2, then the GEP and V2
1180   // cannot alias.
1181   if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1182       isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1183     return NoAlias;
1184   // If we have two gep instructions with must-alias or not-alias'ing base
1185   // pointers, figure out if the indexes to the GEP tell us anything about the
1186   // derived pointer.
1187   if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1188     // Check for the GEP base being at a negative offset, this time in the other
1189     // direction.
1190     if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1191         isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1192       return NoAlias;
1193     // Do the base pointers alias?
1194     AliasResult BaseAlias =
1195         aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
1196                    UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());
1197 
1198     // Check for geps of non-aliasing underlying pointers where the offsets are
1199     // identical.
1200     if ((BaseAlias == MayAlias) && V1Size == V2Size) {
1201       // Do the base pointers alias assuming type and size.
1202       AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
1203                                                 UnderlyingV2, V2Size, V2AAInfo);
1204       if (PreciseBaseAlias == NoAlias) {
1205         // See if the computed offset from the common pointer tells us about the
1206         // relation of the resulting pointer.
1207         // If the max search depth is reached the result is undefined
1208         if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1209           return MayAlias;
1210 
1211         // Same offsets.
1212         if (GEP1BaseOffset == GEP2BaseOffset &&
1213             DecompGEP1.VarIndices == DecompGEP2.VarIndices)
1214           return NoAlias;
1215       }
1216     }
1217 
1218     // If we get a No or May, then return it immediately, no amount of analysis
1219     // will improve this situation.
1220     if (BaseAlias != MustAlias) {
1221       assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1222       return BaseAlias;
1223     }
1224 
1225     // Otherwise, we have a MustAlias.  Since the base pointers alias each other
1226     // exactly, see if the computed offset from the common pointer tells us
1227     // about the relation of the resulting pointer.
1228     // If we know the two GEPs are based off of the exact same pointer (and not
1229     // just the same underlying object), see if that tells us anything about
1230     // the resulting pointers.
1231     if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
1232             GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
1233         GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1234       AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1235       // If we couldn't find anything interesting, don't abandon just yet.
1236       if (R != MayAlias)
1237         return R;
1238     }
1239 
1240     // If the max search depth is reached, the result is undefined
1241     if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1242       return MayAlias;
1243 
1244     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1245     // symbolic difference.
1246     GEP1BaseOffset -= GEP2BaseOffset;
1247     GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1248 
1249   } else {
1250     // Check to see if these two pointers are related by the getelementptr
1251     // instruction.  If one pointer is a GEP with a non-zero index of the other
1252     // pointer, we know they cannot alias.
1253 
1254     // If both accesses are unknown size, we can't do anything useful here.
1255     if (V1Size == MemoryLocation::UnknownSize &&
1256         V2Size == MemoryLocation::UnknownSize)
1257       return MayAlias;
1258 
1259     AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
1260                                AAMDNodes(), V2, MemoryLocation::UnknownSize,
1261                                V2AAInfo, nullptr, UnderlyingV2);
1262     if (R != MustAlias) {
1263       // If V2 may alias GEP base pointer, conservatively returns MayAlias.
1264       // If V2 is known not to alias GEP base pointer, then the two values
1265       // cannot alias per GEP semantics: "Any memory access must be done through
1266       // a pointer value associated with an address range of the memory access,
1267       // otherwise the behavior is undefined.".
1268       assert(R == NoAlias || R == MayAlias);
1269       return R;
1270     }
1271 
1272     // If the max search depth is reached the result is undefined
1273     if (GEP1MaxLookupReached)
1274       return MayAlias;
1275   }
1276 
1277   // In the two GEP Case, if there is no difference in the offsets of the
1278   // computed pointers, the resultant pointers are a must alias.  This
1279   // happens when we have two lexically identical GEP's (for example).
1280   //
1281   // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
1282   // must aliases the GEP, the end result is a must alias also.
1283   if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
1284     return MustAlias;
1285 
1286   // If there is a constant difference between the pointers, but the difference
1287   // is less than the size of the associated memory object, then we know
1288   // that the objects are partially overlapping.  If the difference is
1289   // greater, we know they do not overlap.
1290   if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
1291     if (GEP1BaseOffset >= 0) {
1292       if (V2Size != MemoryLocation::UnknownSize) {
1293         if ((uint64_t)GEP1BaseOffset < V2Size)
1294           return PartialAlias;
1295         return NoAlias;
1296       }
1297     } else {
1298       // We have the situation where:
1299       // +                +
1300       // | BaseOffset     |
1301       // ---------------->|
1302       // |-->V1Size       |-------> V2Size
1303       // GEP1             V2
1304       // We need to know that V2Size is not unknown, otherwise we might have
1305       // stripped a gep with negative index ('gep <ptr>, -1, ...).
1306       if (V1Size != MemoryLocation::UnknownSize &&
1307           V2Size != MemoryLocation::UnknownSize) {
1308         if (-(uint64_t)GEP1BaseOffset < V1Size)
1309           return PartialAlias;
1310         return NoAlias;
1311       }
1312     }
1313   }
1314 
1315   if (!DecompGEP1.VarIndices.empty()) {
1316     uint64_t Modulo = 0;
1317     bool AllPositive = true;
1318     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1319 
1320       // Try to distinguish something like &A[i][1] against &A[42][0].
1321       // Grab the least significant bit set in any of the scales. We
1322       // don't need std::abs here (even if the scale's negative) as we'll
1323       // be ^'ing Modulo with itself later.
1324       Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;
1325 
1326       if (AllPositive) {
1327         // If the Value could change between cycles, then any reasoning about
1328         // the Value this cycle may not hold in the next cycle. We'll just
1329         // give up if we can't determine conditions that hold for every cycle:
1330         const Value *V = DecompGEP1.VarIndices[i].V;
1331 
1332         KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
1333         bool SignKnownZero = Known.isNonNegative();
1334         bool SignKnownOne = Known.isNegative();
1335 
1336         // Zero-extension widens the variable, and so forces the sign
1337         // bit to zero.
1338         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1339         SignKnownZero |= IsZExt;
1340         SignKnownOne &= !IsZExt;
1341 
1342         // If the variable begins with a zero then we know it's
1343         // positive, regardless of whether the value is signed or
1344         // unsigned.
1345         int64_t Scale = DecompGEP1.VarIndices[i].Scale;
1346         AllPositive =
1347             (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
1348       }
1349     }
1350 
1351     Modulo = Modulo ^ (Modulo & (Modulo - 1));
1352 
1353     // We can compute the difference between the two addresses
1354     // mod Modulo. Check whether that difference guarantees that the
1355     // two locations do not alias.
1356     uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
1357     if (V1Size != MemoryLocation::UnknownSize &&
1358         V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
1359         V1Size <= Modulo - ModOffset)
1360       return NoAlias;
1361 
1362     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
1363     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
1364     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
1365     if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
1366       return NoAlias;
1367 
1368     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1369                                 GEP1BaseOffset, &AC, DT))
1370       return NoAlias;
1371   }
1372 
1373   // Statically, we can see that the base objects are the same, but the
1374   // pointers have dynamic offsets which we can't resolve. And none of our
1375   // little tricks above worked.
1376   return MayAlias;
1377 }
1378 
1379 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1380   // If the results agree, take it.
1381   if (A == B)
1382     return A;
1383   // A mix of PartialAlias and MustAlias is PartialAlias.
1384   if ((A == PartialAlias && B == MustAlias) ||
1385       (B == PartialAlias && A == MustAlias))
1386     return PartialAlias;
1387   // Otherwise, we don't know anything.
1388   return MayAlias;
1389 }
1390 
1391 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1392 /// against another.
1393 AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
1394                                        const AAMDNodes &SIAAInfo,
1395                                        const Value *V2, uint64_t V2Size,
1396                                        const AAMDNodes &V2AAInfo,
1397                                        const Value *UnderV2) {
1398   // If the values are Selects with the same condition, we can do a more precise
1399   // check: just check for aliases between the values on corresponding arms.
1400   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1401     if (SI->getCondition() == SI2->getCondition()) {
1402       AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
1403                                      SI2->getTrueValue(), V2Size, V2AAInfo);
1404       if (Alias == MayAlias)
1405         return MayAlias;
1406       AliasResult ThisAlias =
1407           aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1408                      SI2->getFalseValue(), V2Size, V2AAInfo);
1409       return MergeAliasResults(ThisAlias, Alias);
1410     }
1411 
1412   // If both arms of the Select node NoAlias or MustAlias V2, then returns
1413   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1414   AliasResult Alias =
1415       aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1416                  SISize, SIAAInfo, UnderV2);
1417   if (Alias == MayAlias)
1418     return MayAlias;
1419 
1420   AliasResult ThisAlias =
1421       aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
1422                  UnderV2);
1423   return MergeAliasResults(ThisAlias, Alias);
1424 }
1425 
1426 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1427 /// another.
1428 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
1429                                     const AAMDNodes &PNAAInfo, const Value *V2,
1430                                     uint64_t V2Size, const AAMDNodes &V2AAInfo,
1431                                     const Value *UnderV2) {
1432   // Track phi nodes we have visited. We use this information when we determine
1433   // value equivalence.
1434   VisitedPhiBBs.insert(PN->getParent());
1435 
1436   // If the values are PHIs in the same block, we can do a more precise
1437   // as well as efficient check: just check for aliases between the values
1438   // on corresponding edges.
1439   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1440     if (PN2->getParent() == PN->getParent()) {
1441       LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1442                    MemoryLocation(V2, V2Size, V2AAInfo));
1443       if (PN > V2)
1444         std::swap(Locs.first, Locs.second);
1445       // Analyse the PHIs' inputs under the assumption that the PHIs are
1446       // NoAlias.
1447       // If the PHIs are May/MustAlias there must be (recursively) an input
1448       // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1449       // there must be an operation on the PHIs within the PHIs' value cycle
1450       // that causes a MayAlias.
1451       // Pretend the phis do not alias.
1452       AliasResult Alias = NoAlias;
1453       assert(AliasCache.count(Locs) &&
1454              "There must exist an entry for the phi node");
1455       AliasResult OrigAliasResult = AliasCache[Locs];
1456       AliasCache[Locs] = NoAlias;
1457 
1458       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1459         AliasResult ThisAlias =
1460             aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1461                        PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1462                        V2Size, V2AAInfo);
1463         Alias = MergeAliasResults(ThisAlias, Alias);
1464         if (Alias == MayAlias)
1465           break;
1466       }
1467 
1468       // Reset if speculation failed.
1469       if (Alias != NoAlias)
1470         AliasCache[Locs] = OrigAliasResult;
1471 
1472       return Alias;
1473     }
1474 
1475   SmallPtrSet<Value *, 4> UniqueSrc;
1476   SmallVector<Value *, 4> V1Srcs;
1477   bool isRecursive = false;
1478   for (Value *PV1 : PN->incoming_values()) {
1479     if (isa<PHINode>(PV1))
1480       // If any of the source itself is a PHI, return MayAlias conservatively
1481       // to avoid compile time explosion. The worst possible case is if both
1482       // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
1483       // and 'n' are the number of PHI sources.
1484       return MayAlias;
1485 
1486     if (EnableRecPhiAnalysis)
1487       if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1488         // Check whether the incoming value is a GEP that advances the pointer
1489         // result of this PHI node (e.g. in a loop). If this is the case, we
1490         // would recurse and always get a MayAlias. Handle this case specially
1491         // below.
1492         if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1493             isa<ConstantInt>(PV1GEP->idx_begin())) {
1494           isRecursive = true;
1495           continue;
1496         }
1497       }
1498 
1499     if (UniqueSrc.insert(PV1).second)
1500       V1Srcs.push_back(PV1);
1501   }
1502 
1503   // If this PHI node is recursive, set the size of the accessed memory to
1504   // unknown to represent all the possible values the GEP could advance the
1505   // pointer to.
1506   if (isRecursive)
1507     PNSize = MemoryLocation::UnknownSize;
1508 
1509   AliasResult Alias =
1510       aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
1511                  PNSize, PNAAInfo, UnderV2);
1512 
1513   // Early exit if the check of the first PHI source against V2 is MayAlias.
1514   // Other results are not possible.
1515   if (Alias == MayAlias)
1516     return MayAlias;
1517 
1518   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1519   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1520   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1521     Value *V = V1Srcs[i];
1522 
1523     AliasResult ThisAlias =
1524         aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
1525     Alias = MergeAliasResults(ThisAlias, Alias);
1526     if (Alias == MayAlias)
1527       break;
1528   }
1529 
1530   return Alias;
1531 }
1532 
1533 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1534 /// array references.
1535 AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
1536                                       AAMDNodes V1AAInfo, const Value *V2,
1537                                       uint64_t V2Size, AAMDNodes V2AAInfo,
1538                                       const Value *O1, const Value *O2) {
1539   // If either of the memory references is empty, it doesn't matter what the
1540   // pointer values are.
1541   if (V1Size == 0 || V2Size == 0)
1542     return NoAlias;
1543 
1544   // Strip off any casts if they exist.
1545   V1 = V1->stripPointerCastsAndBarriers();
1546   V2 = V2->stripPointerCastsAndBarriers();
1547 
1548   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1549   // value for undef that aliases nothing in the program.
1550   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1551     return NoAlias;
1552 
1553   // Are we checking for alias of the same value?
1554   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1555   // different iterations. We must therefore make sure that this is not the
1556   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1557   // happen by looking at the visited phi nodes and making sure they cannot
1558   // reach the value.
1559   if (isValueEqualInPotentialCycles(V1, V2))
1560     return MustAlias;
1561 
1562   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1563     return NoAlias; // Scalars cannot alias each other
1564 
1565   // Figure out what objects these things are pointing to if we can.
1566   if (O1 == nullptr)
1567     O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1568 
1569   if (O2 == nullptr)
1570     O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1571 
1572   // Null values in the default address space don't point to any object, so they
1573   // don't alias any other pointer.
1574   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1575     if (CPN->getType()->getAddressSpace() == 0)
1576       return NoAlias;
1577   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1578     if (CPN->getType()->getAddressSpace() == 0)
1579       return NoAlias;
1580 
1581   if (O1 != O2) {
1582     // If V1/V2 point to two different objects, we know that we have no alias.
1583     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1584       return NoAlias;
1585 
1586     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1587     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1588         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1589       return NoAlias;
1590 
1591     // Function arguments can't alias with things that are known to be
1592     // unambigously identified at the function level.
1593     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1594         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1595       return NoAlias;
1596 
1597     // If one pointer is the result of a call/invoke or load and the other is a
1598     // non-escaping local object within the same function, then we know the
1599     // object couldn't escape to a point where the call could return it.
1600     //
1601     // Note that if the pointers are in different functions, there are a
1602     // variety of complications. A call with a nocapture argument may still
1603     // temporary store the nocapture argument's value in a temporary memory
1604     // location if that memory location doesn't escape. Or it may pass a
1605     // nocapture value to other functions as long as they don't capture it.
1606     if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1607       return NoAlias;
1608     if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1609       return NoAlias;
1610   }
1611 
1612   // If the size of one access is larger than the entire object on the other
1613   // side, then we know such behavior is undefined and can assume no alias.
1614   if ((V1Size != MemoryLocation::UnknownSize &&
1615        isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
1616       (V2Size != MemoryLocation::UnknownSize &&
1617        isObjectSmallerThan(O1, V2Size, DL, TLI)))
1618     return NoAlias;
1619 
1620   // Check the cache before climbing up use-def chains. This also terminates
1621   // otherwise infinitely recursive queries.
1622   LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1623                MemoryLocation(V2, V2Size, V2AAInfo));
1624   if (V1 > V2)
1625     std::swap(Locs.first, Locs.second);
1626   std::pair<AliasCacheTy::iterator, bool> Pair =
1627       AliasCache.insert(std::make_pair(Locs, MayAlias));
1628   if (!Pair.second)
1629     return Pair.first->second;
1630 
1631   // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1632   // GEP can't simplify, we don't even look at the PHI cases.
1633   if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1634     std::swap(V1, V2);
1635     std::swap(V1Size, V2Size);
1636     std::swap(O1, O2);
1637     std::swap(V1AAInfo, V2AAInfo);
1638   }
1639   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1640     AliasResult Result =
1641         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1642     if (Result != MayAlias)
1643       return AliasCache[Locs] = Result;
1644   }
1645 
1646   if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1647     std::swap(V1, V2);
1648     std::swap(O1, O2);
1649     std::swap(V1Size, V2Size);
1650     std::swap(V1AAInfo, V2AAInfo);
1651   }
1652   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1653     AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
1654                                   V2, V2Size, V2AAInfo, O2);
1655     if (Result != MayAlias)
1656       return AliasCache[Locs] = Result;
1657   }
1658 
1659   if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1660     std::swap(V1, V2);
1661     std::swap(O1, O2);
1662     std::swap(V1Size, V2Size);
1663     std::swap(V1AAInfo, V2AAInfo);
1664   }
1665   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1666     AliasResult Result =
1667         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
1668     if (Result != MayAlias)
1669       return AliasCache[Locs] = Result;
1670   }
1671 
1672   // If both pointers are pointing into the same object and one of them
1673   // accesses the entire object, then the accesses must overlap in some way.
1674   if (O1 == O2)
1675     if (V1Size != MemoryLocation::UnknownSize &&
1676         V2Size != MemoryLocation::UnknownSize &&
1677         (isObjectSize(O1, V1Size, DL, TLI) ||
1678          isObjectSize(O2, V2Size, DL, TLI)))
1679       return AliasCache[Locs] = PartialAlias;
1680 
1681   // Recurse back into the best AA results we have, potentially with refined
1682   // memory locations. We have already ensured that BasicAA has a MayAlias
1683   // cache result for these, so any recursion back into BasicAA won't loop.
1684   AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1685   return AliasCache[Locs] = Result;
1686 }
1687 
1688 /// Check whether two Values can be considered equivalent.
1689 ///
1690 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1691 /// they can not be part of a cycle in the value graph by looking at all
1692 /// visited phi nodes an making sure that the phis cannot reach the value. We
1693 /// have to do this because we are looking through phi nodes (That is we say
1694 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1695 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1696                                                   const Value *V2) {
1697   if (V != V2)
1698     return false;
1699 
1700   const Instruction *Inst = dyn_cast<Instruction>(V);
1701   if (!Inst)
1702     return true;
1703 
1704   if (VisitedPhiBBs.empty())
1705     return true;
1706 
1707   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1708     return false;
1709 
1710   // Make sure that the visited phis cannot reach the Value. This ensures that
1711   // the Values cannot come from different iterations of a potential cycle the
1712   // phi nodes could be involved in.
1713   for (auto *P : VisitedPhiBBs)
1714     if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1715       return false;
1716 
1717   return true;
1718 }
1719 
1720 /// Computes the symbolic difference between two de-composed GEPs.
1721 ///
1722 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1723 /// instructions GEP1 and GEP2 which have common base pointers.
1724 void BasicAAResult::GetIndexDifference(
1725     SmallVectorImpl<VariableGEPIndex> &Dest,
1726     const SmallVectorImpl<VariableGEPIndex> &Src) {
1727   if (Src.empty())
1728     return;
1729 
1730   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1731     const Value *V = Src[i].V;
1732     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1733     int64_t Scale = Src[i].Scale;
1734 
1735     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1736     // than a few variable indexes.
1737     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1738       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1739           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1740         continue;
1741 
1742       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1743       // goes to zero, remove the entry.
1744       if (Dest[j].Scale != Scale)
1745         Dest[j].Scale -= Scale;
1746       else
1747         Dest.erase(Dest.begin() + j);
1748       Scale = 0;
1749       break;
1750     }
1751 
1752     // If we didn't consume this entry, add it to the end of the Dest list.
1753     if (Scale) {
1754       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1755       Dest.push_back(Entry);
1756     }
1757   }
1758 }
1759 
1760 bool BasicAAResult::constantOffsetHeuristic(
1761     const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
1762     uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
1763     DominatorTree *DT) {
1764   if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
1765       V2Size == MemoryLocation::UnknownSize)
1766     return false;
1767 
1768   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1769 
1770   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1771       Var0.Scale != -Var1.Scale)
1772     return false;
1773 
1774   unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1775 
1776   // We'll strip off the Extensions of Var0 and Var1 and do another round
1777   // of GetLinearExpression decomposition. In the example above, if Var0
1778   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1779 
1780   APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1781       V1Offset(Width, 0);
1782   bool NSW = true, NUW = true;
1783   unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1784   const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1785                                         V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1786   NSW = true;
1787   NUW = true;
1788   const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1789                                         V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1790 
1791   if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1792       V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1793     return false;
1794 
1795   // We have a hit - Var0 and Var1 only differ by a constant offset!
1796 
1797   // If we've been sext'ed then zext'd the maximum difference between Var0 and
1798   // Var1 is possible to calculate, but we're just interested in the absolute
1799   // minimum difference between the two. The minimum distance may occur due to
1800   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1801   // the minimum distance between %i and %i + 5 is 3.
1802   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1803   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1804   uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1805 
1806   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1807   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1808   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1809   // V2Size can fit in the MinDiffBytes gap.
1810   return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1811          V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1812 }
1813 
1814 //===----------------------------------------------------------------------===//
1815 // BasicAliasAnalysis Pass
1816 //===----------------------------------------------------------------------===//
1817 
1818 AnalysisKey BasicAA::Key;
1819 
1820 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1821   return BasicAAResult(F.getParent()->getDataLayout(),
1822                        AM.getResult<TargetLibraryAnalysis>(F),
1823                        AM.getResult<AssumptionAnalysis>(F),
1824                        &AM.getResult<DominatorTreeAnalysis>(F),
1825                        AM.getCachedResult<LoopAnalysis>(F));
1826 }
1827 
1828 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1829     initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1830 }
1831 
1832 char BasicAAWrapperPass::ID = 0;
1833 
1834 void BasicAAWrapperPass::anchor() {}
1835 
1836 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
1837                       "Basic Alias Analysis (stateless AA impl)", true, true)
1838 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1839 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1840 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1841 INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
1842                     "Basic Alias Analysis (stateless AA impl)", true, true)
1843 
1844 FunctionPass *llvm::createBasicAAWrapperPass() {
1845   return new BasicAAWrapperPass();
1846 }
1847 
1848 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1849   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1850   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1851   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1852   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1853 
1854   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
1855                                  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1856                                  LIWP ? &LIWP->getLoopInfo() : nullptr));
1857 
1858   return false;
1859 }
1860 
1861 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1862   AU.setPreservesAll();
1863   AU.addRequired<AssumptionCacheTracker>();
1864   AU.addRequired<DominatorTreeWrapperPass>();
1865   AU.addRequired<TargetLibraryInfoWrapperPass>();
1866 }
1867 
1868 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1869   return BasicAAResult(
1870       F.getParent()->getDataLayout(),
1871       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1872       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1873 }
1874