//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));
/// The ratio SearchLimitReached / SearchTimes shows how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
static const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The maximum search depth for DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions must use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
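///
/// For example (illustrative only): an alloca whose address is used solely by
/// loads and stores in this function is a non-escaping local object, while an
/// alloca whose address is passed to an unknown call may be captured by that
/// call and so does not qualify.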
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, /*ReturnCaptures=*/false,
                                 /*StoreCaptures=*/true);

  // If this is an argument with the byval or noalias attribute, then it has
  // not escaped before entering the function.  Check if it escapes inside
  // the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, /*ReturnCaptures=*/false,
                                   /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meaning of "object" differs slightly in the following
  // contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  //  Consider this example:
  //     char *p = (char*)malloc(100)
  //     char *q = p+80;
  //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". If q is passed to isObjectSmallerThan() as the first
  // parameter, then before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //    - either rewind the pointer q to the base-address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the second option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether we looked through any sign or zero extends.  The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
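///
/// For example (illustrative only, with Scale and Offset zero-initialized as
/// in the caller below): analyzing "%a = shl i32 %x, 2" yields V = %x with
/// Scale = 4 and Offset = 0, while "%a = add i32 %x, 5" yields V = %x with
/// Scale = 1 and Offset = 5.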
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {

      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
      // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets.  The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32-bit programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
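///
/// For example, with PointerSize == 32, adjustToPointerSize(0xFFFFFFFF, 32)
/// sign-extends bit 31 and yields -1, matching the two's complement value a
/// 32-bit program would actually observe.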
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// Returns true if the maximum search depth was reached; in that case the
/// decomposition is incomplete and callers must treat the result
/// conservatively.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
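///
/// For example (a sketch; offsets assume a DataLayout where i32 has 4-byte
/// alloc size), decomposing
///   %p = getelementptr {i32, [8 x i32]}, {i32, [8 x i32]}* %base,
///                      i64 0, i32 1, i64 %i
/// yields Base = %base, StructOffset = 4 (field 1 of the struct),
/// OtherOffset = 0, and a single variable index {%i, Scale = 4}.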
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into
    // StructOffset/OtherOffset/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
          DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
          DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    Decomposed.StructOffset =
      adjustToPointerSize(Decomposed.StructOffset, PointerSize);
    Decomposed.OtherOffset =
      adjustToPointerSize(Decomposed.OtherOffset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
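///
/// For example (a sketch), given
///   @g = constant i32 42
///   %a = alloca i32
///   %p = select i1 %c, i32* @g, i32* %a
/// a query on %p with OrLocal == true can return true, because each select arm
/// is either a constant global or a local alloca; with OrLocal == false, the
/// alloca arm forces a conservative answer.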
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite.  This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
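///
/// For example (a sketch of the lattice behavior), a function carrying both
/// the 'readonly' and 'argmemonly' attributes intersects to
/// FMRB_OnlyReadsArgumentPointees: each attribute contributes an upper bound,
/// and the bitwise & below takes their meet.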
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.  Currently,
/// we don't have a writeonly attribute, so this only knows about builtin
/// intrinsics and target library functions.  We could consider adding a
/// writeonly attribute in the future and moving all of these facts to either
/// Intrinsics.td or InferFunctionAttr.cpp.
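///
/// For example, for a call to llvm.memcpy, ArgIdx == 0 (the destination) is
/// treated as write-only here, while the source operand is already covered by
/// the readonly attribute attached in Intrinsics.td.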
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // We don't currently have a writeonly attribute.  All other properties
      // of these intrinsics are nicely described via attributes in
      // Intrinsics.td and handled generically.
      if (ArgIdx == 0)
        return true;
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset.  This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.  Note that all but the missing writeonly attribute are
  // handled via InferFunctionAttr.
  LibFunc::Func F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc::memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {

  // Emulate the missing writeonly attribute by checking for known builtin
  // intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return MRI_Mod;

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadOnly))
    return MRI_Ref;

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadNone))
    return MRI_NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return MRI_NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments.  If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) && !CS.isByValArgument(OperandNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.  If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != NoAlias) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return MRI_NoModRef;
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocLikeFn(Inst, &TLI) || isCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return MRI_NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return MRI_Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return MRI_NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special-case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
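///
/// For example (a sketch), given
///   %f0 = getelementptr {i32, i32}, {i32, i32}* %p, i64 0, i32 0
///   %f1 = getelementptr {i32, i32}, {i32, i32}* %p, i64 0, i32 1
/// with 4-byte accesses through both, the two struct fields cannot overlap, so
/// the rules below conclude NoAlias.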
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {

  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other indices
  // might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type.  If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
    GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    if (isKnownNonEqual(GEP1->getOperand(GEP1->getNumOperands() - 1),
                        GEP2->getOperand(GEP2->getNumOperands() - 1),
                        DL))
      return NoAlias;
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint.  Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, then the GEP can not alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//  %alloca = alloca %struct.foo
//  %random = call %struct.foo* @random(%struct.foo* %alloca)
//  %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//  %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random can not be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      uint64_t ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a GlobalVariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base and can use it. If the GEP has variable indices, we're in
  // a bit more trouble: we can't count on the constant offsets that come
  // from non-struct sources, since these can be "rewound" by a negative
  // variable offset. So use only offsets that came from structs.
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  if (DecompGEP.VarIndices.empty())
    GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                                    const AAMDNodes &V1AAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
    DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
    DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two GEP instructions with must-alias'ing or no-alias'ing base
  // pointers, figure out whether the indices of the GEPs tell us anything
  // about the derived pointers.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the other
    // direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for GEPs of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias)
      return BaseAlias;

    // Otherwise, we have a MustAlias.  Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
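    // For example (a sketch), if GEP1 decomposed to {%x, Scale 4} + 8 and
    // GEP2 decomposed to {%x, Scale 4} + 4, the matching {%x, Scale 4} terms
    // cancel in GetIndexDifference, leaving only the constant difference of 4.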
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction.  If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, we conservatively return
      // MayAlias. If V2 is known not to alias the GEP base pointer, then the
      // two values cannot alias per GEP semantics: "A pointer value formed
      // from a getelementptr instruction is associated with the addresses
      // associated with the first operand of the getelementptr".
      return R;

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias.  This
  // happens when we have two lexically identical GEP's (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.  If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
                       0, &AC, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

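    // Keep only the lowest set bit of Modulo: x & (x - 1) clears the lowest
    // set bit, and xor'ing that back against x leaves just that bit. For
    // example, Modulo == 0b10100 becomes 0b00100, so the address difference
    // is then known modulo 4.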
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}
1235 
1236 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1237   // If the results agree, take it.
1238   if (A == B)
1239     return A;
1240   // A mix of PartialAlias and MustAlias is PartialAlias.
1241   if ((A == PartialAlias && B == MustAlias) ||
1242       (B == PartialAlias && A == MustAlias))
1243     return PartialAlias;
1244   // Otherwise, we don't know anything.
1245   return MayAlias;
1246 }
1247 
1248 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1249 /// against another.
1250 AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
1251                                        const AAMDNodes &SIAAInfo,
1252                                        const Value *V2, uint64_t V2Size,
1253                                        const AAMDNodes &V2AAInfo) {
1254   // If the values are Selects with the same condition, we can do a more precise
1255   // check: just check for aliases between the values on corresponding arms.
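  // For example (illustrative IR, not from this file):
  //   %s1 = select i1 %c, i32* %a, i32* %b
  //   %s2 = select i1 %c, i32* %x, i32* %y
  // Only %a vs. %x and %b vs. %y need to be checked, because %s1 and %s2
  // always take the same arm.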
1256   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1257     if (SI->getCondition() == SI2->getCondition()) {
1258       AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
1259                                      SI2->getTrueValue(), V2Size, V2AAInfo);
1260       if (Alias == MayAlias)
1261         return MayAlias;
1262       AliasResult ThisAlias =
1263           aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1264                      SI2->getFalseValue(), V2Size, V2AAInfo);
1265       return MergeAliasResults(ThisAlias, Alias);
1266     }
1267 
  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
1270   AliasResult Alias =
1271       aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
1272   if (Alias == MayAlias)
1273     return MayAlias;
1274 
1275   AliasResult ThisAlias =
1276       aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
1277   return MergeAliasResults(ThisAlias, Alias);
1278 }
1279 
/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
1282 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
1283                                     const AAMDNodes &PNAAInfo, const Value *V2,
1284                                     uint64_t V2Size,
1285                                     const AAMDNodes &V2AAInfo) {
1286   // Track phi nodes we have visited. We use this information when we determine
1287   // value equivalence.
1288   VisitedPhiBBs.insert(PN->getParent());
1289 
  // If the values are PHIs in the same block, we can do a more precise and
  // more efficient check: just check for aliases between the values on
  // corresponding edges.
1293   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1294     if (PN2->getParent() == PN->getParent()) {
1295       LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1296                    MemoryLocation(V2, V2Size, V2AAInfo));
1297       if (PN > V2)
1298         std::swap(Locs.first, Locs.second);
1299       // Analyse the PHIs' inputs under the assumption that the PHIs are
1300       // NoAlias.
1301       // If the PHIs are May/MustAlias there must be (recursively) an input
1302       // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1303       // there must be an operation on the PHIs within the PHIs' value cycle
1304       // that causes a MayAlias.
1305       // Pretend the phis do not alias.
1306       AliasResult Alias = NoAlias;
1307       assert(AliasCache.count(Locs) &&
1308              "There must exist an entry for the phi node");
1309       AliasResult OrigAliasResult = AliasCache[Locs];
1310       AliasCache[Locs] = NoAlias;
1311 
1312       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1313         AliasResult ThisAlias =
1314             aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1315                        PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1316                        V2Size, V2AAInfo);
1317         Alias = MergeAliasResults(ThisAlias, Alias);
1318         if (Alias == MayAlias)
1319           break;
1320       }
1321 
1322       // Reset if speculation failed.
1323       if (Alias != NoAlias)
1324         AliasCache[Locs] = OrigAliasResult;
1325 
1326       return Alias;
1327     }
1328 
1329   SmallPtrSet<Value *, 4> UniqueSrc;
1330   SmallVector<Value *, 4> V1Srcs;
1331   bool isRecursive = false;
1332   for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any source itself is a PHI, return MayAlias conservatively to
      // avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
1339 
1340     if (EnableRecPhiAnalysis)
1341       if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1342         // Check whether the incoming value is a GEP that advances the pointer
1343         // result of this PHI node (e.g. in a loop). If this is the case, we
1344         // would recurse and always get a MayAlias. Handle this case specially
1345         // below.
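        // Illustrative IR (not from this file):
        //   loop:
        //     %p = phi i32* [ %base, %entry ], [ %p.next, %loop ]
        //     %p.next = getelementptr i32, i32* %p, i64 1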
1346         if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1347             isa<ConstantInt>(PV1GEP->idx_begin())) {
1348           isRecursive = true;
1349           continue;
1350         }
1351       }
1352 
1353     if (UniqueSrc.insert(PV1).second)
1354       V1Srcs.push_back(PV1);
1355   }
1356 
1357   // If this PHI node is recursive, set the size of the accessed memory to
1358   // unknown to represent all the possible values the GEP could advance the
1359   // pointer to.
1360   if (isRecursive)
1361     PNSize = MemoryLocation::UnknownSize;

  // If every incoming value was a recursive GEP, V1Srcs is empty; bail out
  // conservatively rather than indexing into an empty vector below.
  if (V1Srcs.empty())
    return MayAlias;

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo);
1365 
1366   // Early exit if the check of the first PHI source against V2 is MayAlias.
1367   // Other results are not possible.
1368   if (Alias == MayAlias)
1369     return MayAlias;
1370 
  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
1373   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1374     Value *V = V1Srcs[i];
1375 
1376     AliasResult ThisAlias =
1377         aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo);
1378     Alias = MergeAliasResults(ThisAlias, Alias);
1379     if (Alias == MayAlias)
1380       break;
1381   }
1382 
1383   return Alias;
1384 }
1385 
1386 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1387 /// array references.
1388 AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
1389                                       AAMDNodes V1AAInfo, const Value *V2,
1390                                       uint64_t V2Size, AAMDNodes V2AAInfo) {
1391   // If either of the memory references is empty, it doesn't matter what the
1392   // pointer values are.
1393   if (V1Size == 0 || V2Size == 0)
1394     return NoAlias;
1395 
1396   // Strip off any casts if they exist.
1397   V1 = V1->stripPointerCasts();
1398   V2 = V2->stripPointerCasts();
1399 
1400   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1401   // value for undef that aliases nothing in the program.
1402   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1403     return NoAlias;
1404 
1405   // Are we checking for alias of the same value?
1406   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1407   // different iterations. We must therefore make sure that this is not the
1408   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1409   // happen by looking at the visited phi nodes and making sure they cannot
1410   // reach the value.
1411   if (isValueEqualInPotentialCycles(V1, V2))
1412     return MustAlias;
1413 
1414   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1415     return NoAlias; // Scalars cannot alias each other
1416 
1417   // Figure out what objects these things are pointing to if we can.
1418   const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1419   const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1420 
1421   // Null values in the default address space don't point to any object, so they
1422   // don't alias any other pointer.
1423   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1424     if (CPN->getType()->getAddressSpace() == 0)
1425       return NoAlias;
1426   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1427     if (CPN->getType()->getAddressSpace() == 0)
1428       return NoAlias;
1429 
1430   if (O1 != O2) {
1431     // If V1/V2 point to two different objects, we know that we have no alias.
1432     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1433       return NoAlias;
1434 
    // Constant pointers can't alias non-constant objects recognized by
    // isIdentifiedObject.
1436     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1437         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1438       return NoAlias;
1439 
    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
1442     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1443         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1444       return NoAlias;
1445 
1446     // Most objects can't alias null.
1447     if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
1448         (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
1449       return NoAlias;
1450 
1451     // If one pointer is the result of a call/invoke or load and the other is a
1452     // non-escaping local object within the same function, then we know the
1453     // object couldn't escape to a point where the call could return it.
1454     //
1455     // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a memory location
    // that doesn't escape. Or it may pass a nocapture value to other
    // functions as long as they don't capture it.
1460     if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
1461       return NoAlias;
1462     if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
1463       return NoAlias;
1464   }
1465 
1466   // If the size of one access is larger than the entire object on the other
1467   // side, then we know such behavior is undefined and can assume no alias.
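  // (E.g. an access of 16 bytes cannot lie entirely within an 8-byte object,
  // so a pointer used for such an access cannot point into that object.)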
1468   if ((V1Size != MemoryLocation::UnknownSize &&
1469        isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
1470       (V2Size != MemoryLocation::UnknownSize &&
1471        isObjectSmallerThan(O1, V2Size, DL, TLI)))
1472     return NoAlias;
1473 
1474   // Check the cache before climbing up use-def chains. This also terminates
1475   // otherwise infinitely recursive queries.
1476   LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1477                MemoryLocation(V2, V2Size, V2AAInfo));
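  // Order the pair by pointer value so that (V1, V2) and (V2, V1) queries
  // share a single canonical cache key.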
1478   if (V1 > V2)
1479     std::swap(Locs.first, Locs.second);
1480   std::pair<AliasCacheTy::iterator, bool> Pair =
1481       AliasCache.insert(std::make_pair(Locs, MayAlias));
1482   if (!Pair.second)
1483     return Pair.first->second;
1484 
1485   // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1486   // GEP can't simplify, we don't even look at the PHI cases.
1487   if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1488     std::swap(V1, V2);
1489     std::swap(V1Size, V2Size);
1490     std::swap(O1, O2);
1491     std::swap(V1AAInfo, V2AAInfo);
1492   }
1493   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1494     AliasResult Result =
1495         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
1496     if (Result != MayAlias)
1497       return AliasCache[Locs] = Result;
1498   }
1499 
1500   if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1501     std::swap(V1, V2);
1502     std::swap(V1Size, V2Size);
1503     std::swap(V1AAInfo, V2AAInfo);
1504   }
1505   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1506     AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
1507     if (Result != MayAlias)
1508       return AliasCache[Locs] = Result;
1509   }
1510 
1511   if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1512     std::swap(V1, V2);
1513     std::swap(V1Size, V2Size);
1514     std::swap(V1AAInfo, V2AAInfo);
1515   }
1516   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1517     AliasResult Result =
1518         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
1519     if (Result != MayAlias)
1520       return AliasCache[Locs] = Result;
1521   }
1522 
1523   // If both pointers are pointing into the same object and one of them
1524   // accesses the entire object, then the accesses must overlap in some way.
1525   if (O1 == O2)
1526     if ((V1Size != MemoryLocation::UnknownSize &&
1527          isObjectSize(O1, V1Size, DL, TLI)) ||
1528         (V2Size != MemoryLocation::UnknownSize &&
1529          isObjectSize(O2, V2Size, DL, TLI)))
1530       return AliasCache[Locs] = PartialAlias;
1531 
1532   // Recurse back into the best AA results we have, potentially with refined
1533   // memory locations. We have already ensured that BasicAA has a MayAlias
1534   // cache result for these, so any recursion back into BasicAA won't loop.
1535   AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
1536   return AliasCache[Locs] = Result;
1537 }
1538 
1539 /// Check whether two Values can be considered equivalent.
1540 ///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
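///
/// For instance, an instruction defined inside a loop may denote a different
/// value on each iteration; if a visited phi can reach its definition, two
/// syntactically identical occurrences of it need not be equal.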
1546 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1547                                                   const Value *V2) {
1548   if (V != V2)
1549     return false;
1550 
1551   const Instruction *Inst = dyn_cast<Instruction>(V);
1552   if (!Inst)
1553     return true;
1554 
1555   if (VisitedPhiBBs.empty())
1556     return true;
1557 
1558   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1559     return false;
1560 
1561   // Make sure that the visited phis cannot reach the Value. This ensures that
1562   // the Values cannot come from different iterations of a potential cycle the
1563   // phi nodes could be involved in.
1564   for (auto *P : VisitedPhiBBs)
1565     if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
1566       return false;
1567 
1568   return true;
1569 }
1570 
/// Computes the symbolic difference between two decomposed GEPs.
1572 ///
1573 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1574 /// instructions GEP1 and GEP2 which have common base pointers.
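///
/// For example (illustrative): a Dest entry {%i, Scale 4} cancels against a
/// Src entry {%i, Scale 4} and is erased; against a Src entry {%i, Scale 1}
/// it becomes {%i, Scale 3}; a Src entry with no match in Dest is appended
/// with its scale negated.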
1575 void BasicAAResult::GetIndexDifference(
1576     SmallVectorImpl<VariableGEPIndex> &Dest,
1577     const SmallVectorImpl<VariableGEPIndex> &Src) {
1578   if (Src.empty())
1579     return;
1580 
1581   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1582     const Value *V = Src[i].V;
1583     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1584     int64_t Scale = Src[i].Scale;
1585 
    // Find V in Dest.  This is N^2, but GEPs almost never have more than a
    // few variable indices.
1588     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1589       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1590           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1591         continue;
1592 
1593       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1594       // goes to zero, remove the entry.
1595       if (Dest[j].Scale != Scale)
1596         Dest[j].Scale -= Scale;
1597       else
1598         Dest.erase(Dest.begin() + j);
1599       Scale = 0;
1600       break;
1601     }
1602 
1603     // If we didn't consume this entry, add it to the end of the Dest list.
1604     if (Scale) {
1605       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1606       Dest.push_back(Entry);
1607     }
1608   }
1609 }
1610 
1611 bool BasicAAResult::constantOffsetHeuristic(
1612     const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
1613     uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
1614     DominatorTree *DT) {
1615   if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
1616       V2Size == MemoryLocation::UnknownSize)
1617     return false;
1618 
1619   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1620 
1621   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1622       Var0.Scale != -Var1.Scale)
1623     return false;
1624 
1625   unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1626 
  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1630 
1631   APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1632       V1Offset(Width, 0);
1633   bool NSW = true, NUW = true;
1634   unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1635   const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1636                                         V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1637   NSW = true;
1638   NUW = true;
1639   const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1640                                         V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1641 
1642   if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1643       V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1644     return false;
1645 
1646   // We have a hit - Var0 and Var1 only differ by a constant offset!
1647 
  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5
  // is 3.
1653   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1654   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1655   uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
1656 
1657   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1658   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1659   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1660   // V2Size can fit in the MinDiffBytes gap.
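  // E.g. (illustrative numbers, not from this file): with MinDiffBytes == 12,
  // BaseOffset == 2, V1Size == 4 and V2Size == 8, both 4 + 2 <= 12 and
  // 8 + 2 <= 12 hold, so we conclude NoAlias.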
1661   return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
1662          V2Size + std::abs(BaseOffset) <= MinDiffBytes;
1663 }
1664 
1665 //===----------------------------------------------------------------------===//
1666 // BasicAliasAnalysis Pass
1667 //===----------------------------------------------------------------------===//
1668 
1669 char BasicAA::PassID;
1670 
1671 BasicAAResult BasicAA::run(Function &F, AnalysisManager<Function> &AM) {
1672   return BasicAAResult(F.getParent()->getDataLayout(),
1673                        AM.getResult<TargetLibraryAnalysis>(F),
1674                        AM.getResult<AssumptionAnalysis>(F),
1675                        &AM.getResult<DominatorTreeAnalysis>(F),
1676                        AM.getCachedResult<LoopAnalysis>(F));
1677 }
1678 
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}
1682 
1683 char BasicAAWrapperPass::ID = 0;
1684 void BasicAAWrapperPass::anchor() {}
1685 
1686 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
1687                       "Basic Alias Analysis (stateless AA impl)", true, true)
1688 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1689 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1690 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1691 INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
1692                     "Basic Alias Analysis (stateless AA impl)", true, true)
1693 
1694 FunctionPass *llvm::createBasicAAWrapperPass() {
1695   return new BasicAAWrapperPass();
1696 }
1697 
1698 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1699   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1700   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1701   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1702   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
1703 
1704   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
1705                                  ACT.getAssumptionCache(F), &DTWP.getDomTree(),
1706                                  LIWP ? &LIWP->getLoopInfo() : nullptr));
1707 
1708   return false;
1709 }
1710 
1711 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1712   AU.setPreservesAll();
1713   AU.addRequired<AssumptionCacheTracker>();
1714   AU.addRequired<DominatorTreeWrapperPass>();
1715   AU.addRequired<TargetLibraryInfoWrapperPass>();
1716 }
1717 
1718 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
1719   return BasicAAResult(
1720       F.getParent()->getDataLayout(),
1721       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
1722       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
1723 }
1724