1 //===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements an analysis that determines, for a given memory
11 // operation, what preceding memory operations it depends on.  It builds on
12 // alias analysis information, and tries to provide a lazy, caching interface to
13 // a common kind of alias information query.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/MemoryBuiltins.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/Analysis/OrderedBasicBlock.h"
28 #include "llvm/Analysis/PHITransAddr.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Attributes.h"
32 #include "llvm/IR/BasicBlock.h"
33 #include "llvm/IR/CallSite.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DerivedTypes.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/InstrTypes.h"
40 #include "llvm/IR/Instruction.h"
41 #include "llvm/IR/Instructions.h"
42 #include "llvm/IR/IntrinsicInst.h"
43 #include "llvm/IR/LLVMContext.h"
44 #include "llvm/IR/Metadata.h"
45 #include "llvm/IR/Module.h"
46 #include "llvm/IR/PredIteratorCache.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/IR/Use.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/Pass.h"
52 #include "llvm/Support/AtomicOrdering.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Compiler.h"
56 #include "llvm/Support/Debug.h"
57 #include "llvm/Support/MathExtras.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstdint>
61 #include <iterator>
62 #include <utility>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "memdep"
67 
68 STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
69 STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
70 STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
71 
72 STATISTIC(NumCacheNonLocalPtr,
73           "Number of fully cached non-local ptr responses");
74 STATISTIC(NumCacheDirtyNonLocalPtr,
75           "Number of cached, but dirty, non-local ptr responses");
76 STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
77 STATISTIC(NumCacheCompleteNonLocalPtr,
78           "Number of block queries that were completely cached");
79 
// Limit for the number of instructions to scan in a block.
82 static cl::opt<unsigned> BlockScanLimit(
83     "memdep-block-scan-limit", cl::Hidden, cl::init(100),
84     cl::desc("The number of instructions to scan in a block in memory "
85              "dependency analysis (default = 100)"));
86 
87 static cl::opt<unsigned>
88     BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
89                      cl::desc("The number of blocks to scan during memory "
90                               "dependency analysis (default = 1000)"));
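// Both limits are ordinary cl::opt flags, so they can be tuned from the
// command line of any tool that parses LLVM's cl options, for example
// (illustrative values):
//   opt -memdep-block-scan-limit=200 -memdep-block-number-limit=500 ...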
91 
92 // Limit on the number of memdep results to process.
93 static const unsigned int NumResultsLimit = 100;
94 
95 /// This is a helper function that removes Val from 'Inst's set in ReverseMap.
96 ///
97 /// If the set becomes empty, remove Inst's entry.
98 template <typename KeyTy>
99 static void
100 RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
101                      Instruction *Inst, KeyTy Val) {
102   typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
103       ReverseMap.find(Inst);
104   assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
105   bool Found = InstIt->second.erase(Val);
106   assert(Found && "Invalid reverse map!");
107   (void)Found;
108   if (InstIt->second.empty())
109     ReverseMap.erase(InstIt);
110 }
111 
112 /// If the given instruction references a specific memory location, fill in Loc
113 /// with the details, otherwise set Loc.Ptr to null.
114 ///
115 /// Returns a ModRefInfo value describing the general behavior of the
116 /// instruction.
117 static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
118                               const TargetLibraryInfo &TLI) {
119   if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
120     if (LI->isUnordered()) {
121       Loc = MemoryLocation::get(LI);
122       return ModRefInfo::Ref;
123     }
124     if (LI->getOrdering() == AtomicOrdering::Monotonic) {
125       Loc = MemoryLocation::get(LI);
126       return ModRefInfo::ModRef;
127     }
128     Loc = MemoryLocation();
129     return ModRefInfo::ModRef;
130   }
131 
132   if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
133     if (SI->isUnordered()) {
134       Loc = MemoryLocation::get(SI);
135       return ModRefInfo::Mod;
136     }
137     if (SI->getOrdering() == AtomicOrdering::Monotonic) {
138       Loc = MemoryLocation::get(SI);
139       return ModRefInfo::ModRef;
140     }
141     Loc = MemoryLocation();
142     return ModRefInfo::ModRef;
143   }
144 
145   if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
146     Loc = MemoryLocation::get(V);
147     return ModRefInfo::ModRef;
148   }
149 
150   if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
151     // calls to free() deallocate the entire structure
152     Loc = MemoryLocation(CI->getArgOperand(0));
153     return ModRefInfo::Mod;
154   }
155 
156   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
157     switch (II->getIntrinsicID()) {
158     case Intrinsic::lifetime_start:
159     case Intrinsic::lifetime_end:
160     case Intrinsic::invariant_start:
161       Loc = MemoryLocation::getForArgument(II, 1, TLI);
162       // These intrinsics don't really modify the memory, but returning Mod
163       // will allow them to be handled conservatively.
164       return ModRefInfo::Mod;
165     case Intrinsic::invariant_end:
166       Loc = MemoryLocation::getForArgument(II, 2, TLI);
167       // These intrinsics don't really modify the memory, but returning Mod
168       // will allow them to be handled conservatively.
169       return ModRefInfo::Mod;
170     default:
171       break;
172     }
173   }
174 
175   // Otherwise, just do the coarse-grained thing that always works.
176   if (Inst->mayWriteToMemory())
177     return ModRefInfo::ModRef;
178   if (Inst->mayReadFromMemory())
179     return ModRefInfo::Ref;
180   return ModRefInfo::NoModRef;
181 }
182 
183 /// Private helper for finding the local dependencies of a call site.
184 MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
185     CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
186     BasicBlock *BB) {
187   unsigned Limit = BlockScanLimit;
188 
189   // Walk backwards through the block, looking for dependencies.
190   while (ScanIt != BB->begin()) {
191     Instruction *Inst = &*--ScanIt;
192     // Debug intrinsics don't cause dependences and should not affect Limit
193     if (isa<DbgInfoIntrinsic>(Inst))
194       continue;
195 
196     // Limit the amount of scanning we do so we don't end up with quadratic
197     // running time on extreme testcases.
198     --Limit;
199     if (!Limit)
200       return MemDepResult::getUnknown();
201 
202     // If this inst is a memory op, get the pointer it accessed
203     MemoryLocation Loc;
204     ModRefInfo MR = GetLocation(Inst, Loc, TLI);
205     if (Loc.Ptr) {
206       // A simple instruction.
207       if (isModOrRefSet(AA.getModRefInfo(CS, Loc)))
208         return MemDepResult::getClobber(Inst);
209       continue;
210     }
211 
212     if (auto InstCS = CallSite(Inst)) {
213       // If these two calls do not interfere, look past it.
214       if (isNoModRef(AA.getModRefInfo(CS, InstCS))) {
215         // If the two calls are the same, return InstCS as a Def, so that
216         // CS can be found redundant and eliminated.
217         if (isReadOnlyCall && !isModSet(MR) &&
218             CS.getInstruction()->isIdenticalToWhenDefined(Inst))
219           return MemDepResult::getDef(Inst);
220 
221         // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
222         // keep scanning.
223         continue;
224       } else
225         return MemDepResult::getClobber(Inst);
226     }
227 
228     // If we could not obtain a pointer for the instruction and the instruction
229     // touches memory then assume that this is a dependency.
230     if (isModOrRefSet(MR))
231       return MemDepResult::getClobber(Inst);
232   }
233 
234   // No dependence found.  If this is the entry block of the function, it is
235   // unknown, otherwise it is non-local.
236   if (BB != &BB->getParent()->getEntryBlock())
237     return MemDepResult::getNonLocal();
238   return MemDepResult::getNonFuncLocal();
239 }
240 
241 unsigned MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
242     const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
243     const LoadInst *LI) {
244   // We can only extend simple integer loads.
245   if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
246     return 0;
247 
248   // Load widening is hostile to ThreadSanitizer: it may cause false positives
249   // or make the reports more cryptic (access sizes are wrong).
250   if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
251     return 0;
252 
253   const DataLayout &DL = LI->getModule()->getDataLayout();
254 
255   // Get the base of this load.
256   int64_t LIOffs = 0;
257   const Value *LIBase =
258       GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);
259 
260   // If the two pointers are not based on the same pointer, we can't tell that
261   // they are related.
262   if (LIBase != MemLocBase)
263     return 0;
264 
265   // Okay, the two values are based on the same pointer, but returned as
266   // no-alias.  This happens when we have things like two byte loads at "P+1"
267   // and "P+3".  Check to see if increasing the size of the "LI" load up to its
268   // alignment (or the largest native integer type) will allow us to load all
269   // the bits required by MemLoc.
270 
271   // If MemLoc is before LI, then no widening of LI will help us out.
272   if (MemLocOffs < LIOffs)
273     return 0;
274 
275   // Get the alignment of the load in bytes.  We assume that it is safe to load
276   // any legal integer up to this size without a problem.  For example, if we're
277   // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
278   // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
279   // to i16.
280   unsigned LoadAlign = LI->getAlignment();
281 
282   int64_t MemLocEnd = MemLocOffs + MemLocSize;
283 
284   // If no amount of rounding up will let MemLoc fit into LI, then bail out.
285   if (LIOffs + LoadAlign < MemLocEnd)
286     return 0;
287 
288   // This is the size of the load to try.  Start with the next larger power of
289   // two.
290   unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
291   NewLoadByteSize = NextPowerOf2(NewLoadByteSize);
292 
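  // Illustrative example: an i8 load (1 byte) starts the search at 2 bytes
  // (NextPowerOf2 returns the next power of two strictly greater); with
  // 4-byte alignment and a MemLoc ending at LIOffs + 4, the loop below tries
  // 2 and then 4 and succeeds with 4, assuming both widths fit in legal
  // integer types on the target (e.g. on x86).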
293   while (true) {
294     // If this load size is bigger than our known alignment or would not fit
295     // into a native integer register, then we fail.
296     if (NewLoadByteSize > LoadAlign ||
297         !DL.fitsInLegalInteger(NewLoadByteSize * 8))
298       return 0;
299 
300     if (LIOffs + NewLoadByteSize > MemLocEnd &&
301         (LI->getParent()->getParent()->hasFnAttribute(
302              Attribute::SanitizeAddress) ||
303          LI->getParent()->getParent()->hasFnAttribute(
304              Attribute::SanitizeHWAddress)))
305       // We will be reading past the location accessed by the original program.
306       // While this is safe in a regular build, Address Safety analysis tools
307       // may start reporting false warnings. So, don't do widening.
308       return 0;
309 
310     // If a load of this width would include all of MemLoc, then we succeed.
311     if (LIOffs + NewLoadByteSize >= MemLocEnd)
312       return NewLoadByteSize;
313 
314     NewLoadByteSize <<= 1;
315   }
316 }
317 
318 static bool isVolatile(Instruction *Inst) {
319   if (auto *LI = dyn_cast<LoadInst>(Inst))
320     return LI->isVolatile();
321   if (auto *SI = dyn_cast<StoreInst>(Inst))
322     return SI->isVolatile();
323   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
324     return AI->isVolatile();
325   return false;
326 }
327 
328 MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
329     const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
330     BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
331   MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
332   if (QueryInst != nullptr) {
333     if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
334       InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);
335 
336       if (InvariantGroupDependency.isDef())
337         return InvariantGroupDependency;
338     }
339   }
340   MemDepResult SimpleDep = getSimplePointerDependencyFrom(
341       MemLoc, isLoad, ScanIt, BB, QueryInst, Limit);
342   if (SimpleDep.isDef())
343     return SimpleDep;
  // A non-local invariant.group dependency indicates that a non-local Def was
  // found (getInvariantGroupPointerDependency only returns NonLocal when it
  // finds a non-local Def), which is better than a local clobber or anything
  // else the simple scan produced.
347   if (InvariantGroupDependency.isNonLocal())
348     return InvariantGroupDependency;
349 
350   assert(InvariantGroupDependency.isUnknown() &&
351          "InvariantGroupDependency should be only unknown at this point");
352   return SimpleDep;
353 }
354 
355 MemDepResult
356 MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
357                                                             BasicBlock *BB) {
358   auto *InvariantGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group);
359   if (!InvariantGroupMD)
360     return MemDepResult::getUnknown();
361 
  // Take the pointer operand after stripping all casts and all-zero GEPs.
  // This way we only have to search the cast graph downwards.
364   Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();
365 
  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
370   if (isa<GlobalValue>(LoadOperand))
371     return MemDepResult::getUnknown();
372 
373   // Queue to process all pointers that are equivalent to load operand.
374   SmallVector<const Value *, 8> LoadOperandsQueue;
375   LoadOperandsQueue.push_back(LoadOperand);
376 
377   Instruction *ClosestDependency = nullptr;
  // The order of instructions in the use list is unpredictable. In order to
  // always get the same result, we look for the closest dominating candidate.
380   auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must be called with a non-null instruction");
382     if (Best == nullptr || DT.dominates(Best, Other))
383       return Other;
384     return Best;
385   };
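  // Every candidate passed to GetClosestDependency dominates LI (enforced by
  // the filter in the loop below), so any two candidates are ordered by
  // dominance; if Best dominates Other, then Other sits between Best and LI
  // and is therefore the closer dependency.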
386 
387   // FIXME: This loop is O(N^2) because dominates can be O(n) and in worst case
388   // we will see all the instructions. This should be fixed in MSSA.
389   while (!LoadOperandsQueue.empty()) {
390     const Value *Ptr = LoadOperandsQueue.pop_back_val();
391     assert(Ptr && !isa<GlobalValue>(Ptr) &&
392            "Null or GlobalValue should not be inserted");
393 
394     for (const Use &Us : Ptr->uses()) {
395       auto *U = dyn_cast<Instruction>(Us.getUser());
396       if (!U || U == LI || !DT.dominates(U, LI))
397         continue;
398 
      // A bitcast of Ptr produces an equivalent pointer, e.g. U = bitcast Ptr.
      // Add it to the queue so that its users get checked as well.
401       if (isa<BitCastInst>(U)) {
402         LoadOperandsQueue.push_back(U);
403         continue;
404       }
      // A GEP with all-zero indices is equivalent to a bitcast.
      // FIXME: we are not sure whether some bitcasts should be canonicalized
      // to gep 0 or gep 0 to bitcasts because of SROA, so both forms exist.
      // Once typeless pointers are ready, both cases will be gone
      // (and this BFS won't be needed either).
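      // For example (illustrative IR), both of the following produce a
      // pointer equivalent to %p and would be added to the queue here:
      //   %u1 = bitcast %T* %p to i8*
      //   %u2 = getelementptr %T, %T* %p, i64 0, i32 0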
410       if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
411         if (GEP->hasAllZeroIndices()) {
412           LoadOperandsQueue.push_back(U);
413           continue;
414         }
415 
      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand), we can assume that the value pointed to by the
      // pointer operand didn't change.
419       if ((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
420           U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD)
421         ClosestDependency = GetClosestDependency(ClosestDependency, U);
422     }
423   }
424 
425   if (!ClosestDependency)
426     return MemDepResult::getUnknown();
427   if (ClosestDependency->getParent() == BB)
428     return MemDepResult::getDef(ClosestDependency);
  // A Def can't be returned here because the dependency is non-local. If no
  // local dependency is found, return NonLocal, counting on the caller
  // invoking getNonLocalPointerDependency, which will return the cached
  // result.
433   NonLocalDefsCache.try_emplace(
434       LI, NonLocalDepResult(ClosestDependency->getParent(),
435                             MemDepResult::getDef(ClosestDependency), nullptr));
436   return MemDepResult::getNonLocal();
437 }
438 
439 MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
440     const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
441     BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
442   bool isInvariantLoad = false;
443 
444   if (!Limit) {
445     unsigned DefaultLimit = BlockScanLimit;
446     return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst,
447                                           &DefaultLimit);
448   }
449 
450   // We must be careful with atomic accesses, as they may allow another thread
451   //   to touch this location, clobbering it. We are conservative: if the
452   //   QueryInst is not a simple (non-atomic) memory access, we automatically
453   //   return getClobber.
454   // If it is simple, we know based on the results of
455   // "Compiler testing via a theory of sound optimisations in the C11/C++11
456   //   memory model" in PLDI 2013, that a non-atomic location can only be
457   //   clobbered between a pair of a release and an acquire action, with no
458   //   access to the location in between.
459   // Here is an example for giving the general intuition behind this rule.
460   // In the following code:
461   //   store x 0;
462   //   release action; [1]
463   //   acquire action; [4]
464   //   %val = load x;
465   // It is unsafe to replace %val by 0 because another thread may be running:
466   //   acquire action; [2]
467   //   store x 42;
468   //   release action; [3]
469   // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
470   // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
473   // The paper mentioned above shows that the same property is respected
474   // by every program that can detect any optimization of that kind: either
475   // it is racy (undefined) or there is a release followed by an acquire
476   // between the pair of accesses under consideration.
477 
478   // If the load is invariant, we "know" that it doesn't alias *any* write. We
479   // do want to respect mustalias results since defs are useful for value
480   // forwarding, but any mayalias write can be assumed to be noalias.
481   // Arguably, this logic should be pushed inside AliasAnalysis itself.
482   if (isLoad && QueryInst) {
483     LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
484     if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
485       isInvariantLoad = true;
486   }
487 
488   const DataLayout &DL = BB->getModule()->getDataLayout();
489 
490   // Create a numbered basic block to lazily compute and cache instruction
491   // positions inside a BB. This is used to provide fast queries for relative
492   // position between two instructions in a BB and can be used by
493   // AliasAnalysis::callCapturesBefore.
494   OrderedBasicBlock OBB(BB);
495 
496   // Return "true" if and only if the instruction I is either a non-simple
497   // load or a non-simple store.
498   auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
499     if (auto *LI = dyn_cast<LoadInst>(I))
500       return !LI->isSimple();
501     if (auto *SI = dyn_cast<StoreInst>(I))
502       return !SI->isSimple();
503     return false;
504   };
505 
506   // Return "true" if I is not a load and not a store, but it does access
507   // memory.
508   auto isOtherMemAccess = [](Instruction *I) -> bool {
509     return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
510   };
511 
512   // Walk backwards through the basic block, looking for dependencies.
513   while (ScanIt != BB->begin()) {
514     Instruction *Inst = &*--ScanIt;
515 
516     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
517       // Debug intrinsics don't (and can't) cause dependencies.
518       if (isa<DbgInfoIntrinsic>(II))
519         continue;
520 
521     // Limit the amount of scanning we do so we don't end up with quadratic
522     // running time on extreme testcases.
523     --*Limit;
524     if (!*Limit)
525       return MemDepResult::getUnknown();
526 
527     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
528       // If we reach a lifetime begin or end marker, then the query ends here
529       // because the value is undefined.
530       if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
531         // FIXME: This only considers queries directly on the invariant-tagged
532         // pointer, not on query pointers that are indexed off of them.  It'd
533         // be nice to handle that at some point (the right approach is to use
534         // GetPointerBaseWithConstantOffset).
535         if (AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
536           return MemDepResult::getDef(II);
537         continue;
538       }
539     }
540 
541     // Values depend on loads if the pointers are must aliased.  This means
542     // that a load depends on another must aliased load from the same value.
543     // One exception is atomic loads: a value can depend on an atomic load that
544     // it does not alias with when this atomic load indicates that another
545     // thread may be accessing the location.
546     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses, for example, can be
      // safely reordered with volatile accesses.
550       if (LI->isVolatile()) {
551         if (!QueryInst)
552           // Original QueryInst *may* be volatile
553           return MemDepResult::getClobber(LI);
554         if (isVolatile(QueryInst))
555           // Ordering required if QueryInst is itself volatile
556           return MemDepResult::getClobber(LI);
557         // Otherwise, volatile doesn't imply any special ordering
558       }
559 
560       // Atomic loads have complications involved.
561       // A Monotonic (or higher) load is OK if the query inst is itself not
562       // atomic.
563       // FIXME: This is overly conservative.
564       if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
565         if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
566             isOtherMemAccess(QueryInst))
567           return MemDepResult::getClobber(LI);
568         if (LI->getOrdering() != AtomicOrdering::Monotonic)
569           return MemDepResult::getClobber(LI);
570       }
571 
572       MemoryLocation LoadLoc = MemoryLocation::get(LI);
573 
574       // If we found a pointer, check if it could be the same as our pointer.
575       AliasResult R = AA.alias(LoadLoc, MemLoc);
576 
577       if (isLoad) {
578         if (R == NoAlias)
579           continue;
580 
581         // Must aliased loads are defs of each other.
582         if (R == MustAlias)
583           return MemDepResult::getDef(Inst);
584 
585 #if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
586       // in terms of clobbering loads, but since it does this by looking
587       // at the clobbering load directly, it doesn't know about any
588       // phi translation that may have happened along the way.
589 
590         // If we have a partial alias, then return this as a clobber for the
591         // client to handle.
592         if (R == PartialAlias)
593           return MemDepResult::getClobber(Inst);
594 #endif
595 
        // Loads that merely may-alias each other do not create a dependence.
598         continue;
599       }
600 
601       // Stores don't depend on other no-aliased accesses.
602       if (R == NoAlias)
603         continue;
604 
605       // Stores don't alias loads from read-only memory.
606       if (AA.pointsToConstantMemory(LoadLoc))
607         continue;
608 
609       // Stores depend on may/must aliased loads.
610       return MemDepResult::getDef(Inst);
611     }
612 
613     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
614       // Atomic stores have complications involved.
615       // A Monotonic store is OK if the query inst is itself not atomic.
616       // FIXME: This is overly conservative.
617       if (!SI->isUnordered() && SI->isAtomic()) {
618         if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
619             isOtherMemAccess(QueryInst))
620           return MemDepResult::getClobber(SI);
621         if (SI->getOrdering() != AtomicOrdering::Monotonic)
622           return MemDepResult::getClobber(SI);
623       }
624 
      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses can, for example, be
      // reordered with volatile accesses.
629       if (SI->isVolatile())
630         if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
631             isOtherMemAccess(QueryInst))
632           return MemDepResult::getClobber(SI);
633 
634       // If alias analysis can tell that this store is guaranteed to not modify
635       // the query pointer, ignore it.  Use getModRefInfo to handle cases where
636       // the query pointer points to constant memory etc.
637       if (!isModOrRefSet(AA.getModRefInfo(SI, MemLoc)))
638         continue;
639 
640       // Ok, this store might clobber the query pointer.  Check to see if it is
641       // a must alias: in this case, we want to return this as a def.
642       // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
643       MemoryLocation StoreLoc = MemoryLocation::get(SI);
644 
645       // If we found a pointer, check if it could be the same as our pointer.
646       AliasResult R = AA.alias(StoreLoc, MemLoc);
647 
648       if (R == NoAlias)
649         continue;
650       if (R == MustAlias)
651         return MemDepResult::getDef(Inst);
652       if (isInvariantLoad)
653         continue;
654       return MemDepResult::getClobber(Inst);
655     }
656 
657     // If this is an allocation, and if we know that the accessed pointer is to
658     // the allocation, return Def.  This means that there is no dependence and
659     // the access can be optimized based on that.  For example, a load could
660     // turn into undef.  Note that we can bypass the allocation itself when
661     // looking for a clobber in many cases; that's an alias property and is
662     // handled by BasicAA.
663     if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
664       const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
665       if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
666         return MemDepResult::getDef(Inst);
667     }
668 
669     if (isInvariantLoad)
670       continue;
671 
672     // A release fence requires that all stores complete before it, but does
673     // not prevent the reordering of following loads or stores 'before' the
674     // fence.  As a result, we look past it when finding a dependency for
    // loads.  DSE uses this to find preceding stores to delete and thus we
676     // can't bypass the fence if the query instruction is a store.
677     if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
678       if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
679         continue;
680 
681     // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
682     ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
683     // If necessary, perform additional analysis.
684     if (isModAndRefSet(MR))
685       MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
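    // callCapturesBefore can refine the result when the location's underlying
    // object is a non-escaping local that has not been captured before Inst:
    // in that case the call can only touch it through its own arguments.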
686     switch (clearMust(MR)) {
687     case ModRefInfo::NoModRef:
688       // If the call has no effect on the queried pointer, just ignore it.
689       continue;
690     case ModRefInfo::Mod:
691       return MemDepResult::getClobber(Inst);
692     case ModRefInfo::Ref:
693       // If the call is known to never store to the pointer, and if this is a
694       // load query, we can safely ignore it (scan past it).
695       if (isLoad)
696         continue;
697       LLVM_FALLTHROUGH;
698     default:
699       // Otherwise, there is a potential dependence.  Return a clobber.
700       return MemDepResult::getClobber(Inst);
701     }
702   }
703 
704   // No dependence found.  If this is the entry block of the function, it is
705   // unknown, otherwise it is non-local.
706   if (BB != &BB->getParent()->getEntryBlock())
707     return MemDepResult::getNonLocal();
708   return MemDepResult::getNonFuncLocal();
709 }
710 
711 MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
712   Instruction *ScanPos = QueryInst;
713 
714   // Check for a cached result
715   MemDepResult &LocalCache = LocalDeps[QueryInst];
716 
717   // If the cached entry is non-dirty, just return it.  Note that this depends
718   // on MemDepResult's default constructing to 'dirty'.
719   if (!LocalCache.isDirty())
720     return LocalCache;
721 
722   // Otherwise, if we have a dirty entry, we know we can start the scan at that
723   // instruction, which may save us some work.
724   if (Instruction *Inst = LocalCache.getInst()) {
725     ScanPos = Inst;
726 
727     RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
728   }
729 
730   BasicBlock *QueryParent = QueryInst->getParent();
731 
732   // Do the scan.
733   if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
734     // No dependence found. If this is the entry block of the function, it is
735     // unknown, otherwise it is non-local.
736     if (QueryParent != &QueryParent->getParent()->getEntryBlock())
737       LocalCache = MemDepResult::getNonLocal();
738     else
739       LocalCache = MemDepResult::getNonFuncLocal();
740   } else {
741     MemoryLocation MemLoc;
742     ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
743     if (MemLoc.Ptr) {
744       // If we can do a pointer scan, make it happen.
745       bool isLoad = !isModSet(MR);
746       if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
747         isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
748 
749       LocalCache = getPointerDependencyFrom(
750           MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
751     } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
752       CallSite QueryCS(QueryInst);
753       bool isReadOnly = AA.onlyReadsMemory(QueryCS);
754       LocalCache = getCallSiteDependencyFrom(
755           QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
756     } else
757       // Non-memory instruction.
758       LocalCache = MemDepResult::getUnknown();
759   }
760 
761   // Remember the result!
762   if (Instruction *I = LocalCache.getInst())
763     ReverseLocalDeps[I].insert(QueryInst);
764 
765   return LocalCache;
766 }
767 
768 #ifndef NDEBUG
769 /// This method is used when -debug is specified to verify that cache arrays
770 /// are properly kept sorted.
771 static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
772                          int Count = -1) {
773   if (Count == -1)
774     Count = Cache.size();
775   assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
776          "Cache isn't sorted!");
777 }
778 #endif
779 
780 const MemoryDependenceResults::NonLocalDepInfo &
781 MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
782   assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
783          "getNonLocalCallDependency should only be used on calls with "
784          "non-local deps!");
785   PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
786   NonLocalDepInfo &Cache = CacheP.first;
787 
788   // This is the set of blocks that need to be recomputed.  In the cached case,
789   // this can happen due to instructions being deleted etc. In the uncached
790   // case, this starts out as the set of predecessors we care about.
791   SmallVector<BasicBlock *, 32> DirtyBlocks;
792 
793   if (!Cache.empty()) {
794     // Okay, we have a cache entry.  If we know it is not dirty, just return it
795     // with no computation.
796     if (!CacheP.second) {
797       ++NumCacheNonLocal;
798       return Cache;
799     }
800 
801     // If we already have a partially computed set of results, scan them to
802     // determine what is dirty, seeding our initial DirtyBlocks worklist.
803     for (auto &Entry : Cache)
804       if (Entry.getResult().isDirty())
805         DirtyBlocks.push_back(Entry.getBB());
806 
807     // Sort the cache so that we can do fast binary search lookups below.
808     std::sort(Cache.begin(), Cache.end());
809 
810     ++NumCacheDirtyNonLocal;
811     // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
812     //     << Cache.size() << " cached: " << *QueryInst;
813   } else {
814     // Seed DirtyBlocks with each of the preds of QueryInst's block.
815     BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
816     for (BasicBlock *Pred : PredCache.get(QueryBB))
817       DirtyBlocks.push_back(Pred);
818     ++NumUncacheNonLocal;
819   }
820 
821   // isReadonlyCall - If this is a read-only call, we can be more aggressive.
822   bool isReadonlyCall = AA.onlyReadsMemory(QueryCS);
823 
824   SmallPtrSet<BasicBlock *, 32> Visited;
825 
826   unsigned NumSortedEntries = Cache.size();
827   DEBUG(AssertSorted(Cache));
828 
829   // Iterate while we still have blocks to update.
830   while (!DirtyBlocks.empty()) {
831     BasicBlock *DirtyBB = DirtyBlocks.back();
832     DirtyBlocks.pop_back();
833 
834     // Already processed this block?
835     if (!Visited.insert(DirtyBB).second)
836       continue;
837 
838     // Do a binary search to see if we already have an entry for this block in
839     // the cache set.  If so, find it.
840     DEBUG(AssertSorted(Cache, NumSortedEntries));
841     NonLocalDepInfo::iterator Entry =
842         std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
843                          NonLocalDepEntry(DirtyBB));
844     if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
845       --Entry;
846 
847     NonLocalDepEntry *ExistingResult = nullptr;
848     if (Entry != Cache.begin() + NumSortedEntries &&
849         Entry->getBB() == DirtyBB) {
850       // If we already have an entry, and if it isn't already dirty, the block
851       // is done.
852       if (!Entry->getResult().isDirty())
853         continue;
854 
855       // Otherwise, remember this slot so we can update the value.
856       ExistingResult = &*Entry;
857     }
858 
859     // If the dirty entry has a pointer, start scanning from it so we don't have
860     // to rescan the entire block.
861     BasicBlock::iterator ScanPos = DirtyBB->end();
862     if (ExistingResult) {
863       if (Instruction *Inst = ExistingResult->getResult().getInst()) {
864         ScanPos = Inst->getIterator();
865         // We're removing QueryInst's use of Inst.
866         RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
867                              QueryCS.getInstruction());
868       }
869     }
870 
871     // Find out if this block has a local dependency for QueryInst.
872     MemDepResult Dep;
873 
874     if (ScanPos != DirtyBB->begin()) {
875       Dep =
876           getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
877     } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
880       Dep = MemDepResult::getNonLocal();
881     } else {
882       Dep = MemDepResult::getNonFuncLocal();
883     }
884 
885     // If we had a dirty entry for the block, update it.  Otherwise, just add
886     // a new entry.
887     if (ExistingResult)
888       ExistingResult->setResult(Dep);
889     else
890       Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));
891 
892     // If the block has a dependency (i.e. it isn't completely transparent to
893     // the value), remember the association!
894     if (!Dep.isNonLocal()) {
895       // Keep the ReverseNonLocalDeps map up to date so we can efficiently
896       // update this when we remove instructions.
897       if (Instruction *Inst = Dep.getInst())
898         ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
899     } else {
900 
901       // If the block *is* completely transparent to the load, we need to check
902       // the predecessors of this block.  Add them to our worklist.
903       for (BasicBlock *Pred : PredCache.get(DirtyBB))
904         DirtyBlocks.push_back(Pred);
905     }
906   }
907 
908   return Cache;
909 }
910 
911 void MemoryDependenceResults::getNonLocalPointerDependency(
912     Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
913   const MemoryLocation Loc = MemoryLocation::get(QueryInst);
914   bool isLoad = isa<LoadInst>(QueryInst);
915   BasicBlock *FromBB = QueryInst->getParent();
916   assert(FromBB);
917 
918   assert(Loc.Ptr->getType()->isPointerTy() &&
919          "Can't get pointer deps of a non-pointer!");
920   Result.clear();
921   {
    // Check if there is a cached Def with invariant.group. FIXME: the cache
    // might be invalid if the cached instruction is removed between the call
    // to getPointerDependencyFrom and this function.
925     auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
926     if (NonLocalDefIt != NonLocalDefsCache.end()) {
927       Result.push_back(std::move(NonLocalDefIt->second));
928       NonLocalDefsCache.erase(NonLocalDefIt);
929       return;
930     }
931   }
932   // This routine does not expect to deal with volatile instructions.
933   // Doing so would require piping through the QueryInst all the way through.
934   // TODO: volatiles can't be elided, but they can be reordered with other
935   // non-volatile accesses.
936 
937   // We currently give up on any instruction which is ordered, but we do handle
938   // atomic instructions which are unordered.
939   // TODO: Handle ordered instructions
940   auto isOrdered = [](Instruction *Inst) {
941     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
942       return !LI->isUnordered();
943     } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
944       return !SI->isUnordered();
945     }
946     return false;
947   };
948   if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
949     Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
950                                        const_cast<Value *>(Loc.Ptr)));
951     return;
952   }
953   const DataLayout &DL = FromBB->getModule()->getDataLayout();
954   PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);
955 
956   // This is the set of blocks we've inspected, and the pointer we consider in
957   // each block.  Because of critical edges, we currently bail out if querying
958   // a block with multiple different pointers.  This can happen during PHI
959   // translation.
960   DenseMap<BasicBlock *, Value *> Visited;
961   if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
962                                    Result, Visited, true))
963     return;
964   Result.clear();
965   Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
966                                      const_cast<Value *>(Loc.Ptr)));
967 }
968 
969 /// Compute the memdep value for BB with Pointer/PointeeSize using either
970 /// cached information in Cache or by doing a lookup (which may use dirty cache
971 /// info if available).
972 ///
973 /// If we do a lookup, add the result to the cache.
974 MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
975     Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
976     BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
977 
978   // Do a binary search to see if we already have an entry for this block in
979   // the cache set.  If so, find it.
980   NonLocalDepInfo::iterator Entry = std::upper_bound(
981       Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
982   if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
983     --Entry;
984 
985   NonLocalDepEntry *ExistingResult = nullptr;
986   if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
987     ExistingResult = &*Entry;
988 
989   // If we have a cached entry, and it is non-dirty, use it as the value for
990   // this dependency.
991   if (ExistingResult && !ExistingResult->getResult().isDirty()) {
992     ++NumCacheNonLocalPtr;
993     return ExistingResult->getResult();
994   }
995 
996   // Otherwise, we have to scan for the value.  If we have a dirty cache
997   // entry, start scanning from its position, otherwise we scan from the end
998   // of the block.
999   BasicBlock::iterator ScanPos = BB->end();
1000   if (ExistingResult && ExistingResult->getResult().getInst()) {
1001     assert(ExistingResult->getResult().getInst()->getParent() == BB &&
1002            "Instruction invalidated?");
1003     ++NumCacheDirtyNonLocalPtr;
1004     ScanPos = ExistingResult->getResult().getInst()->getIterator();
1005 
1006     // Eliminating the dirty entry from 'Cache', so update the reverse info.
1007     ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
1008     RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
1009   } else {
1010     ++NumUncacheNonLocalPtr;
1011   }
1012 
1013   // Scan the block for the dependency.
1014   MemDepResult Dep =
1015       getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);
1016 
1017   // If we had a dirty entry for the block, update it.  Otherwise, just add
1018   // a new entry.
1019   if (ExistingResult)
1020     ExistingResult->setResult(Dep);
1021   else
1022     Cache->push_back(NonLocalDepEntry(BB, Dep));
1023 
1024   // If the block has a dependency (i.e. it isn't completely transparent to
1025   // the value), remember the reverse association because we just added it
1026   // to Cache!
1027   if (!Dep.isDef() && !Dep.isClobber())
1028     return Dep;
1029 
1030   // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
1031   // update MemDep when we remove instructions.
1032   Instruction *Inst = Dep.getInst();
1033   assert(Inst && "Didn't depend on anything?");
1034   ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
1035   ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
1036   return Dep;
1037 }
1038 
1039 /// Sort the NonLocalDepInfo cache, given a certain number of elements in the
1040 /// array that are already properly ordered.
1041 ///
1042 /// This is optimized for the case when only a few entries are added.
1043 static void
1044 SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
1045                          unsigned NumSortedEntries) {
1046   switch (Cache.size() - NumSortedEntries) {
1047   case 0:
1048     // done, no new entries.
1049     break;
1050   case 2: {
1051     // Two new entries, insert the last one into place.
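    // The remaining new entry is then handled by falling through to the
    // one-entry case below.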
1052     NonLocalDepEntry Val = Cache.back();
1053     Cache.pop_back();
1054     MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
1055         std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
1056     Cache.insert(Entry, Val);
1057     LLVM_FALLTHROUGH;
1058   }
1059   case 1:
    // One new entry, just insert the new value at the appropriate position.
1061     if (Cache.size() != 1) {
1062       NonLocalDepEntry Val = Cache.back();
1063       Cache.pop_back();
1064       MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
1065           std::upper_bound(Cache.begin(), Cache.end(), Val);
1066       Cache.insert(Entry, Val);
1067     }
1068     break;
1069   default:
1070     // Added many values, do a full scale sort.
1071     std::sort(Cache.begin(), Cache.end());
1072     break;
1073   }
1074 }
1075 
1076 /// Perform a dependency query based on pointer/pointeesize starting at the end
1077 /// of StartBB.
1078 ///
1079 /// Add any clobber/def results to the results vector and keep track of which
1080 /// blocks are visited in 'Visited'.
1081 ///
1082 /// This has special behavior for the first block queries (when SkipFirstBlock
1083 /// is true).  In this special case, it ignores the contents of the specified
1084 /// block and starts returning dependence info for its predecessors.
1085 ///
1086 /// This function returns true on success, or false to indicate that it could
1087 /// not compute dependence information for some reason.  This should be treated
1088 /// as a clobber dependence on the first instruction in the predecessor block.
1089 bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
1090     Instruction *QueryInst, const PHITransAddr &Pointer,
1091     const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
1092     SmallVectorImpl<NonLocalDepResult> &Result,
1093     DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
1094   // Look up the cached info for Pointer.
1095   ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
1096 
1097   // Set up a temporary NLPI value. If the map doesn't yet have an entry for
1098   // CacheKey, this value will be inserted as the associated value. Otherwise,
1099   // it'll be ignored, and we'll have to check to see if the cached size and
1100   // aa tags are consistent with the current query.
1101   NonLocalPointerInfo InitialNLPI;
1102   InitialNLPI.Size = Loc.Size;
1103   InitialNLPI.AATags = Loc.AATags;
1104 
1105   // Get the NLPI for CacheKey, inserting one into the map if it doesn't
1106   // already have one.
1107   std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
1108       NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
1109   NonLocalPointerInfo *CacheInfo = &Pair.first->second;
1110 
1111   // If we already have a cache entry for this CacheKey, we may need to do some
1112   // work to reconcile the cache entry and the current query.
1113   if (!Pair.second) {
1114     if (CacheInfo->Size < Loc.Size) {
1115       // The query's Size is greater than the cached one. Throw out the
1116       // cached data and proceed with the query at the greater size.
1117       CacheInfo->Pair = BBSkipFirstBlockPair();
1118       CacheInfo->Size = Loc.Size;
1119       for (auto &Entry : CacheInfo->NonLocalDeps)
1120         if (Instruction *Inst = Entry.getResult().getInst())
1121           RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
1122       CacheInfo->NonLocalDeps.clear();
1123     } else if (CacheInfo->Size > Loc.Size) {
1124       // This query's Size is less than the cached one. Conservatively restart
1125       // the query using the greater size.
1126       return getNonLocalPointerDepFromBB(
1127           QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
1128           StartBB, Result, Visited, SkipFirstBlock);
1129     }
1130 
1131     // If the query's AATags are inconsistent with the cached one,
1132     // conservatively throw out the cached data and restart the query with
1133     // no tag if needed.
1134     if (CacheInfo->AATags != Loc.AATags) {
1135       if (CacheInfo->AATags) {
1136         CacheInfo->Pair = BBSkipFirstBlockPair();
1137         CacheInfo->AATags = AAMDNodes();
1138         for (auto &Entry : CacheInfo->NonLocalDeps)
1139           if (Instruction *Inst = Entry.getResult().getInst())
1140             RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
1141         CacheInfo->NonLocalDeps.clear();
1142       }
1143       if (Loc.AATags)
1144         return getNonLocalPointerDepFromBB(
1145             QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
1146             Visited, SkipFirstBlock);
1147     }
1148   }
1149 
1150   NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
1151 
1152   // If we have valid cached information for exactly the block we are
1153   // investigating, just return it with no recomputation.
1154   if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
1157     // that we don't already have conflicting results for these blocks.  Check
1158     // to ensure that if a block in the results set is in the visited set that
1159     // it was for the same pointer query.
1160     if (!Visited.empty()) {
1161       for (auto &Entry : *Cache) {
1162         DenseMap<BasicBlock *, Value *>::iterator VI =
1163             Visited.find(Entry.getBB());
1164         if (VI == Visited.end() || VI->second == Pointer.getAddr())
1165           continue;
1166 
1167         // We have a pointer mismatch in a block.  Just return false, saying
1168         // that something was clobbered in this result.  We could also do a
1169         // non-fully cached query, but there is little point in doing this.
1170         return false;
1171       }
1172     }
1173 
1174     Value *Addr = Pointer.getAddr();
1175     for (auto &Entry : *Cache) {
1176       Visited.insert(std::make_pair(Entry.getBB(), Addr));
1177       if (Entry.getResult().isNonLocal()) {
1178         continue;
1179       }
1180 
1181       if (DT.isReachableFromEntry(Entry.getBB())) {
1182         Result.push_back(
1183             NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
1184       }
1185     }
1186     ++NumCacheCompleteNonLocalPtr;
1187     return true;
1188   }
1189 
  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info can hold.  If the cache is empty, the result
  // will be valid cache info, otherwise it isn't.
1194   if (Cache->empty())
1195     CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
1196   else
1197     CacheInfo->Pair = BBSkipFirstBlockPair();
1198 
1199   SmallVector<BasicBlock *, 32> Worklist;
1200   Worklist.push_back(StartBB);
1201 
1202   // PredList used inside loop.
1203   SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;
1204 
1205   // Keep track of the entries that we know are sorted.  Previously cached
1206   // entries will all be sorted.  The entries we add we only sort on demand (we
1207   // don't insert every element into its sorted position).  We know that we
1208   // won't get any reuse from currently inserted values, because we don't
1209   // revisit blocks after we insert info for them.
1210   unsigned NumSortedEntries = Cache->size();
1211   unsigned WorklistEntries = BlockNumberLimit;
1212   bool GotWorklistLimit = false;
1213   DEBUG(AssertSorted(*Cache));
1214 
1215   while (!Worklist.empty()) {
1216     BasicBlock *BB = Worklist.pop_back_val();
1217 
    // If we do process a large number of blocks, it becomes very expensive
    // and likely isn't worth worrying about.
1220     if (Result.size() > NumResultsLimit) {
1221       Worklist.clear();
1222       // Sort it now (if needed) so that recursive invocations of
1223       // getNonLocalPointerDepFromBB and other routines that could reuse the
1224       // cache value will only see properly sorted cache arrays.
1225       if (Cache && NumSortedEntries != Cache->size()) {
1226         SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1227       }
1228       // Since we bail out, the "Cache" set won't contain all of the
1229       // results for the query.  This is ok (we can still use it to accelerate
1230       // specific block queries) but we can't do the fastpath "return all
1231       // results from the set".  Clear out the indicator for this.
1232       CacheInfo->Pair = BBSkipFirstBlockPair();
1233       return false;
1234     }
1235 
1236     // Skip the first block if we have it.
1237     if (!SkipFirstBlock) {
1238       // Analyze the dependency of *Pointer in FromBB.  See if we already have
1239       // been here.
1240       assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
1241 
1242       // Get the dependency info for Pointer in BB.  If we have cached
1243       // information, we will use it, otherwise we compute it.
1244       DEBUG(AssertSorted(*Cache, NumSortedEntries));
1245       MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
1246                                                  Cache, NumSortedEntries);
1247 
1248       // If we got a Def or Clobber, add this to the list of results.
1249       if (!Dep.isNonLocal()) {
1250         if (DT.isReachableFromEntry(BB)) {
1251           Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
1252           continue;
1253         }
1254       }
1255     }
1256 
1257     // If 'Pointer' is an instruction defined in this block, then we need to do
1258     // phi translation to change it into a value live in the predecessor block.
1259     // If not, we just add the predecessors to the worklist and scan them with
1260     // the same Pointer.
1261     if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
1262       SkipFirstBlock = false;
1263       SmallVector<BasicBlock *, 16> NewBlocks;
1264       for (BasicBlock *Pred : PredCache.get(BB)) {
1265         // Verify that we haven't looked at this block yet.
1266         std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1267             Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
1268         if (InsertRes.second) {
          // First time we've looked at this predecessor.
1270           NewBlocks.push_back(Pred);
1271           continue;
1272         }
1273 
1274         // If we have seen this block before, but it was with a different
1275         // pointer then we have a phi translation failure and we have to treat
1276         // this as a clobber.
1277         if (InsertRes.first->second != Pointer.getAddr()) {
1278           // Make sure to clean up the Visited map before continuing on to
1279           // PredTranslationFailure.
1280           for (unsigned i = 0; i < NewBlocks.size(); i++)
1281             Visited.erase(NewBlocks[i]);
1282           goto PredTranslationFailure;
1283         }
1284       }
1285       if (NewBlocks.size() > WorklistEntries) {
1286         // Make sure to clean up the Visited map before continuing on to
1287         // PredTranslationFailure.
1288         for (unsigned i = 0; i < NewBlocks.size(); i++)
1289           Visited.erase(NewBlocks[i]);
1290         GotWorklistLimit = true;
1291         goto PredTranslationFailure;
1292       }
1293       WorklistEntries -= NewBlocks.size();
1294       Worklist.append(NewBlocks.begin(), NewBlocks.end());
1295       continue;
1296     }
1297 
    // We do need to do phi translation.  If we know ahead of time that we
    // can't phi translate this value, don't even try.
1300     if (!Pointer.IsPotentiallyPHITranslatable())
1301       goto PredTranslationFailure;
1302 
1303     // We may have added values to the cache list before this PHI translation.
1304     // If so, we haven't done anything to ensure that the cache remains sorted.
1305     // Sort it now (if needed) so that recursive invocations of
1306     // getNonLocalPointerDepFromBB and other routines that could reuse the cache
1307     // value will only see properly sorted cache arrays.
1308     if (Cache && NumSortedEntries != Cache->size()) {
1309       SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1310       NumSortedEntries = Cache->size();
1311     }
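    // Null out Cache while recursing: the recursive calls to
    // getNonLocalPointerDepFromBB below may add entries to NonLocalPointerDeps
    // and invalidate this pointer; it is refreshed once the predecessors have
    // been processed (see "Refresh the CacheInfo/Cache pointer" below).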
1312     Cache = nullptr;
1313 
1314     PredList.clear();
1315     for (BasicBlock *Pred : PredCache.get(BB)) {
1316       PredList.push_back(std::make_pair(Pred, Pointer));
1317 
      // Get the PHI translated pointer in this predecessor.  This can fail if
      // the value is not translatable, in which case getAddr() returns null.
1320       PHITransAddr &PredPointer = PredList.back().second;
1321       PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
1322       Value *PredPtrVal = PredPointer.getAddr();
1323 
1324       // Check to see if we have already visited this pred block with another
1325       // pointer.  If so, we can't do this lookup.  This failure can occur
1326       // with PHI translation when a critical edge exists and the PHI node in
1327       // the successor translates to a pointer value different than the
1328       // pointer the block was first analyzed with.
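      // (That is, the Visited map can record only one translated address per
      // block for this query; two different addresses for the same block
      // cannot be represented and force the conservative path below.)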
1329       std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1330           Visited.insert(std::make_pair(Pred, PredPtrVal));
1331 
1332       if (!InsertRes.second) {
        // We already have an entry for this pred; take it back off the list
        // of preds to process.
1334         PredList.pop_back();
1335 
1336         // If the predecessor was visited with PredPtr, then we already did
1337         // the analysis and can ignore it.
1338         if (InsertRes.first->second == PredPtrVal)
1339           continue;
1340 
1341         // Otherwise, the block was previously analyzed with a different
1342         // pointer.  We can't represent the result of this case, so we just
1343         // treat this as a phi translation failure.
1344 
1345         // Make sure to clean up the Visited map before continuing on to
1346         // PredTranslationFailure.
        for (auto &PredEntry : PredList)
          Visited.erase(PredEntry.first);
1349 
1350         goto PredTranslationFailure;
1351       }
1352     }
1353 
    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for.  (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
1359     for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
1360       BasicBlock *Pred = PredList[i].first;
1361       PHITransAddr &PredPointer = PredList[i].second;
1362       Value *PredPtrVal = PredPointer.getAddr();
1363 
1364       bool CanTranslate = true;
1365       // If PHI translation was unable to find an available pointer in this
1366       // predecessor, then we have to assume that the pointer is clobbered in
1367       // that predecessor.  We can still do PRE of the load, which would insert
1368       // a computation of the pointer in this predecessor.
1369       if (!PredPtrVal)
1370         CanTranslate = false;
1371 
1372       // FIXME: it is entirely possible that PHI translating will end up with
1373       // the same value.  Consider PHI translating something like:
1374       // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
1375       // to recurse here, pedantically speaking.
1376 
1377       // If getNonLocalPointerDepFromBB fails here, that means the cached
1378       // result conflicted with the Visited list; we have to conservatively
1379       // assume it is unknown, but this also does not block PRE of the load.
1380       if (!CanTranslate ||
1381           !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
1382                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
1383                                       Pred, Result, Visited)) {
1384         // Add the entry to the Result list.
1385         NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
1386         Result.push_back(Entry);
1387 
        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  Future reuses of the cached
        // value will then have to do more work, but they won't miss the phi
        // translation failure.
1393         NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
1394         NLPI.Pair = BBSkipFirstBlockPair();
1395         continue;
1396       }
1397     }
1398 
1399     // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
1400     CacheInfo = &NonLocalPointerDeps[CacheKey];
1401     Cache = &CacheInfo->NonLocalDeps;
1402     NumSortedEntries = Cache->size();
1403 
1404     // Since we did phi translation, the "Cache" set won't contain all of the
1405     // results for the query.  This is ok (we can still use it to accelerate
1406     // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
1408     CacheInfo->Pair = BBSkipFirstBlockPair();
1409     SkipFirstBlock = false;
1410     continue;
1411 
1412   PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block.  It assumes that we haven't modified any of
    // our data structures while processing the current block.
1416 
1417     if (!Cache) {
1418       // Refresh the CacheInfo/Cache pointer if it got invalidated.
1419       CacheInfo = &NonLocalPointerDeps[CacheKey];
1420       Cache = &CacheInfo->NonLocalDeps;
1421       NumSortedEntries = Cache->size();
1422     }
1423 
1424     // Since we failed phi translation, the "Cache" set won't contain all of the
1425     // results for the query.  This is ok (we can still use it to accelerate
1426     // specific block queries) but we can't do the fastpath "return all
1427     // results from the set".  Clear out the indicator for this.
1428     CacheInfo->Pair = BBSkipFirstBlockPair();
1429 
1430     // If *nothing* works, mark the pointer as unknown.
1431     //
1432     // If this is the magic first block, return this as a clobber of the whole
1433     // incoming value.  Since we can't phi translate to one of the predecessors,
1434     // we have to bail out.
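    // (A false return signals the caller that we could not produce a complete
    // set of results; the recursive call above, for instance, then records an
    // Unknown entry for the corresponding predecessor.)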
1435     if (SkipFirstBlock)
1436       return false;
1437 
1438     bool foundBlock = false;
1439     for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
1440       if (I.getBB() != BB)
1441         continue;
1442 
1443       assert((GotWorklistLimit || I.getResult().isNonLocal() ||
1444               !DT.isReachableFromEntry(BB)) &&
1445              "Should only be here with transparent block");
1446       foundBlock = true;
1447       I.setResult(MemDepResult::getUnknown());
1448       Result.push_back(
1449           NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
1450       break;
1451     }
1452     (void)foundBlock; (void)GotWorklistLimit;
1453     assert((foundBlock || GotWorklistLimit) && "Current block not in cache?");
1454   }
1455 
1456   // Okay, we're done now.  If we added new values to the cache, re-sort it.
1457   SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1458   DEBUG(AssertSorted(*Cache));
1459   return true;
1460 }
1461 
1462 /// If P exists in CachedNonLocalPointerInfo, remove it.
1463 void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
1464     ValueIsLoadPair P) {
1465   CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
1466   if (It == NonLocalPointerDeps.end())
1467     return;
1468 
1469   // Remove all of the entries in the BB->val map.  This involves removing
1470   // instructions from the reverse map.
1471   NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
1472 
  for (const NonLocalDepEntry &Entry : PInfo) {
    Instruction *Target = Entry.getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == Entry.getBB());

    // We are removing this cached entry, so update the reverse map to match.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }
1482 
1483   // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
1484   NonLocalPointerDeps.erase(It);
1485 }
1486 
1487 void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
1488   // If Ptr isn't really a pointer, just ignore it.
1489   if (!Ptr->getType()->isPointerTy())
1490     return;
1491   // Flush store info for the pointer.
1492   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1493   // Flush load info for the pointer.
1494   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1495 }
1496 
1497 void MemoryDependenceResults::invalidateCachedPredecessors() {
1498   PredCache.clear();
1499 }
1500 
1501 void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the non-local dependencies, removing this one as the value
  // for any cached queries.
1504   NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
1505   if (NLDI != NonLocalDeps.end()) {
1506     NonLocalDepInfo &BlockMap = NLDI->second.first;
1507     for (auto &Entry : BlockMap)
1508       if (Instruction *Inst = Entry.getResult().getInst())
1509         RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
1510     NonLocalDeps.erase(NLDI);
1511   }
1512 
1513   // If we have a cached local dependence query for this instruction, remove it.
1514   LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1515   if (LocalDepEntry != LocalDeps.end()) {
1516     // Remove us from DepInst's reverse set now that the local dep info is gone.
1517     if (Instruction *Inst = LocalDepEntry->second.getInst())
1518       RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
1519 
1520     // Remove this local dependency info.
1521     LocalDeps.erase(LocalDepEntry);
1522   }
1523 
1524   // If we have any cached pointer dependencies on this instruction, remove
1525   // them.  If the instruction has non-pointer type, then it can't be a pointer
1526   // base.
1527 
1528   // Remove it from both the load info and the store info.  The instruction
1529   // can't be in either of these maps if it is non-pointer.
1530   if (RemInst->getType()->isPointerTy()) {
1531     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
1532     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1533   }
1534 
1535   // Loop over all of the things that depend on the instruction we're removing.
1536   SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;
1537 
1538   // If we find RemInst as a clobber or Def in any of the maps for other values,
1539   // we need to replace its entry with a dirty version of the instruction after
1540   // it.  If RemInst is a terminator, we use a null dirty value.
1541   //
1542   // Using a dirty version of the instruction after RemInst saves having to scan
1543   // the entire block to get to this point.
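  // For example, if RemInst was a store that some later load had cached as its
  // Def, that load's entry becomes dirty at the instruction following RemInst,
  // so a future query can resume scanning there rather than rescanning the
  // whole block.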
1544   MemDepResult NewDirtyVal;
1545   if (!RemInst->isTerminator())
1546     NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
1547 
1548   ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1549   if (ReverseDepIt != ReverseLocalDeps.end()) {
1550     // RemInst can't be the terminator if it has local stuff depending on it.
1551     assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
1552            "Nothing can locally depend on a terminator");
1553 
1554     for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
1555       assert(InstDependingOnRemInst != RemInst &&
1556              "Already removed our local dep info");
1557 
1558       LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
1559 
      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
1561       assert(NewDirtyVal.getInst() &&
1562              "There is no way something else can have "
1563              "a local dep on this if it is a terminator!");
1564       ReverseDepsToAdd.push_back(
1565           std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
1566     }
1567 
1568     ReverseLocalDeps.erase(ReverseDepIt);
1569 
1570     // Add new reverse deps after scanning the set, to avoid invalidating the
1571     // 'ReverseDeps' reference.
1572     while (!ReverseDepsToAdd.empty()) {
1573       ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
1574           ReverseDepsToAdd.back().second);
1575       ReverseDepsToAdd.pop_back();
1576     }
1577   }
1578 
1579   ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1580   if (ReverseDepIt != ReverseNonLocalDeps.end()) {
1581     for (Instruction *I : ReverseDepIt->second) {
1582       assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
1583 
1584       PerInstNLInfo &INLD = NonLocalDeps[I];
1585       // The information is now dirty!
1586       INLD.second = true;
1587 
1588       for (auto &Entry : INLD.first) {
1589         if (Entry.getResult().getInst() != RemInst)
1590           continue;
1591 
1592         // Convert to a dirty entry for the subsequent instruction.
1593         Entry.setResult(NewDirtyVal);
1594 
1595         if (Instruction *NextI = NewDirtyVal.getInst())
1596           ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
1597       }
1598     }
1599 
1600     ReverseNonLocalDeps.erase(ReverseDepIt);
1601 
    // Add new reverse deps after scanning the set, to avoid invalidating the
    // set we are iterating over.
1603     while (!ReverseDepsToAdd.empty()) {
1604       ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
1605           ReverseDepsToAdd.back().second);
1606       ReverseDepsToAdd.pop_back();
1607     }
1608   }
1609 
1610   // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1611   // value in the NonLocalPointerDeps info.
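  // That is, some cached pointer query has RemInst recorded as the dependency
  // result for one of its blocks; each such entry is converted to a dirty
  // entry below.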
1612   ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
1613       ReverseNonLocalPtrDeps.find(RemInst);
1614   if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
1615     SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
1616         ReversePtrDepsToAdd;
1617 
1618     for (ValueIsLoadPair P : ReversePtrDepIt->second) {
1619       assert(P.getPointer() != RemInst &&
1620              "Already removed NonLocalPointerDeps info for RemInst");
1621 
1622       NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
1623 
1624       // The cache is not valid for any specific block anymore.
1625       NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
1626 
1627       // Update any entries for RemInst to use the instruction after it.
1628       for (auto &Entry : NLPDI) {
1629         if (Entry.getResult().getInst() != RemInst)
1630           continue;
1631 
1632         // Convert to a dirty entry for the subsequent instruction.
1633         Entry.setResult(NewDirtyVal);
1634 
1635         if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1636           ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1637       }
1638 
      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may have invalidated the sort order.
1641       std::sort(NLPDI.begin(), NLPDI.end());
1642     }
1643 
1644     ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
1645 
1646     while (!ReversePtrDepsToAdd.empty()) {
1647       ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
1648           ReversePtrDepsToAdd.back().second);
1649       ReversePtrDepsToAdd.pop_back();
1650     }
1651   }
1652 
1653   assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
1654   DEBUG(verifyRemoved(RemInst));
1655 }
1656 
1657 /// Verify that the specified instruction does not occur in our internal data
1658 /// structures.
1659 ///
/// The verification is done by asserting in debug builds (this is a no-op in
/// release builds).
1661 void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
1662 #ifndef NDEBUG
1663   for (const auto &DepKV : LocalDeps) {
1664     assert(DepKV.first != D && "Inst occurs in data structures");
1665     assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
1666   }
1667 
1668   for (const auto &DepKV : NonLocalPointerDeps) {
1669     assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
1670     for (const auto &Entry : DepKV.second.NonLocalDeps)
1671       assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
1672   }
1673 
1674   for (const auto &DepKV : NonLocalDeps) {
1675     assert(DepKV.first != D && "Inst occurs in data structures");
1676     const PerInstNLInfo &INLD = DepKV.second;
1677     for (const auto &Entry : INLD.first)
1678       assert(Entry.getResult().getInst() != D &&
1679              "Inst occurs in data structures");
1680   }
1681 
1682   for (const auto &DepKV : ReverseLocalDeps) {
1683     assert(DepKV.first != D && "Inst occurs in data structures");
1684     for (Instruction *Inst : DepKV.second)
1685       assert(Inst != D && "Inst occurs in data structures");
1686   }
1687 
1688   for (const auto &DepKV : ReverseNonLocalDeps) {
1689     assert(DepKV.first != D && "Inst occurs in data structures");
1690     for (Instruction *Inst : DepKV.second)
1691       assert(Inst != D && "Inst occurs in data structures");
1692   }
1693 
1694   for (const auto &DepKV : ReverseNonLocalPtrDeps) {
1695     assert(DepKV.first != D && "Inst occurs in rev NLPD map");
1696 
1697     for (ValueIsLoadPair P : DepKV.second)
1698       assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
1699              "Inst occurs in ReverseNonLocalPtrDeps map");
1700   }
1701 #endif
1702 }
1703 
1704 AnalysisKey MemoryDependenceAnalysis::Key;
1705 
1706 MemoryDependenceResults
1707 MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
1708   auto &AA = AM.getResult<AAManager>(F);
1709   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1710   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1711   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1712   return MemoryDependenceResults(AA, AC, TLI, DT);
1713 }
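// (For reference: under the new pass manager, a consumer would typically
// obtain this result via
//    MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
// where AM is the FunctionAnalysisManager passed to its run() method.)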
1714 
1715 char MemoryDependenceWrapperPass::ID = 0;
1716 
1717 INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
1718                       "Memory Dependence Analysis", false, true)
1719 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1720 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
1721 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1722 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1723 INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
1724                     "Memory Dependence Analysis", false, true)
1725 
1726 MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
1727   initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
1728 }
1729 
1730 MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;
1731 
1732 void MemoryDependenceWrapperPass::releaseMemory() {
1733   MemDep.reset();
1734 }
1735 
1736 void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1737   AU.setPreservesAll();
1738   AU.addRequired<AssumptionCacheTracker>();
1739   AU.addRequired<DominatorTreeWrapperPass>();
1740   AU.addRequiredTransitive<AAResultsWrapperPass>();
1741   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1742 }
1743 
bool MemoryDependenceResults::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
1746   // Check whether our analysis is preserved.
1747   auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
1748   if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
1749     // If not, give up now.
1750     return true;
1751 
1752   // Check whether the analyses we depend on became invalid for any reason.
1753   if (Inv.invalidate<AAManager>(F, PA) ||
1754       Inv.invalidate<AssumptionAnalysis>(F, PA) ||
1755       Inv.invalidate<DominatorTreeAnalysis>(F, PA))
1756     return true;
1757 
1758   // Otherwise this analysis result remains valid.
1759   return false;
1760 }
1761 
1762 unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
1763   return BlockScanLimit;
1764 }
1765 
1766 bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
1767   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1768   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1769   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1770   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1771   MemDep.emplace(AA, AC, TLI, DT);
1772   return false;
1773 }
1774