1 //===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an analysis that determines, for a given memory
10 // operation, what preceding memory operations it depends on.  It builds on
11 // alias analysis information, and tries to provide a lazy, caching interface to
12 // a common kind of alias information query.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/AssumptionCache.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/MemoryLocation.h"
26 #include "llvm/Analysis/OrderedBasicBlock.h"
27 #include "llvm/Analysis/PHITransAddr.h"
28 #include "llvm/Analysis/PhiValues.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Attributes.h"
32 #include "llvm/IR/BasicBlock.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Dominators.h"
37 #include "llvm/IR/Function.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instruction.h"
40 #include "llvm/IR/Instructions.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/LLVMContext.h"
43 #include "llvm/IR/Metadata.h"
44 #include "llvm/IR/Module.h"
45 #include "llvm/IR/PredIteratorCache.h"
46 #include "llvm/IR/Type.h"
47 #include "llvm/IR/Use.h"
48 #include "llvm/IR/User.h"
49 #include "llvm/IR/Value.h"
50 #include "llvm/InitializePasses.h"
51 #include "llvm/Pass.h"
52 #include "llvm/Support/AtomicOrdering.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Compiler.h"
56 #include "llvm/Support/Debug.h"
57 #include "llvm/Support/MathExtras.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstdint>
61 #include <iterator>
62 #include <utility>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "memdep"
67 
68 STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
69 STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
70 STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
71 
72 STATISTIC(NumCacheNonLocalPtr,
73           "Number of fully cached non-local ptr responses");
74 STATISTIC(NumCacheDirtyNonLocalPtr,
75           "Number of cached, but dirty, non-local ptr responses");
76 STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
77 STATISTIC(NumCacheCompleteNonLocalPtr,
78           "Number of block queries that were completely cached");
79 
80 // Limit for the number of instructions to scan in a block.
81 
82 static cl::opt<unsigned> BlockScanLimit(
83     "memdep-block-scan-limit", cl::Hidden, cl::init(100),
84     cl::desc("The number of instructions to scan in a block in memory "
85              "dependency analysis (default = 100)"));
86 
87 static cl::opt<unsigned>
88     BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
89                      cl::desc("The number of blocks to scan during memory "
90                               "dependency analysis (default = 1000)"));
91 
92 // Limit on the number of memdep results to process.
93 static const unsigned int NumResultsLimit = 100;
94 
95 /// This is a helper function that removes Val from 'Inst's set in ReverseMap.
96 ///
97 /// If the set becomes empty, remove Inst's entry.
98 template <typename KeyTy>
99 static void
100 RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
101                      Instruction *Inst, KeyTy Val) {
102   typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
103       ReverseMap.find(Inst);
104   assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
105   bool Found = InstIt->second.erase(Val);
106   assert(Found && "Invalid reverse map!");
107   (void)Found;
108   if (InstIt->second.empty())
109     ReverseMap.erase(InstIt);
110 }
111 
112 /// If the given instruction references a specific memory location, fill in Loc
113 /// with the details, otherwise set Loc.Ptr to null.
114 ///
115 /// Returns a ModRefInfo value describing the general behavior of the
116 /// instruction.
117 static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
118                               const TargetLibraryInfo &TLI) {
119   if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
120     if (LI->isUnordered()) {
121       Loc = MemoryLocation::get(LI);
122       return ModRefInfo::Ref;
123     }
124     if (LI->getOrdering() == AtomicOrdering::Monotonic) {
125       Loc = MemoryLocation::get(LI);
126       return ModRefInfo::ModRef;
127     }
128     Loc = MemoryLocation();
129     return ModRefInfo::ModRef;
130   }
131 
132   if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
133     if (SI->isUnordered()) {
134       Loc = MemoryLocation::get(SI);
135       return ModRefInfo::Mod;
136     }
137     if (SI->getOrdering() == AtomicOrdering::Monotonic) {
138       Loc = MemoryLocation::get(SI);
139       return ModRefInfo::ModRef;
140     }
141     Loc = MemoryLocation();
142     return ModRefInfo::ModRef;
143   }
144 
145   if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
146     Loc = MemoryLocation::get(V);
147     return ModRefInfo::ModRef;
148   }
149 
150   if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
151     // calls to free() deallocate the entire structure
152     Loc = MemoryLocation(CI->getArgOperand(0));
153     return ModRefInfo::Mod;
154   }
155 
156   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
157     switch (II->getIntrinsicID()) {
158     case Intrinsic::lifetime_start:
159     case Intrinsic::lifetime_end:
160     case Intrinsic::invariant_start:
161       Loc = MemoryLocation::getForArgument(II, 1, TLI);
162       // These intrinsics don't really modify the memory, but returning Mod
163       // will allow them to be handled conservatively.
164       return ModRefInfo::Mod;
165     case Intrinsic::invariant_end:
166       Loc = MemoryLocation::getForArgument(II, 2, TLI);
167       // These intrinsics don't really modify the memory, but returning Mod
168       // will allow them to be handled conservatively.
169       return ModRefInfo::Mod;
170     default:
171       break;
172     }
173   }
174 
175   // Otherwise, just do the coarse-grained thing that always works.
176   if (Inst->mayWriteToMemory())
177     return ModRefInfo::ModRef;
178   if (Inst->mayReadFromMemory())
179     return ModRefInfo::Ref;
180   return ModRefInfo::NoModRef;
181 }
182 
183 /// Private helper for finding the local dependencies of a call site.
184 MemDepResult MemoryDependenceResults::getCallDependencyFrom(
185     CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
186     BasicBlock *BB) {
187   unsigned Limit = getDefaultBlockScanLimit();
188 
189   // Walk backwards through the block, looking for dependencies.
190   while (ScanIt != BB->begin()) {
191     Instruction *Inst = &*--ScanIt;
192     // Debug intrinsics don't cause dependences and should not affect Limit
193     if (isa<DbgInfoIntrinsic>(Inst))
194       continue;
195 
196     // Limit the amount of scanning we do so we don't end up with quadratic
197     // running time on extreme testcases.
198     --Limit;
199     if (!Limit)
200       return MemDepResult::getUnknown();
201 
202     // If this inst is a memory op, get the pointer it accessed
203     MemoryLocation Loc;
204     ModRefInfo MR = GetLocation(Inst, Loc, TLI);
205     if (Loc.Ptr) {
206       // A simple instruction.
207       if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
208         return MemDepResult::getClobber(Inst);
209       continue;
210     }
211 
212     if (auto *CallB = dyn_cast<CallBase>(Inst)) {
213       // If these two calls do not interfere, look past it.
214       if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
215         // If the two calls are the same, return Inst as a Def, so that
216         // Call can be found redundant and eliminated.
217         if (isReadOnlyCall && !isModSet(MR) &&
218             Call->isIdenticalToWhenDefined(CallB))
219           return MemDepResult::getDef(Inst);
220 
221         // Otherwise if the two calls don't interact (e.g. CallB is readnone)
222         // keep scanning.
223         continue;
224       } else
225         return MemDepResult::getClobber(Inst);
226     }
227 
228     // If we could not obtain a pointer for the instruction and the instruction
229     // touches memory then assume that this is a dependency.
230     if (isModOrRefSet(MR))
231       return MemDepResult::getClobber(Inst);
232   }
233 
234   // No dependence found.  If this is the entry block of the function, it is
235   // unknown, otherwise it is non-local.
236   if (BB != &BB->getParent()->getEntryBlock())
237     return MemDepResult::getNonLocal();
238   return MemDepResult::getNonFuncLocal();
239 }
240 
241 static bool isVolatile(Instruction *Inst) {
242   if (auto *LI = dyn_cast<LoadInst>(Inst))
243     return LI->isVolatile();
244   if (auto *SI = dyn_cast<StoreInst>(Inst))
245     return SI->isVolatile();
246   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
247     return AI->isVolatile();
248   return false;
249 }
250 
251 MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
252     const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
253     BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
254     OrderedBasicBlock *OBB) {
255   MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
256   if (QueryInst != nullptr) {
257     if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
258       InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);
259 
260       if (InvariantGroupDependency.isDef())
261         return InvariantGroupDependency;
262     }
263   }
264   MemDepResult SimpleDep = getSimplePointerDependencyFrom(
265       MemLoc, isLoad, ScanIt, BB, QueryInst, Limit, OBB);
266   if (SimpleDep.isDef())
267     return SimpleDep;
268   // Non-local invariant group dependency indicates there is non local Def
269   // (it only returns nonLocal if it finds nonLocal def), which is better than
270   // local clobber and everything else.
271   if (InvariantGroupDependency.isNonLocal())
272     return InvariantGroupDependency;
273 
274   assert(InvariantGroupDependency.isUnknown() &&
275          "InvariantGroupDependency should be only unknown at this point");
276   return SimpleDep;
277 }
278 
279 MemDepResult
280 MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
281                                                             BasicBlock *BB) {
282 
283   if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
284     return MemDepResult::getUnknown();
285 
286   // Take the ptr operand after all casts and geps 0. This way we can search
287   // cast graph down only.
288   Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();
289 
290   // It's is not safe to walk the use list of global value, because function
291   // passes aren't allowed to look outside their functions.
292   // FIXME: this could be fixed by filtering instructions from outside
293   // of current function.
294   if (isa<GlobalValue>(LoadOperand))
295     return MemDepResult::getUnknown();
296 
297   // Queue to process all pointers that are equivalent to load operand.
298   SmallVector<const Value *, 8> LoadOperandsQueue;
299   LoadOperandsQueue.push_back(LoadOperand);
300 
301   Instruction *ClosestDependency = nullptr;
302   // Order of instructions in uses list is unpredictible. In order to always
303   // get the same result, we will look for the closest dominance.
304   auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
305     assert(Other && "Must call it with not null instruction");
306     if (Best == nullptr || DT.dominates(Best, Other))
307       return Other;
308     return Best;
309   };
310 
311   // FIXME: This loop is O(N^2) because dominates can be O(n) and in worst case
312   // we will see all the instructions. This should be fixed in MSSA.
313   while (!LoadOperandsQueue.empty()) {
314     const Value *Ptr = LoadOperandsQueue.pop_back_val();
315     assert(Ptr && !isa<GlobalValue>(Ptr) &&
316            "Null or GlobalValue should not be inserted");
317 
318     for (const Use &Us : Ptr->uses()) {
319       auto *U = dyn_cast<Instruction>(Us.getUser());
320       if (!U || U == LI || !DT.dominates(U, LI))
321         continue;
322 
323       // Bitcast or gep with zeros are using Ptr. Add to queue to check it's
324       // users.      U = bitcast Ptr
325       if (isa<BitCastInst>(U)) {
326         LoadOperandsQueue.push_back(U);
327         continue;
328       }
329       // Gep with zeros is equivalent to bitcast.
330       // FIXME: we are not sure if some bitcast should be canonicalized to gep 0
331       // or gep 0 to bitcast because of SROA, so there are 2 forms. When
332       // typeless pointers will be ready then both cases will be gone
333       // (and this BFS also won't be needed).
334       if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
335         if (GEP->hasAllZeroIndices()) {
336           LoadOperandsQueue.push_back(U);
337           continue;
338         }
339 
340       // If we hit load/store with the same invariant.group metadata (and the
341       // same pointer operand) we can assume that value pointed by pointer
342       // operand didn't change.
343       if ((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
344           U->hasMetadata(LLVMContext::MD_invariant_group))
345         ClosestDependency = GetClosestDependency(ClosestDependency, U);
346     }
347   }
348 
349   if (!ClosestDependency)
350     return MemDepResult::getUnknown();
351   if (ClosestDependency->getParent() == BB)
352     return MemDepResult::getDef(ClosestDependency);
353   // Def(U) can't be returned here because it is non-local. If local
354   // dependency won't be found then return nonLocal counting that the
355   // user will call getNonLocalPointerDependency, which will return cached
356   // result.
357   NonLocalDefsCache.try_emplace(
358       LI, NonLocalDepResult(ClosestDependency->getParent(),
359                             MemDepResult::getDef(ClosestDependency), nullptr));
360   ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
361   return MemDepResult::getNonLocal();
362 }
363 
364 MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
365     const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
366     BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
367     OrderedBasicBlock *OBB) {
368   bool isInvariantLoad = false;
369 
370   unsigned DefaultLimit = getDefaultBlockScanLimit();
371   if (!Limit)
372     Limit = &DefaultLimit;
373 
374   // We must be careful with atomic accesses, as they may allow another thread
375   //   to touch this location, clobbering it. We are conservative: if the
376   //   QueryInst is not a simple (non-atomic) memory access, we automatically
377   //   return getClobber.
378   // If it is simple, we know based on the results of
379   // "Compiler testing via a theory of sound optimisations in the C11/C++11
380   //   memory model" in PLDI 2013, that a non-atomic location can only be
381   //   clobbered between a pair of a release and an acquire action, with no
382   //   access to the location in between.
383   // Here is an example for giving the general intuition behind this rule.
384   // In the following code:
385   //   store x 0;
386   //   release action; [1]
387   //   acquire action; [4]
388   //   %val = load x;
389   // It is unsafe to replace %val by 0 because another thread may be running:
390   //   acquire action; [2]
391   //   store x 42;
392   //   release action; [3]
393   // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
394   // being 42. A key property of this program however is that if either
395   // 1 or 4 were missing, there would be a race between the store of 42
396   // either the store of 0 or the load (making the whole program racy).
397   // The paper mentioned above shows that the same property is respected
398   // by every program that can detect any optimization of that kind: either
399   // it is racy (undefined) or there is a release followed by an acquire
400   // between the pair of accesses under consideration.
401 
402   // If the load is invariant, we "know" that it doesn't alias *any* write. We
403   // do want to respect mustalias results since defs are useful for value
404   // forwarding, but any mayalias write can be assumed to be noalias.
405   // Arguably, this logic should be pushed inside AliasAnalysis itself.
406   if (isLoad && QueryInst) {
407     LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
408     if (LI && LI->hasMetadata(LLVMContext::MD_invariant_load))
409       isInvariantLoad = true;
410   }
411 
412   const DataLayout &DL = BB->getModule()->getDataLayout();
413 
414   // If the caller did not provide an ordered basic block,
415   // create one to lazily compute and cache instruction
416   // positions inside a BB. This is used to provide fast queries for relative
417   // position between two instructions in a BB and can be used by
418   // AliasAnalysis::callCapturesBefore.
419   OrderedBasicBlock OBBTmp(BB);
420   if (!OBB)
421     OBB = &OBBTmp;
422 
423   // Return "true" if and only if the instruction I is either a non-simple
424   // load or a non-simple store.
425   auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
426     if (auto *LI = dyn_cast<LoadInst>(I))
427       return !LI->isSimple();
428     if (auto *SI = dyn_cast<StoreInst>(I))
429       return !SI->isSimple();
430     return false;
431   };
432 
433   // Return "true" if I is not a load and not a store, but it does access
434   // memory.
435   auto isOtherMemAccess = [](Instruction *I) -> bool {
436     return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
437   };
438 
439   // Walk backwards through the basic block, looking for dependencies.
440   while (ScanIt != BB->begin()) {
441     Instruction *Inst = &*--ScanIt;
442 
443     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
444       // Debug intrinsics don't (and can't) cause dependencies.
445       if (isa<DbgInfoIntrinsic>(II))
446         continue;
447 
448     // Limit the amount of scanning we do so we don't end up with quadratic
449     // running time on extreme testcases.
450     --*Limit;
451     if (!*Limit)
452       return MemDepResult::getUnknown();
453 
454     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
455       // If we reach a lifetime begin or end marker, then the query ends here
456       // because the value is undefined.
457       if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
458         // FIXME: This only considers queries directly on the invariant-tagged
459         // pointer, not on query pointers that are indexed off of them.  It'd
460         // be nice to handle that at some point (the right approach is to use
461         // GetPointerBaseWithConstantOffset).
462         if (AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
463           return MemDepResult::getDef(II);
464         continue;
465       }
466     }
467 
468     // Values depend on loads if the pointers are must aliased.  This means
469     // that a load depends on another must aliased load from the same value.
470     // One exception is atomic loads: a value can depend on an atomic load that
471     // it does not alias with when this atomic load indicates that another
472     // thread may be accessing the location.
473     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
474       // While volatile access cannot be eliminated, they do not have to clobber
475       // non-aliasing locations, as normal accesses, for example, can be safely
476       // reordered with volatile accesses.
477       if (LI->isVolatile()) {
478         if (!QueryInst)
479           // Original QueryInst *may* be volatile
480           return MemDepResult::getClobber(LI);
481         if (isVolatile(QueryInst))
482           // Ordering required if QueryInst is itself volatile
483           return MemDepResult::getClobber(LI);
484         // Otherwise, volatile doesn't imply any special ordering
485       }
486 
487       // Atomic loads have complications involved.
488       // A Monotonic (or higher) load is OK if the query inst is itself not
489       // atomic.
490       // FIXME: This is overly conservative.
491       if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
492         if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
493             isOtherMemAccess(QueryInst))
494           return MemDepResult::getClobber(LI);
495         if (LI->getOrdering() != AtomicOrdering::Monotonic)
496           return MemDepResult::getClobber(LI);
497       }
498 
499       MemoryLocation LoadLoc = MemoryLocation::get(LI);
500 
501       // If we found a pointer, check if it could be the same as our pointer.
502       AliasResult R = AA.alias(LoadLoc, MemLoc);
503 
504       if (isLoad) {
505         if (R == NoAlias)
506           continue;
507 
508         // Must aliased loads are defs of each other.
509         if (R == MustAlias)
510           return MemDepResult::getDef(Inst);
511 
512 #if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
513       // in terms of clobbering loads, but since it does this by looking
514       // at the clobbering load directly, it doesn't know about any
515       // phi translation that may have happened along the way.
516 
517         // If we have a partial alias, then return this as a clobber for the
518         // client to handle.
519         if (R == PartialAlias)
520           return MemDepResult::getClobber(Inst);
521 #endif
522 
523         // Random may-alias loads don't depend on each other without a
524         // dependence.
525         continue;
526       }
527 
528       // Stores don't depend on other no-aliased accesses.
529       if (R == NoAlias)
530         continue;
531 
532       // Stores don't alias loads from read-only memory.
533       if (AA.pointsToConstantMemory(LoadLoc))
534         continue;
535 
536       // Stores depend on may/must aliased loads.
537       return MemDepResult::getDef(Inst);
538     }
539 
540     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
541       // Atomic stores have complications involved.
542       // A Monotonic store is OK if the query inst is itself not atomic.
543       // FIXME: This is overly conservative.
544       if (!SI->isUnordered() && SI->isAtomic()) {
545         if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
546             isOtherMemAccess(QueryInst))
547           return MemDepResult::getClobber(SI);
548         if (SI->getOrdering() != AtomicOrdering::Monotonic)
549           return MemDepResult::getClobber(SI);
550       }
551 
552       // FIXME: this is overly conservative.
553       // While volatile access cannot be eliminated, they do not have to clobber
554       // non-aliasing locations, as normal accesses can for example be reordered
555       // with volatile accesses.
556       if (SI->isVolatile())
557         if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
558             isOtherMemAccess(QueryInst))
559           return MemDepResult::getClobber(SI);
560 
561       // If alias analysis can tell that this store is guaranteed to not modify
562       // the query pointer, ignore it.  Use getModRefInfo to handle cases where
563       // the query pointer points to constant memory etc.
564       if (!isModOrRefSet(AA.getModRefInfo(SI, MemLoc)))
565         continue;
566 
567       // Ok, this store might clobber the query pointer.  Check to see if it is
568       // a must alias: in this case, we want to return this as a def.
569       // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
570       MemoryLocation StoreLoc = MemoryLocation::get(SI);
571 
572       // If we found a pointer, check if it could be the same as our pointer.
573       AliasResult R = AA.alias(StoreLoc, MemLoc);
574 
575       if (R == NoAlias)
576         continue;
577       if (R == MustAlias)
578         return MemDepResult::getDef(Inst);
579       if (isInvariantLoad)
580         continue;
581       return MemDepResult::getClobber(Inst);
582     }
583 
584     // If this is an allocation, and if we know that the accessed pointer is to
585     // the allocation, return Def.  This means that there is no dependence and
586     // the access can be optimized based on that.  For example, a load could
587     // turn into undef.  Note that we can bypass the allocation itself when
588     // looking for a clobber in many cases; that's an alias property and is
589     // handled by BasicAA.
590     if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
591       const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
592       if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
593         return MemDepResult::getDef(Inst);
594     }
595 
596     if (isInvariantLoad)
597       continue;
598 
599     // A release fence requires that all stores complete before it, but does
600     // not prevent the reordering of following loads or stores 'before' the
601     // fence.  As a result, we look past it when finding a dependency for
602     // loads.  DSE uses this to find preceding stores to delete and thus we
603     // can't bypass the fence if the query instruction is a store.
604     if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
605       if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
606         continue;
607 
608     // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
609     ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
610     // If necessary, perform additional analysis.
611     if (isModAndRefSet(MR))
612       MR = AA.callCapturesBefore(Inst, MemLoc, &DT, OBB);
613     switch (clearMust(MR)) {
614     case ModRefInfo::NoModRef:
615       // If the call has no effect on the queried pointer, just ignore it.
616       continue;
617     case ModRefInfo::Mod:
618       return MemDepResult::getClobber(Inst);
619     case ModRefInfo::Ref:
620       // If the call is known to never store to the pointer, and if this is a
621       // load query, we can safely ignore it (scan past it).
622       if (isLoad)
623         continue;
624       LLVM_FALLTHROUGH;
625     default:
626       // Otherwise, there is a potential dependence.  Return a clobber.
627       return MemDepResult::getClobber(Inst);
628     }
629   }
630 
631   // No dependence found.  If this is the entry block of the function, it is
632   // unknown, otherwise it is non-local.
633   if (BB != &BB->getParent()->getEntryBlock())
634     return MemDepResult::getNonLocal();
635   return MemDepResult::getNonFuncLocal();
636 }
637 
638 MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst,
639                                                     OrderedBasicBlock *OBB) {
640   Instruction *ScanPos = QueryInst;
641 
642   // Check for a cached result
643   MemDepResult &LocalCache = LocalDeps[QueryInst];
644 
645   // If the cached entry is non-dirty, just return it.  Note that this depends
646   // on MemDepResult's default constructing to 'dirty'.
647   if (!LocalCache.isDirty())
648     return LocalCache;
649 
650   // Otherwise, if we have a dirty entry, we know we can start the scan at that
651   // instruction, which may save us some work.
652   if (Instruction *Inst = LocalCache.getInst()) {
653     ScanPos = Inst;
654 
655     RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
656   }
657 
658   BasicBlock *QueryParent = QueryInst->getParent();
659 
660   // Do the scan.
661   if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
662     // No dependence found. If this is the entry block of the function, it is
663     // unknown, otherwise it is non-local.
664     if (QueryParent != &QueryParent->getParent()->getEntryBlock())
665       LocalCache = MemDepResult::getNonLocal();
666     else
667       LocalCache = MemDepResult::getNonFuncLocal();
668   } else {
669     MemoryLocation MemLoc;
670     ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
671     if (MemLoc.Ptr) {
672       // If we can do a pointer scan, make it happen.
673       bool isLoad = !isModSet(MR);
674       if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
675         isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
676 
677       LocalCache =
678           getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
679                                    QueryParent, QueryInst, nullptr, OBB);
680     } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
681       bool isReadOnly = AA.onlyReadsMemory(QueryCall);
682       LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
683                                          ScanPos->getIterator(), QueryParent);
684     } else
685       // Non-memory instruction.
686       LocalCache = MemDepResult::getUnknown();
687   }
688 
689   // Remember the result!
690   if (Instruction *I = LocalCache.getInst())
691     ReverseLocalDeps[I].insert(QueryInst);
692 
693   return LocalCache;
694 }
695 
696 #ifndef NDEBUG
697 /// This method is used when -debug is specified to verify that cache arrays
698 /// are properly kept sorted.
699 static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
700                          int Count = -1) {
701   if (Count == -1)
702     Count = Cache.size();
703   assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
704          "Cache isn't sorted!");
705 }
706 #endif
707 
708 const MemoryDependenceResults::NonLocalDepInfo &
709 MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
710   assert(getDependency(QueryCall).isNonLocal() &&
711          "getNonLocalCallDependency should only be used on calls with "
712          "non-local deps!");
713   PerInstNLInfo &CacheP = NonLocalDeps[QueryCall];
714   NonLocalDepInfo &Cache = CacheP.first;
715 
716   // This is the set of blocks that need to be recomputed.  In the cached case,
717   // this can happen due to instructions being deleted etc. In the uncached
718   // case, this starts out as the set of predecessors we care about.
719   SmallVector<BasicBlock *, 32> DirtyBlocks;
720 
721   if (!Cache.empty()) {
722     // Okay, we have a cache entry.  If we know it is not dirty, just return it
723     // with no computation.
724     if (!CacheP.second) {
725       ++NumCacheNonLocal;
726       return Cache;
727     }
728 
729     // If we already have a partially computed set of results, scan them to
730     // determine what is dirty, seeding our initial DirtyBlocks worklist.
731     for (auto &Entry : Cache)
732       if (Entry.getResult().isDirty())
733         DirtyBlocks.push_back(Entry.getBB());
734 
735     // Sort the cache so that we can do fast binary search lookups below.
736     llvm::sort(Cache);
737 
738     ++NumCacheDirtyNonLocal;
739     // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
740     //     << Cache.size() << " cached: " << *QueryInst;
741   } else {
742     // Seed DirtyBlocks with each of the preds of QueryInst's block.
743     BasicBlock *QueryBB = QueryCall->getParent();
744     for (BasicBlock *Pred : PredCache.get(QueryBB))
745       DirtyBlocks.push_back(Pred);
746     ++NumUncacheNonLocal;
747   }
748 
749   // isReadonlyCall - If this is a read-only call, we can be more aggressive.
750   bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);
751 
752   SmallPtrSet<BasicBlock *, 32> Visited;
753 
754   unsigned NumSortedEntries = Cache.size();
755   LLVM_DEBUG(AssertSorted(Cache));
756 
757   // Iterate while we still have blocks to update.
758   while (!DirtyBlocks.empty()) {
759     BasicBlock *DirtyBB = DirtyBlocks.back();
760     DirtyBlocks.pop_back();
761 
762     // Already processed this block?
763     if (!Visited.insert(DirtyBB).second)
764       continue;
765 
766     // Do a binary search to see if we already have an entry for this block in
767     // the cache set.  If so, find it.
768     LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
769     NonLocalDepInfo::iterator Entry =
770         std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
771                          NonLocalDepEntry(DirtyBB));
772     if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
773       --Entry;
774 
775     NonLocalDepEntry *ExistingResult = nullptr;
776     if (Entry != Cache.begin() + NumSortedEntries &&
777         Entry->getBB() == DirtyBB) {
778       // If we already have an entry, and if it isn't already dirty, the block
779       // is done.
780       if (!Entry->getResult().isDirty())
781         continue;
782 
783       // Otherwise, remember this slot so we can update the value.
784       ExistingResult = &*Entry;
785     }
786 
787     // If the dirty entry has a pointer, start scanning from it so we don't have
788     // to rescan the entire block.
789     BasicBlock::iterator ScanPos = DirtyBB->end();
790     if (ExistingResult) {
791       if (Instruction *Inst = ExistingResult->getResult().getInst()) {
792         ScanPos = Inst->getIterator();
793         // We're removing QueryInst's use of Inst.
794         RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
795                                             QueryCall);
796       }
797     }
798 
799     // Find out if this block has a local dependency for QueryInst.
800     MemDepResult Dep;
801 
802     if (ScanPos != DirtyBB->begin()) {
803       Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
804     } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
805       // No dependence found.  If this is the entry block of the function, it is
806       // a clobber, otherwise it is unknown.
807       Dep = MemDepResult::getNonLocal();
808     } else {
809       Dep = MemDepResult::getNonFuncLocal();
810     }
811 
812     // If we had a dirty entry for the block, update it.  Otherwise, just add
813     // a new entry.
814     if (ExistingResult)
815       ExistingResult->setResult(Dep);
816     else
817       Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));
818 
819     // If the block has a dependency (i.e. it isn't completely transparent to
820     // the value), remember the association!
821     if (!Dep.isNonLocal()) {
822       // Keep the ReverseNonLocalDeps map up to date so we can efficiently
823       // update this when we remove instructions.
824       if (Instruction *Inst = Dep.getInst())
825         ReverseNonLocalDeps[Inst].insert(QueryCall);
826     } else {
827 
828       // If the block *is* completely transparent to the load, we need to check
829       // the predecessors of this block.  Add them to our worklist.
830       for (BasicBlock *Pred : PredCache.get(DirtyBB))
831         DirtyBlocks.push_back(Pred);
832     }
833   }
834 
835   return Cache;
836 }
837 
838 void MemoryDependenceResults::getNonLocalPointerDependency(
839     Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
840   const MemoryLocation Loc = MemoryLocation::get(QueryInst);
841   bool isLoad = isa<LoadInst>(QueryInst);
842   BasicBlock *FromBB = QueryInst->getParent();
843   assert(FromBB);
844 
845   assert(Loc.Ptr->getType()->isPointerTy() &&
846          "Can't get pointer deps of a non-pointer!");
847   Result.clear();
848   {
849     // Check if there is cached Def with invariant.group.
850     auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
851     if (NonLocalDefIt != NonLocalDefsCache.end()) {
852       Result.push_back(NonLocalDefIt->second);
853       ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
854           .erase(QueryInst);
855       NonLocalDefsCache.erase(NonLocalDefIt);
856       return;
857     }
858   }
859   // This routine does not expect to deal with volatile instructions.
860   // Doing so would require piping through the QueryInst all the way through.
861   // TODO: volatiles can't be elided, but they can be reordered with other
862   // non-volatile accesses.
863 
864   // We currently give up on any instruction which is ordered, but we do handle
865   // atomic instructions which are unordered.
866   // TODO: Handle ordered instructions
867   auto isOrdered = [](Instruction *Inst) {
868     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
869       return !LI->isUnordered();
870     } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
871       return !SI->isUnordered();
872     }
873     return false;
874   };
875   if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
876     Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
877                                        const_cast<Value *>(Loc.Ptr)));
878     return;
879   }
880   const DataLayout &DL = FromBB->getModule()->getDataLayout();
881   PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);
882 
883   // This is the set of blocks we've inspected, and the pointer we consider in
884   // each block.  Because of critical edges, we currently bail out if querying
885   // a block with multiple different pointers.  This can happen during PHI
886   // translation.
887   DenseMap<BasicBlock *, Value *> Visited;
888   if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
889                                    Result, Visited, true))
890     return;
891   Result.clear();
892   Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
893                                      const_cast<Value *>(Loc.Ptr)));
894 }
895 
896 /// Compute the memdep value for BB with Pointer/PointeeSize using either
897 /// cached information in Cache or by doing a lookup (which may use dirty cache
898 /// info if available).
899 ///
900 /// If we do a lookup, add the result to the cache.
901 MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
902     Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
903     BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
904 
905   // Do a binary search to see if we already have an entry for this block in
906   // the cache set.  If so, find it.
907   NonLocalDepInfo::iterator Entry = std::upper_bound(
908       Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
909   if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
910     --Entry;
911 
912   NonLocalDepEntry *ExistingResult = nullptr;
913   if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
914     ExistingResult = &*Entry;
915 
916   // If we have a cached entry, and it is non-dirty, use it as the value for
917   // this dependency.
918   if (ExistingResult && !ExistingResult->getResult().isDirty()) {
919     ++NumCacheNonLocalPtr;
920     return ExistingResult->getResult();
921   }
922 
923   // Otherwise, we have to scan for the value.  If we have a dirty cache
924   // entry, start scanning from its position, otherwise we scan from the end
925   // of the block.
926   BasicBlock::iterator ScanPos = BB->end();
927   if (ExistingResult && ExistingResult->getResult().getInst()) {
928     assert(ExistingResult->getResult().getInst()->getParent() == BB &&
929            "Instruction invalidated?");
930     ++NumCacheDirtyNonLocalPtr;
931     ScanPos = ExistingResult->getResult().getInst()->getIterator();
932 
933     // Eliminating the dirty entry from 'Cache', so update the reverse info.
934     ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
935     RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
936   } else {
937     ++NumUncacheNonLocalPtr;
938   }
939 
940   // Scan the block for the dependency.
941   MemDepResult Dep =
942       getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);
943 
944   // If we had a dirty entry for the block, update it.  Otherwise, just add
945   // a new entry.
946   if (ExistingResult)
947     ExistingResult->setResult(Dep);
948   else
949     Cache->push_back(NonLocalDepEntry(BB, Dep));
950 
951   // If the block has a dependency (i.e. it isn't completely transparent to
952   // the value), remember the reverse association because we just added it
953   // to Cache!
954   if (!Dep.isDef() && !Dep.isClobber())
955     return Dep;
956 
957   // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
958   // update MemDep when we remove instructions.
959   Instruction *Inst = Dep.getInst();
960   assert(Inst && "Didn't depend on anything?");
961   ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
962   ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
963   return Dep;
964 }
965 
966 /// Sort the NonLocalDepInfo cache, given a certain number of elements in the
967 /// array that are already properly ordered.
968 ///
969 /// This is optimized for the case when only a few entries are added.
970 static void
971 SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
972                          unsigned NumSortedEntries) {
973   switch (Cache.size() - NumSortedEntries) {
974   case 0:
975     // done, no new entries.
976     break;
977   case 2: {
978     // Two new entries, insert the last one into place.
979     NonLocalDepEntry Val = Cache.back();
980     Cache.pop_back();
981     MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
982         std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
983     Cache.insert(Entry, Val);
984     LLVM_FALLTHROUGH;
985   }
986   case 1:
987     // One new entry, Just insert the new value at the appropriate position.
988     if (Cache.size() != 1) {
989       NonLocalDepEntry Val = Cache.back();
990       Cache.pop_back();
991       MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
992           std::upper_bound(Cache.begin(), Cache.end(), Val);
993       Cache.insert(Entry, Val);
994     }
995     break;
996   default:
997     // Added many values, do a full scale sort.
998     llvm::sort(Cache);
999     break;
1000   }
1001 }
1002 
1003 /// Perform a dependency query based on pointer/pointeesize starting at the end
1004 /// of StartBB.
1005 ///
1006 /// Add any clobber/def results to the results vector and keep track of which
1007 /// blocks are visited in 'Visited'.
1008 ///
1009 /// This has special behavior for the first block queries (when SkipFirstBlock
1010 /// is true).  In this special case, it ignores the contents of the specified
1011 /// block and starts returning dependence info for its predecessors.
1012 ///
1013 /// This function returns true on success, or false to indicate that it could
1014 /// not compute dependence information for some reason.  This should be treated
1015 /// as a clobber dependence on the first instruction in the predecessor block.
1016 bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
1017     Instruction *QueryInst, const PHITransAddr &Pointer,
1018     const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
1019     SmallVectorImpl<NonLocalDepResult> &Result,
1020     DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
1021   // Look up the cached info for Pointer.
1022   ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
1023 
1024   // Set up a temporary NLPI value. If the map doesn't yet have an entry for
1025   // CacheKey, this value will be inserted as the associated value. Otherwise,
1026   // it'll be ignored, and we'll have to check to see if the cached size and
1027   // aa tags are consistent with the current query.
1028   NonLocalPointerInfo InitialNLPI;
1029   InitialNLPI.Size = Loc.Size;
1030   InitialNLPI.AATags = Loc.AATags;
1031 
1032   // Get the NLPI for CacheKey, inserting one into the map if it doesn't
1033   // already have one.
1034   std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
1035       NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
1036   NonLocalPointerInfo *CacheInfo = &Pair.first->second;
1037 
1038   // If we already have a cache entry for this CacheKey, we may need to do some
1039   // work to reconcile the cache entry and the current query.
1040   if (!Pair.second) {
1041     if (CacheInfo->Size != Loc.Size) {
1042       bool ThrowOutEverything;
1043       if (CacheInfo->Size.hasValue() && Loc.Size.hasValue()) {
1044         // FIXME: We may be able to do better in the face of results with mixed
1045         // precision. We don't appear to get them in practice, though, so just
1046         // be conservative.
1047         ThrowOutEverything =
1048             CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
1049             CacheInfo->Size.getValue() < Loc.Size.getValue();
1050       } else {
1051         // For our purposes, unknown size > all others.
1052         ThrowOutEverything = !Loc.Size.hasValue();
1053       }
1054 
1055       if (ThrowOutEverything) {
1056         // The query's Size is greater than the cached one. Throw out the
1057         // cached data and proceed with the query at the greater size.
1058         CacheInfo->Pair = BBSkipFirstBlockPair();
1059         CacheInfo->Size = Loc.Size;
1060         for (auto &Entry : CacheInfo->NonLocalDeps)
1061           if (Instruction *Inst = Entry.getResult().getInst())
1062             RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
1063         CacheInfo->NonLocalDeps.clear();
1064       } else {
1065         // This query's Size is less than the cached one. Conservatively restart
1066         // the query using the greater size.
1067         return getNonLocalPointerDepFromBB(
1068             QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
1069             StartBB, Result, Visited, SkipFirstBlock);
1070       }
1071     }
1072 
1073     // If the query's AATags are inconsistent with the cached one,
1074     // conservatively throw out the cached data and restart the query with
1075     // no tag if needed.
1076     if (CacheInfo->AATags != Loc.AATags) {
1077       if (CacheInfo->AATags) {
1078         CacheInfo->Pair = BBSkipFirstBlockPair();
1079         CacheInfo->AATags = AAMDNodes();
1080         for (auto &Entry : CacheInfo->NonLocalDeps)
1081           if (Instruction *Inst = Entry.getResult().getInst())
1082             RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
1083         CacheInfo->NonLocalDeps.clear();
1084       }
1085       if (Loc.AATags)
1086         return getNonLocalPointerDepFromBB(
1087             QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
1088             Visited, SkipFirstBlock);
1089     }
1090   }
1091 
1092   NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
1093 
1094   // If we have valid cached information for exactly the block we are
1095   // investigating, just return it with no recomputation.
1096   if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
1097     // We have a fully cached result for this query then we can just return the
1098     // cached results and populate the visited set.  However, we have to verify
1099     // that we don't already have conflicting results for these blocks.  Check
1100     // to ensure that if a block in the results set is in the visited set that
1101     // it was for the same pointer query.
1102     if (!Visited.empty()) {
1103       for (auto &Entry : *Cache) {
1104         DenseMap<BasicBlock *, Value *>::iterator VI =
1105             Visited.find(Entry.getBB());
1106         if (VI == Visited.end() || VI->second == Pointer.getAddr())
1107           continue;
1108 
1109         // We have a pointer mismatch in a block.  Just return false, saying
1110         // that something was clobbered in this result.  We could also do a
1111         // non-fully cached query, but there is little point in doing this.
1112         return false;
1113       }
1114     }
1115 
1116     Value *Addr = Pointer.getAddr();
1117     for (auto &Entry : *Cache) {
1118       Visited.insert(std::make_pair(Entry.getBB(), Addr));
1119       if (Entry.getResult().isNonLocal()) {
1120         continue;
1121       }
1122 
1123       if (DT.isReachableFromEntry(Entry.getBB())) {
1124         Result.push_back(
1125             NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
1126       }
1127     }
1128     ++NumCacheCompleteNonLocalPtr;
1129     return true;
1130   }
1131 
1132   // Otherwise, either this is a new block, a block with an invalid cache
1133   // pointer or one that we're about to invalidate by putting more info into it
1134   // than its valid cache info.  If empty, the result will be valid cache info,
1135   // otherwise it isn't.
1136   if (Cache->empty())
1137     CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
1138   else
1139     CacheInfo->Pair = BBSkipFirstBlockPair();
1140 
1141   SmallVector<BasicBlock *, 32> Worklist;
1142   Worklist.push_back(StartBB);
1143 
1144   // PredList used inside loop.
1145   SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;
1146 
1147   // Keep track of the entries that we know are sorted.  Previously cached
1148   // entries will all be sorted.  The entries we add we only sort on demand (we
1149   // don't insert every element into its sorted position).  We know that we
1150   // won't get any reuse from currently inserted values, because we don't
1151   // revisit blocks after we insert info for them.
1152   unsigned NumSortedEntries = Cache->size();
1153   unsigned WorklistEntries = BlockNumberLimit;
1154   bool GotWorklistLimit = false;
1155   LLVM_DEBUG(AssertSorted(*Cache));
1156 
1157   while (!Worklist.empty()) {
1158     BasicBlock *BB = Worklist.pop_back_val();
1159 
1160     // If we do process a large number of blocks it becomes very expensive and
1161     // likely it isn't worth worrying about
1162     if (Result.size() > NumResultsLimit) {
1163       Worklist.clear();
1164       // Sort it now (if needed) so that recursive invocations of
1165       // getNonLocalPointerDepFromBB and other routines that could reuse the
1166       // cache value will only see properly sorted cache arrays.
1167       if (Cache && NumSortedEntries != Cache->size()) {
1168         SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1169       }
1170       // Since we bail out, the "Cache" set won't contain all of the
1171       // results for the query.  This is ok (we can still use it to accelerate
1172       // specific block queries) but we can't do the fastpath "return all
1173       // results from the set".  Clear out the indicator for this.
1174       CacheInfo->Pair = BBSkipFirstBlockPair();
1175       return false;
1176     }
1177 
1178     // Skip the first block if we have it.
1179     if (!SkipFirstBlock) {
1180       // Analyze the dependency of *Pointer in FromBB.  See if we already have
1181       // been here.
1182       assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
1183 
1184       // Get the dependency info for Pointer in BB.  If we have cached
1185       // information, we will use it, otherwise we compute it.
1186       LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
1187       MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
1188                                                  Cache, NumSortedEntries);
1189 
1190       // If we got a Def or Clobber, add this to the list of results.
1191       if (!Dep.isNonLocal()) {
1192         if (DT.isReachableFromEntry(BB)) {
1193           Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
1194           continue;
1195         }
1196       }
1197     }
1198 
1199     // If 'Pointer' is an instruction defined in this block, then we need to do
1200     // phi translation to change it into a value live in the predecessor block.
1201     // If not, we just add the predecessors to the worklist and scan them with
1202     // the same Pointer.
1203     if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
1204       SkipFirstBlock = false;
1205       SmallVector<BasicBlock *, 16> NewBlocks;
1206       for (BasicBlock *Pred : PredCache.get(BB)) {
1207         // Verify that we haven't looked at this block yet.
1208         std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1209             Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
1210         if (InsertRes.second) {
1211           // First time we've looked at *PI.
1212           NewBlocks.push_back(Pred);
1213           continue;
1214         }
1215 
1216         // If we have seen this block before, but it was with a different
1217         // pointer then we have a phi translation failure and we have to treat
1218         // this as a clobber.
1219         if (InsertRes.first->second != Pointer.getAddr()) {
1220           // Make sure to clean up the Visited map before continuing on to
1221           // PredTranslationFailure.
1222           for (unsigned i = 0; i < NewBlocks.size(); i++)
1223             Visited.erase(NewBlocks[i]);
1224           goto PredTranslationFailure;
1225         }
1226       }
1227       if (NewBlocks.size() > WorklistEntries) {
1228         // Make sure to clean up the Visited map before continuing on to
1229         // PredTranslationFailure.
1230         for (unsigned i = 0; i < NewBlocks.size(); i++)
1231           Visited.erase(NewBlocks[i]);
1232         GotWorklistLimit = true;
1233         goto PredTranslationFailure;
1234       }
1235       WorklistEntries -= NewBlocks.size();
1236       Worklist.append(NewBlocks.begin(), NewBlocks.end());
1237       continue;
1238     }
1239 
1240     // We do need to do phi translation, if we know ahead of time we can't phi
1241     // translate this value, don't even try.
1242     if (!Pointer.IsPotentiallyPHITranslatable())
1243       goto PredTranslationFailure;
1244 
1245     // We may have added values to the cache list before this PHI translation.
1246     // If so, we haven't done anything to ensure that the cache remains sorted.
1247     // Sort it now (if needed) so that recursive invocations of
1248     // getNonLocalPointerDepFromBB and other routines that could reuse the cache
1249     // value will only see properly sorted cache arrays.
1250     if (Cache && NumSortedEntries != Cache->size()) {
1251       SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1252       NumSortedEntries = Cache->size();
1253     }
1254     Cache = nullptr;
1255 
1256     PredList.clear();
1257     for (BasicBlock *Pred : PredCache.get(BB)) {
1258       PredList.push_back(std::make_pair(Pred, Pointer));
1259 
1260       // Get the PHI translated pointer in this predecessor.  This can fail if
1261       // not translatable, in which case the getAddr() returns null.
1262       PHITransAddr &PredPointer = PredList.back().second;
1263       PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
1264       Value *PredPtrVal = PredPointer.getAddr();
1265 
1266       // Check to see if we have already visited this pred block with another
1267       // pointer.  If so, we can't do this lookup.  This failure can occur
1268       // with PHI translation when a critical edge exists and the PHI node in
1269       // the successor translates to a pointer value different than the
1270       // pointer the block was first analyzed with.
1271       std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1272           Visited.insert(std::make_pair(Pred, PredPtrVal));
1273 
1274       if (!InsertRes.second) {
1275         // We found the pred; take it off the list of preds to visit.
1276         PredList.pop_back();
1277 
1278         // If the predecessor was visited with PredPtr, then we already did
1279         // the analysis and can ignore it.
1280         if (InsertRes.first->second == PredPtrVal)
1281           continue;
1282 
1283         // Otherwise, the block was previously analyzed with a different
1284         // pointer.  We can't represent the result of this case, so we just
1285         // treat this as a phi translation failure.
1286 
1287         // Make sure to clean up the Visited map before continuing on to
1288         // PredTranslationFailure.
1289         for (unsigned i = 0, n = PredList.size(); i < n; ++i)
1290           Visited.erase(PredList[i].first);
1291 
1292         goto PredTranslationFailure;
1293       }
1294     }
1295 
1296     // Actually process results here; this need to be a separate loop to avoid
1297     // calling getNonLocalPointerDepFromBB for blocks we don't want to return
1298     // any results for.  (getNonLocalPointerDepFromBB will modify our
1299     // datastructures in ways the code after the PredTranslationFailure label
1300     // doesn't expect.)
1301     for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
1302       BasicBlock *Pred = PredList[i].first;
1303       PHITransAddr &PredPointer = PredList[i].second;
1304       Value *PredPtrVal = PredPointer.getAddr();
1305 
1306       bool CanTranslate = true;
1307       // If PHI translation was unable to find an available pointer in this
1308       // predecessor, then we have to assume that the pointer is clobbered in
1309       // that predecessor.  We can still do PRE of the load, which would insert
1310       // a computation of the pointer in this predecessor.
1311       if (!PredPtrVal)
1312         CanTranslate = false;
1313 
1314       // FIXME: it is entirely possible that PHI translating will end up with
1315       // the same value.  Consider PHI translating something like:
1316       // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
1317       // to recurse here, pedantically speaking.
1318 
1319       // If getNonLocalPointerDepFromBB fails here, that means the cached
1320       // result conflicted with the Visited list; we have to conservatively
1321       // assume it is unknown, but this also does not block PRE of the load.
1322       if (!CanTranslate ||
1323           !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
1324                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
1325                                       Pred, Result, Visited)) {
1326         // Add the entry to the Result list.
1327         NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
1328         Result.push_back(Entry);
1329 
1330         // Since we had a phi translation failure, the cache for CacheKey won't
1331         // include all of the entries that we need to immediately satisfy future
1332         // queries.  Mark this in NonLocalPointerDeps by setting the
1333         // BBSkipFirstBlockPair pointer to null.  This requires reuse of the
1334         // cached value to do more work but not miss the phi trans failure.
1335         NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
1336         NLPI.Pair = BBSkipFirstBlockPair();
1337         continue;
1338       }
1339     }
1340 
1341     // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
1342     CacheInfo = &NonLocalPointerDeps[CacheKey];
1343     Cache = &CacheInfo->NonLocalDeps;
1344     NumSortedEntries = Cache->size();
1345 
1346     // Since we did phi translation, the "Cache" set won't contain all of the
1347     // results for the query.  This is ok (we can still use it to accelerate
1348     // specific block queries) but we can't do the fastpath "return all
1349     // results from the set"  Clear out the indicator for this.
1350     CacheInfo->Pair = BBSkipFirstBlockPair();
1351     SkipFirstBlock = false;
1352     continue;
1353 
1354   PredTranslationFailure:
1355     // The following code is "failure"; we can't produce a sane translation
1356     // for the given block.  It assumes that we haven't modified any of
1357     // our datastructures while processing the current block.
1358 
1359     if (!Cache) {
1360       // Refresh the CacheInfo/Cache pointer if it got invalidated.
1361       CacheInfo = &NonLocalPointerDeps[CacheKey];
1362       Cache = &CacheInfo->NonLocalDeps;
1363       NumSortedEntries = Cache->size();
1364     }
1365 
1366     // Since we failed phi translation, the "Cache" set won't contain all of the
1367     // results for the query.  This is ok (we can still use it to accelerate
1368     // specific block queries) but we can't do the fastpath "return all
1369     // results from the set".  Clear out the indicator for this.
1370     CacheInfo->Pair = BBSkipFirstBlockPair();
1371 
1372     // If *nothing* works, mark the pointer as unknown.
1373     //
1374     // If this is the magic first block, return this as a clobber of the whole
1375     // incoming value.  Since we can't phi translate to one of the predecessors,
1376     // we have to bail out.
1377     if (SkipFirstBlock)
1378       return false;
1379 
1380     bool foundBlock = false;
1381     for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
1382       if (I.getBB() != BB)
1383         continue;
1384 
1385       assert((GotWorklistLimit || I.getResult().isNonLocal() ||
1386               !DT.isReachableFromEntry(BB)) &&
1387              "Should only be here with transparent block");
1388       foundBlock = true;
1389       I.setResult(MemDepResult::getUnknown());
1390       Result.push_back(
1391           NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
1392       break;
1393     }
1394     (void)foundBlock; (void)GotWorklistLimit;
1395     assert((foundBlock || GotWorklistLimit) && "Current block not in cache?");
1396   }
1397 
1398   // Okay, we're done now.  If we added new values to the cache, re-sort it.
1399   SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1400   LLVM_DEBUG(AssertSorted(*Cache));
1401   return true;
1402 }
1403 
1404 /// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
1405 void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
1406     ValueIsLoadPair P) {
1407 
1408   // Most of the time this cache is empty.
1409   if (!NonLocalDefsCache.empty()) {
1410     auto it = NonLocalDefsCache.find(P.getPointer());
1411     if (it != NonLocalDefsCache.end()) {
1412       RemoveFromReverseMap(ReverseNonLocalDefsCache,
1413                            it->second.getResult().getInst(), P.getPointer());
1414       NonLocalDefsCache.erase(it);
1415     }
1416 
1417     if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
1418       auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
1419       if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
1420         for (const auto *entry : toRemoveIt->second)
1421           NonLocalDefsCache.erase(entry);
1422         ReverseNonLocalDefsCache.erase(toRemoveIt);
1423       }
1424     }
1425   }
1426 
1427   CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
1428   if (It == NonLocalPointerDeps.end())
1429     return;
1430 
1431   // Remove all of the entries in the BB->val map.  This involves removing
1432   // instructions from the reverse map.
1433   NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
1434 
1435   for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
1436     Instruction *Target = PInfo[i].getResult().getInst();
1437     if (!Target)
1438       continue; // Ignore non-local dep results.
1439     assert(Target->getParent() == PInfo[i].getBB());
1440 
1441     // Eliminating the dirty entry from 'Cache', so update the reverse info.
1442     RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
1443   }
1444 
1445   // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
1446   NonLocalPointerDeps.erase(It);
1447 }
1448 
1449 void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
1450   // If Ptr isn't really a pointer, just ignore it.
1451   if (!Ptr->getType()->isPointerTy())
1452     return;
1453   // Flush store info for the pointer.
1454   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1455   // Flush load info for the pointer.
1456   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1457   // Invalidate phis that use the pointer.
1458   PV.invalidateValue(Ptr);
1459 }
1460 
1461 void MemoryDependenceResults::invalidateCachedPredecessors() {
1462   PredCache.clear();
1463 }
1464 
1465 void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
1466   // Walk through the Non-local dependencies, removing this one as the value
1467   // for any cached queries.
1468   NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
1469   if (NLDI != NonLocalDeps.end()) {
1470     NonLocalDepInfo &BlockMap = NLDI->second.first;
1471     for (auto &Entry : BlockMap)
1472       if (Instruction *Inst = Entry.getResult().getInst())
1473         RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
1474     NonLocalDeps.erase(NLDI);
1475   }
1476 
1477   // If we have a cached local dependence query for this instruction, remove it.
1478   LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1479   if (LocalDepEntry != LocalDeps.end()) {
1480     // Remove us from DepInst's reverse set now that the local dep info is gone.
1481     if (Instruction *Inst = LocalDepEntry->second.getInst())
1482       RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
1483 
1484     // Remove this local dependency info.
1485     LocalDeps.erase(LocalDepEntry);
1486   }
1487 
1488   // If we have any cached pointer dependencies on this instruction, remove
1489   // them.  If the instruction has non-pointer type, then it can't be a pointer
1490   // base.
1491 
1492   // Remove it from both the load info and the store info.  The instruction
1493   // can't be in either of these maps if it is non-pointer.
1494   if (RemInst->getType()->isPointerTy()) {
1495     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
1496     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1497   }
1498 
1499   // Loop over all of the things that depend on the instruction we're removing.
1500   SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;
1501 
1502   // If we find RemInst as a clobber or Def in any of the maps for other values,
1503   // we need to replace its entry with a dirty version of the instruction after
1504   // it.  If RemInst is a terminator, we use a null dirty value.
1505   //
1506   // Using a dirty version of the instruction after RemInst saves having to scan
1507   // the entire block to get to this point.
1508   MemDepResult NewDirtyVal;
1509   if (!RemInst->isTerminator())
1510     NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
1511 
1512   ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1513   if (ReverseDepIt != ReverseLocalDeps.end()) {
1514     // RemInst can't be the terminator if it has local stuff depending on it.
1515     assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
1516            "Nothing can locally depend on a terminator");
1517 
1518     for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
1519       assert(InstDependingOnRemInst != RemInst &&
1520              "Already removed our local dep info");
1521 
1522       LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
1523 
1524       // Make sure to remember that new things depend on NewDepInst.
1525       assert(NewDirtyVal.getInst() &&
1526              "There is no way something else can have "
1527              "a local dep on this if it is a terminator!");
1528       ReverseDepsToAdd.push_back(
1529           std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
1530     }
1531 
1532     ReverseLocalDeps.erase(ReverseDepIt);
1533 
1534     // Add new reverse deps after scanning the set, to avoid invalidating the
1535     // 'ReverseDeps' reference.
1536     while (!ReverseDepsToAdd.empty()) {
1537       ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
1538           ReverseDepsToAdd.back().second);
1539       ReverseDepsToAdd.pop_back();
1540     }
1541   }
1542 
1543   ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1544   if (ReverseDepIt != ReverseNonLocalDeps.end()) {
1545     for (Instruction *I : ReverseDepIt->second) {
1546       assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
1547 
1548       PerInstNLInfo &INLD = NonLocalDeps[I];
1549       // The information is now dirty!
1550       INLD.second = true;
1551 
1552       for (auto &Entry : INLD.first) {
1553         if (Entry.getResult().getInst() != RemInst)
1554           continue;
1555 
1556         // Convert to a dirty entry for the subsequent instruction.
1557         Entry.setResult(NewDirtyVal);
1558 
1559         if (Instruction *NextI = NewDirtyVal.getInst())
1560           ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
1561       }
1562     }
1563 
1564     ReverseNonLocalDeps.erase(ReverseDepIt);
1565 
1566     // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
1567     while (!ReverseDepsToAdd.empty()) {
1568       ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
1569           ReverseDepsToAdd.back().second);
1570       ReverseDepsToAdd.pop_back();
1571     }
1572   }
1573 
1574   // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1575   // value in the NonLocalPointerDeps info.
1576   ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
1577       ReverseNonLocalPtrDeps.find(RemInst);
1578   if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
1579     SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
1580         ReversePtrDepsToAdd;
1581 
1582     for (ValueIsLoadPair P : ReversePtrDepIt->second) {
1583       assert(P.getPointer() != RemInst &&
1584              "Already removed NonLocalPointerDeps info for RemInst");
1585 
1586       NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
1587 
1588       // The cache is not valid for any specific block anymore.
1589       NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
1590 
1591       // Update any entries for RemInst to use the instruction after it.
1592       for (auto &Entry : NLPDI) {
1593         if (Entry.getResult().getInst() != RemInst)
1594           continue;
1595 
1596         // Convert to a dirty entry for the subsequent instruction.
1597         Entry.setResult(NewDirtyVal);
1598 
1599         if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1600           ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1601       }
1602 
1603       // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
1604       // subsequent value may invalidate the sortedness.
1605       llvm::sort(NLPDI);
1606     }
1607 
1608     ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
1609 
1610     while (!ReversePtrDepsToAdd.empty()) {
1611       ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
1612           ReversePtrDepsToAdd.back().second);
1613       ReversePtrDepsToAdd.pop_back();
1614     }
1615   }
1616 
1617   // Invalidate phis that use the removed instruction.
1618   PV.invalidateValue(RemInst);
1619 
1620   assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
1621   LLVM_DEBUG(verifyRemoved(RemInst));
1622 }
1623 
1624 /// Verify that the specified instruction does not occur in our internal data
1625 /// structures.
1626 ///
1627 /// This function verifies by asserting in debug builds.
1628 void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
1629 #ifndef NDEBUG
1630   for (const auto &DepKV : LocalDeps) {
1631     assert(DepKV.first != D && "Inst occurs in data structures");
1632     assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
1633   }
1634 
1635   for (const auto &DepKV : NonLocalPointerDeps) {
1636     assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
1637     for (const auto &Entry : DepKV.second.NonLocalDeps)
1638       assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
1639   }
1640 
1641   for (const auto &DepKV : NonLocalDeps) {
1642     assert(DepKV.first != D && "Inst occurs in data structures");
1643     const PerInstNLInfo &INLD = DepKV.second;
1644     for (const auto &Entry : INLD.first)
1645       assert(Entry.getResult().getInst() != D &&
1646              "Inst occurs in data structures");
1647   }
1648 
1649   for (const auto &DepKV : ReverseLocalDeps) {
1650     assert(DepKV.first != D && "Inst occurs in data structures");
1651     for (Instruction *Inst : DepKV.second)
1652       assert(Inst != D && "Inst occurs in data structures");
1653   }
1654 
1655   for (const auto &DepKV : ReverseNonLocalDeps) {
1656     assert(DepKV.first != D && "Inst occurs in data structures");
1657     for (Instruction *Inst : DepKV.second)
1658       assert(Inst != D && "Inst occurs in data structures");
1659   }
1660 
1661   for (const auto &DepKV : ReverseNonLocalPtrDeps) {
1662     assert(DepKV.first != D && "Inst occurs in rev NLPD map");
1663 
1664     for (ValueIsLoadPair P : DepKV.second)
1665       assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
1666              "Inst occurs in ReverseNonLocalPtrDeps map");
1667   }
1668 #endif
1669 }
1670 
1671 AnalysisKey MemoryDependenceAnalysis::Key;
1672 
1673 MemoryDependenceAnalysis::MemoryDependenceAnalysis()
1674     : DefaultBlockScanLimit(BlockScanLimit) {}
1675 
1676 MemoryDependenceResults
1677 MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
1678   auto &AA = AM.getResult<AAManager>(F);
1679   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1680   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1681   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1682   auto &PV = AM.getResult<PhiValuesAnalysis>(F);
1683   return MemoryDependenceResults(AA, AC, TLI, DT, PV, DefaultBlockScanLimit);
1684 }
1685 
1686 char MemoryDependenceWrapperPass::ID = 0;
1687 
1688 INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
1689                       "Memory Dependence Analysis", false, true)
1690 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1691 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
1692 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1693 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1694 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1695 INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
1696                     "Memory Dependence Analysis", false, true)
1697 
1698 MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
1699   initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
1700 }
1701 
1702 MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;
1703 
1704 void MemoryDependenceWrapperPass::releaseMemory() {
1705   MemDep.reset();
1706 }
1707 
1708 void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1709   AU.setPreservesAll();
1710   AU.addRequired<AssumptionCacheTracker>();
1711   AU.addRequired<DominatorTreeWrapperPass>();
1712   AU.addRequired<PhiValuesWrapperPass>();
1713   AU.addRequiredTransitive<AAResultsWrapperPass>();
1714   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1715 }
1716 
1717 bool MemoryDependenceResults::invalidate(Function &F, const PreservedAnalyses &PA,
1718                                FunctionAnalysisManager::Invalidator &Inv) {
1719   // Check whether our analysis is preserved.
1720   auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
1721   if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
1722     // If not, give up now.
1723     return true;
1724 
1725   // Check whether the analyses we depend on became invalid for any reason.
1726   if (Inv.invalidate<AAManager>(F, PA) ||
1727       Inv.invalidate<AssumptionAnalysis>(F, PA) ||
1728       Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
1729       Inv.invalidate<PhiValuesAnalysis>(F, PA))
1730     return true;
1731 
1732   // Otherwise this analysis result remains valid.
1733   return false;
1734 }
1735 
1736 unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
1737   return DefaultBlockScanLimit;
1738 }
1739 
1740 bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
1741   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1742   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1743   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1744   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1745   auto &PV = getAnalysis<PhiValuesWrapperPass>().getResult();
1746   MemDep.emplace(AA, AC, TLI, DT, PV, BlockScanLimit);
1747   return false;
1748 }
1749