1 //===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements an analysis that determines, for a given memory 11 // operation, what preceding memory operations it depends on. It builds on 12 // alias analysis information, and tries to provide a lazy, caching interface to 13 // a common kind of alias information query. 14 // 15 //===----------------------------------------------------------------------===// 16 17 #include "llvm/Analysis/MemoryDependenceAnalysis.h" 18 #include "llvm/ADT/STLExtras.h" 19 #include "llvm/ADT/Statistic.h" 20 #include "llvm/Analysis/AliasAnalysis.h" 21 #include "llvm/Analysis/AssumptionCache.h" 22 #include "llvm/Analysis/InstructionSimplify.h" 23 #include "llvm/Analysis/MemoryBuiltins.h" 24 #include "llvm/Analysis/PHITransAddr.h" 25 #include "llvm/Analysis/ValueTracking.h" 26 #include "llvm/IR/DataLayout.h" 27 #include "llvm/IR/Dominators.h" 28 #include "llvm/IR/Function.h" 29 #include "llvm/IR/Instructions.h" 30 #include "llvm/IR/IntrinsicInst.h" 31 #include "llvm/IR/LLVMContext.h" 32 #include "llvm/IR/PredIteratorCache.h" 33 #include "llvm/Support/Debug.h" 34 using namespace llvm; 35 36 #define DEBUG_TYPE "memdep" 37 38 STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses"); 39 STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses"); 40 STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses"); 41 42 STATISTIC(NumCacheNonLocalPtr, 43 "Number of fully cached non-local ptr responses"); 44 STATISTIC(NumCacheDirtyNonLocalPtr, 45 "Number of cached, but dirty, non-local ptr responses"); 46 STATISTIC(NumUncacheNonLocalPtr, 47 "Number of uncached non-local ptr responses"); 48 STATISTIC(NumCacheCompleteNonLocalPtr, 49 "Number of block queries that were completely cached"); 50 51 // Limit for the number of instructions to scan in a block. 52 53 static cl::opt<unsigned> BlockScanLimit( 54 "memdep-block-scan-limit", cl::Hidden, cl::init(100), 55 cl::desc("The number of instructions to scan in a block in memory " 56 "dependency analysis (default = 100)")); 57 58 // Limit on the number of memdep results to process. 59 static const unsigned int NumResultsLimit = 100; 60 61 char MemoryDependenceAnalysis::ID = 0; 62 63 // Register this pass... 64 INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep", 65 "Memory Dependence Analysis", false, true) 66 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 67 INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 68 INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep", 69 "Memory Dependence Analysis", false, true) 70 71 MemoryDependenceAnalysis::MemoryDependenceAnalysis() 72 : FunctionPass(ID) { 73 initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry()); 74 } 75 MemoryDependenceAnalysis::~MemoryDependenceAnalysis() { 76 } 77 78 /// Clean up memory in between runs 79 void MemoryDependenceAnalysis::releaseMemory() { 80 LocalDeps.clear(); 81 NonLocalDeps.clear(); 82 NonLocalPointerDeps.clear(); 83 ReverseLocalDeps.clear(); 84 ReverseNonLocalDeps.clear(); 85 ReverseNonLocalPtrDeps.clear(); 86 PredCache.clear(); 87 } 88 89 /// getAnalysisUsage - Does not modify anything. It uses Alias Analysis. 
90 /// 91 void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { 92 AU.setPreservesAll(); 93 AU.addRequired<AssumptionCacheTracker>(); 94 AU.addRequiredTransitive<AliasAnalysis>(); 95 } 96 97 bool MemoryDependenceAnalysis::runOnFunction(Function &F) { 98 AA = &getAnalysis<AliasAnalysis>(); 99 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 100 DominatorTreeWrapperPass *DTWP = 101 getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 102 DT = DTWP ? &DTWP->getDomTree() : nullptr; 103 return false; 104 } 105 106 /// RemoveFromReverseMap - This is a helper function that removes Val from 107 /// 'Inst's set in ReverseMap. If the set becomes empty, remove Inst's entry. 108 template <typename KeyTy> 109 static void RemoveFromReverseMap(DenseMap<Instruction*, 110 SmallPtrSet<KeyTy, 4> > &ReverseMap, 111 Instruction *Inst, KeyTy Val) { 112 typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator 113 InstIt = ReverseMap.find(Inst); 114 assert(InstIt != ReverseMap.end() && "Reverse map out of sync?"); 115 bool Found = InstIt->second.erase(Val); 116 assert(Found && "Invalid reverse map!"); (void)Found; 117 if (InstIt->second.empty()) 118 ReverseMap.erase(InstIt); 119 } 120 121 /// GetLocation - If the given instruction references a specific memory 122 /// location, fill in Loc with the details, otherwise set Loc.Ptr to null. 123 /// Return a ModRefInfo value describing the general behavior of the 124 /// instruction. 125 static AliasAnalysis::ModRefResult 126 GetLocation(const Instruction *Inst, MemoryLocation &Loc, AliasAnalysis *AA) { 127 if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 128 if (LI->isUnordered()) { 129 Loc = MemoryLocation::get(LI); 130 return AliasAnalysis::Ref; 131 } 132 if (LI->getOrdering() == Monotonic) { 133 Loc = MemoryLocation::get(LI); 134 return AliasAnalysis::ModRef; 135 } 136 Loc = MemoryLocation(); 137 return AliasAnalysis::ModRef; 138 } 139 140 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 141 if (SI->isUnordered()) { 142 Loc = MemoryLocation::get(SI); 143 return AliasAnalysis::Mod; 144 } 145 if (SI->getOrdering() == Monotonic) { 146 Loc = MemoryLocation::get(SI); 147 return AliasAnalysis::ModRef; 148 } 149 Loc = MemoryLocation(); 150 return AliasAnalysis::ModRef; 151 } 152 153 if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) { 154 Loc = MemoryLocation::get(V); 155 return AliasAnalysis::ModRef; 156 } 157 158 if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) { 159 // calls to free() deallocate the entire structure 160 Loc = MemoryLocation(CI->getArgOperand(0)); 161 return AliasAnalysis::Mod; 162 } 163 164 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 165 AAMDNodes AAInfo; 166 167 switch (II->getIntrinsicID()) { 168 case Intrinsic::lifetime_start: 169 case Intrinsic::lifetime_end: 170 case Intrinsic::invariant_start: 171 II->getAAMetadata(AAInfo); 172 Loc = MemoryLocation( 173 II->getArgOperand(1), 174 cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo); 175 // These intrinsics don't really modify the memory, but returning Mod 176 // will allow them to be handled conservatively. 177 return AliasAnalysis::Mod; 178 case Intrinsic::invariant_end: 179 II->getAAMetadata(AAInfo); 180 Loc = MemoryLocation( 181 II->getArgOperand(2), 182 cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo); 183 // These intrinsics don't really modify the memory, but returning Mod 184 // will allow them to be handled conservatively. 
185 return AliasAnalysis::Mod; 186 default: 187 break; 188 } 189 } 190 191 // Otherwise, just do the coarse-grained thing that always works. 192 if (Inst->mayWriteToMemory()) 193 return AliasAnalysis::ModRef; 194 if (Inst->mayReadFromMemory()) 195 return AliasAnalysis::Ref; 196 return AliasAnalysis::NoModRef; 197 } 198 199 /// getCallSiteDependencyFrom - Private helper for finding the local 200 /// dependencies of a call site. 201 MemDepResult MemoryDependenceAnalysis:: 202 getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall, 203 BasicBlock::iterator ScanIt, BasicBlock *BB) { 204 unsigned Limit = BlockScanLimit; 205 206 // Walk backwards through the block, looking for dependencies 207 while (ScanIt != BB->begin()) { 208 // Limit the amount of scanning we do so we don't end up with quadratic 209 // running time on extreme testcases. 210 --Limit; 211 if (!Limit) 212 return MemDepResult::getUnknown(); 213 214 Instruction *Inst = --ScanIt; 215 216 // If this inst is a memory op, get the pointer it accessed 217 MemoryLocation Loc; 218 AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA); 219 if (Loc.Ptr) { 220 // A simple instruction. 221 if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef) 222 return MemDepResult::getClobber(Inst); 223 continue; 224 } 225 226 if (auto InstCS = CallSite(Inst)) { 227 // Debug intrinsics don't cause dependences. 228 if (isa<DbgInfoIntrinsic>(Inst)) continue; 229 // If these two calls do not interfere, look past it. 230 switch (AA->getModRefInfo(CS, InstCS)) { 231 case AliasAnalysis::NoModRef: 232 // If the two calls are the same, return InstCS as a Def, so that 233 // CS can be found redundant and eliminated. 234 if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) && 235 CS.getInstruction()->isIdenticalToWhenDefined(Inst)) 236 return MemDepResult::getDef(Inst); 237 238 // Otherwise if the two calls don't interact (e.g. InstCS is readnone) 239 // keep scanning. 240 continue; 241 default: 242 return MemDepResult::getClobber(Inst); 243 } 244 } 245 246 // If we could not obtain a pointer for the instruction and the instruction 247 // touches memory then assume that this is a dependency. 248 if (MR != AliasAnalysis::NoModRef) 249 return MemDepResult::getClobber(Inst); 250 } 251 252 // No dependence found. If this is the entry block of the function, it is 253 // unknown, otherwise it is non-local. 254 if (BB != &BB->getParent()->getEntryBlock()) 255 return MemDepResult::getNonLocal(); 256 return MemDepResult::getNonFuncLocal(); 257 } 258 259 /// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that 260 /// would fully overlap MemLoc if done as a wider legal integer load. 261 /// 262 /// MemLocBase, MemLocOffset are lazily computed here the first time the 263 /// base/offs of memloc is needed. 264 static bool isLoadLoadClobberIfExtendedToFullWidth(const MemoryLocation &MemLoc, 265 const Value *&MemLocBase, 266 int64_t &MemLocOffs, 267 const LoadInst *LI) { 268 const DataLayout &DL = LI->getModule()->getDataLayout(); 269 270 // If we haven't already computed the base/offset of MemLoc, do so now. 
271 if (!MemLocBase) 272 MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL); 273 274 unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize( 275 MemLocBase, MemLocOffs, MemLoc.Size, LI); 276 return Size != 0; 277 } 278 279 /// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that 280 /// looks at a memory location for a load (specified by MemLocBase, Offs, 281 /// and Size) and compares it against a load. If the specified load could 282 /// be safely widened to a larger integer load that is 1) still efficient, 283 /// 2) safe for the target, and 3) would provide the specified memory 284 /// location value, then this function returns the size in bytes of the 285 /// load width to use. If not, this returns zero. 286 unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize( 287 const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize, 288 const LoadInst *LI) { 289 // We can only extend simple integer loads. 290 if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0; 291 292 // Load widening is hostile to ThreadSanitizer: it may cause false positives 293 // or make the reports more cryptic (access sizes are wrong). 294 if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread)) 295 return 0; 296 297 const DataLayout &DL = LI->getModule()->getDataLayout(); 298 299 // Get the base of this load. 300 int64_t LIOffs = 0; 301 const Value *LIBase = 302 GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL); 303 304 // If the two pointers are not based on the same pointer, we can't tell that 305 // they are related. 306 if (LIBase != MemLocBase) return 0; 307 308 // Okay, the two values are based on the same pointer, but returned as 309 // no-alias. This happens when we have things like two byte loads at "P+1" 310 // and "P+3". Check to see if increasing the size of the "LI" load up to its 311 // alignment (or the largest native integer type) will allow us to load all 312 // the bits required by MemLoc. 313 314 // If MemLoc is before LI, then no widening of LI will help us out. 315 if (MemLocOffs < LIOffs) return 0; 316 317 // Get the alignment of the load in bytes. We assume that it is safe to load 318 // any legal integer up to this size without a problem. For example, if we're 319 // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can 320 // widen it up to an i32 load. If it is known 2-byte aligned, we can widen it 321 // to i16. 322 unsigned LoadAlign = LI->getAlignment(); 323 324 int64_t MemLocEnd = MemLocOffs+MemLocSize; 325 326 // If no amount of rounding up will let MemLoc fit into LI, then bail out. 327 if (LIOffs+LoadAlign < MemLocEnd) return 0; 328 329 // This is the size of the load to try. Start with the next larger power of 330 // two. 331 unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U; 332 NewLoadByteSize = NextPowerOf2(NewLoadByteSize); 333 334 while (1) { 335 // If this load size is bigger than our known alignment or would not fit 336 // into a native integer register, then we fail. 337 if (NewLoadByteSize > LoadAlign || 338 !DL.fitsInLegalInteger(NewLoadByteSize*8)) 339 return 0; 340 341 if (LIOffs + NewLoadByteSize > MemLocEnd && 342 LI->getParent()->getParent()->hasFnAttribute( 343 Attribute::SanitizeAddress)) 344 // We will be reading past the location accessed by the original program. 345 // While this is safe in a regular build, Address Safety analysis tools 346 // may start reporting false warnings. 
// So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}

static bool isVolatile(Instruction *Inst) {
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}


/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases
/// with read-only operations.  If isLoad is false, this routine ignores
/// may-aliases with reads from read-only locations.  If possible, pass the
/// query instruction as well; this function may take advantage of the
/// metadata annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst) {

  const Value *MemLocBase = nullptr;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program, however, is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimisation of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // Walk backwards through the basic block, looking for dependencies.
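  // Each iteration of the loop below either proves that Inst cannot affect
  // the queried location (and keeps scanning), or stops and reports Inst as
  // the dependency: a Def when the overlap is exact enough for value
  // forwarding, a Clobber for may-alias writes and for atomic or volatile
  // accesses we must not reorder across, or Unknown once the scan limit is
  // exhausted. If we reach the top of the block with no answer, the result
  // becomes non-local (non-func-local for the entry block).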
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II)) continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load
    // that it does not alias with, when this atomic load indicates that
    // another thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {

      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && LI->getOrdering() > Unordered) {
        if (!QueryInst)
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != Monotonic)
          return MemDepResult::getClobber(LI);
        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
          if (!QueryLI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
          if (!QuerySI->isSimple())
            return MemDepResult::getClobber(LI);
        } else if (QueryInst->mayReadOrWriteMemory()) {
          return MemDepResult::getClobber(LI);
        }
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with
          // the queried location if widened to a larger load (e.g. if the
          // queried location is 1 byte at P+1).  If so, return it as a
          // load/load clobber result, allowing the client to decide to widen
          // the load if it wants to.
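          // For example, assuming i16 is a legal integer type on the target:
          // for a query of the single byte at P+1 against "load i8* %P,
          // align 4", the two locations do not alias, but the i8 load can be
          // grown to the next power-of-two width that still fits within its
          // alignment; an i16 load covering bytes [P, P+2) already includes
          // P+1, so getLoadLoadClobberFullWidthSize returns a nonzero width
          // and we report a load/load clobber here.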
503 if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) { 504 if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() && 505 isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase, 506 MemLocOffset, LI)) 507 return MemDepResult::getClobber(Inst); 508 } 509 continue; 510 } 511 512 // Must aliased loads are defs of each other. 513 if (R == MustAlias) 514 return MemDepResult::getDef(Inst); 515 516 #if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads 517 // in terms of clobbering loads, but since it does this by looking 518 // at the clobbering load directly, it doesn't know about any 519 // phi translation that may have happened along the way. 520 521 // If we have a partial alias, then return this as a clobber for the 522 // client to handle. 523 if (R == PartialAlias) 524 return MemDepResult::getClobber(Inst); 525 #endif 526 527 // Random may-alias loads don't depend on each other without a 528 // dependence. 529 continue; 530 } 531 532 // Stores don't depend on other no-aliased accesses. 533 if (R == NoAlias) 534 continue; 535 536 // Stores don't alias loads from read-only memory. 537 if (AA->pointsToConstantMemory(LoadLoc)) 538 continue; 539 540 // Stores depend on may/must aliased loads. 541 return MemDepResult::getDef(Inst); 542 } 543 544 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 545 // Atomic stores have complications involved. 546 // A Monotonic store is OK if the query inst is itself not atomic. 547 // FIXME: This is overly conservative. 548 if (!SI->isUnordered()) { 549 if (!QueryInst) 550 return MemDepResult::getClobber(SI); 551 if (SI->getOrdering() != Monotonic) 552 return MemDepResult::getClobber(SI); 553 if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) { 554 if (!QueryLI->isSimple()) 555 return MemDepResult::getClobber(SI); 556 } else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) { 557 if (!QuerySI->isSimple()) 558 return MemDepResult::getClobber(SI); 559 } else if (QueryInst->mayReadOrWriteMemory()) { 560 return MemDepResult::getClobber(SI); 561 } 562 } 563 564 // FIXME: this is overly conservative. 565 // While volatile access cannot be eliminated, they do not have to clobber 566 // non-aliasing locations, as normal accesses can for example be reordered 567 // with volatile accesses. 568 if (SI->isVolatile()) 569 return MemDepResult::getClobber(SI); 570 571 // If alias analysis can tell that this store is guaranteed to not modify 572 // the query pointer, ignore it. Use getModRefInfo to handle cases where 573 // the query pointer points to constant memory etc. 574 if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef) 575 continue; 576 577 // Ok, this store might clobber the query pointer. Check to see if it is 578 // a must alias: in this case, we want to return this as a def. 579 MemoryLocation StoreLoc = MemoryLocation::get(SI); 580 581 // If we found a pointer, check if it could be the same as our pointer. 582 AliasResult R = AA->alias(StoreLoc, MemLoc); 583 584 if (R == NoAlias) 585 continue; 586 if (R == MustAlias) 587 return MemDepResult::getDef(Inst); 588 if (isInvariantLoad) 589 continue; 590 return MemDepResult::getClobber(Inst); 591 } 592 593 // If this is an allocation, and if we know that the accessed pointer is to 594 // the allocation, return Def. This means that there is no dependence and 595 // the access can be optimized based on that. For example, a load could 596 // turn into undef. 
597 // Note: Only determine this to be a malloc if Inst is the malloc call, not 598 // a subsequent bitcast of the malloc call result. There can be stores to 599 // the malloced memory between the malloc call and its bitcast uses, and we 600 // need to continue scanning until the malloc call. 601 const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo(); 602 if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) { 603 const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL); 604 605 if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr)) 606 return MemDepResult::getDef(Inst); 607 if (isInvariantLoad) 608 continue; 609 // Be conservative if the accessed pointer may alias the allocation. 610 if (AA->alias(Inst, AccessPtr) != NoAlias) 611 return MemDepResult::getClobber(Inst); 612 // If the allocation is not aliased and does not read memory (like 613 // strdup), it is safe to ignore. 614 if (isa<AllocaInst>(Inst) || 615 isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI)) 616 continue; 617 } 618 619 if (isInvariantLoad) 620 continue; 621 622 // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer. 623 AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc); 624 // If necessary, perform additional analysis. 625 if (MR == AliasAnalysis::ModRef) 626 MR = AA->callCapturesBefore(Inst, MemLoc, DT); 627 switch (MR) { 628 case AliasAnalysis::NoModRef: 629 // If the call has no effect on the queried pointer, just ignore it. 630 continue; 631 case AliasAnalysis::Mod: 632 return MemDepResult::getClobber(Inst); 633 case AliasAnalysis::Ref: 634 // If the call is known to never store to the pointer, and if this is a 635 // load query, we can safely ignore it (scan past it). 636 if (isLoad) 637 continue; 638 default: 639 // Otherwise, there is a potential dependence. Return a clobber. 640 return MemDepResult::getClobber(Inst); 641 } 642 } 643 644 // No dependence found. If this is the entry block of the function, it is 645 // unknown, otherwise it is non-local. 646 if (BB != &BB->getParent()->getEntryBlock()) 647 return MemDepResult::getNonLocal(); 648 return MemDepResult::getNonFuncLocal(); 649 } 650 651 /// getDependency - Return the instruction on which a memory operation 652 /// depends. 653 MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) { 654 Instruction *ScanPos = QueryInst; 655 656 // Check for a cached result 657 MemDepResult &LocalCache = LocalDeps[QueryInst]; 658 659 // If the cached entry is non-dirty, just return it. Note that this depends 660 // on MemDepResult's default constructing to 'dirty'. 661 if (!LocalCache.isDirty()) 662 return LocalCache; 663 664 // Otherwise, if we have a dirty entry, we know we can start the scan at that 665 // instruction, which may save us some work. 666 if (Instruction *Inst = LocalCache.getInst()) { 667 ScanPos = Inst; 668 669 RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst); 670 } 671 672 BasicBlock *QueryParent = QueryInst->getParent(); 673 674 // Do the scan. 675 if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) { 676 // No dependence found. If this is the entry block of the function, it is 677 // unknown, otherwise it is non-local. 
678 if (QueryParent != &QueryParent->getParent()->getEntryBlock()) 679 LocalCache = MemDepResult::getNonLocal(); 680 else 681 LocalCache = MemDepResult::getNonFuncLocal(); 682 } else { 683 MemoryLocation MemLoc; 684 AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA); 685 if (MemLoc.Ptr) { 686 // If we can do a pointer scan, make it happen. 687 bool isLoad = !(MR & AliasAnalysis::Mod); 688 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst)) 689 isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start; 690 691 LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos, 692 QueryParent, QueryInst); 693 } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) { 694 CallSite QueryCS(QueryInst); 695 bool isReadOnly = AA->onlyReadsMemory(QueryCS); 696 LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos, 697 QueryParent); 698 } else 699 // Non-memory instruction. 700 LocalCache = MemDepResult::getUnknown(); 701 } 702 703 // Remember the result! 704 if (Instruction *I = LocalCache.getInst()) 705 ReverseLocalDeps[I].insert(QueryInst); 706 707 return LocalCache; 708 } 709 710 #ifndef NDEBUG 711 /// AssertSorted - This method is used when -debug is specified to verify that 712 /// cache arrays are properly kept sorted. 713 static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache, 714 int Count = -1) { 715 if (Count == -1) Count = Cache.size(); 716 if (Count == 0) return; 717 718 for (unsigned i = 1; i != unsigned(Count); ++i) 719 assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!"); 720 } 721 #endif 722 723 /// getNonLocalCallDependency - Perform a full dependency query for the 724 /// specified call, returning the set of blocks that the value is 725 /// potentially live across. The returned set of results will include a 726 /// "NonLocal" result for all blocks where the value is live across. 727 /// 728 /// This method assumes the instruction returns a "NonLocal" dependency 729 /// within its own block. 730 /// 731 /// This returns a reference to an internal data structure that may be 732 /// invalidated on the next non-local query or when an instruction is 733 /// removed. Clients must copy this data if they want it around longer than 734 /// that. 735 const MemoryDependenceAnalysis::NonLocalDepInfo & 736 MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { 737 assert(getDependency(QueryCS.getInstruction()).isNonLocal() && 738 "getNonLocalCallDependency should only be used on calls with non-local deps!"); 739 PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()]; 740 NonLocalDepInfo &Cache = CacheP.first; 741 742 /// DirtyBlocks - This is the set of blocks that need to be recomputed. In 743 /// the cached case, this can happen due to instructions being deleted etc. In 744 /// the uncached case, this starts out as the set of predecessors we care 745 /// about. 746 SmallVector<BasicBlock*, 32> DirtyBlocks; 747 748 if (!Cache.empty()) { 749 // Okay, we have a cache entry. If we know it is not dirty, just return it 750 // with no computation. 751 if (!CacheP.second) { 752 ++NumCacheNonLocal; 753 return Cache; 754 } 755 756 // If we already have a partially computed set of results, scan them to 757 // determine what is dirty, seeding our initial DirtyBlocks worklist. 
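    // (An entry is "dirty" when the instruction it recorded as the dependency
    // has since been removed: removeInstruction marks dependent entries dirty
    // rather than recomputing them eagerly, so we recompute them lazily here.)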
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock *Pred : PredCache.get(QueryBB))
      DirtyBlocks.push_back(Pred);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos,
                                      DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
852 if (Instruction *Inst = Dep.getInst()) 853 ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction()); 854 } else { 855 856 // If the block *is* completely transparent to the load, we need to check 857 // the predecessors of this block. Add them to our worklist. 858 for (BasicBlock *Pred : PredCache.get(DirtyBB)) 859 DirtyBlocks.push_back(Pred); 860 } 861 } 862 863 return Cache; 864 } 865 866 /// getNonLocalPointerDependency - Perform a full dependency query for an 867 /// access to the specified (non-volatile) memory location, returning the 868 /// set of instructions that either define or clobber the value. 869 /// 870 /// This method assumes the pointer has a "NonLocal" dependency within its 871 /// own block. 872 /// 873 void MemoryDependenceAnalysis:: 874 getNonLocalPointerDependency(Instruction *QueryInst, 875 SmallVectorImpl<NonLocalDepResult> &Result) { 876 const MemoryLocation Loc = MemoryLocation::get(QueryInst); 877 bool isLoad = isa<LoadInst>(QueryInst); 878 BasicBlock *FromBB = QueryInst->getParent(); 879 assert(FromBB); 880 881 assert(Loc.Ptr->getType()->isPointerTy() && 882 "Can't get pointer deps of a non-pointer!"); 883 Result.clear(); 884 885 // This routine does not expect to deal with volatile instructions. 886 // Doing so would require piping through the QueryInst all the way through. 887 // TODO: volatiles can't be elided, but they can be reordered with other 888 // non-volatile accesses. 889 890 // We currently give up on any instruction which is ordered, but we do handle 891 // atomic instructions which are unordered. 892 // TODO: Handle ordered instructions 893 auto isOrdered = [](Instruction *Inst) { 894 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 895 return !LI->isUnordered(); 896 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 897 return !SI->isUnordered(); 898 } 899 return false; 900 }; 901 if (isVolatile(QueryInst) || isOrdered(QueryInst)) { 902 Result.push_back(NonLocalDepResult(FromBB, 903 MemDepResult::getUnknown(), 904 const_cast<Value *>(Loc.Ptr))); 905 return; 906 } 907 const DataLayout &DL = FromBB->getModule()->getDataLayout(); 908 PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AC); 909 910 // This is the set of blocks we've inspected, and the pointer we consider in 911 // each block. Because of critical edges, we currently bail out if querying 912 // a block with multiple different pointers. This can happen during PHI 913 // translation. 914 DenseMap<BasicBlock*, Value*> Visited; 915 if (!getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB, 916 Result, Visited, true)) 917 return; 918 Result.clear(); 919 Result.push_back(NonLocalDepResult(FromBB, 920 MemDepResult::getUnknown(), 921 const_cast<Value *>(Loc.Ptr))); 922 } 923 924 /// GetNonLocalInfoForBlock - Compute the memdep value for BB with 925 /// Pointer/PointeeSize using either cached information in Cache or by doing a 926 /// lookup (which may use dirty cache info if available). If we do a lookup, 927 /// add the result to the cache. 928 MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock( 929 Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad, 930 BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) { 931 932 // Do a binary search to see if we already have an entry for this block in 933 // the cache set. If so, find it. 
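  // (Cache entries are NonLocalDepEntry values kept sorted by their
  // BasicBlock pointer, so upper_bound over the sorted prefix returns the
  // first entry past BB; stepping back one entry, when possible, lands on
  // BB's entry if it exists.)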
934 NonLocalDepInfo::iterator Entry = 935 std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries, 936 NonLocalDepEntry(BB)); 937 if (Entry != Cache->begin() && (Entry-1)->getBB() == BB) 938 --Entry; 939 940 NonLocalDepEntry *ExistingResult = nullptr; 941 if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB) 942 ExistingResult = &*Entry; 943 944 // If we have a cached entry, and it is non-dirty, use it as the value for 945 // this dependency. 946 if (ExistingResult && !ExistingResult->getResult().isDirty()) { 947 ++NumCacheNonLocalPtr; 948 return ExistingResult->getResult(); 949 } 950 951 // Otherwise, we have to scan for the value. If we have a dirty cache 952 // entry, start scanning from its position, otherwise we scan from the end 953 // of the block. 954 BasicBlock::iterator ScanPos = BB->end(); 955 if (ExistingResult && ExistingResult->getResult().getInst()) { 956 assert(ExistingResult->getResult().getInst()->getParent() == BB && 957 "Instruction invalidated?"); 958 ++NumCacheDirtyNonLocalPtr; 959 ScanPos = ExistingResult->getResult().getInst(); 960 961 // Eliminating the dirty entry from 'Cache', so update the reverse info. 962 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad); 963 RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey); 964 } else { 965 ++NumUncacheNonLocalPtr; 966 } 967 968 // Scan the block for the dependency. 969 MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, 970 QueryInst); 971 972 // If we had a dirty entry for the block, update it. Otherwise, just add 973 // a new entry. 974 if (ExistingResult) 975 ExistingResult->setResult(Dep); 976 else 977 Cache->push_back(NonLocalDepEntry(BB, Dep)); 978 979 // If the block has a dependency (i.e. it isn't completely transparent to 980 // the value), remember the reverse association because we just added it 981 // to Cache! 982 if (!Dep.isDef() && !Dep.isClobber()) 983 return Dep; 984 985 // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently 986 // update MemDep when we remove instructions. 987 Instruction *Inst = Dep.getInst(); 988 assert(Inst && "Didn't depend on anything?"); 989 ValueIsLoadPair CacheKey(Loc.Ptr, isLoad); 990 ReverseNonLocalPtrDeps[Inst].insert(CacheKey); 991 return Dep; 992 } 993 994 /// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain 995 /// number of elements in the array that are already properly ordered. This is 996 /// optimized for the case when only a few entries are added. 997 static void 998 SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache, 999 unsigned NumSortedEntries) { 1000 switch (Cache.size() - NumSortedEntries) { 1001 case 0: 1002 // done, no new entries. 1003 break; 1004 case 2: { 1005 // Two new entries, insert the last one into place. 1006 NonLocalDepEntry Val = Cache.back(); 1007 Cache.pop_back(); 1008 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry = 1009 std::upper_bound(Cache.begin(), Cache.end()-1, Val); 1010 Cache.insert(Entry, Val); 1011 // FALL THROUGH. 1012 } 1013 case 1: 1014 // One new entry, Just insert the new value at the appropriate position. 1015 if (Cache.size() != 1) { 1016 NonLocalDepEntry Val = Cache.back(); 1017 Cache.pop_back(); 1018 MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry = 1019 std::upper_bound(Cache.begin(), Cache.end(), Val); 1020 Cache.insert(Entry, Val); 1021 } 1022 break; 1023 default: 1024 // Added many values, do a full scale sort. 
1025 std::sort(Cache.begin(), Cache.end()); 1026 break; 1027 } 1028 } 1029 1030 /// getNonLocalPointerDepFromBB - Perform a dependency query based on 1031 /// pointer/pointeesize starting at the end of StartBB. Add any clobber/def 1032 /// results to the results vector and keep track of which blocks are visited in 1033 /// 'Visited'. 1034 /// 1035 /// This has special behavior for the first block queries (when SkipFirstBlock 1036 /// is true). In this special case, it ignores the contents of the specified 1037 /// block and starts returning dependence info for its predecessors. 1038 /// 1039 /// This function returns false on success, or true to indicate that it could 1040 /// not compute dependence information for some reason. This should be treated 1041 /// as a clobber dependence on the first instruction in the predecessor block. 1042 bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB( 1043 Instruction *QueryInst, const PHITransAddr &Pointer, 1044 const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB, 1045 SmallVectorImpl<NonLocalDepResult> &Result, 1046 DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) { 1047 // Look up the cached info for Pointer. 1048 ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad); 1049 1050 // Set up a temporary NLPI value. If the map doesn't yet have an entry for 1051 // CacheKey, this value will be inserted as the associated value. Otherwise, 1052 // it'll be ignored, and we'll have to check to see if the cached size and 1053 // aa tags are consistent with the current query. 1054 NonLocalPointerInfo InitialNLPI; 1055 InitialNLPI.Size = Loc.Size; 1056 InitialNLPI.AATags = Loc.AATags; 1057 1058 // Get the NLPI for CacheKey, inserting one into the map if it doesn't 1059 // already have one. 1060 std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair = 1061 NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI)); 1062 NonLocalPointerInfo *CacheInfo = &Pair.first->second; 1063 1064 // If we already have a cache entry for this CacheKey, we may need to do some 1065 // work to reconcile the cache entry and the current query. 1066 if (!Pair.second) { 1067 if (CacheInfo->Size < Loc.Size) { 1068 // The query's Size is greater than the cached one. Throw out the 1069 // cached data and proceed with the query at the greater size. 1070 CacheInfo->Pair = BBSkipFirstBlockPair(); 1071 CacheInfo->Size = Loc.Size; 1072 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(), 1073 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI) 1074 if (Instruction *Inst = DI->getResult().getInst()) 1075 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey); 1076 CacheInfo->NonLocalDeps.clear(); 1077 } else if (CacheInfo->Size > Loc.Size) { 1078 // This query's Size is less than the cached one. Conservatively restart 1079 // the query using the greater size. 1080 return getNonLocalPointerDepFromBB(QueryInst, Pointer, 1081 Loc.getWithNewSize(CacheInfo->Size), 1082 isLoad, StartBB, Result, Visited, 1083 SkipFirstBlock); 1084 } 1085 1086 // If the query's AATags are inconsistent with the cached one, 1087 // conservatively throw out the cached data and restart the query with 1088 // no tag if needed. 
1089 if (CacheInfo->AATags != Loc.AATags) { 1090 if (CacheInfo->AATags) { 1091 CacheInfo->Pair = BBSkipFirstBlockPair(); 1092 CacheInfo->AATags = AAMDNodes(); 1093 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(), 1094 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI) 1095 if (Instruction *Inst = DI->getResult().getInst()) 1096 RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey); 1097 CacheInfo->NonLocalDeps.clear(); 1098 } 1099 if (Loc.AATags) 1100 return getNonLocalPointerDepFromBB(QueryInst, 1101 Pointer, Loc.getWithoutAATags(), 1102 isLoad, StartBB, Result, Visited, 1103 SkipFirstBlock); 1104 } 1105 } 1106 1107 NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps; 1108 1109 // If we have valid cached information for exactly the block we are 1110 // investigating, just return it with no recomputation. 1111 if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) { 1112 // We have a fully cached result for this query then we can just return the 1113 // cached results and populate the visited set. However, we have to verify 1114 // that we don't already have conflicting results for these blocks. Check 1115 // to ensure that if a block in the results set is in the visited set that 1116 // it was for the same pointer query. 1117 if (!Visited.empty()) { 1118 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); 1119 I != E; ++I) { 1120 DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB()); 1121 if (VI == Visited.end() || VI->second == Pointer.getAddr()) 1122 continue; 1123 1124 // We have a pointer mismatch in a block. Just return clobber, saying 1125 // that something was clobbered in this result. We could also do a 1126 // non-fully cached query, but there is little point in doing this. 1127 return true; 1128 } 1129 } 1130 1131 Value *Addr = Pointer.getAddr(); 1132 for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); 1133 I != E; ++I) { 1134 Visited.insert(std::make_pair(I->getBB(), Addr)); 1135 if (I->getResult().isNonLocal()) { 1136 continue; 1137 } 1138 1139 if (!DT) { 1140 Result.push_back(NonLocalDepResult(I->getBB(), 1141 MemDepResult::getUnknown(), 1142 Addr)); 1143 } else if (DT->isReachableFromEntry(I->getBB())) { 1144 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr)); 1145 } 1146 } 1147 ++NumCacheCompleteNonLocalPtr; 1148 return false; 1149 } 1150 1151 // Otherwise, either this is a new block, a block with an invalid cache 1152 // pointer or one that we're about to invalidate by putting more info into it 1153 // than its valid cache info. If empty, the result will be valid cache info, 1154 // otherwise it isn't. 1155 if (Cache->empty()) 1156 CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock); 1157 else 1158 CacheInfo->Pair = BBSkipFirstBlockPair(); 1159 1160 SmallVector<BasicBlock*, 32> Worklist; 1161 Worklist.push_back(StartBB); 1162 1163 // PredList used inside loop. 1164 SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList; 1165 1166 // Keep track of the entries that we know are sorted. Previously cached 1167 // entries will all be sorted. The entries we add we only sort on demand (we 1168 // don't insert every element into its sorted position). We know that we 1169 // won't get any reuse from currently inserted values, because we don't 1170 // revisit blocks after we insert info for them. 
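  // NumSortedEntries below is the length of that sorted prefix; entries
  // appended past it stay unsorted until SortNonLocalDepInfoCache is called,
  // either before a recursive query or once the worklist is exhausted.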
1171 unsigned NumSortedEntries = Cache->size(); 1172 DEBUG(AssertSorted(*Cache)); 1173 1174 while (!Worklist.empty()) { 1175 BasicBlock *BB = Worklist.pop_back_val(); 1176 1177 // If we do process a large number of blocks it becomes very expensive and 1178 // likely it isn't worth worrying about 1179 if (Result.size() > NumResultsLimit) { 1180 Worklist.clear(); 1181 // Sort it now (if needed) so that recursive invocations of 1182 // getNonLocalPointerDepFromBB and other routines that could reuse the 1183 // cache value will only see properly sorted cache arrays. 1184 if (Cache && NumSortedEntries != Cache->size()) { 1185 SortNonLocalDepInfoCache(*Cache, NumSortedEntries); 1186 } 1187 // Since we bail out, the "Cache" set won't contain all of the 1188 // results for the query. This is ok (we can still use it to accelerate 1189 // specific block queries) but we can't do the fastpath "return all 1190 // results from the set". Clear out the indicator for this. 1191 CacheInfo->Pair = BBSkipFirstBlockPair(); 1192 return true; 1193 } 1194 1195 // Skip the first block if we have it. 1196 if (!SkipFirstBlock) { 1197 // Analyze the dependency of *Pointer in FromBB. See if we already have 1198 // been here. 1199 assert(Visited.count(BB) && "Should check 'visited' before adding to WL"); 1200 1201 // Get the dependency info for Pointer in BB. If we have cached 1202 // information, we will use it, otherwise we compute it. 1203 DEBUG(AssertSorted(*Cache, NumSortedEntries)); 1204 MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, 1205 Loc, isLoad, BB, Cache, 1206 NumSortedEntries); 1207 1208 // If we got a Def or Clobber, add this to the list of results. 1209 if (!Dep.isNonLocal()) { 1210 if (!DT) { 1211 Result.push_back(NonLocalDepResult(BB, 1212 MemDepResult::getUnknown(), 1213 Pointer.getAddr())); 1214 continue; 1215 } else if (DT->isReachableFromEntry(BB)) { 1216 Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr())); 1217 continue; 1218 } 1219 } 1220 } 1221 1222 // If 'Pointer' is an instruction defined in this block, then we need to do 1223 // phi translation to change it into a value live in the predecessor block. 1224 // If not, we just add the predecessors to the worklist and scan them with 1225 // the same Pointer. 1226 if (!Pointer.NeedsPHITranslationFromBlock(BB)) { 1227 SkipFirstBlock = false; 1228 SmallVector<BasicBlock*, 16> NewBlocks; 1229 for (BasicBlock *Pred : PredCache.get(BB)) { 1230 // Verify that we haven't looked at this block yet. 1231 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool> 1232 InsertRes = Visited.insert(std::make_pair(Pred, Pointer.getAddr())); 1233 if (InsertRes.second) { 1234 // First time we've looked at *PI. 1235 NewBlocks.push_back(Pred); 1236 continue; 1237 } 1238 1239 // If we have seen this block before, but it was with a different 1240 // pointer then we have a phi translation failure and we have to treat 1241 // this as a clobber. 1242 if (InsertRes.first->second != Pointer.getAddr()) { 1243 // Make sure to clean up the Visited map before continuing on to 1244 // PredTranslationFailure. 1245 for (unsigned i = 0; i < NewBlocks.size(); i++) 1246 Visited.erase(NewBlocks[i]); 1247 goto PredTranslationFailure; 1248 } 1249 } 1250 Worklist.append(NewBlocks.begin(), NewBlocks.end()); 1251 continue; 1252 } 1253 1254 // We do need to do phi translation, if we know ahead of time we can't phi 1255 // translate this value, don't even try. 
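    // For example (an illustration only; the exact rules live in
    // PHITransAddr): if Pointer is "%p = getelementptr %base, %i" and %base
    // is a PHI node in BB, translating into a predecessor substitutes the
    // PHI's incoming value for that predecessor, yielding the address the
    // access would use along that edge.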
1256 if (!Pointer.IsPotentiallyPHITranslatable()) 1257 goto PredTranslationFailure; 1258 1259 // We may have added values to the cache list before this PHI translation. 1260 // If so, we haven't done anything to ensure that the cache remains sorted. 1261 // Sort it now (if needed) so that recursive invocations of 1262 // getNonLocalPointerDepFromBB and other routines that could reuse the cache 1263 // value will only see properly sorted cache arrays. 1264 if (Cache && NumSortedEntries != Cache->size()) { 1265 SortNonLocalDepInfoCache(*Cache, NumSortedEntries); 1266 NumSortedEntries = Cache->size(); 1267 } 1268 Cache = nullptr; 1269 1270 PredList.clear(); 1271 for (BasicBlock *Pred : PredCache.get(BB)) { 1272 PredList.push_back(std::make_pair(Pred, Pointer)); 1273 1274 // Get the PHI translated pointer in this predecessor. This can fail if 1275 // not translatable, in which case the getAddr() returns null. 1276 PHITransAddr &PredPointer = PredList.back().second; 1277 PredPointer.PHITranslateValue(BB, Pred, DT, /*MustDominate=*/false); 1278 Value *PredPtrVal = PredPointer.getAddr(); 1279 1280 // Check to see if we have already visited this pred block with another 1281 // pointer. If so, we can't do this lookup. This failure can occur 1282 // with PHI translation when a critical edge exists and the PHI node in 1283 // the successor translates to a pointer value different than the 1284 // pointer the block was first analyzed with. 1285 std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool> 1286 InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal)); 1287 1288 if (!InsertRes.second) { 1289 // We found the pred; take it off the list of preds to visit. 1290 PredList.pop_back(); 1291 1292 // If the predecessor was visited with PredPtr, then we already did 1293 // the analysis and can ignore it. 1294 if (InsertRes.first->second == PredPtrVal) 1295 continue; 1296 1297 // Otherwise, the block was previously analyzed with a different 1298 // pointer. We can't represent the result of this case, so we just 1299 // treat this as a phi translation failure. 1300 1301 // Make sure to clean up the Visited map before continuing on to 1302 // PredTranslationFailure. 1303 for (unsigned i = 0, n = PredList.size(); i < n; ++i) 1304 Visited.erase(PredList[i].first); 1305 1306 goto PredTranslationFailure; 1307 } 1308 } 1309 1310 // Actually process results here; this need to be a separate loop to avoid 1311 // calling getNonLocalPointerDepFromBB for blocks we don't want to return 1312 // any results for. (getNonLocalPointerDepFromBB will modify our 1313 // datastructures in ways the code after the PredTranslationFailure label 1314 // doesn't expect.) 1315 for (unsigned i = 0, n = PredList.size(); i < n; ++i) { 1316 BasicBlock *Pred = PredList[i].first; 1317 PHITransAddr &PredPointer = PredList[i].second; 1318 Value *PredPtrVal = PredPointer.getAddr(); 1319 1320 bool CanTranslate = true; 1321 // If PHI translation was unable to find an available pointer in this 1322 // predecessor, then we have to assume that the pointer is clobbered in 1323 // that predecessor. We can still do PRE of the load, which would insert 1324 // a computation of the pointer in this predecessor. 1325 if (!PredPtrVal) 1326 CanTranslate = false; 1327 1328 // FIXME: it is entirely possible that PHI translating will end up with 1329 // the same value. Consider PHI translating something like: 1330 // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need* 1331 // to recurse here, pedantically speaking. 
1332 1333 // If getNonLocalPointerDepFromBB fails here, that means the cached 1334 // result conflicted with the Visited list; we have to conservatively 1335 // assume it is unknown, but this also does not block PRE of the load. 1336 if (!CanTranslate || 1337 getNonLocalPointerDepFromBB(QueryInst, PredPointer, 1338 Loc.getWithNewPtr(PredPtrVal), 1339 isLoad, Pred, 1340 Result, Visited)) { 1341 // Add the entry to the Result list. 1342 NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal); 1343 Result.push_back(Entry); 1344 1345 // Since we had a phi translation failure, the cache for CacheKey won't 1346 // include all of the entries that we need to immediately satisfy future 1347 // queries. Mark this in NonLocalPointerDeps by setting the 1348 // BBSkipFirstBlockPair pointer to null. This requires reuse of the 1349 // cached value to do more work but not miss the phi trans failure. 1350 NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey]; 1351 NLPI.Pair = BBSkipFirstBlockPair(); 1352 continue; 1353 } 1354 } 1355 1356 // Refresh the CacheInfo/Cache pointer so that it isn't invalidated. 1357 CacheInfo = &NonLocalPointerDeps[CacheKey]; 1358 Cache = &CacheInfo->NonLocalDeps; 1359 NumSortedEntries = Cache->size(); 1360 1361 // Since we did phi translation, the "Cache" set won't contain all of the 1362 // results for the query. This is ok (we can still use it to accelerate 1363 // specific block queries) but we can't do the fastpath "return all 1364 // results from the set" Clear out the indicator for this. 1365 CacheInfo->Pair = BBSkipFirstBlockPair(); 1366 SkipFirstBlock = false; 1367 continue; 1368 1369 PredTranslationFailure: 1370 // The following code is "failure"; we can't produce a sane translation 1371 // for the given block. It assumes that we haven't modified any of 1372 // our datastructures while processing the current block. 1373 1374 if (!Cache) { 1375 // Refresh the CacheInfo/Cache pointer if it got invalidated. 1376 CacheInfo = &NonLocalPointerDeps[CacheKey]; 1377 Cache = &CacheInfo->NonLocalDeps; 1378 NumSortedEntries = Cache->size(); 1379 } 1380 1381 // Since we failed phi translation, the "Cache" set won't contain all of the 1382 // results for the query. This is ok (we can still use it to accelerate 1383 // specific block queries) but we can't do the fastpath "return all 1384 // results from the set". Clear out the indicator for this. 1385 CacheInfo->Pair = BBSkipFirstBlockPair(); 1386 1387 // If *nothing* works, mark the pointer as unknown. 1388 // 1389 // If this is the magic first block, return this as a clobber of the whole 1390 // incoming value. Since we can't phi translate to one of the predecessors, 1391 // we have to bail out. 1392 if (SkipFirstBlock) 1393 return true; 1394 1395 for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) { 1396 assert(I != Cache->rend() && "Didn't find current block??"); 1397 if (I->getBB() != BB) 1398 continue; 1399 1400 assert((I->getResult().isNonLocal() || !DT->isReachableFromEntry(BB)) && 1401 "Should only be here with transparent block"); 1402 I->setResult(MemDepResult::getUnknown()); 1403 Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), 1404 Pointer.getAddr())); 1405 break; 1406 } 1407 } 1408 1409 // Okay, we're done now. If we added new values to the cache, re-sort it. 
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return false;
}

/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end()) return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target) continue;  // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

/// invalidateCachedPointerInfo - Invalidate cached information about the
/// specified pointer, because it may be too conservative in memdep.  This is
/// an optional call that can be used when the client detects an equivalence
/// between the pointer and some other value and replaces the other value
/// with Ptr.  This can make Ptr available in more places than the cached
/// information currently reflects.
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy()) return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}

/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
/// This needs to be done when the CFG changes, e.g., due to splitting
/// critical edges.
void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
  PredCache.clear();
}

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
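    // (If the cached result has no instruction, e.g. it was a non-local or
    //  unknown marker, there is no reverse-map entry to clean up, which is
    //  why the getInst() null check below suffices.)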
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a
  // pointer base.
  //
  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other
  // values, we need to replace its entry with a dirty version of the
  // instruction after it.  If RemInst is a terminator, we use a null dirty
  // value.
  //
  // Using a dirty version of the instruction after RemInst saves having to
  // scan the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on the instruction held
      // by NewDirtyVal.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[I];
      // The information is now dirty!
      INLD.second = true;

      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
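        // (Dirty entries are recomputed lazily: the next query that touches
        //  this cached entry re-scans the block starting at the recorded
        //  instruction instead of discarding the whole cache entry.)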
        DI->setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // set we are iterating over.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>, 8>
      ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}

/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.  It does this by asserting in debug
/// builds.
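/// In release builds (NDEBUG) the checks compile away entirely.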
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getInst() != D && "Inst occurs in data structures");
  }

  for (CachedNonLocalPointerInfo::const_iterator
       I = NonLocalPointerDeps.begin(), E = NonLocalPointerDeps.end();
       I != E; ++I) {
    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
    const NonLocalDepInfo &Val = I->second.NonLocalDeps;
    for (NonLocalDepInfo::const_iterator II = Val.begin(), EE = Val.end();
         II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = I->second;
    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
         EE = INLD.first.end(); II != EE; ++II)
      assert(II->getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (Instruction *Inst : I->second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (Instruction *Inst : I->second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (ReverseNonLocalPtrDepTy::const_iterator
       I = ReverseNonLocalPtrDeps.begin(),
       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : I->second)
      assert(P != ValueIsLoadPair(D, false) &&
             P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}
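// A note on client usage (a minimal sketch assuming a standard FunctionPass
// client such as GVN; the surrounding pass boilerplate is hypothetical and
// not part of this file): a transform that deletes or replaces instructions
// is expected to keep this analysis coherent by calling the invalidation
// hooks above, e.g.:
//
//   MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
//   ...
//   MD.invalidateCachedPointerInfo(ReplacementV); // after RAUW of a pointer
//   MD.removeInstruction(DeadInst);               // before erasing the IR
//   DeadInst->eraseFromParent();
//
// and invalidateCachedPredecessors() whenever the CFG changes, e.g., after
// splitting a critical edge.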