//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
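// Unlike the two cl::opt limits above, which can be tuned from the command
// line when investigating compile-time problems (for example, an illustrative
// invocation: "opt -memdep-block-scan-limit=200 ..."), this cap is fixed at
// compile time.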
static const unsigned int NumResultsLimit = 100;

char MemoryDependenceAnalysis::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis() : FunctionPass(ID) {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {}

/// Clean up memory between runs.
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache.clear();
}

void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  return false;
}

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in
/// Loc with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
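///
/// For example (illustrative of the cases handled below), a simple
/// (non-volatile, non-atomic) load yields its MemoryLocation and MRI_Ref, a
/// simple store yields its location and MRI_Mod, and an arbitrary call with no
/// identifiable location leaves Loc.Ptr null and returns the conservative
/// MRI_ModRef.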
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return MRI_Ref;
    }
    if (LI->getOrdering() == Monotonic) {
      Loc = MemoryLocation::get(LI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return MRI_Mod;
    }
    if (SI->getOrdering() == Monotonic) {
      Loc = MemoryLocation::get(SI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return MRI_ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation(CI->getArgOperand(0));
    return MRI_Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(1),
          cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(2),
          cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return MRI_ModRef;
  if (Inst->mayReadFromMemory())
    return MRI_Ref;
  return MRI_NoModRef;
}

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::getCallSiteDependencyFrom(
    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = &*--ScanIt;

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, *TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != MRI_NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto InstCS = CallSite(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst))
        continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case MRI_NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & MRI_Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (MR != MRI_NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

/// Return true if LI is a load that would fully overlap MemLoc if done as
/// a wider legal integer load.
///
/// MemLocBase, MemLocOffs are lazily computed here the first time the
/// base/offset of MemLoc is needed.
static bool isLoadLoadClobberIfExtendedToFullWidth(const MemoryLocation &MemLoc,
                                                   const Value *&MemLocBase,
                                                   int64_t &MemLocOffs,
                                                   const LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (!MemLocBase)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

  unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
      MemLocBase, MemLocOffs, MemLoc.Size, LI);
  return Size != 0;
}

unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
    const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
    const LoadInst *LI) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
    return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
    return 0;

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase)
    return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias. This happens when we have things like two byte loads at "P+1"
  // and "P+3". Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs)
    return 0;

  // Get the alignment of the load in bytes. We assume that it is safe to load
  // any legal integer up to this size without a problem. For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load. If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs + MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
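  // For example (illustrative numbers): an i8 load at offset 0 with only
  // 2-byte alignment can never be widened to reach a queried byte at offset 3
  // (0 + 2 < 4), whereas with 4-byte alignment the i32 widening attempted
  // below covers bytes [0, 4) and succeeds.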
  if (LIOffs + LoadAlign < MemLocEnd)
    return 0;

  // This is the size of the load to try. Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (true) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
      return 0;

    if (LIOffs + NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->hasFnAttribute(
            Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original program.
      // While this is safe in a regular build, Address Safety analysis tools
      // may start reporting false warnings. So, don't do widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs + NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}

static bool isVolatile(Instruction *Inst) {
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}

MemDepResult MemoryDependenceAnalysis::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst) {

  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      MemDepResult invariantGroupDependency =
          getInvariantGroupPointerDependency(LI, BB);

      if (invariantGroupDependency.isDef())
        return invariantGroupDependency;
    }
  }
  return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst);
}

MemDepResult
MemoryDependenceAnalysis::getInvariantGroupPointerDependency(LoadInst *LI,
                                                              BasicBlock *BB) {
  Value *LoadOperand = LI->getPointerOperand();
  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  auto *InvariantGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group);
  if (!InvariantGroupMD)
    return MemDepResult::getUnknown();

  MemDepResult Result = MemDepResult::getUnknown();
  llvm::SmallSet<Value *, 14> Seen;
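  // For example (illustrative IR, not from this file): a load and a store
  // through the same pointer (or a bitcast of it) that both carry
  // !invariant.group metadata are guaranteed to observe the same value, so the
  // load below can be answered as a Def on the store:
  //   store i8 42, i8* %ptr, !invariant.group !0
  //   ...
  //   %v = load i8, i8* %ptr, !invariant.group !0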
  // Queue to process all pointers that are equivalent to the load operand.
  llvm::SmallVector<Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);
  while (!LoadOperandsQueue.empty()) {
    Value *Ptr = LoadOperandsQueue.pop_back_val();
    if (isa<GlobalValue>(Ptr))
      continue;

    if (auto *BCI = dyn_cast<BitCastInst>(Ptr)) {
      if (!Seen.count(BCI->getOperand(0))) {
        LoadOperandsQueue.push_back(BCI->getOperand(0));
        Seen.insert(BCI->getOperand(0));
      }
    }

    for (Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT->dominates(U, LI))
        continue;

      if (auto *BCI = dyn_cast<BitCastInst>(U)) {
        if (!Seen.count(BCI)) {
          LoadOperandsQueue.push_back(BCI);
          Seen.insert(BCI);
        }
        continue;
      }
      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand), we can assume that the value pointed to by the
      // pointer operand didn't change.
      if ((isa<LoadInst>(U) || isa<StoreInst>(U)) && U->getParent() == BB &&
          U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD)
        return MemDepResult::getDef(U);
    }
  }
  return Result;
}

MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst) {

  const Value *MemLocBase = nullptr;
  int64_t MemLocOffset = 0;
  unsigned Limit = BlockScanLimit;
  bool isInvariantLoad = false;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimisation of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
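  // For example (illustrative IR, not from this file), a load tagged with
  // !invariant.load cannot be clobbered by any may-alias store, so only
  // must-alias defs are reported for it below:
  //   store i32 1, i32* %other
  //   %v = load i32, i32* %p, !invariant.load !0   ; the store is skipped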
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // Create a numbered basic block to lazily compute and cache instruction
  // positions inside a BB. This is used to provide fast queries for relative
  // position between two instructions in a BB and can be used by
  // AliasAnalysis::callCapturesBefore.
  OrderedBasicBlock OBB(BB);

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased. This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {

      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations: normal accesses, for example, can be
      // safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic load is OK only if the query inst is itself a simple
      // (non-atomic, non-volatile) load or store.
      // FIXME: This is overly conservative.
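      // (In C++11 terms, Monotonic corresponds to memory_order_relaxed;
      //  Unordered is weaker still, skips the check below entirely, and is
      //  handled by the normal alias logic like a simple load.)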
      if (LI->isAtomic() && LI->getOrdering() > Unordered) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1). If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
            if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI))
              return MemDepResult::getClobber(Inst);
          }
          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Other may-alias loads don't impose a dependence on each other; keep
        // scanning.
        continue;
      }

      // Stores don't depend on non-aliasing accesses.
      if (R == NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: This is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations: normal accesses, for example, can be
      // reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == MRI_NoModRef)
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must alias: in this case, we want to return this as a def.
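      // For example (illustrative IR, not from this file), with MemLoc
      // describing "load i32, i32* %p":
      //   store i32 1, i32* %p   ; MustAlias -> Def (the loaded value is known)
      //   store i32 1, i32* %q   ; MayAlias  -> Clobber, unless the query is an
      //                          ; invariant load, which skips the store below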
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == NoAlias)
        continue;
      if (R == MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result. There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      // Be conservative if the accessed pointer may alias the allocation -
      // fall back to the generic handling below.
      if ((AA->alias(Inst, AccessPtr) == NoAlias) &&
          // If the allocation is not aliased and does not read memory (like
          // strdup), it is safe to ignore.
          (isa<AllocaInst>(Inst) || isMallocLikeFn(Inst, TLI) ||
           isCallocLikeFn(Inst, TLI)))
        continue;
    }

    if (isInvariantLoad)
      continue;

    // See if this instruction (e.g. a call or vaarg) mod/refs the pointer.
    ModRefInfo MR = AA->getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == MRI_ModRef)
      MR = AA->callCapturesBefore(Inst, MemLoc, DT, &OBB);
    switch (MR) {
    case MRI_NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case MRI_Mod:
      return MemDepResult::getClobber(Inst);
    case MRI_Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.
    // If this is the entry block of the function, it is unknown, otherwise it
    // is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, *TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & MRI_Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(
          MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(
          QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //      << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock *Pred : PredCache.get(QueryBB))
      DirtyBlocks.push_back(Pred);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
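  // For example (illustrative), two identical calls to a function that only
  // reads memory (such as strlen) can be reported as a Def of one another in
  // getCallSiteDependencyFrom, which lets the caller treat the second call as
  // redundant.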
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep =
          getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence was found in this block. Since it is not the entry block
      // of the function, the result is non-local; for the entry block it is
      // non-function-local instead.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block. Add them to our worklist.
      for (BasicBlock *Pred : PredCache.get(DirtyBB))
        DirtyBlocks.push_back(Pred);
    }
  }

  return Cache;
}

void MemoryDependenceAnalysis::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      return !LI->isUnordered();
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      return !SI->isUnordered();
    return false;
  };
  if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
          std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}

/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (auto &Entry : CacheInfo->NonLocalDeps)
        if (Instruction *Inst = Entry.getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(
          QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
          StartBB, Result, Visited, SkipFirstBlock);
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
            Visited, SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return the
    // cached results and populate the visited set. However, we have to verify
    // that we don't already have conflicting results for these blocks. Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (!DT) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), MemDepResult::getUnknown(), Addr));
      } else if (DT->isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into it
  // than its valid cache info. If empty, the result will be valid cache info,
  // otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted. The entries we add we only sort on demand (we
  // don't insert every element into its sorted position). We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we do process a large number of blocks, it becomes very expensive
    // and likely it isn't worth worrying about.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
                                                 Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (!DT) {
          Result.push_back(NonLocalDepResult(BB, MemDepResult::getUnknown(),
                                             Pointer.getAddr()));
          continue;
        } else if (DT->isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation. If we know ahead of time that we can't
    // phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the
    // cache value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // not translatable, in which case getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer.
      // If so, we can't do this lookup. This failure can occur with PHI
      // translation when a critical edge exists and the PHI node in the
      // successor translates to a pointer value different than the pointer
      // the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for. (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor. We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value. Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy
        // future queries. Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null. This requires reuse of the
        // cached value to do more work but not miss the phi trans failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block. It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of
    // the results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value. Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return false;

    bool foundBlock = false;
    for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
      if (I.getBB() != BB)
        continue;

      assert((GotWorklistLimit || I.getResult().isNonLocal() ||
              !DT->isReachableFromEntry(BB)) &&
             "Should only be here with transparent block");
      foundBlock = true;
      I.setResult(MemDepResult::getUnknown());
      Result.push_back(
          NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
      break;
    }
    (void)foundBlock;
    assert((foundBlock || GotWorklistLimit) && "Current block not in cache?");
  }

  // Okay, we're done now. If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::RemoveCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}

void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
  PredCache.clear();
}

void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a pointer
  // base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  DEBUG(verifyRemoved(RemInst));
}

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
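/// The checks are guarded by #ifndef NDEBUG, so the function compiles to a
/// no-op in release builds.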
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}