//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;
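
// The limits above can be tuned from the command line. A hypothetical
// invocation (illustrative only, not taken from this file) might look like:
//   opt -gvn -memdep-block-scan-limit=200 -memdep-block-number-limit=500 ...
// Raising the limits trades compile time for more precise dependence results.
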
/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in
/// Loc with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return MRI_Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return MRI_Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return MRI_ModRef;
    }
    Loc = MemoryLocation();
    return MRI_ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return MRI_ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation(CI->getArgOperand(0));
    return MRI_Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    AAMDNodes AAInfo;

    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(1),
          cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    case Intrinsic::invariant_end:
      II->getAAMetadata(AAInfo);
      Loc = MemoryLocation(
          II->getArgOperand(2),
          cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return MRI_Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return MRI_ModRef;
  if (Inst->mayReadFromMemory())
    return MRI_Ref;
  return MRI_NoModRef;
}
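
// Illustrative classification (assumed example, not from the original
// source): for
//   %v = load atomic i32, i32* %p monotonic, align 4
// GetLocation fills in Loc for %p and returns MRI_ModRef, so the monotonic
// load is treated conservatively as both reading and writing, since another
// thread may legally touch the location.
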
/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
    CallSite CS, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = BlockScanLimit;

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    Instruction *Inst = &*--ScanIt;

    // If this instruction is a memory operation, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA.getModRefInfo(CS, Loc) != MRI_NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto InstCS = CallSite(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst))
        continue;
      // If these two calls do not interfere, look past them.
      switch (AA.getModRefInfo(CS, InstCS)) {
      case MRI_NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & MRI_Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise, if the two calls don't interact (e.g. InstCS is
        // readnone), keep scanning.
        continue;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }

    // If we could not obtain a pointer for the instruction and the
    // instruction touches memory, then assume that this is a dependency.
    if (MR != MRI_NoModRef)
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

unsigned MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
    const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
    const LoadInst *LI) {
  // We can only extend simple integer loads.
  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
    return 0;

  // Load widening is hostile to ThreadSanitizer: it may cause false positives
  // or make the reports more cryptic (access sizes are wrong).
  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
    return 0;

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase)
    return 0;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias. This happens when we have things like two byte loads at "P+1"
  // and "P+3". Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs)
    return 0;

  // Get the alignment of the load in bytes. We assume that it is safe to load
  // any legal integer up to this size without a problem. For example, if
  // we're looking at an i8 load on x86-32 that is known 1024 byte aligned,
  // we can widen it up to an i32 load. If it is known 2-byte aligned, we can
  // widen it to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs + MemLocSize;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs + LoadAlign < MemLocEnd)
    return 0;

  // This is the size of the load to try. Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (true) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
      return 0;

    if (LIOffs + NewLoadByteSize > MemLocEnd &&
        LI->getParent()->getParent()->hasFnAttribute(
            Attribute::SanitizeAddress))
      // We will be reading past the location accessed by the original
      // program. While this is safe in a regular build, Address Safety
      // analysis tools may start reporting false warnings. So, don't do
      // widening.
      return 0;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs + NewLoadByteSize >= MemLocEnd)
      return NewLoadByteSize;

    NewLoadByteSize <<= 1;
  }
}
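
// Worked example (assumed values, not from the original source): given a
// simple i16 load of P with LIOffs == 0 and 4-byte alignment, and a 2-byte
// MemLoc at offset 2 from the same base (MemLocEnd == 4), the loop above
// starts at NewLoadByteSize == 4 (NextPowerOf2 of the 2-byte load width).
// That width fits both the alignment and a legal i32 on typical targets, and
// LIOffs + 4 >= MemLocEnd, so the load can be widened to 4 bytes.
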
static bool isVolatile(Instruction *Inst) {
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {

  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      MemDepResult invariantGroupDependency =
          getInvariantGroupPointerDependency(LI, BB);

      if (invariantGroupDependency.isDef())
        return invariantGroupDependency;
    }
  }
  return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst,
                                        Limit);
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {
  Value *LoadOperand = LI->getPointerOperand();
  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  auto *InvariantGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group);
  if (!InvariantGroupMD)
    return MemDepResult::getUnknown();

  MemDepResult Result = MemDepResult::getUnknown();
  SmallSet<Value *, 14> Seen;
  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);
  while (!LoadOperandsQueue.empty()) {
    Value *Ptr = LoadOperandsQueue.pop_back_val();
    if (isa<GlobalValue>(Ptr))
      continue;

    if (auto *BCI = dyn_cast<BitCastInst>(Ptr)) {
      if (Seen.insert(BCI->getOperand(0)).second) {
        LoadOperandsQueue.push_back(BCI->getOperand(0));
      }
    }

    for (Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      if (auto *BCI = dyn_cast<BitCastInst>(U)) {
        if (Seen.insert(BCI).second) {
          LoadOperandsQueue.push_back(BCI);
        }
        continue;
      }
      // If we hit a load or store with the same invariant.group metadata
      // (and the same pointer operand), we can assume that the value pointed
      // to by the pointer operand didn't change.
      if ((isa<LoadInst>(U) || isa<StoreInst>(U)) && U->getParent() == BB &&
          U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD)
        return MemDepResult::getDef(U);
    }
  }
  return Result;
}

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  bool isInvariantLoad = false;

  if (!Limit) {
    unsigned DefaultLimit = BlockScanLimit;
    return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB,
                                          QueryInst, &DefaultLimit);
  }

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program, however, is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
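
  // Illustrative case (assumed example, not from the original source): for a
  // query such as
  //   %v = load i32, i32* %p, !invariant.load !0
  // a may-alias store encountered while scanning can be skipped as if it
  // were no-alias, while a must-alias store is still reported as a Def so
  // its value can be forwarded.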
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
      isInvariantLoad = true;
  }

  const DataLayout &DL = BB->getModule()->getDataLayout();

  // Create a numbered basic block to lazily compute and cache instruction
  // positions inside a BB. This is used to provide fast queries for relative
  // position between two instructions in a BB and can be used by
  // AliasAnalysis::callCapturesBefore.
  OrderedBasicBlock OBB(BB);

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
    }

    // Values depend on loads if the pointers are must aliased. This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load
    // that it does not alias with when this atomic load indicates that
    // another thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {

      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias)
          continue;

        // Must aliased loads are defs of each other.
        if (R == MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // May-alias loads don't, by themselves, constitute a dependence on
        // each other, so keep scanning.
        continue;
      }

      // Stores don't depend on other non-aliasing accesses.
      if (R == NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: This is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not
      // modify the query pointer, ignore it. Use getModRefInfo to handle
      // cases where the query pointer points to constant memory etc.
      if (AA.getModRefInfo(SI, MemLoc) == MRI_NoModRef)
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it
      // is a must alias: in this case, we want to return this as a def.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = AA.alias(StoreLoc, MemLoc);

      if (R == NoAlias)
        continue;
      if (R == MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is
    // to the allocation, return Def. This means that there is no dependence
    // and the access can be optimized based on that. For example, a load
    // could turn into undef.
    // Note that we can bypass the allocation itself when looking for a
    // clobber in many cases; that's an alias property and is handled by
    // BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
      if (AccessPtr == Inst || AA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/refs the pointer.
    ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (MR == MRI_ModRef)
      MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
    switch (MR) {
    case MRI_NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case MRI_Mod:
      return MemDepResult::getClobber(Inst);
    case MRI_Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
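
// Illustrative local query (assumed example, not from the original source):
// when scanning backwards from the load in
//   store i32 1, i32* %p
//   %v = load i32, i32* %p
// the store's location must-aliases the load's, so the scan above returns a
// Def on the store, which clients such as GVN can use to forward the stored
// value to the load.
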
MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at
  // that instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & MRI_Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache = getPointerDependencyFrom(
          MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA.onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(
          QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
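
// Minimal usage sketch (assumed, not from the original source):
//   MemDepResult Dep = MD.getDependency(Load);
//   if (Dep.isDef())
//     ; // Load depends locally on Dep.getInst(), e.g. a must-aliased store
//       // whose value can be forwarded.
//   else if (Dep.isNonLocal())
//     ; // The dependency lies in a predecessor; query it via
//       // getNonLocalPointerDependency or getNonLocalCallDependency.
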
#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached
  // case, this can happen due to instructions being deleted etc. In the
  // uncached case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return
    // it with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //      << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock *Pred : PredCache.get(QueryBB))
      DirtyBlocks.push_back(Pred);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block
    // in the cache set. If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the
      // block is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep =
          getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {
      // If the block *is* completely transparent to the load, we need to
      // check the predecessors of this block. Add them to our worklist.
      for (BasicBlock *Pred : PredCache.get(DirtyBB))
        DirtyBlocks.push_back(Pred);
    }
  }

  return Cache;
}

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping through the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do
  // handle atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
  if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider
  // in each block. Because of critical edges, we currently bail out if
  // querying a block with multiple different pointers. This can happen
  // during PHI translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty
/// cache info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full-scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}
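
// Illustrative behavior (assumed example, not from the original source): if
// the sorted prefix of the cache holds entries for [BB1, BB3, BB4] and one
// query appended an entry for BB2, the "case 1" path above re-inserts just
// that entry via binary search instead of re-sorting the whole array.
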
/// Perform a dependency query based on Pointer/PointeeSize starting at the
/// end of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do
  // some work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one. Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (auto &Entry : CacheInfo->NonLocalDeps)
        if (Instruction *Inst = Entry.getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one. Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(
          QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
          StartBB, Result, Visited, SkipFirstBlock);
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB,
            Result, Visited, SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return
    // the cached results and populate the visited set. However, we have to
    // verify that we don't already have conflicting results for these
    // blocks. Check to ensure that if a block in the results set is in the
    // visited set, it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info. If empty, the result will be valid cache
  // info, otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted. The entries we add we only sort on demand
  // (we don't insert every element into its sorted position). We know that
  // we won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we do process a large number of blocks, it becomes very expensive
    // and likely isn't worth worrying about.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we have already
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
                                                 Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to
    // do phi translation to change it into a value live in the predecessor
    // block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at Pred.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer, then we have a phi translation failure and we have to
        // treat this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains
    // sorted. Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the
    // cache value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // not translatable, in which case getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup. This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to
    // avoid calling getNonLocalPointerDepFromBB for blocks we don't want to
    // return any results for. (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure
    // label doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor. We can still do PRE of the load, which would
      // insert a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value. Consider PHI translating something like:
      //   X = phi [x, bb1], [y, bb2].
      // PHI translating for bb1 doesn't *need* to recurse here, pedantically
      // speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey
        // won't include all of the entries that we need to immediately
        // satisfy future queries. Mark this in NonLocalPointerDeps by
        // setting the BBSkipFirstBlockPair pointer to null. This causes
        // later uses of the cached value to do more work, but ensures they
        // don't miss the phi translation failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block. It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of
    // the results for the query. This is ok (we can still use it to
    // accelerate specific block queries) but we can't do the fastpath
    // "return all results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the
    // whole incoming value. Since we can't phi translate to one of the
    // predecessors, we have to bail out.
    if (SkipFirstBlock)
      return false;

    bool foundBlock = false;
    for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
      if (I.getBB() != BB)
        continue;

      assert((GotWorklistLimit || I.getResult().isNonLocal() ||
              !DT.isReachableFromEntry(BB)) &&
             "Should only be here with transparent block");
      foundBlock = true;
      I.setResult(MemDepResult::getUnknown());
      Result.push_back(
          NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr()));
      break;
    }
    (void)foundBlock;
    (void)GotWorklistLimit;
    assert((foundBlock || GotWorklistLimit) && "Current block not in cache?");
  }

  // Okay, we're done now. If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo, remove it.
void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}

void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}
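
// Usage sketch (assumed, not from the original source): a client pass that
// rewrites a pointer value, e.g. replacing uses of %old with %new, would
// call
//   MD.invalidateCachedPointerInfo(NewPtr);
// so that stale non-local results keyed on that pointer are recomputed on
// the next query.
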
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a
  // pointer base.
  //
  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other
  // values, we need to replace its entry with a dirty version of the
  // instruction after it.  If RemInst is a terminator, we use a null dirty
  // value.
  //
  // Using a dirty version of the instruction after RemInst saves having to
  // scan the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  DEBUG(verifyRemoved(RemInst));
}

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
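/// In release (NDEBUG) builds the checks compile away to nothing.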
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}

char MemoryDependenceAnalysis::PassID;

MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT);
}

char MemoryDependenceWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() {}

void MemoryDependenceWrapperPass::releaseMemory() {
  MemDep.reset();
}

void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
  return BlockScanLimit;
}

bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  MemDep.emplace(AA, AC, TLI, DT);
  return false;
}
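// Illustrative sketch (not part of this file's implementation): a legacy-PM
// transform pass typically consumes this analysis by requiring
// MemoryDependenceWrapperPass and querying local dependencies, assuming the
// accessors declared in MemoryDependenceAnalysis.h (getMemDep, getDependency).
// The pass name "MyLoadElimPass" below is hypothetical.
//
//   void MyLoadElimPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemoryDependenceWrapperPass>();
//   }
//
//   bool MyLoadElimPass::runOnFunction(Function &F) {
//     MemoryDependenceResults &MD =
//         getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
//     for (Instruction &I : instructions(F))
//       if (auto *LI = dyn_cast<LoadInst>(&I)) {
//         MemDepResult Dep = MD.getDependency(LI);
//         if (Dep.isDef()) {
//           // Dep.getInst() is the local store/load this load depends on.
//         }
//       }
//     return false;
//   }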