//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");
// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;
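// Illustrative usage note: both limits are hidden cl::opt flags, so they can
// be tuned when running the optimizer, e.g. `opt -memdep-block-scan-limit=200`
// (or `-mllvm -memdep-block-scan-limit=200` when going through clang). Raising
// them trades compile time for more precise dependence results.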
/// This is a helper function that removes Val from Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in
/// Loc with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation(CI->getArgOperand(0));
    return ModRefInfo::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::masked_load:
      Loc = MemoryLocation::getForArgument(II, 0, TLI);
      return ModRefInfo::Ref;
    case Intrinsic::masked_store:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}
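// For illustration, GetLocation maps IR such as:
//   %v = load i32, i32* %p                          ; Loc = %p, returns Ref
//   store i32 0, i32* %p                            ; Loc = %p, returns Mod
//   %v = load atomic i32, i32* %p seq_cst, align 4  ; Loc.Ptr = null, ModRef
// Callers treat a null Loc.Ptr as "no specific location is known".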
/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = getDefaultBlockScanLimit();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;
    // Debug intrinsics don't cause dependences and should not affect Limit.
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If the two calls do not interfere, look past CallB.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise, if the two calls don't interact (e.g. CallB is readnone),
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory, then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
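// For illustration: given a read-only function @f and no interfering memory
// operations between the two calls,
//   %a = call i32 @f(i32 %x)
//   %b = call i32 @f(i32 %x)
// the query for %b returns Def(%a), so a client such as GVN can treat the
// second call as redundant.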
static bool isVolatile(Instruction *Inst) {
  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant.group dependency indicates a non-local Def (it is
  // only returned as NonLocal when a non-local Def was found), which is
  // better than a local clobber and everything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}
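// For illustration: loads tagged with !invariant.group on the same pointer
// can see through intervening may-writes, e.g.
//   %v1 = load i8, i8* %p, !invariant.group !0
//   call void @foo(i8* %p)                      ; may write to memory
//   %v2 = load i8, i8* %p, !invariant.group !0  ; still Def of %v1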
MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {

  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();

  // Take the pointer operand after all casts and zero-index GEPs. This way we
  // only need to search the cast graph downward.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside of
  // the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in a use list is unpredictable. In order to
  // always get the same result, we will look for the closest dominance.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must call it with a non-null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  // FIXME: This loop is O(N^2) because dominates can be O(n) and in worst case
  // we will see all the instructions. This should be fixed in MSSA.
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // A bitcast (e.g. U = bitcast Ptr) or a GEP with all-zero indices is
      // using Ptr; add it to the queue to check its users.
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // A GEP with all-zero indices is equivalent to a bitcast.
      // FIXME: we are not sure if some bitcast should be canonicalized to
      // gep 0 or gep 0 to bitcast because of SROA, so there are 2 forms. When
      // typeless pointers are ready, both cases will be gone (and this BFS
      // also won't be needed).
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load/store with the same invariant.group metadata (and
      // the same pointer operand), we can assume that the value pointed to by
      // the pointer operand didn't change.
      if ((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
          U->hasMetadata(LLVMContext::MD_invariant_group))
        ClosestDependency = GetClosestDependency(ClosestDependency, U);
    }
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // Def(U) can't be returned here because it is non-local. If no local
  // dependency is found, return NonLocal, counting on the user calling
  // getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}
MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  // We can batch AA queries, because the IR does not change during a MemDep
  // query.
  BatchAAResults BatchAA(AA);
  bool isInvariantLoad = false;

  unsigned DefaultLimit = getDefaultBlockScanLimit();
  if (!Limit)
    Limit = &DefaultLimit;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program, however, is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results, since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->hasMetadata(LLVMContext::MD_invariant_load))
      isInvariantLoad = true;
  }
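  // For illustration, an invariant load is one tagged like
  //   %v = load i32, i32* %p, !invariant.load !0
  // which asserts that the location is never modified while it is
  // dereferenceable, so may-alias stores found below can be skipped for it.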
  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      Intrinsic::ID ID = II->getIntrinsicID();
      switch (ID) {
      case Intrinsic::lifetime_start:
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (BatchAA.isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
          return MemDepResult::getDef(II);
        continue;
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        MemoryLocation Loc;
        /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
        AliasResult R = BatchAA.alias(Loc, MemLoc);
        if (R == NoAlias)
          continue;
        if (R == MustAlias)
          return MemDepResult::getDef(II);
        if (ID == Intrinsic::masked_load)
          continue;
        return MemDepResult::getClobber(II);
      }
      }
    }

    // Values depend on loads if the pointers are must aliased. This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load
    // that it does not alias with when this atomic load indicates that
    // another thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias)
          continue;

        // Must aliased loads are defs of each other.
        if (R == MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // May-alias loads don't impose a dependence on each other; keep
        // scanning.
        continue;
      }

      // Stores don't depend on other non-aliasing accesses.
      if (R == NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (BatchAA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }
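    // For illustration: for two simple loads of the same pointer,
    //   %a = load i32, i32* %p
    //   %b = load i32, i32* %p
    // the query for %b yields Def(%a), and a client can forward %a to %b.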
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(BatchAA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(StoreLoc, MemLoc);

      if (R == NoAlias)
        continue;
      if (R == MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef. Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
      if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = BatchAA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (isModAndRefSet(MR))
      // TODO: Support callCapturesBefore() on BatchAAResults.
      MR = AA.callCapturesBefore(Inst, MemLoc, &DT);
    switch (clearMust(MR)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
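// For illustration: a load whose pointer is provably the fresh allocation
// itself gets a Def on the allocation, e.g.
//   %p = alloca i32
//   %v = load i32, i32* %p   ; Def(%p): uninitialized, so %v may fold to undef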
MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache =
          getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                   QueryParent, QueryInst, nullptr);
    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                         ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
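// Illustrative client pattern (a sketch, not tied to any particular pass):
//   MemDepResult Dep = MD.getDependency(LI);
//   if (Dep.isDef())
//     ;// the value may be forwardable from Dep.getInst()
//   else if (Dep.isNonLocal())
//     ;// fall back to the non-local queries below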
#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    llvm::sort(Cache);

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //      << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCall->getParent();
    for (BasicBlock *Pred : PredCache.get(QueryBB))
      DirtyBlocks.push_back(Pred);
    ++NumUncacheNonLocal;
  }

  // If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  LLVM_DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, it
      // is non-func-local, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {
      // If the block *is* completely transparent to the load, we need to
      // check the predecessors of this block. Add them to our worklist.
      for (BasicBlock *Pred : PredCache.get(DirtyBB))
        DirtyBlocks.push_back(Pred);
    }
  }

  return Cache;
}
void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();
  {
    // Check if there is a cached Def with invariant.group.
    auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
    if (NonLocalDefIt != NonLocalDefsCache.end()) {
      Result.push_back(NonLocalDefIt->second);
      ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
          .erase(QueryInst);
      NonLocalDefsCache.erase(NonLocalDefIt);
      return;
    }
  }
  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      return !LI->isUnordered();
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      return !SI->isUnordered();
    return false;
  };
  if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
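// Illustrative use from a client pass (a sketch):
//   SmallVector<NonLocalDepResult, 16> Deps;
//   MD.getNonLocalPointerDependency(LI, Deps);
//   for (const NonLocalDepResult &D : Deps)
//     ;// inspect D.getBB() and D.getResult() per predecessor block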
/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  bool isInvariantLoad = false;

  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // Use a cached result for an invariant load only if the ordinary load is
  // known to have no dependency at all (NonFuncLocal); in that case the
  // invariant load cannot have any dependency either.
  if (ExistingResult && isInvariantLoad &&
      !ExistingResult->getResult().isNonFuncLocal())
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);

  // Don't cache results for invariant load.
  if (isInvariantLoad)
    return Dep;

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    llvm::sort(Cache);
    break;
  }
}
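// For illustration: if the cache held the sorted entries [A, C, E] and the
// scan appended D, the `case 1` path above performs one binary search and one
// insertion to get [A, C, D, E], which is cheaper than re-sorting the array.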
/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  bool isInvariantLoad = false;
  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  // Invariant loads don't participate in caching, so there is nothing to
  // reconcile for them.
  if (!isInvariantLoad && !Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      bool ThrowOutEverything;
      if (CacheInfo->Size.hasValue() && Loc.Size.hasValue()) {
        // FIXME: We may be able to do better in the face of results with mixed
        // precision. We don't appear to get them in practice, though, so just
        // be conservative.
        ThrowOutEverything =
            CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
            CacheInfo->Size.getValue() < Loc.Size.getValue();
      } else {
        // For our purposes, unknown size > all others.
        ThrowOutEverything = !Loc.Size.hasValue();
      }

      if (ThrowOutEverything) {
        // The query's Size is greater than the cached one. Throw out the
        // cached data and proceed with the query at the greater size.
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->Size = Loc.Size;
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      } else {
        // This query's Size is less than the cached one. Conservatively
        // restart the query using the greater size.
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
            StartBB, Result, Visited, SkipFirstBlock, IsIncomplete);
      }
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB,
            Result, Visited, SkipFirstBlock, IsIncomplete);
    }
  }
  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  // Don't use cached information for invariant loads since it is valid for
  // non-invariant loads only.
  if (!IsIncomplete && !isInvariantLoad &&
      CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set. However, we have to verify
    // that we don't already have conflicting results for these blocks. Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal())
        continue;

      if (DT.isReachableFromEntry(Entry.getBB()))
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into
  // it than its valid cache info. If empty and not explicitly indicated as
  // incomplete, the result will be valid cache info, otherwise it isn't.
  //
  // Invariant loads don't affect the cache in any way, so there is no need to
  // update CacheInfo either.
  if (!isInvariantLoad) {
    if (!IsIncomplete && Cache->empty())
      CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
    else
      CacheInfo->Pair = BBSkipFirstBlockPair();
  }

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted. The entries we add we only sort on demand (we
  // don't insert every element into its sorted position). We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  LLVM_DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we do process a large number of blocks, it becomes very expensive
    // and likely not worth worrying about.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size())
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
                                                 Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to
    // do phi translation to change it into a value live in the predecessor
    // block. If not, we just add the predecessors to the worklist and scan
    // them with the same Pointer.
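    // For illustration: with IR such as
    //   bb:  %p = phi i32* [ %a, %pred1 ], [ %b, %pred2 ]
    //        %v = load i32, i32* %p
    // the query pointer %p phi-translates to %a when the scan continues into
    // %pred1, and to %b in %pred2.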
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer, then we have a phi translation failure and we have to
        // treat this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time we can't phi
    // translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the
    // cache value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // it is not translatable, in which case getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup. This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to
    // avoid calling getNonLocalPointerDepFromBB for blocks we don't want to
    // return any results for. (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor. We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value. Consider PHI translating something like:
      //   X = phi [x, bb1], [y, bb2].
      // PHI translating for bb1 doesn't *need* to recurse here, pedantically
      // speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey
        // won't include all of the entries that we need to immediately
        // satisfy future queries. Mark this in NonLocalPointerDeps by
        // setting the BBSkipFirstBlockPair pointer to null. This causes
        // later reuse of the cached value to do more work, but ensures the
        // phi translation failure isn't missed.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();
    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block. It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of
    // the results for the query. This is ok (we can still use it to
    // accelerate specific block queries) but we can't do the fastpath
    // "return all results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value. Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return false;

    // Results of invariant loads are not cached, so there is no cached
    // information to update.
    if (!isInvariantLoad) {
      for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
        if (I.getBB() != BB)
          continue;

        assert((GotWorklistLimit || I.getResult().isNonLocal() ||
                !DT.isReachableFromEntry(BB)) &&
               "Should only be here with transparent block");

        I.setResult(MemDepResult::getUnknown());

        break;
      }
    }
    (void)GotWorklistLimit;
    // Go ahead and report unknown dependence.
    Result.push_back(
        NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr()));
  }

  // Okay, we're done now. If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  LLVM_DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {

  // Most of the time this cache is empty.
  if (!NonLocalDefsCache.empty()) {
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
    }

    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
      }
    }
  }

  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}
/// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {

  // Most of the time this cache is empty.
  if (!NonLocalDefsCache.empty()) {
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
    }

    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
      }
    }
  }

  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
  // Invalidate phis that use the pointer.
  PV.invalidateValue(Ptr);
}

void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}

void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached dependencies on this instruction, remove them.
  //
  // If the instruction is a pointer, remove it from both the load info and
  // the store info.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  } else {
    // Otherwise, if the instruction is in the map directly, it must be a
    // load. Remove it.
    auto toRemoveIt = NonLocalDefsCache.find(RemInst);
    if (toRemoveIt != NonLocalDefsCache.end()) {
      assert(isa<LoadInst>(RemInst) &&
             "only load instructions should be added directly");
      const Instruction *DepV = toRemoveIt->second.getResult().getInst();
      ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
      NonLocalDefsCache.erase(toRemoveIt);
    }
  }

  // Loop over all of the things that depend on the instruction we're
  // removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other
  // values, we need to replace its entry with a dirty version of the
  // instruction after it. If RemInst is a terminator, we use a null dirty
  // value.
  //
  // Using a dirty version of the instruction after RemInst saves having to
  // scan the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());

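  // For illustration only (hypothetical block, not this function's input):
  // if RemInst is the store in
  //
  //   store i32 0, i32* %p     ; RemInst
  //   %v = load i32, i32* %p   ; has a cached local dep on the store
  //
  // then NewDirtyVal is getDirty(%v), the instruction after RemInst, so a
  // later query for the load re-scans only from that point instead of
  // recomputing the whole block.
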
  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDirtyVal's
      // instruction.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

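  // Note on the invariant relied upon above and below (descriptive only): if
  // a cached result maps some instruction A to a Def/Clobber at instruction
  // B, then the corresponding reverse map entry for B contains A. That single
  // lookup is what lets removal of RemInst find every cached result that must
  // be dirtied, without walking all of the caches.
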
  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      llvm::sort(NLPDI);
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  // Invalidate phis that use the removed instruction.
  PV.invalidateValue(RemInst);

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  LLVM_DEBUG(verifyRemoved(RemInst));
}

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}

AnalysisKey MemoryDependenceAnalysis::Key;

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : DefaultBlockScanLimit(BlockScanLimit) {}

MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &PV = AM.getResult<PhiValuesAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT, PV, DefaultBlockScanLimit);
}

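// Illustrative new pass manager usage (a sketch; `MyPass` and `L` are
// hypothetical names, not defined in LLVM):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
//     MemDepResult Dep = MD.getDependency(L); // L: a load, store, or call.
//     if (Dep.isDef())
//       /* ... */;
//     return PreservedAnalyses::all();
//   }
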
char MemoryDependenceWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;

void MemoryDependenceWrapperPass::releaseMemory() {
  MemDep.reset();
}

void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<PhiValuesWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

bool MemoryDependenceResults::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  if (Inv.invalidate<AAManager>(F, PA) ||
      Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
      Inv.invalidate<PhiValuesAnalysis>(F, PA))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
  return DefaultBlockScanLimit;
}

bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &PV = getAnalysis<PhiValuesWrapperPass>().getResult();
  MemDep.emplace(AA, AC, TLI, DT, PV, BlockScanLimit);
  return false;
}
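
// Illustrative legacy pass manager usage of the wrapper pass above (a sketch;
// `MyLegacyPass` is hypothetical):
//
//   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemoryDependenceWrapperPass>();
//   }
//
//   bool MyLegacyPass::runOnFunction(Function &F) {
//     MemoryDependenceResults &MD =
//         getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
//     // ... query MD, e.g. MD.getDependency(...), as needed ...
//     return false;
//   }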