//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;
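
// Editor's note (illustrative, not from the original source): since the two
// limits above are cl::opt flags, they can be tuned from any cl-enabled
// driver, e.g.
//   opt -memdep-block-scan-limit=200 -memdep-block-number-limit=500 ...
// Raising the limits buys precision at the cost of compile time.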

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation::getAfter(CI->getArgOperand(0));
    return ModRefInfo::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::masked_load:
      Loc = MemoryLocation::getForArgument(II, 0, TLI);
      return ModRefInfo::Ref;
    case Intrinsic::masked_store:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}
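
// Illustrative example (editor's sketch, not part of the analysis): given
//   %v = load i32, i32* %p     ; simple load
//   store i32 %v, i32* %q      ; simple store
// GetLocation fills Loc with the 4-byte location at %p (resp. %q) and returns
// Ref for the load and Mod for the store; ordered atomics stronger than
// monotonic get an empty location and a conservative ModRef answer.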

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = getDefaultBlockScanLimit();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;
    // Debug intrinsics don't cause dependences and should not affect Limit.
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If these two calls do not interfere, look past it.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. CallB is readnone)
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
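
// Illustrative sketch (editor's note, not from the source): for
//   %a = call i32 @pure(i32 %x)    ; readonly, no interfering accesses
//   %b = call i32 @pure(i32 %x)
// scanning backwards from %b reaches %a; two read-only calls do not interact
// (NoModRef), and because they are identical, %a is returned as a Def, which
// lets a client such as GVN eliminate %b as redundant.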

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant.group dependency indicates a non-local Def (it only
  // returns nonLocal if it finds a non-local Def), which is better than a
  // local clobber or anything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {

  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();

  // Take the pointer operand after all casts and GEPs with all-zero indices.
  // This way we only need to search the cast graph downward.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in a use list is unpredictable. In order to
  // always get the same result, we look for the closest instruction in the
  // dominance relation.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must be called with a non-null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  // FIXME: This loop is O(N^2) because dominates can be O(n) and in the worst
  // case we will see all the instructions. This should be fixed in MSSA.
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // A bitcast of Ptr (U = bitcast Ptr) still refers to the same memory.
      // Add it to the queue so that we also check its users.
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // A GEP with all-zero indices is equivalent to a bitcast.
      // FIXME: we are not sure whether bitcasts should be canonicalized to
      // gep 0 or gep 0 to bitcasts because of SROA, so both forms exist. Once
      // typeless pointers are ready, both cases will go away (and this BFS
      // won't be needed either).
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand), we can assume that the value pointed to by the
      // pointer operand didn't change.
      if ((isa<LoadInst>(U) ||
           (isa<StoreInst>(U) &&
            cast<StoreInst>(U)->getPointerOperand() == Ptr)) &&
          U->hasMetadata(LLVMContext::MD_invariant_group))
        ClosestDependency = GetClosestDependency(ClosestDependency, U);
    }
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // The Def is non-local, so it can't be returned here. Instead, return
  // nonLocal; if no local dependency is found, the user will call
  // getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}
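
// Illustrative IR (editor's sketch, not from the source): the walk above lets
//   store i32 42, i32* %p, !invariant.group !0
//   call void @f(i32* %p)                      ; may write to memory
//   %v = load i32, i32* %p, !invariant.group !0
// resolve %v to a Def on the store even across the clobbering call, because
// invariant.group promises the value at %p is the same for all accesses
// carrying the same metadata.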

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  // We can batch AA queries, because IR does not change during a MemDep query.
  BatchAAResults BatchAA(AA);
  bool isInvariantLoad = false;

  unsigned DefaultLimit = getDefaultBlockScanLimit();
  if (!Limit)
    Limit = &DefaultLimit;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program, however, is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->hasMetadata(LLVMContext::MD_invariant_load))
      isInvariantLoad = true;
  }

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };
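
  // Illustrative sketch (editor's note): a query load tagged
  //   %v = load i32, i32* %p, !invariant.load !0
  // asserts the location holds the same value wherever it is dereferenceable,
  // so the scan below ignores may-alias stores for it and only keeps
  // must-alias Defs, which remain useful for value forwarding.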

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      Intrinsic::ID ID = II->getIntrinsicID();
      switch (ID) {
      case Intrinsic::lifetime_start: {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(1));
        if (BatchAA.isMustAlias(ArgLoc, MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        MemoryLocation Loc;
        /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
        AliasResult R = BatchAA.alias(Loc, MemLoc);
        if (R == AliasResult::NoAlias)
          continue;
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(II);
        if (ID == Intrinsic::masked_load)
          continue;
        return MemDepResult::getClobber(II);
      }
      }
    }

    // Values depend on loads if the pointers are must-aliased. This means
    // that a load depends on another must-aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // safely be reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (QueryInst->isVolatile())
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasResult::NoAlias)
          continue;

        // Must-aliased loads are defs of each other.
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasResult::PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // May-aliased loads don't introduce a dependence on each other: one
        // load does not clobber another.
        continue;
      }

      // Stores don't depend on other non-aliasing accesses.
      if (R == AliasResult::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (BatchAA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can for example be
      // reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(BatchAA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must-alias: in this case, we want to return this as a def.
      // FIXME: Use the ModRefInfo::Must bit from the getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(StoreLoc, MemLoc);

      if (R == AliasResult::NoAlias)
        continue;
      if (R == AliasResult::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }
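
    // Illustrative sketch (editor's note): for a query load %v below
    //   store i32 0, i32* %p
    //   %v = load i32, i32* %p
    // the backwards scan reaches the store, the locations must-alias, and the
    // store is returned as a Def, which lets a client such as GVN forward the
    // stored value to the load.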

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef. Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
      if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/refs the pointer.
    ModRefInfo MR = BatchAA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (isModAndRefSet(MR))
      // TODO: Support callCapturesBefore() on BatchAAResults.
      MR = AA.callCapturesBefore(Inst, MemLoc, &DT);
    switch (clearMust(MR)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
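
// Usage sketch (editor's illustration; 'MD' and 'LI' are hypothetical): a
// client holding a MemoryDependenceResults &MD can query one instruction:
//   MemDepResult Dep = MD.getDependency(LI);
//   if (Dep.isDef())            // Dep.getInst() produces the queried value
//     ...;
//   else if (Dep.isNonLocal())  // consult the non-local interfaces below
//     ...;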

MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default-constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache =
          getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                   QueryParent, QueryInst, nullptr);
    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                         ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    llvm::sort(Cache);

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //      << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCall->getParent();
    append_range(DirtyBlocks, PredCache.get(QueryBB));
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  LLVM_DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.pop_back_val();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, the
      // result is non-func-local; otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block. Add them to our worklist.
      append_range(DirtyBlocks, PredCache.get(DirtyBB));
    }
  }

  return Cache;
}

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();
  {
    // Check if there is a cached Def with invariant.group.
    auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
    if (NonLocalDefIt != NonLocalDefsCache.end()) {
      Result.push_back(NonLocalDefIt->second);
      ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
          .erase(QueryInst);
      NonLocalDefsCache.erase(NonLocalDefIt);
      return;
    }
  }
  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
  if (QueryInst->isVolatile() || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}
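
// Usage sketch (editor's illustration; 'MD' and 'LI' are hypothetical):
//   SmallVector<NonLocalDepResult, 8> Deps;
//   MD.getNonLocalPointerDependency(LI, Deps);
//   for (const NonLocalDepResult &D : Deps)
//     ...; // D.getBB() plus D.getResult() describe the dep in each block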

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  bool isInvariantLoad = false;

  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // Use the cached result for an invariant load only if the corresponding
  // non-invariant load had no dependency (isNonFuncLocal); in that case the
  // invariant load cannot have one either.
  if (ExistingResult && isInvariantLoad &&
      !ExistingResult->getResult().isNonFuncLocal())
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);

  // Don't cache results for invariant loads.
  if (isInvariantLoad)
    return Dep;

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          llvm::upper_bound(Cache, Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    llvm::sort(Cache);
    break;
  }
}

/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
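///
/// Illustrative example of the PHI translation involved (editor's sketch,
/// not from the original doc): given
///   merge:
///     %p = phi i32* [ %a, %bb1 ], [ %b, %bb2 ]
///     %v = load i32, i32* %p
/// querying the predecessors requires translating %p to %a in %bb1 and to %b
/// in %bb2; each predecessor is then scanned with its own translated pointer.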
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // AA tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  bool isInvariantLoad = false;
  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  // Invariant loads don't participate in caching. Thus no need to reconcile.
  if (!isInvariantLoad && !Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      bool ThrowOutEverything;
      if (CacheInfo->Size.hasValue() && Loc.Size.hasValue()) {
        // FIXME: We may be able to do better in the face of results with mixed
        // precision. We don't appear to get them in practice, though, so just
        // be conservative.
        ThrowOutEverything =
            CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
            CacheInfo->Size.getValue() < Loc.Size.getValue();
      } else {
        // For our purposes, unknown size > all others.
        ThrowOutEverything = !Loc.Size.hasValue();
      }

      if (ThrowOutEverything) {
        // The query's Size is greater than the cached one. Throw out the
        // cached data and proceed with the query at the greater size.
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->Size = Loc.Size;
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      } else {
        // This query's Size is less than the cached one. Conservatively
        // restart the query using the greater size.
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
            StartBB, Result, Visited, SkipFirstBlock, IsIncomplete);
      }
    }
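
    // Worked example (editor's sketch): if a 4-byte query for %p was cached
    // and an 8-byte query for %p arrives, the cached data is thrown out and
    // the query proceeds at 8 bytes; conversely, a 4-byte query against an
    // 8-byte cache entry is restarted at the cached (larger) size, so one
    // conservative entry serves both access sizes.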

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
            Visited, SkipFirstBlock, IsIncomplete);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  // Don't use cached information for invariant loads since it is valid for
  // non-invariant loads only.
  if (!IsIncomplete && !isInvariantLoad &&
      CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return the
    // cached results and populate the visited set. However, we have to verify
    // that we don't already have conflicting results for these blocks. Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into
  // it than its valid cache info. If empty and not explicitly indicated as
  // incomplete, the result will be valid cache info, otherwise it isn't.
  //
  // Invariant loads don't affect the cache in any way, thus there is no need
  // to update CacheInfo either.
  if (!isInvariantLoad) {
    if (!IsIncomplete && Cache->empty())
      CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
    else
      CacheInfo->Pair = BBSkipFirstBlockPair();
  }

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted. The entries we add are only sorted on demand
  // (we don't insert every element into its sorted position). We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  LLVM_DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we do process a large number of blocks, the query becomes very
    // expensive and is likely not worth the effort.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
                                                 Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer, then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we can't
    // phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the
    // cache value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // it is not translatable, in which case getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup. This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for. (getNonLocalPointerDepFromBB will modify our
    // datastructures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor. We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value. Consider PHI translating something like:
      //   X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy
        // future queries. Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null. This causes later uses of the
        // cached value to do more work, but ensures they do not miss the phi
        // translation failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block. It assumes that we haven't modified any of
    // our datastructures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of
    // the results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value. Since we can't phi translate to one of the
    // predecessors, we have to bail out.
    if (SkipFirstBlock)
      return false;

    // Results of invariant loads are not cached, so there is no need to
    // update the cached information.
    if (!isInvariantLoad) {
      for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
        if (I.getBB() != BB)
          continue;

        assert((GotWorklistLimit || I.getResult().isNonLocal() ||
                !DT.isReachableFromEntry(BB)) &&
               "Should only be here with transparent block");

        I.setResult(MemDepResult::getUnknown());

        break;
      }
    }
    (void)GotWorklistLimit;
    // Go ahead and report unknown dependence.
    Result.push_back(
        NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr()));
  }

  // Okay, we're done now. If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  LLVM_DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {

  // Most of the time this cache is empty.
  if (!NonLocalDefsCache.empty()) {
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
    }

    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
      }
    }
  }

  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
  // Invalidate phis that use the pointer.
  PV.invalidateValue(Ptr);
}

void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}
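
// Usage sketch (illustrative, not a verbatim client of this file): a
// transform that deletes an instruction is expected to call
//   MD.removeInstruction(I);
// before erasing I from its parent block, so that no cached query result is
// left pointing at freed memory.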

void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached dependencies on this instruction, remove them.

  // If the instruction is a pointer, remove it from both the load info and
  // the store info.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  } else {
    // Otherwise, if the instruction is in the map directly, it must be a
    // load. Remove it.
    auto toRemoveIt = NonLocalDefsCache.find(RemInst);
    if (toRemoveIt != NonLocalDefsCache.end()) {
      assert(isa<LoadInst>(RemInst) &&
             "only load instructions should be added directly");
      const Instruction *DepV = toRemoveIt->second.getResult().getInst();
      ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
      NonLocalDefsCache.erase(toRemoveIt);
    }
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other
  // values, we need to replace its entry with a dirty version of the
  // instruction after it. If RemInst is a terminator, we use a null dirty
  // value.
  //
  // Using a dirty version of the instruction after RemInst saves having to
  // scan the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDepInst.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);
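
    // Worked example (illustrative): if RemInst was a store and some load L
    // had cached a Def dependence on it, L's entry now becomes a dirty marker
    // on the instruction after RemInst, so the next query for L rescans from
    // that point instead of from the end of the block.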

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      llvm::sort(NLPDI);
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  // Invalidate phis that use the removed instruction.
  PV.invalidateValue(RemInst);

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  LLVM_DEBUG(verifyRemoved(RemInst));
}
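
// Post-condition sketch (our summary): once removeInstruction(RemInst)
// returns, RemInst appears in none of the forward or reverse maps; this is
// exactly what verifyRemoved() checks in debug builds.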

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}

AnalysisKey MemoryDependenceAnalysis::Key;

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : DefaultBlockScanLimit(BlockScanLimit) {}

MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &PV = AM.getResult<PhiValuesAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT, PV, DefaultBlockScanLimit);
}

char MemoryDependenceWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;

void MemoryDependenceWrapperPass::releaseMemory() { MemDep.reset(); }
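
// Usage sketch (illustrative): under the new pass manager a client obtains
// this analysis with
//   auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
// whereas a legacy pass declares
//   AU.addRequired<MemoryDependenceWrapperPass>();
// and then calls getAnalysis<MemoryDependenceWrapperPass>().getMemDep().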

void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<PhiValuesWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

bool MemoryDependenceResults::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  if (Inv.invalidate<AAManager>(F, PA) ||
      Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
      Inv.invalidate<PhiValuesAnalysis>(F, PA))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
  return DefaultBlockScanLimit;
}

bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &PV = getAnalysis<PhiValuesWrapperPass>().getResult();
  MemDep.emplace(AA, AC, TLI, DT, PV, BlockScanLimit);
  return false;
}